搜档网
当前位置:搜档网 › FFMPEG+SDL2.0解码视频播放器

FFMPEG+SDL2.0解码视频播放器

FFMPEG+SDL2.0解码视频播放器
FFMPEG+SDL2.0解码视频播放器

#include "stdafx.h"

extern "C"{

#include "libavcodec\avcodec.h"

#include "libavformat\avformat.h"

#include "libswscale\swscale.h"

#include "sdl\SDL.h"

#include "sdl\SDL_test_images.h"

#include "sdl\SDL_thread.h"

}

// NOTE(review): the header names after these #include directives were lost in
// extraction; <iostream> is needed by the cout in the (commented-out)
// SaveFrame — confirm the second header against the original source.
#include <iostream>

#include <string>

/*

void SaveFrame(AVFrame *pFrame, int width, int height, int iFrame){

FILE *pFile;

char szFilename[32];

int y;

sprintf(szFilename, "frame%d.ppm", iFrame);

pFile = fopen(szFilename, "wb");

if (pFile == NULL){

cout << "open file:" << szFilename << " error." << endl;

return;

}

//Write header

}*/

//SDL2.lib;swscale.lib;postproc.lib;swresample.lib;avcodec.lib;avdevice.lib;avfilter.lib;avformat.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies)

int _tmain(int argc, char *agrv[]){

av_register_all(); //注册了所有的文件格式和编解码的库,它们将被自动的使用在被打开的合适格式的文件上

A VFormatContext *pFormatCtx;

pFormatCtx = avformat_alloc_context();

char filepath[] = "D:\\迅雷下载\\12.rmvb";

//Open an input stream and read the header

if (avformat_open_input(&pFormatCtx, filepath, NULL, NULL) != 0){

printf("Can't open the file\n");

SDL_Delay(500);

return -1;

}

//Retrieve stream information

if (avformat_find_stream_info(pFormatCtx, NULL) < 0){

printf("Couldn't find stream information.\n");

return -1;

}

int i, videoIndex;

A VCodecContext *pCodecCtx;

A VCodec *pCodec;

//Find the first video stream

videoIndex = -1;

for (i = 0; i < pFormatCtx->nb_streams; i++){//视音频流的个数

if (pFormatCtx->streams[i]->codec->codec_type == A VMEDIA_TYPE_VIDEO){ videoIndex = i;

break;

}

}

if (videoIndex == -1)

return -1;

//Get a pointer to the codec context for the video stream

//流中关于编解码器的信息就是被我们叫做"codec context"(编解码器上下文)

//的东西。这里面包含了流中所使用的关于编解码器的所有信

pCodecCtx = pFormatCtx->streams[videoIndex]->codec;

//Find the decoder for the video stream

pCodec = avcodec_find_decoder(pCodecCtx->codec_id);

if (pCodec == NULL){

printf("Unsupported codec!\n");

return -1;

}

//Open codec

if (avcodec_open2(pCodecCtx, pCodec, NULL) < 0){

printf("Could not open codec.\n");

return -1;

}

//allocate video frame and set its fileds to default value

A VFrame *pFrame, *pFrameYUV;

pFrame = av_frame_alloc();

pFrameYUV = av_frame_alloc();

//即使我们申请了一帧的内存,当转换的时候,我们仍然需要一个地方来放置原始

//的数据。我们使用avpicture_get_size 来获得我们需要的大小,然后手工申请

//内存空间:

uint8_t *out_buffer;

int numBytes;

numBytes = avpicture_get_size(PIX_FMT_YUV420P, pCodecCtx->width, pCodecCtx->height);

//av_malloc 是ffmpeg 的malloc,用来实现一个简单的malloc 的包装,这样来保

//证内存地址是对齐的(4 字节对齐或者2 字节对齐)。它并不能保护你不被内

//存泄漏,重复释放或者其它malloc 的问题所困扰。

out_buffer = (uint8_t *)av_malloc(numBytes*sizeof(uint8_t));

//Assign appropriate parts of buffer to image planes in pFrameYUV

//Note that pFrameYUV is an A VFrame, but A VFrame is a superset of A VPicture

avpicture_fill((A VPicture*)pFrameYUV, out_buffer, PIX_FMT_YUV420P, pCodecCtx->width, pCodecCtx->height);

//----------------SDL--------------------------------------//

if (SDL_Init(SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER)){ printf("Could not initialize SDL -%s\n", SDL_GetError());

exit(1);

}

SDL_Window *window = nullptr;

window = SDL_CreateWindow("MyPlayer", SDL_WINDOWPOS_CENTERED, SDL_WINDOWPOS_CENTERED,

pCodecCtx->width, pCodecCtx->height, SDL_WINDOW_SHOWN);

if (!window){

printf("Could not initialize SDL -%s\n", SDL_GetError());

return 1;

}

SDL_Renderer *ren = nullptr;

ren = SDL_CreateRenderer(window, -1, SDL_RENDERER_ACCELERATED | SDL_RENDERER_PRESENTVSYNC);

if (ren == nullptr){

printf("%s\n", SDL_GetError() );

return -1;

}

SDL_Texture *texture = nullptr;

texture = SDL_CreateTexture(ren, SDL_PIXELFORMA T_YV12,

SDL_TEXTUREACCESS_STREAMING, pCodecCtx->width, pCodecCtx->height);

SDL_Rect rect;

rect.x = 0, rect.y = 0;

rect.w = pCodecCtx->width;

rect.h = pCodecCtx->height;

//*************************************************************//

//通过读取包来读取整个视频流,然后把它解码成帧,最后转换格式并且保存

int frameFinished;

//int psize = pCodecCtx->width * pCodecCtx->height;

A VPacket packet;

av_new_packet(&packet, numBytes);

//output file information

printf(" ---------------------------------\n");

av_dump_format(pFormatCtx, 0, filepath, 0);

printf("-----------------------------------------\n");

i = 0;

int ret;

static struct SwsContext *img_convert_ctx;

img_convert_ctx = sws_getContext(pCodecCtx->width, pCodecCtx->height,

pCodecCtx->pix_fmt, pCodecCtx->width, pCodecCtx->height, PIX_FMT_YUV420P,

SWS_BICUBIC, NULL, NULL, NULL);

//Read the next frame of a stream

while (av_read_frame(pFormatCtx, &packet) >= 0){

//Is this a packet from the video stream?

if (packet.stream_index == videoIndex){

//decode video frame of size packet.size from packet.data into picture

ret = avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished, &packet);

//Did we get a video frame?

if (ret >= 0){

//Convert the image from its native format to YUV

if (frameFinished){

sws_scale(img_convert_ctx, (const uint8_t* const*)pFrame->data,

pFrame->linesize, 0, pCodecCtx->height, pFrameYUV->data, pFrameYUV->linesize);

SDL_UpdateYUVTexture(texture, &rect, pFrameYUV->data[0], pFrameYUV->linesize[0],

pFrameYUV->data[1], pFrameYUV->linesize[1], pFrameYUV->data[2], pFrameYUV->linesize[2]);

SDL_RenderClear(ren);

SDL_RenderCopy(ren, texture, &rect, &rect);

SDL_RenderPresent(ren);

}

SDL_Delay(50);

}

else{

printf("%s\n", "decode error");

return -1;

}

}

}

av_free_packet(&packet);

SDL_Event event;

SDL_PollEvent(&event);

switch (event.type){

case SDL_QUIT:

SDL_Quit();

exit(0);

break;

default:

break;

}

SDL_DestroyTexture(texture);

av_frame_free(&pFrame);

av_frame_free(&pFrameYUV);

avcodec_close(pCodecCtx);

avformat_close_input(&pFormatCtx);

return 0;

}

相关主题