#include <Windows.h>
#include <thread>
#include <mutex>
#include <future>
#include <gdiplus.h>
extern "C" {
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libavutil/avutil.h>
#include <libswscale/swscale.h>
}
#pragma comment(lib, "avcodec.lib")
#pragma comment(lib, "avformat.lib")
#pragma comment(lib, "avutil.lib")
#pragma comment(lib, "swscale.lib")
#pragma comment(lib, "gdiplus.lib")
// Most recently converted RGB32 frame. Written by the decoder thread,
// read by the WM_PAINT handler; guarded by packetLock. Reassigned each
// frame, but always points at the same buffer once decoding starts.
AVFrame* curFrame = nullptr;
// Width of curFrame in pixels; constant once decoding starts.
unsigned int frameWidth = 0;
// Height of curFrame in pixels; constant once decoding starts.
unsigned int frameHeight = 0;
// Synchronizes access to curFrame / curFrame->data between the decoder
// thread and the UI thread.
std::mutex packetLock;
// Handle of the application's main window.
HWND hWnd = nullptr;
// Tells the decoder thread to finish as soon as possible.
// BUGFIX: explicitly initialized — before C++20 a default-constructed
// std::atomic_bool holds an indeterminate value.
std::atomic_bool isRunning{ false };
/// Decoder thread: reads packets from `context`, decodes the stream with
/// index `videoStream` via `codecCtx`, converts each picture to RGB32 and
/// publishes it in the global `curFrame`, then asks the window to repaint.
/// Runs until `isRunning` becomes false or the stream ends.
void decodeThread(AVCodecContext* codecCtx, AVFormatContext* context, int videoStream) {
    // videoFrame receives the raw decoded picture in the codec's native format.
    AVFrame* videoFrame = av_frame_alloc();
    // imageFrame holds the RGB32 conversion that the UI thread paints from.
    AVFrame* imageFrame = av_frame_alloc();
    // This format works for my test video. NOTE(review): overriding pix_fmt
    // after avcodec_open2 is fragile — the decoder-reported value should
    // normally be trusted; h264 is commonly but not always YUV420P.
    codecCtx->pix_fmt = AV_PIX_FMT_YUV420P;
    // Allocate one 32bpp RGB buffer that imageFrame will point into.
    int size = avpicture_get_size(PIX_FMT_RGB32, codecCtx->width, codecCtx->height);
    unsigned char* buffer = (unsigned char*) av_malloc(size);
    // Wire buffer into imageFrame's data/linesize pointers.
    avpicture_fill((AVPicture*) imageFrame, buffer, PIX_FMT_RGB32, codecCtx->width, codecCtx->height);
    // Scaler filter choice is the usual accuracy-vs-performance trade-off.
    auto swsCtx = sws_getContext(codecCtx->width, codecCtx->height, codecCtx->pix_fmt, codecCtx->width, codecCtx->height, PIX_FMT_RGB32, SWS_FAST_BILINEAR, nullptr, nullptr, nullptr);
    AVPacket packet;
    while (isRunning && av_read_frame(context, &packet) >= 0) {
        // Skip audio/subtitle/etc. packets.
        if (packet.stream_index != videoStream) {
            av_free_packet(&packet);
            continue;
        }
        int hasImage = 0;
        avcodec_decode_video2(codecCtx, videoFrame, &hasImage, &packet);
        if (hasImage != 0) {
            packetLock.lock();
            // Inside the critical section: convert videoFrame into imageFrame.
            sws_scale(swsCtx, videoFrame->data, videoFrame->linesize, 0, codecCtx->height, imageFrame->data, imageFrame->linesize);
            frameWidth = codecCtx->width;
            frameHeight = codecCtx->height;
            // BUGFIX: publish curFrame while still holding the lock. It was
            // previously assigned after unlock(), racing with the WM_PAINT
            // handler that reads curFrame/frameWidth/frameHeight.
            curFrame = imageFrame;
            packetLock.unlock();
            RECT dirtyRect = { 0 };
            GetClientRect(hWnd, &dirtyRect);
            // Request a synchronous repaint of the whole client area.
            InvalidateRect(hWnd, &dirtyRect, FALSE);
            UpdateWindow(hWnd);
        }
        av_free_packet(&packet);
    }
    // BUGFIX: retract curFrame under the lock before freeing its storage,
    // otherwise a late WM_PAINT could read freed memory.
    packetLock.lock();
    curFrame = nullptr;
    packetLock.unlock();
    av_frame_free(&videoFrame);
    av_frame_free(&imageFrame);
    av_free(buffer);
    sws_freeContext(swsCtx);
}
/// Window procedure for the video window. WM_PAINT blits the latest decoded
/// frame (curFrame) into the client area, scaled to fit; WM_CLOSE ends the
/// message loop. Everything else goes to DefWindowProc.
LRESULT CALLBACK WndProc(HWND hWindow, UINT uMsg, WPARAM wParam, LPARAM lParam) {
    switch (uMsg) {
    case WM_CLOSE:
        PostQuitMessage(0);
        break;
    case WM_PAINT:
    {
        PAINTSTRUCT ps;
        HDC hDc = BeginPaint(hWindow, &ps);
        // BUGFIX: read curFrame/frameWidth/frameHeight only under the lock —
        // the decoder thread updates them concurrently.
        packetLock.lock();
        if (curFrame == nullptr) {
            packetLock.unlock();
            EndPaint(hWindow, &ps);
            return 0;
        }
        const unsigned int w = frameWidth;
        const unsigned int h = frameHeight;
        Gdiplus::Bitmap bmp(w, h, PixelFormat32bppARGB);
        Gdiplus::BitmapData data;
        Gdiplus::Rect lockRect(0, 0, (INT) w, (INT) h);
        // BUGFIX: pass ImageLockModeWrite (was 0) and copy row by row —
        // GDI+'s data.Stride and FFmpeg's linesize[0] are padded
        // independently, so a single memcpy of h * linesize bytes can
        // overflow or shear the image when the strides differ.
        if (bmp.LockBits(&lockRect, Gdiplus::ImageLockModeWrite, PixelFormat32bppARGB, &data) == Gdiplus::Ok) {
            const unsigned char* src = curFrame->data[0];
            const int srcStride = curFrame->linesize[0];
            unsigned char* dst = (unsigned char*) data.Scan0;
            const int rowBytes = (int) w * 4; // 32bpp => 4 bytes per pixel
            for (unsigned int y = 0; y < h; ++y) {
                memcpy(dst + (size_t) y * data.Stride, src + (size_t) y * srcStride, rowBytes);
            }
            bmp.UnlockBits(&data);
        }
        packetLock.unlock();
        // Stack-allocated Graphics (RAII) instead of FromHDC + manual delete.
        Gdiplus::Graphics g(hDc);
        RECT rcDst = { 0 };
        GetClientRect(hWindow, &rcDst);
        // Stretch the frame to fill the client area.
        g.DrawImage(&bmp, 0, 0, rcDst.right - rcDst.left, rcDst.bottom - rcDst.top);
        g.Flush();
        EndPaint(hWindow, &ps);
    }
    return 0;
    }
    return DefWindowProc(hWindow, uMsg, wParam, lParam);
}
int main() {
ULONG_PTR token;
Gdiplus::GdiplusStartupInput input;
Gdiplus::GdiplusStartup(&token, &input, nullptr);
av_register_all();
AVFormatContext* context = nullptr;
auto res = avformat_open_input(&context, "Lush & Simon feat. Rico & Miella - Drag Me To The Ground (Official Music Video).mp4", nullptr, nullptr);
if (res != 0) {
return -1;
}
int videoStream = -1;
for (int i = 0; i < context->nb_streams; ++i) {
if (context->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
videoStream = i;
break;
}
}
if (videoStream < 0) {
return -1;
}
AVCodecContext* codecCtx = context->streams[videoStream]->codec;
AVCodec* codec = avcodec_find_decoder(codecCtx->codec_id);
avcodec_open2(codecCtx, codec, nullptr);
WNDCLASS wc = { 0 };
wc.hCursor = LoadCursor(nullptr, IDC_ARROW);
wc.hIcon = LoadIcon(nullptr, IDI_APPLICATION);
wc.hInstance = GetModuleHandle(nullptr);
wc.lpfnWndProc = WndProc;
wc.lpszClassName = "x264WindowClass";
wc.style = CS_HREDRAW | CS_VREDRAW;
RegisterClass(&wc);
hWnd = CreateWindow("x264WindowClass", "x264", WS_OVERLAPPEDWINDOW, CW_USEDEFAULT, CW_USEDEFAULT, 960, 540, nullptr, nullptr, wc.hInstance, nullptr);
ShowWindow(hWnd, SW_SHOW);
isRunning = true;
std::thread decoder(std::bind(&decodeThread, codecCtx, context, videoStream));
MSG msg;
while (GetMessage(&msg, nullptr, 0, 0) > 0) {
TranslateMessage(&msg);
DispatchMessage(&msg);
}
isRunning = false;
decoder.join();
avcodec_close(codecCtx);
avformat_close_input(&context);
}