[video_core] ffmpeg 7.1.1 fix (#224)

Simplified the decode path and added an AVERROR_EOF check, which seems to do the trick.
Fixes crashes in Blasphemous 2, Burnout Paradise, and more.
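
As a hedged illustration (standalone helpers with assumed names, not Eden's actual DecoderContext wrappers), this is the FFmpeg send/receive pattern the change adopts: AVERROR_EOF from avcodec_send_packet is treated as benign instead of as a decode failure.

extern "C" {
#include <libavcodec/avcodec.h>
}

// Sketch of the send side: AVERROR_EOF only means the decoder has already
// been flushed and is draining, so it is not reported as an error.
bool SendPacket(AVCodecContext* ctx, const AVPacket* pkt) {
    const int ret = avcodec_send_packet(ctx, pkt);
    return ret >= 0 || ret == AVERROR_EOF;
}

// Sketch of the receive side: a negative return (including AVERROR(EAGAIN)
// and AVERROR_EOF) simply means no frame is available right now.
bool ReceiveFrame(AVCodecContext* ctx, AVFrame* frame) {
    return avcodec_receive_frame(ctx, frame) >= 0;
}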

Co-authored-by: MaranBr <maranbr@outlook.com>
Co-authored-by: MaranBr <maranbr@fake.com>
Reviewed-on: https://git.eden-emu.dev/eden-emu/eden/pulls/224
Co-authored-by: Maufeat <sahyno1996@gmail.com>
Co-committed-by: Maufeat <sahyno1996@gmail.com>
Authored by Maufeat on 2025-06-28 13:34:32 +00:00, committed by JPikachu
commit c88d0b3967
parent dceb3c2206

@@ -34,7 +34,7 @@ constexpr std::array PreferredGpuDecoders = {
     AV_HWDEVICE_TYPE_VAAPI,
     AV_HWDEVICE_TYPE_VDPAU,
 #endif
-    AV_HWDEVICE_TYPE_VULKAN
+    AV_HWDEVICE_TYPE_VULKAN,
 };
 
 AVPixelFormat GetGpuFormat(AVCodecContext* codec_context, const AVPixelFormat* pix_fmts) {
@@ -184,7 +184,6 @@ bool HardwareContext::InitializeWithType(AVHWDeviceType type) {
 DecoderContext::DecoderContext(const Decoder& decoder) : m_decoder{decoder} {
     m_codec_context = avcodec_alloc_context3(m_decoder.GetCodec());
-    av_opt_set(m_codec_context->priv_data, "preset", "veryfast", 0);
     av_opt_set(m_codec_context->priv_data, "tune", "zerolatency", 0);
     m_codec_context->thread_count = 0;
     m_codec_context->thread_type &= ~FF_THREAD_FRAME;
@@ -218,17 +217,7 @@ bool DecoderContext::SendPacket(const Packet& packet) {
     m_temp_frame = std::make_shared<Frame>();
     m_got_frame = 0;
-    if (!m_codec_context->hw_device_ctx && m_codec_context->codec_id == AV_CODEC_ID_H264) {
-        m_decode_order = true;
-        auto* codec{ffcodec(m_decoder.GetCodec())};
-        if (const int ret = codec->cb.decode(m_codec_context, m_temp_frame->GetFrame(), &m_got_frame, packet.GetPacket()); ret < 0) {
-            LOG_DEBUG(Service_NVDRV, "avcodec_send_packet error {}", AVError(ret));
-            return false;
-        }
-        return true;
-    }
-    if (const int ret = avcodec_send_packet(m_codec_context, packet.GetPacket()); ret < 0) {
+    if (const int ret = avcodec_send_packet(m_codec_context, packet.GetPacket()); ret < 0 && ret != AVERROR_EOF) {
         LOG_ERROR(HW_GPU, "avcodec_send_packet error: {}", AVError(ret));
         return false;
     }
@@ -237,64 +226,35 @@ bool DecoderContext::SendPacket(const Packet& packet) {
 }
 
 std::shared_ptr<Frame> DecoderContext::ReceiveFrame() {
-    if (!m_codec_context->hw_device_ctx && m_codec_context->codec_id == AV_CODEC_ID_H264) {
-        m_decode_order = true;
-        auto* codec{ffcodec(m_decoder.GetCodec())};
-        int ret{0};
-        if (m_got_frame == 0) {
-            Packet packet{{}};
-            auto* pkt = packet.GetPacket();
-            pkt->data = nullptr;
-            pkt->size = 0;
-            ret = codec->cb.decode(m_codec_context, m_temp_frame->GetFrame(), &m_got_frame, pkt);
-            m_codec_context->has_b_frames = 0;
-        }
-        if (m_got_frame == 0 || ret < 0) {
-            LOG_ERROR(Service_NVDRV, "Failed to receive a frame! error {}", ret);
-            return {};
-        }
-    } else {
-        const auto ReceiveImpl = [&](AVFrame* frame) {
-            if (const int ret = avcodec_receive_frame(m_codec_context, frame); ret < 0) {
-                LOG_ERROR(HW_GPU, "avcodec_receive_frame error: {}", AVError(ret));
-                return false;
-            }
-            return true;
-        };
-        if (m_codec_context->hw_device_ctx) {
-            // If we have a hardware context, make a separate frame here to receive the
-            // hardware result before sending it to the output.
-            Frame intermediate_frame;
-            if (!ReceiveImpl(intermediate_frame.GetFrame())) {
-                return {};
-            }
-            m_temp_frame->SetFormat(PreferredGpuFormat);
-            if (const int ret = av_hwframe_transfer_data(m_temp_frame->GetFrame(), intermediate_frame.GetFrame(), 0); ret < 0) {
-                LOG_ERROR(HW_GPU, "av_hwframe_transfer_data error: {}", AVError(ret));
-                return {};
-            }
-        } else {
-            // Otherwise, decode the frame as normal.
-            if (!ReceiveImpl(m_temp_frame->GetFrame())) {
-                return {};
-            }
-        }
-    }
-
-#if defined(FF_API_INTERLACED_FRAME) || LIBAVUTIL_VERSION_MAJOR >= 59
-    if (m_temp_frame->GetFrame()->flags & AV_FRAME_FLAG_INTERLACED)
-        m_temp_frame->GetFrame()->flags &= ~AV_FRAME_FLAG_INTERLACED;
-    else
-        m_temp_frame->GetFrame()->flags |= AV_FRAME_FLAG_INTERLACED;
-#else
-    m_temp_frame->GetFrame()->interlaced_frame = !m_temp_frame->GetFrame()->interlaced_frame;
-#endif
+    auto receive = [&](AVFrame* dst) -> bool {
+        if (const int ret = avcodec_receive_frame(m_codec_context, dst); ret < 0) {
+            LOG_ERROR(HW_GPU, "avcodec_receive_frame error: {}", AVError(ret));
+            return false;
+        }
+        return true;
+    };
+    if (m_codec_context->hw_device_ctx) {
+        // If we have a hardware context, make a separate frame here to receive the
+        // hardware result before sending it to the output.
+        Frame intermediate_frame;
+        if (!receive(intermediate_frame.GetFrame())) {
+            return {};
+        }
+        m_temp_frame->SetFormat(PreferredGpuFormat);
+        if (int ret = av_hwframe_transfer_data(m_temp_frame->GetFrame(), intermediate_frame.GetFrame(), 0); ret < 0) {
+            LOG_ERROR(HW_GPU, "av_hwframe_transfer_data error: {}", AVError(ret));
+            return {};
+        }
+    } else {
+        // Otherwise, decode the frame as normal.
+        if (!receive(m_temp_frame->GetFrame())) {
+            return {};
+        }
+    }
     return std::move(m_temp_frame);
 }
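
For reference, a minimal standalone sketch of the GPU receive path shown in the diff, assuming plain FFmpeg types rather than Eden's Frame/DecoderContext wrappers (the preferred_format parameter stands in for PreferredGpuFormat, e.g. AV_PIX_FMT_NV12): the decoded surface is received into an intermediate hardware frame, then copied to system memory with av_hwframe_transfer_data.

extern "C" {
#include <libavcodec/avcodec.h>
#include <libavutil/hwcontext.h>
}

// Hypothetical helper mirroring the hardware branch of ReceiveFrame.
AVFrame* ReceiveGpuFrame(AVCodecContext* ctx, AVPixelFormat preferred_format) {
    AVFrame* hw_frame = av_frame_alloc();   // decoded surface, still in GPU memory
    AVFrame* out_frame = av_frame_alloc();  // destination frame in system memory
    if (!hw_frame || !out_frame || avcodec_receive_frame(ctx, hw_frame) < 0) {
        av_frame_free(&hw_frame);
        av_frame_free(&out_frame);
        return nullptr;
    }
    // Request a specific software pixel format before the transfer.
    out_frame->format = preferred_format;
    if (av_hwframe_transfer_data(out_frame, hw_frame, 0) < 0) {
        av_frame_free(&hw_frame);
        av_frame_free(&out_frame);
        return nullptr;
    }
    av_frame_free(&hw_frame);
    return out_frame;
}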