我完全迷路了。我正在尝试抓取 30 个屏幕截图并将它们编码为 Windows 10 下的 FFMPEG 视频。它一直告诉我 [swscaler @ 073890a0] 错误的 src 图像指针(bad src image pointers)。结果视频完全是绿色的。如果我使用 video=screen-capture-recorder 将格式更改为 dshow,视频看起来大部分都是垃圾。这是我的简短代码。我完全被困住了,甚至不知道该往哪个方向看。
MainWindow.h
#ifndef MAINWINDOW_H
#define MAINWINDOW_H
#include <QMainWindow>
#include <QFuture>
#include <QFutureWatcher>
#include <QMutex>
#include <QMutexLocker>
extern "C" {
#include "libavcodec/avcodec.h"
#include "libavcodec/avfft.h"
#include "libavdevice/avdevice.h"
#include "libavfilter/avfilter.h"
#include "libavfilter/avfiltergraph.h"
#include "libavfilter/buffersink.h"
#include "libavfilter/buffersrc.h"
#include "libavformat/avformat.h"
#include "libavformat/avio.h"
#include "libavutil/opt.h"
#include "libavutil/common.h"
#include "libavutil/channel_layout.h"
#include "libavutil/imgutils.h"
#include "libavutil/mathematics.h"
#include "libavutil/samplefmt.h"
#include "libavutil/time.h"
#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"
#include "libavutil/file.h"
#include "libswscale/swscale.h"
}
// Captures the Windows desktop through FFmpeg's gdigrab input device and
// encodes the grabbed frames into an MPEG-4 file (see MainWindow.cpp).
class MainWindow : public QMainWindow
{
Q_OBJECT
public:
MainWindow(QWidget *parent = 0);
~MainWindow();
private:
// Demuxer for the gdigrab screen-capture input ("desktop").
AVFormatContext *inputFormatContext = nullptr;
// Muxer for the output video file.
AVFormatContext *outFormatContext = nullptr;
// Video stream created on outFormatContext by initOutFile().
AVStream* videoStream = nullptr;
// gdigrab input options (framerate, offsets, capture size, ...).
AVDictionary* options = nullptr;
// Encoder (MPEG-4) and decoder used by the pipeline.
AVCodec* outCodec = nullptr;
AVCodec* inputCodec = nullptr;
// NOTE: these point at stream->codec (deprecated API) — they are owned by
// the corresponding AVFormatContext, not by this class.
AVCodecContext* inputCodecContext = nullptr;
AVCodecContext* outCodecContext = nullptr;
// Converts captured frames to the encoder's pixel format/size.
SwsContext* swsContext = nullptr;
private:
void init();        // open and configure the gdigrab input
void initOutFile(); // open the output file and encoder
void collectFrame(); // capture/convert/encode loop
};
#endif // MAINWINDOW_H
MainWindow.cpp
#include "MainWindow.h"
#include <QGuiApplication>
#include <QLabel>
#include <QScreen>
#include <QTimer>
#include <QLayout>
#include <QImage>
#include <QtConcurrent/QtConcurrent>
#include <QThreadPool>
#include "ScreenCapture.h"
MainWindow::MainWindow(QWidget *parent) : QMainWindow(parent)
{
    resize(800, 600);

    // Build a centered label inside a horizontal layout as the central widget.
    auto displayLabel = new QLabel();
    displayLabel->setAlignment(Qt::AlignHCenter | Qt::AlignVCenter);

    auto centralLayout = new QHBoxLayout();
    centralLayout->addWidget(displayLabel);

    auto container = new QWidget();
    container->setLayout(centralLayout);
    setCentralWidget(container);

    // Set up the capture input, the output file, then run the capture loop.
    init();
    initOutFile();
    collectFrame();
}
MainWindow::~MainWindow()
{
    // Wait for any background tasks *before* tearing down the FFmpeg
    // contexts they might still be using (the original freed first).
    QThreadPool::globalInstance()->waitForDone();

    // avformat_close_input() closes, frees and nulls the input context, so
    // the original's extra avformat_free_context(inputFormatContext) call
    // was a redundant no-op on a null pointer.
    avformat_close_input(&inputFormatContext);

    // Release resources the original leaked: the scaler, the output file
    // handle, and the output muxer (which also frees videoStream and the
    // stream-owned outCodecContext).
    sws_freeContext(swsContext);
    swsContext = nullptr;
    if (outFormatContext) {
        if (!(outFormatContext->oformat->flags & AVFMT_NOFILE))
            avio_closep(&outFormatContext->pb);
        avformat_free_context(outFormatContext);
        outFormatContext = nullptr;
    }
    av_dict_free(&options);
}
void MainWindow::init()
{
av_register_all();
avcodec_register_all();
avdevice_register_all();
avformat_network_init();
auto screen = QGuiApplication::screens()[0];
QRect geometry = screen->geometry();
inputFormatContext = avformat_alloc_context();
options = NULL;
av_dict_set(&options, "framerate", "30", NULL);
av_dict_set(&options, "offset_x", QString::number(geometry.x()).toLatin1().data(), NULL);
av_dict_set(&options, "offset_y", QString::number(geometry.y()).toLatin1().data(), NULL);
av_dict_set(&options, "video_size", QString(QString::number(geometry.width()) + "x" + QString::number(geometry.height())).toLatin1().data(), NULL);
av_dict_set(&options, "show_region", "1", NULL);
AVInputFormat* inputFormat = av_find_input_format("gdigrab");
avformat_open_input(&inputFormatContext, "desktop", inputFormat, &options);
int videoStreamIndex = av_find_best_stream(inputFormatContext, AVMEDIA_TYPE_VIDEO, -1, -1, NULL, 0);
inputCodecContext = inputFormatContext->streams[videoStreamIndex]->codec;
inputCodecContext->width = geometry.width();
inputCodecContext->height = geometry.height();
inputCodecContext->pix_fmt = AV_PIX_FMT_YUV420P;
inputCodec = avcodec_find_decoder(inputCodecContext->codec_id);
avcodec_open2(inputCodecContext, inputCodec, NULL);
}
void MainWindow::initOutFile()
{
const char* filename = "C:/Temp/output.mp4";
avformat_alloc_output_context2(&outFormatContext, NULL, NULL, filename);
outCodec = avcodec_find_encoder(AV_CODEC_ID_MPEG4);
videoStream = avformat_new_stream(outFormatContext, outCodec);
videoStream->time_base = {1, 30};
outCodecContext = videoStream->codec;
outCodecContext->codec_id = AV_CODEC_ID_MPEG4;
outCodecContext->codec_type = AVMEDIA_TYPE_VIDEO;
outCodecContext->pix_fmt = AV_PIX_FMT_YUV420P;
outCodecContext->bit_rate = 400000;
outCodecContext->width = inputCodecContext->width;
outCodecContext->height = inputCodecContext->height;
outCodecContext->gop_size = 3;
outCodecContext->max_b_frames = 2;
outCodecContext->time_base = videoStream->time_base;
if (outFormatContext->oformat->flags & AVFMT_GLOBALHEADER)
outCodecContext->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
avcodec_open2(outCodecContext, outCodec, NULL);
if (!(outFormatContext->flags & AVFMT_NOFILE))
avio_open2(&outFormatContext->pb, filename, AVIO_FLAG_WRITE, NULL, NULL);
swsContext = sws_getContext(inputCodecContext->width,
inputCodecContext->height,
inputCodecContext->pix_fmt,
outCodecContext->width,
outCodecContext->height,
outCodecContext->pix_fmt,
SWS_BICUBIC, NULL, NULL, NULL);
avformat_write_header(outFormatContext, &options);
}
void MainWindow::collectFrame()
{
AVFrame* frame = av_frame_alloc();
frame->data[0] = NULL;
frame->width = inputCodecContext->width;
frame->height = inputCodecContext->height;
frame->format = inputCodecContext->pix_fmt;
av_image_alloc(frame->data, frame->linesize, inputCodecContext->width, inputCodecContext->height, (AVPixelFormat)frame->format, 32);
AVFrame* outFrame = av_frame_alloc();
outFrame->data[0] = NULL;
outFrame->width = outCodecContext->width;
outFrame->height = outCodecContext->height;
outFrame->format = outCodecContext->pix_fmt;
av_image_alloc(outFrame->data, outFrame->linesize, outCodecContext->width, outCodecContext->height, (AVPixelFormat)outFrame->format, 32);
int bufferSize = av_image_get_buffer_size(outCodecContext->pix_fmt,
outCodecContext->width,
outCodecContext->height,
24);
uint8_t* outBuffer = (uint8_t*)av_malloc(bufferSize);
avpicture_fill((AVPicture*)outFrame, outBuffer,
AV_PIX_FMT_YUV420P,
outCodecContext->width, outCodecContext->height);
int frameCount = 30;
int count = 0;
AVPacket* packet = (AVPacket*)av_malloc(sizeof(AVPacket));
av_init_packet(packet);
while(av_read_frame(inputFormatContext, packet) >= 0)
{
if(packet->stream_index == videoStream->index)
{
int frameFinished = 0;
avcodec_decode_video2(inputCodecContext, frame, &frameFinished, packet);
if(frameFinished)
{
if(++count > frameCount)
{
qDebug() << "FINISHED!";
break;
}
sws_scale(swsContext, frame->data, frame->linesize, 0, inputCodecContext->height, outFrame->data, outFrame->linesize);
AVPacket outPacket;
av_init_packet(&outPacket);
outPacket.data = NULL;
outPacket.size = 0;
int got_picture = 0;
avcodec_encode_video2(outCodecContext, &outPacket, outFrame, &got_picture);
if(got_picture)
{
if(outPacket.pts != AV_NOPTS_VALUE) outPacket.pts = av_rescale_q(outPacket.pts, videoStream->codec->time_base, videoStream->time_base);
if(outPacket.dts != AV_NOPTS_VALUE) outPacket.dts = av_rescale_q(outPacket.dts, videoStream->codec->time_base, videoStream->time_base);
av_write_frame(outFormatContext , &outPacket);
}
av_packet_unref(&outPacket);
}
}
}
av_write_trailer(outFormatContext);
av_free(outBuffer);
}
最佳答案
我认为问题在于您使用了一些不必要的代码和一些已弃用的函数,这些行是不必要的:
int bufferSize = av_image_get_buffer_size(outCodecContext->pix_fmt,
outCodecContext->width,
outCodecContext->height,
24);
uint8_t* outBuffer = (uint8_t*)av_malloc(bufferSize);
avpicture_fill((AVPicture*)outFrame, outBuffer,
AV_PIX_FMT_YUV420P,
outCodecContext->width, outCodecContext->height);
这样做会破坏本来已经正确的帧,也会导致内存泄漏问题。因为您已经使用 av_image_alloc 为 yuv420p 帧缓冲区分配了正确的空间,尽管您没有检查它的返回值以防失败。您可能会保留缓冲区大小用于其他目的。哦,别忘了也删除 av_free(outBuffer)。
这些应该被替换:
AVPacket* packet = (AVPacket*)av_malloc(sizeof(AVPacket));
av_init_packet(packet);
用这个:
AVPacket *packet = av_packet_alloc(); // also inits to defaults
if (packet == NULL) {
//hande error
}
其他的是,您的 avcodec_decode_video2 和 avcodec_encode_video2 也已弃用,但应该仍然有效。最后,根据我的经验,av_interleaved_write_frame 比 av_write_frame 效果更好。
可在此处找到新的解码和编码 API 示例:https://github.com/FFmpeg/FFmpeg/tree/master/doc/examples
希望对您有所帮助。
关于c++ - swscaler 错误的 src 图像指针,我们在Stack Overflow上找到一个类似的问题: https://stackoverflow.com/questions/49068812/
大约一年前,我决定确保每个包含非唯一文本的Flash通知都将从模块中的方法中获取文本。我这样做的最初原因是为了避免一遍又一遍地输入相同的字符串。如果我想更改措辞,我可以在一个地方轻松完成,而且一遍又一遍地重复同一件事而出现拼写错误的可能性也会降低。我最终得到的是这样的:moduleMessagesdefformat_error_messages(errors)errors.map{|attribute,message|"Error:#{attribute.to_s.titleize}#{message}."}enddeferror_message_could_not_find(obje
我的瘦服务器配置了nginx,我的ROR应用程序正在它们上运行。在我发布代码更新时运行thinrestart会给我的应用程序带来一些停机时间。我试图弄清楚如何优雅地重启正在运行的Thin实例,但找不到好的解决方案。有没有人能做到这一点? 最佳答案 #Restartjustthethinserverdescribedbythatconfigsudothin-C/etc/thin/mysite.ymlrestartNginx将继续运行并代理请求。如果您将Nginx设置为使用多个上游服务器,例如server{listen80;server
我遵循MichaelHartl的“RubyonRails教程:学习Web开发”,并创建了检查用户名和电子邮件长度有效性的测试(名称最多50个字符,电子邮件最多255个字符)。test/helpers/application_helper_test.rb的内容是:require'test_helper'classApplicationHelperTest在运行bundleexecraketest时,所有测试都通过了,但我看到以下消息在最后被标记为错误:ERROR["test_full_title_helper",ApplicationHelperTest,1.820016791]test
我是rails的新手,想在form字段上应用验证。myviewsnew.html.erb.....模拟.rbclassSimulation{:in=>1..25,:message=>'Therowmustbebetween1and25'}end模拟Controller.rbclassSimulationsController我想检查模型类中row字段的整数范围,如果不在范围内则返回错误信息。我可以检查上面代码的范围,但无法返回错误消息提前致谢 最佳答案 关键是您使用的是模型表单,一种显示ActiveRecord模型实例属性的表单。c
我正在尝试编写一个将文件上传到AWS并公开该文件的Ruby脚本。我做了以下事情:s3=Aws::S3::Resource.new(credentials:Aws::Credentials.new(KEY,SECRET),region:'us-west-2')obj=s3.bucket('stg-db').object('key')obj.upload_file(filename)这似乎工作正常,除了该文件不是公开可用的,而且我无法获得它的公共(public)URL。但是当我登录到S3时,我可以正常查看我的文件。为了使其公开可用,我将最后一行更改为obj.upload_file(file
我克隆了一个rails仓库,我现在正尝试捆绑安装背景:OSXElCapitanruby2.2.3p173(2015-08-18修订版51636)[x86_64-darwin15]rails-v在您的Gemfile中列出的或native可用的任何gem源中找不到gem'pg(>=0)ruby'。运行bundleinstall以安装缺少的gem。bundleinstallFetchinggemmetadatafromhttps://rubygems.org/............Fetchingversionmetadatafromhttps://rubygems.org/...Fe
在Cooper的书BeginningRuby中,第166页有一个我无法重现的示例。classSongincludeComparableattr_accessor:lengthdef(other)@lengthother.lengthenddefinitialize(song_name,length)@song_name=song_name@length=lengthendenda=Song.new('Rockaroundtheclock',143)b=Song.new('BohemianRhapsody',544)c=Song.new('MinuteWaltz',60)a.betwee
我是Google云的新手,我正在尝试对其进行首次部署。我的第一个部署是RubyonRails项目。我基本上是在关注thisguideinthegoogleclouddocumentation.唯一的区别是我使用的是我自己的项目,而不是他们提供的“helloworld”项目。这是我的app.yaml文件runtime:customvm:trueentrypoint:bundleexecrackup-p8080-Eproductionconfig.ruresources:cpu:0.5memory_gb:1.3disk_size_gb:10当我转到我的项目目录并运行gcloudprevie
我有两个Rails模型,即Invoice和Invoice_details。一个Invoice_details属于Invoice,一个Invoice有多个Invoice_details。我无法使用accepts_nested_attributes_forinInvoice通过Invoice模型保存Invoice_details。我收到以下错误:(0.2ms)BEGIN(0.2ms)ROLLBACKCompleted422UnprocessableEntityin25ms(ActiveRecord:4.0ms)ActiveRecord::RecordInvalid(Validationfa
这个问题在这里已经有了答案:Arraysmisbehaving(1个回答)关闭6年前。是否应该这样,即我误解了,还是错误?a=Array.new(3,Array.new(3))a[1].fill('g')=>[["g","g","g"],["g","g","g"],["g","g","g"]]它不应该导致:=>[[nil,nil,nil],["g","g","g"],[nil,nil,nil]]