OpenShot Library | libopenshot  0.2.0
FFmpegWriter.cpp
Go to the documentation of this file.
1 /**
2  * @file
3  * @brief Source file for FFmpegWriter class
4  * @author Jonathan Thomas <jonathan@openshot.org>, Fabrice Bellard
5  *
6  * @section LICENSE
7  *
8  * Copyright (c) 2008-2013 OpenShot Studios, LLC, Fabrice Bellard
9  * (http://www.openshotstudios.com). This file is part of
10  * OpenShot Library (http://www.openshot.org), an open-source project
11  * dedicated to delivering high quality video editing and animation solutions
12  * to the world.
13  *
14  * This file is originally based on the Libavformat API example, and then modified
15  * by the libopenshot project.
16  *
17  * OpenShot Library (libopenshot) is free software: you can redistribute it
18  * and/or modify it under the terms of the GNU Lesser General Public License
19  * as published by the Free Software Foundation, either version 3 of the
20  * License, or (at your option) any later version.
21  *
22  * OpenShot Library (libopenshot) is distributed in the hope that it will be
23  * useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
24  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
25  * GNU Lesser General Public License for more details.
26  *
27  * You should have received a copy of the GNU Lesser General Public License
28  * along with OpenShot Library. If not, see <http://www.gnu.org/licenses/>.
29  */
30 
31 #include "../include/FFmpegWriter.h"
32 
33 using namespace openshot;
34 
36  path(path), fmt(NULL), oc(NULL), audio_st(NULL), video_st(NULL), audio_pts(0), video_pts(0), samples(NULL),
37  audio_outbuf(NULL), audio_outbuf_size(0), audio_input_frame_size(0), audio_input_position(0),
38  initial_audio_input_frame_size(0), img_convert_ctx(NULL), cache_size(8), num_of_rescalers(32),
39  rescaler_position(0), video_codec(NULL), audio_codec(NULL), is_writing(false), write_video_count(0), write_audio_count(0),
40  original_sample_rate(0), original_channels(0), avr(NULL), avr_planar(NULL), is_open(false), prepare_streams(false),
41  write_header(false), write_trailer(false), audio_encoder_buffer_size(0), audio_encoder_buffer(NULL)
42 {
43 
44  // Disable audio & video (so they can be independently enabled)
45  info.has_audio = false;
46  info.has_video = false;
47 
48  // Initialize FFMpeg, and register all formats and codecs
49  av_register_all();
50 
51  // auto detect format
52  auto_detect_format();
53 }
54 
55 // Open the writer
57 {
58  // Open the writer
59  is_open = true;
60 
61  // Prepare streams (if needed)
62  if (!prepare_streams)
64 
65  // Write header (if needed)
66  if (!write_header)
67  WriteHeader();
68 }
69 
70 // auto detect format (from path)
71 void FFmpegWriter::auto_detect_format()
72 {
73  // Auto detect the output format from the name. default is mpeg.
74  fmt = av_guess_format(NULL, path.c_str(), NULL);
75  if (!fmt)
76  throw InvalidFormat("Could not deduce output format from file extension.", path);
77 
78  // Allocate the output media context
79  AV_OUTPUT_CONTEXT(&oc, path.c_str());
80  if (!oc)
81  throw OutOfMemory("Could not allocate memory for AVFormatContext.", path);
82 
83  // Set the AVOutputFormat for the current AVFormatContext
84  oc->oformat = fmt;
85 
86  // Update codec names
87  if (fmt->video_codec != AV_CODEC_ID_NONE && info.has_video)
88  // Update video codec name
89  info.vcodec = avcodec_find_encoder(fmt->video_codec)->name;
90 
91  if (fmt->audio_codec != AV_CODEC_ID_NONE && info.has_audio)
92  // Update audio codec name
93  info.acodec = avcodec_find_encoder(fmt->audio_codec)->name;
94 }
95 
96 // initialize streams
97 void FFmpegWriter::initialize_streams()
98 {
99  ZmqLogger::Instance()->AppendDebugMethod("FFmpegWriter::initialize_streams", "fmt->video_codec", fmt->video_codec, "fmt->audio_codec", fmt->audio_codec, "AV_CODEC_ID_NONE", AV_CODEC_ID_NONE, "", -1, "", -1, "", -1);
100 
101  // Add the audio and video streams using the default format codecs and initialize the codecs
102  video_st = NULL;
103  audio_st = NULL;
104  if (fmt->video_codec != AV_CODEC_ID_NONE && info.has_video)
105  // Add video stream
106  video_st = add_video_stream();
107 
108  if (fmt->audio_codec != AV_CODEC_ID_NONE && info.has_audio)
109  // Add audio stream
110  audio_st = add_audio_stream();
111 }
112 
113 // Set video export options
114 void FFmpegWriter::SetVideoOptions(bool has_video, string codec, Fraction fps, int width, int height, Fraction pixel_ratio, bool interlaced, bool top_field_first, int bit_rate)
115 {
116  // Set the video options
117  if (codec.length() > 0)
118  {
119  AVCodec *new_codec = avcodec_find_encoder_by_name(codec.c_str());
120  if (new_codec == NULL)
121  throw InvalidCodec("A valid video codec could not be found for this file.", path);
122  else {
123  // Set video codec
124  info.vcodec = new_codec->name;
125 
126  // Update video codec in fmt
127  fmt->video_codec = new_codec->id;
128  }
129  }
130  if (fps.num > 0)
131  {
132  // Set frames per second (if provided)
133  info.fps.num = fps.num;
134  info.fps.den = fps.den;
135 
136  // Set the timebase (inverse of fps)
139  }
140  if (width >= 1)
141  info.width = width;
142  if (height >= 1)
143  info.height = height;
144  if (pixel_ratio.num > 0)
145  {
146  info.pixel_ratio.num = pixel_ratio.num;
147  info.pixel_ratio.den = pixel_ratio.den;
148  }
149  if (bit_rate >= 1000)
150  info.video_bit_rate = bit_rate;
151 
152  info.interlaced_frame = interlaced;
153  info.top_field_first = top_field_first;
154 
155  // Calculate the DAR (display aspect ratio)
157 
158  // Reduce size fraction
159  size.Reduce();
160 
161  // Set the ratio based on the reduced fraction
162  info.display_ratio.num = size.num;
163  info.display_ratio.den = size.den;
164 
165  ZmqLogger::Instance()->AppendDebugMethod("FFmpegWriter::SetVideoOptions (" + codec + ")", "width", width, "height", height, "size.num", size.num, "size.den", size.den, "fps.num", fps.num, "fps.den", fps.den);
166 
167  // Enable / Disable video
168  info.has_video = has_video;
169 }
170 
171 // Set audio export options
172 void FFmpegWriter::SetAudioOptions(bool has_audio, string codec, int sample_rate, int channels, ChannelLayout channel_layout, int bit_rate)
173 {
174  // Set audio options
175  if (codec.length() > 0)
176  {
177  AVCodec *new_codec = avcodec_find_encoder_by_name(codec.c_str());
178  if (new_codec == NULL)
179  throw InvalidCodec("A valid audio codec could not be found for this file.", path);
180  else
181  {
182  // Set audio codec
183  info.acodec = new_codec->name;
184 
185  // Update audio codec in fmt
186  fmt->audio_codec = new_codec->id;
187  }
188  }
189  if (sample_rate > 7999)
190  info.sample_rate = sample_rate;
191  if (channels > 0)
192  info.channels = channels;
193  if (bit_rate > 999)
194  info.audio_bit_rate = bit_rate;
195  info.channel_layout = channel_layout;
196 
197  // init resample options (if zero)
198  if (original_sample_rate == 0)
199  original_sample_rate = info.sample_rate;
200  if (original_channels == 0)
201  original_channels = info.channels;
202 
203  ZmqLogger::Instance()->AppendDebugMethod("FFmpegWriter::SetAudioOptions (" + codec + ")", "sample_rate", sample_rate, "channels", channels, "bit_rate", bit_rate, "", -1, "", -1, "", -1);
204 
205  // Enable / Disable audio
206  info.has_audio = has_audio;
207 }
208 
209 // Set custom options (some codecs accept additional params)
210 void FFmpegWriter::SetOption(StreamType stream, string name, string value)
211 {
212  // Declare codec context
213  AVCodecContext *c = NULL;
214  AVStream *st = NULL;
215  stringstream convert(value);
216 
217  if (info.has_video && stream == VIDEO_STREAM && video_st) {
218  st = video_st;
219  // Get codec context
220  c = AV_GET_CODEC_PAR_CONTEXT(st, video_codec);
221  }
222  else if (info.has_audio && stream == AUDIO_STREAM && audio_st) {
223  st = audio_st;
224  // Get codec context
225  c = AV_GET_CODEC_PAR_CONTEXT(st, audio_codec);
226  }
227  else
228  throw NoStreamsFound("The stream was not found. Be sure to call PrepareStreams() first.", path);
229 
230  // Init AVOption
231  const AVOption *option = NULL;
232 
233  // Was a codec / stream found?
234  if (c)
235  // Find AVOption (if it exists)
236  option = AV_OPTION_FIND(c->priv_data, name.c_str());
237 
238  // Was option found?
239  if (option || (name == "g" || name == "qmin" || name == "qmax" || name == "max_b_frames" || name == "mb_decision" ||
240  name == "level" || name == "profile" || name == "slices" || name == "rc_min_rate" || name == "rc_max_rate"))
241  {
242  // Check for specific named options
243  if (name == "g")
244  // Set gop_size
245  convert >> c->gop_size;
246 
247  else if (name == "qmin")
248  // Minimum quantizer
249  convert >> c->qmin;
250 
251  else if (name == "qmax")
252  // Maximum quantizer
253  convert >> c->qmax;
254 
255  else if (name == "max_b_frames")
256  // Maximum number of B-frames between non-B-frames
257  convert >> c->max_b_frames;
258 
259  else if (name == "mb_decision")
260  // Macroblock decision mode
261  convert >> c->mb_decision;
262 
263  else if (name == "level")
264  // Set codec level
265  convert >> c->level;
266 
267  else if (name == "profile")
268  // Set codec profile
269  convert >> c->profile;
270 
271  else if (name == "slices")
272  // Indicates number of picture subdivisions
273  convert >> c->slices;
274 
275  else if (name == "rc_min_rate")
276  // Minimum bitrate
277  convert >> c->rc_min_rate;
278 
279  else if (name == "rc_max_rate")
280  // Maximum bitrate
281  convert >> c->rc_max_rate;
282 
283  else if (name == "rc_buffer_size")
284  // Buffer size
285  convert >> c->rc_buffer_size;
286 
287  else
288  // Set AVOption
289  AV_OPTION_SET(st, c->priv_data, name.c_str(), value.c_str(), c);
290 
291  ZmqLogger::Instance()->AppendDebugMethod("FFmpegWriter::SetOption (" + (string)name + ")", "stream == VIDEO_STREAM", stream == VIDEO_STREAM, "", -1, "", -1, "", -1, "", -1, "", -1);
292 
293  }
294  else
295  throw InvalidOptions("The option is not valid for this codec.", path);
296 
297 }
298 
299 /// Determine if codec name is valid
300 bool FFmpegWriter::IsValidCodec(string codec_name) {
301  // Initialize FFMpeg, and register all formats and codecs
302  av_register_all();
303 
304  // Find the codec (if any)
305  if (avcodec_find_encoder_by_name(codec_name.c_str()) == NULL)
306  return false;
307  else
308  return true;
309 }
310 
311 // Prepare & initialize streams and open codecs
313 {
314  if (!info.has_audio && !info.has_video)
315  throw InvalidOptions("No video or audio options have been set. You must set has_video or has_audio (or both).", path);
316 
317  ZmqLogger::Instance()->AppendDebugMethod("FFmpegWriter::PrepareStreams [" + path + "]", "info.has_audio", info.has_audio, "info.has_video", info.has_video, "", -1, "", -1, "", -1, "", -1);
318 
319  // Initialize the streams (i.e. add the streams)
320  initialize_streams();
321 
322  // Now that all the parameters are set, we can open the audio and video codecs and allocate the necessary encode buffers
323  if (info.has_video && video_st)
324  open_video(oc, video_st);
325  if (info.has_audio && audio_st)
326  open_audio(oc, audio_st);
327 
328  // Mark as 'prepared'
329  prepare_streams = true;
330 }
331 
332 // Write the file header (after the options are set)
334 {
335  if (!info.has_audio && !info.has_video)
336  throw InvalidOptions("No video or audio options have been set. You must set has_video or has_audio (or both).", path);
337 
338  // Open the output file, if needed
339  if (!(fmt->flags & AVFMT_NOFILE)) {
340  if (avio_open(&oc->pb, path.c_str(), AVIO_FLAG_WRITE) < 0)
341  throw InvalidFile("Could not open or write file.", path);
342  }
343 
344  // Force the output filename (which doesn't always happen for some reason)
345  snprintf(oc->filename, sizeof(oc->filename), "%s", path.c_str());
346 
347  // Write the stream header, if any
348  // TODO: add avoptions / parameters instead of NULL
349 
350  // Add general metadata (if any)
351  for(std::map<string, string>::iterator iter = info.metadata.begin(); iter != info.metadata.end(); ++iter)
352  {
353  av_dict_set(&oc->metadata, iter->first.c_str(), iter->second.c_str(), 0);
354  }
355 
356  if (avformat_write_header(oc, NULL) != 0) {
357  throw InvalidFile("Could not write header to file.", path);
358  };
359 
360  // Mark as 'written'
361  write_header = true;
362 
363  ZmqLogger::Instance()->AppendDebugMethod("FFmpegWriter::WriteHeader", "", -1, "", -1, "", -1, "", -1, "", -1, "", -1);
364 }
365 
366 // Add a frame to the queue waiting to be encoded.
367 void FFmpegWriter::WriteFrame(std::shared_ptr<Frame> frame)
368 {
369  // Check for open reader (or throw exception)
370  if (!is_open)
371  throw WriterClosed("The FFmpegWriter is closed. Call Open() before calling this method.", path);
372 
373  // Add frame pointer to "queue", waiting to be processed the next
374  // time the WriteFrames() method is called.
375  if (info.has_video && video_st)
376  spooled_video_frames.push_back(frame);
377 
378  if (info.has_audio && audio_st)
379  spooled_audio_frames.push_back(frame);
380 
381  ZmqLogger::Instance()->AppendDebugMethod("FFmpegWriter::WriteFrame", "frame->number", frame->number, "spooled_video_frames.size()", spooled_video_frames.size(), "spooled_audio_frames.size()", spooled_audio_frames.size(), "cache_size", cache_size, "is_writing", is_writing, "", -1);
382 
383  // Write the frames once it reaches the correct cache size
384  if (spooled_video_frames.size() == cache_size || spooled_audio_frames.size() == cache_size)
385  {
386  // Is writer currently writing?
387  if (!is_writing)
388  // Write frames to video file
389  write_queued_frames();
390 
391  else
392  {
393  // Write frames to video file
394  write_queued_frames();
395  }
396  }
397 
398  // Keep track of the last frame added
399  last_frame = frame;
400 }
401 
// Write all frames in the queue to the video file.
// Moves the spooled frames into working queues, then uses two OpenMP 'single'
// sections: the first encodes audio packets and runs each video frame through
// process_video_packet(); the second writes the processed video packets (in
// order) and deallocates their AVFrames.
void FFmpegWriter::write_queued_frames()
{
	ZmqLogger::Instance()->AppendDebugMethod("FFmpegWriter::write_queued_frames", "spooled_video_frames.size()", spooled_video_frames.size(), "spooled_audio_frames.size()", spooled_audio_frames.size(), "", -1, "", -1, "", -1, "", -1);

	// Flip writing flag (cleared again at the end of the second 'omp single' below)
	is_writing = true;

	// Transfer spool to queue (the queues are what the parallel sections consume)
	queued_video_frames = spooled_video_frames;
	queued_audio_frames = spooled_audio_frames;

	// Empty spool (so callers can keep spooling new frames)
	spooled_video_frames.clear();
	spooled_audio_frames.clear();

	// Set the number of threads in OpenMP
	omp_set_num_threads(OPEN_MP_NUM_PROCESSORS);
	// Allow nested OpenMP sections
	omp_set_nested(true);

	// Create blank exception flag.
	// NOTE(review): this flag is written inside the parallel region without
	// atomic/critical protection and read after it — confirm this is safe on
	// the supported compilers.
	bool has_error_encoding_video = false;

	#pragma omp parallel
	{
		#pragma omp single
		{
			// Process all audio frames (in a separate thread)
			if (info.has_audio && audio_st && !queued_audio_frames.empty())
				write_audio_packets(false);

			// Loop through each queued image frame
			while (!queued_video_frames.empty())
			{
				// Get front frame (from the queue)
				std::shared_ptr<Frame> frame = queued_video_frames.front();

				// Add to processed queue (so the writer pass below sees frames in order)
				processed_frames.push_back(frame);

				// Encode and add the frame to the output file
				if (info.has_video && video_st)
					process_video_packet(frame);

				// Remove front item
				queued_video_frames.pop_front();

			} // end while
		} // end omp single

		#pragma omp single
		{
			// Loop back through the frames (in order), and write them to the video file
			while (!processed_frames.empty())
			{
				// Get front frame (from the queue)
				std::shared_ptr<Frame> frame = processed_frames.front();

				if (info.has_video && video_st)
				{
					// Add to deallocate queue (so we can remove the AVFrames when we are done)
					deallocate_frames.push_back(frame);

					// Does this frame's AVFrame still exist
					if (av_frames.count(frame))
					{
						// Get AVFrame
						AVFrame *frame_final = av_frames[frame];

						// Write frame to video file
						bool success = write_video_packet(frame, frame_final);
						if (!success)
							has_error_encoding_video = true;
					}
				}

				// Remove front item
				processed_frames.pop_front();
			}

			// Loop through, and deallocate AVFrames
			while (!deallocate_frames.empty())
			{
				// Get front frame (from the queue)
				std::shared_ptr<Frame> frame = deallocate_frames.front();

				// Does this frame's AVFrame still exist
				if (av_frames.count(frame))
				{
					// Get AVFrame
					AVFrame *av_frame = av_frames[frame];

					// Deallocate AVPicture and AVFrame
					av_freep(&(av_frame->data[0]));
					AV_FREE_FRAME(&av_frame);
					av_frames.erase(frame);
				}

				// Remove front item
				deallocate_frames.pop_front();
			}

			// Done writing
			is_writing = false;

		} // end omp single
	} // end omp parallel

	// Raise exception from main thread (exceptions must not escape the OpenMP region)
	if (has_error_encoding_video)
		throw ErrorEncodingVideo("Error while writing raw video frame", -1);
}
515 
516 // Write a block of frames from a reader
517 void FFmpegWriter::WriteFrame(ReaderBase* reader, int64_t start, int64_t length)
518 {
519  ZmqLogger::Instance()->AppendDebugMethod("FFmpegWriter::WriteFrame (from Reader)", "start", start, "length", length, "", -1, "", -1, "", -1, "", -1);
520 
521  // Loop through each frame (and encoded it)
522  for (int64_t number = start; number <= length; number++)
523  {
524  // Get the frame
525  std::shared_ptr<Frame> f = reader->GetFrame(number);
526 
527  // Encode frame
528  WriteFrame(f);
529  }
530 }
531 
532 // Write the file trailer (after all frames are written)
534 {
535  // Write any remaining queued frames to video file
536  write_queued_frames();
537 
538  // Process final audio frame (if any)
539  if (info.has_audio && audio_st)
540  write_audio_packets(true);
541 
542  // Flush encoders (who sometimes hold on to frames)
543  flush_encoders();
544 
545  /* write the trailer, if any. The trailer must be written
546  * before you close the CodecContexts open when you wrote the
547  * header; otherwise write_trailer may try to use memory that
548  * was freed on av_codec_close() */
549  av_write_trailer(oc);
550 
551  // Mark as 'written'
552  write_trailer = true;
553 
554  ZmqLogger::Instance()->AppendDebugMethod("FFmpegWriter::WriteTrailer", "", -1, "", -1, "", -1, "", -1, "", -1, "", -1);
555 }
556 
// Flush encoders.
// Drains frames still buffered inside the video/audio encoders (by feeding a
// NULL frame) and writes the resulting packets to the output file. Called
// before the trailer is written.
void FFmpegWriter::flush_encoders()
{
	// NOTE(review): this early return (audio codecs with frame_size <= 1, i.e.
	// PCM-style codecs) also skips flushing the VIDEO encoder — confirm intended.
	if (info.has_audio && audio_codec && AV_GET_CODEC_TYPE(audio_st) == AVMEDIA_TYPE_AUDIO && AV_GET_CODEC_ATTRIBUTES(audio_st, audio_codec)->frame_size <= 1)
		return;

	int error_code = 0;
	int stop_encoding = 1; // NOTE(review): assigned below but never read after the loops

	// FLUSH VIDEO ENCODER
	if (info.has_video)
		for (;;) {

			// Increment PTS (in frames and scaled to the codec's timebase)
			write_video_count += av_rescale_q(1, (AVRational){info.fps.den, info.fps.num}, video_codec->time_base);

			AVPacket pkt;
			av_init_packet(&pkt);
			pkt.data = NULL;
			pkt.size = 0;

			// Pointer for video buffer (if using old FFmpeg version)
			uint8_t *video_outbuf = NULL;

			/* encode the image */
			int got_packet = 0;
			int error_code = 0; // NOTE(review): shadows the outer error_code

			#if IS_FFMPEG_3_2
			#pragma omp critical (write_video_packet)
			{
				// Encode video packet (latest version of FFmpeg).
				// NOTE(review): this sends the flush frame but never calls
				// avcodec_receive_packet, and got_packet stays 0, so the loop
				// exits after one iteration — pending packets may not be
				// drained on FFmpeg 3.2+. Confirm against FFmpeg send/receive docs.
				error_code = avcodec_send_frame(video_codec, NULL);
				got_packet = 0;
			}
			#else

			#if LIBAVFORMAT_VERSION_MAJOR >= 54
			// Encode video packet (older than FFmpeg 3.2); NULL frame requests a flush
			error_code = avcodec_encode_video2(video_codec, &pkt, NULL, &got_packet);

			#else
			// Encode video packet (even older version of FFmpeg)
			int video_outbuf_size = 0;

			/* encode the image */
			int out_size = avcodec_encode_video(video_codec, NULL, video_outbuf_size, NULL);

			/* if zero size, it means the image was buffered */
			if (out_size > 0) {
				if(video_codec->coded_frame->key_frame)
					pkt.flags |= AV_PKT_FLAG_KEY;
				pkt.data= video_outbuf;
				pkt.size= out_size;

				// got data back (so encode this frame)
				got_packet = 1;
			}
			#endif
			#endif

			if (error_code < 0) {
				ZmqLogger::Instance()->AppendDebugMethod("FFmpegWriter::flush_encoders ERROR [" + (string)av_err2str(error_code) + "]", "error_code", error_code, "", -1, "", -1, "", -1, "", -1, "", -1);
			}
			if (!got_packet) {
				stop_encoding = 1;
				break;
			}

			// Override PTS (in frames and scaled to the codec's timebase)
			//pkt.pts = write_video_count;

			// set the timestamp (rescale from the codec timebase to the stream timebase)
			if (pkt.pts != AV_NOPTS_VALUE)
				pkt.pts = av_rescale_q(pkt.pts, video_codec->time_base, video_st->time_base);
			if (pkt.dts != AV_NOPTS_VALUE)
				pkt.dts = av_rescale_q(pkt.dts, video_codec->time_base, video_st->time_base);
			if (pkt.duration > 0)
				pkt.duration = av_rescale_q(pkt.duration, video_codec->time_base, video_st->time_base);
			pkt.stream_index = video_st->index;

			// Write packet
			error_code = av_interleaved_write_frame(oc, &pkt);
			if (error_code < 0) {
				ZmqLogger::Instance()->AppendDebugMethod("FFmpegWriter::flush_encoders ERROR [" + (string)av_err2str(error_code) + "]", "error_code", error_code, "", -1, "", -1, "", -1, "", -1, "", -1);
			}

			// Deallocate memory (if needed)
			if (video_outbuf)
				av_freep(&video_outbuf);
		}

	// FLUSH AUDIO ENCODER
	if (info.has_audio)
		for (;;) {

			// Increment PTS (in samples and scaled to the codec's timebase)
#if LIBAVFORMAT_VERSION_MAJOR >= 54
			// for some reason, it requires me to multiply channels X 2
			write_audio_count += av_rescale_q(audio_input_position / (audio_codec->channels * av_get_bytes_per_sample(AV_SAMPLE_FMT_S16)), (AVRational){1, info.sample_rate}, audio_codec->time_base);
#else
			write_audio_count += av_rescale_q(audio_input_position / audio_codec->channels, (AVRational){1, info.sample_rate}, audio_codec->time_base);
#endif

			AVPacket pkt;
			av_init_packet(&pkt);
			pkt.data = NULL;
			pkt.size = 0;
			pkt.pts = pkt.dts = write_audio_count;

			/* encode the audio (NULL frame requests a flush) */
			int got_packet = 0;
			#if IS_FFMPEG_3_2
			// NOTE(review): the return value of avcodec_send_frame is discarded
			// here and got_packet stays 0, so the loop exits after one iteration.
			avcodec_send_frame(audio_codec, NULL);
			got_packet = 0;
			#else
			error_code = avcodec_encode_audio2(audio_codec, &pkt, NULL, &got_packet);
			#endif
			if (error_code < 0) {
				ZmqLogger::Instance()->AppendDebugMethod("FFmpegWriter::flush_encoders ERROR [" + (string)av_err2str(error_code) + "]", "error_code", error_code, "", -1, "", -1, "", -1, "", -1, "", -1);
			}
			if (!got_packet) {
				stop_encoding = 1;
				break;
			}

			// Since the PTS can change during encoding, set the value again. This seems like a huge hack,
			// but it fixes lots of PTS related issues when I do this.
			pkt.pts = pkt.dts = write_audio_count;

			// Scale the PTS to the audio stream timebase (which is sometimes different than the codec's timebase)
			if (pkt.pts != AV_NOPTS_VALUE)
				pkt.pts = av_rescale_q(pkt.pts, audio_codec->time_base, audio_st->time_base);
			if (pkt.dts != AV_NOPTS_VALUE)
				pkt.dts = av_rescale_q(pkt.dts, audio_codec->time_base, audio_st->time_base);
			if (pkt.duration > 0)
				pkt.duration = av_rescale_q(pkt.duration, audio_codec->time_base, audio_st->time_base);

			// set stream
			pkt.stream_index = audio_st->index;
			pkt.flags |= AV_PKT_FLAG_KEY;

			// Write packet
			error_code = av_interleaved_write_frame(oc, &pkt);
			if (error_code < 0) {
				ZmqLogger::Instance()->AppendDebugMethod("FFmpegWriter::flush_encoders ERROR [" + (string)av_err2str(error_code) + "]", "error_code", error_code, "", -1, "", -1, "", -1, "", -1, "", -1);
			}

			// deallocate memory for packet
			AV_FREE_PACKET(&pkt);
		}


}
711 
// Close the video codec.
// NOTE: both parameters are currently unused; only the member video codec
// context is released here.
void FFmpegWriter::close_video(AVFormatContext *oc, AVStream *st)
{
	AV_FREE_CONTEXT(video_codec);
	video_codec = NULL;
}
718 
719 // Close the audio codec
720 void FFmpegWriter::close_audio(AVFormatContext *oc, AVStream *st)
721 {
722  AV_FREE_CONTEXT(audio_codec);
723  audio_codec = NULL;
724 
725  // Clear buffers
726  delete[] samples;
727  delete[] audio_outbuf;
728  delete[] audio_encoder_buffer;
729  samples = NULL;
730  audio_outbuf = NULL;
731  audio_encoder_buffer = NULL;
732 
733  // Deallocate resample buffer
734  if (avr) {
735  avresample_close(avr);
736  avresample_free(&avr);
737  avr = NULL;
738  }
739 
740  if (avr_planar) {
741  avresample_close(avr_planar);
742  avresample_free(&avr_planar);
743  avr_planar = NULL;
744  }
745 }
746 
747 // Close the writer
749 {
750  // Write trailer (if needed)
751  if (!write_trailer)
752  WriteTrailer();
753 
754  // Close each codec
755  if (video_st)
756  close_video(oc, video_st);
757  if (audio_st)
758  close_audio(oc, audio_st);
759 
760  // Deallocate image scalers
761  if (image_rescalers.size() > 0)
762  RemoveScalers();
763 
764  // Free the streams
765  for (int i = 0; i < oc->nb_streams; i++) {
766  av_freep(AV_GET_CODEC_ATTRIBUTES(&oc->streams[i], &oc->streams[i]));
767  av_freep(&oc->streams[i]);
768  }
769 
770  if (!(fmt->flags & AVFMT_NOFILE)) {
771  /* close the output file */
772  avio_close(oc->pb);
773  }
774 
775  // Reset frame counters
776  write_video_count = 0;
777  write_audio_count = 0;
778 
779  // Free the context
780  av_freep(&oc);
781 
782  // Close writer
783  is_open = false;
784  prepare_streams = false;
785  write_header = false;
786  write_trailer = false;
787 
788  ZmqLogger::Instance()->AppendDebugMethod("FFmpegWriter::Close", "", -1, "", -1, "", -1, "", -1, "", -1, "", -1);
789 }
790 
791 // Add an AVFrame to the cache
792 void FFmpegWriter::add_avframe(std::shared_ptr<Frame> frame, AVFrame* av_frame)
793 {
794  // Add AVFrame to map (if it does not already exist)
795  if (!av_frames.count(frame))
796  {
797  // Add av_frame
798  av_frames[frame] = av_frame;
799  }
800  else
801  {
802  // Do not add, and deallocate this AVFrame
803  AV_FREE_FRAME(&av_frame);
804  }
805 }
806 
807 // Add an audio output stream
808 AVStream* FFmpegWriter::add_audio_stream()
809 {
810  AVCodecContext *c;
811  AVStream *st;
812 
813  // Find the audio codec
814  AVCodec *codec = avcodec_find_encoder_by_name(info.acodec.c_str());
815  if (codec == NULL)
816  throw InvalidCodec("A valid audio codec could not be found for this file.", path);
817 
818  // Create a new audio stream
819  AV_FORMAT_NEW_STREAM(oc, audio_codec, codec, st)
820 
821  c->codec_id = codec->id;
822 #if LIBAVFORMAT_VERSION_MAJOR >= 53
823  c->codec_type = AVMEDIA_TYPE_AUDIO;
824 #else
825  c->codec_type = CODEC_TYPE_AUDIO;
826 #endif
827 
828  // Set the sample parameters
829  c->bit_rate = info.audio_bit_rate;
830  c->channels = info.channels;
831 
832  // Set valid sample rate (or throw error)
833  if (codec->supported_samplerates) {
834  int i;
835  for (i = 0; codec->supported_samplerates[i] != 0; i++)
836  if (info.sample_rate == codec->supported_samplerates[i])
837  {
838  // Set the valid sample rate
839  c->sample_rate = info.sample_rate;
840  break;
841  }
842  if (codec->supported_samplerates[i] == 0)
843  throw InvalidSampleRate("An invalid sample rate was detected for this codec.", path);
844  } else
845  // Set sample rate
846  c->sample_rate = info.sample_rate;
847 
848 
849  // Set a valid number of channels (or throw error)
850  int channel_layout = info.channel_layout;
851  if (codec->channel_layouts) {
852  int i;
853  for (i = 0; codec->channel_layouts[i] != 0; i++)
854  if (channel_layout == codec->channel_layouts[i])
855  {
856  // Set valid channel layout
857  c->channel_layout = channel_layout;
858  break;
859  }
860  if (codec->channel_layouts[i] == 0)
861  throw InvalidChannels("An invalid channel layout was detected (i.e. MONO / STEREO).", path);
862  } else
863  // Set valid channel layout
864  c->channel_layout = channel_layout;
865 
866  // Choose a valid sample_fmt
867  if (codec->sample_fmts) {
868  for (int i = 0; codec->sample_fmts[i] != AV_SAMPLE_FMT_NONE; i++)
869  {
870  // Set sample format to 1st valid format (and then exit loop)
871  c->sample_fmt = codec->sample_fmts[i];
872  break;
873  }
874  }
875  if (c->sample_fmt == AV_SAMPLE_FMT_NONE) {
876  // Default if no sample formats found
877  c->sample_fmt = AV_SAMPLE_FMT_S16;
878  }
879 
880  // some formats want stream headers to be separate
881  if (oc->oformat->flags & AVFMT_GLOBALHEADER)
882  c->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
883 
885  ZmqLogger::Instance()->AppendDebugMethod("FFmpegWriter::add_audio_stream", "c->codec_id", c->codec_id, "c->bit_rate", c->bit_rate, "c->channels", c->channels, "c->sample_fmt", c->sample_fmt, "c->channel_layout", c->channel_layout, "c->sample_rate", c->sample_rate);
886 
887  return st;
888 }
889 
890 // Add a video output stream
891 AVStream* FFmpegWriter::add_video_stream()
892 {
893  AVCodecContext *c;
894  AVStream *st;
895 
896  // Find the video codec
897  AVCodec *codec = avcodec_find_encoder_by_name(info.vcodec.c_str());
898  if (codec == NULL)
899  throw InvalidCodec("A valid video codec could not be found for this file.", path);
900 
901  // Create a new video stream
902  AV_FORMAT_NEW_STREAM(oc, video_codec, codec, st)
903 
904  c->codec_id = codec->id;
905 #if LIBAVFORMAT_VERSION_MAJOR >= 53
906  c->codec_type = AVMEDIA_TYPE_VIDEO;
907 #else
908  c->codec_type = CODEC_TYPE_VIDEO;
909 #endif
910 
911  /* Init video encoder options */
912  c->bit_rate = info.video_bit_rate;
913 
914  //TODO: Implement variable bitrate feature (which actually works). This implementation throws
915  //invalid bitrate errors and rc buffer underflow errors, etc...
916  //c->rc_min_rate = info.video_bit_rate;
917  //c->rc_max_rate = info.video_bit_rate;
918  //c->rc_buffer_size = FFMAX(c->rc_max_rate, 15000000) * 112L / 15000000 * 16384;
919  //if ( !c->rc_initial_buffer_occupancy )
920  // c->rc_initial_buffer_occupancy = c->rc_buffer_size * 3/4;
921  c->qmin = 2;
922  c->qmax = 30;
923 
924  /* resolution must be a multiple of two */
925  // TODO: require /2 height and width
926  c->width = info.width;
927  c->height = info.height;
928 
929  /* time base: this is the fundamental unit of time (in seconds) in terms
930  of which frame timestamps are represented. for fixed-fps content,
931  timebase should be 1/framerate and timestamp increments should be
932  identically 1. */
933  c->time_base.num = info.video_timebase.num;
934  c->time_base.den = info.video_timebase.den;
935  #if LIBAVFORMAT_VERSION_MAJOR >= 56
936  c->framerate = av_inv_q(c->time_base);
937  #endif
938  st->avg_frame_rate = av_inv_q(c->time_base);
939  st->time_base.num = info.video_timebase.num;
940  st->time_base.den = info.video_timebase.den;
941 
942  c->gop_size = 12; /* TODO: add this to "info"... emit one intra frame every twelve frames at most */
943  c->max_b_frames = 10;
944  if (c->codec_id == AV_CODEC_ID_MPEG2VIDEO)
945  /* just for testing, we also add B frames */
946  c->max_b_frames = 2;
947  if (c->codec_id == AV_CODEC_ID_MPEG1VIDEO)
948  /* Needed to avoid using macroblocks in which some coeffs overflow.
949  This does not happen with normal video, it just happens here as
950  the motion of the chroma plane does not match the luma plane. */
951  c->mb_decision = 2;
952  // some formats want stream headers to be separate
953  if (oc->oformat->flags & AVFMT_GLOBALHEADER)
954  c->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
955 
956  // Find all supported pixel formats for this codec
957  const PixelFormat* supported_pixel_formats = codec->pix_fmts;
958  while (supported_pixel_formats != NULL && *supported_pixel_formats != PIX_FMT_NONE) {
959  // Assign the 1st valid pixel format (if one is missing)
960  if (c->pix_fmt == PIX_FMT_NONE)
961  c->pix_fmt = *supported_pixel_formats;
962  ++supported_pixel_formats;
963  }
964 
965  // Codec doesn't have any pix formats?
966  if (c->pix_fmt == PIX_FMT_NONE) {
967  if(fmt->video_codec == AV_CODEC_ID_RAWVIDEO) {
968  // Raw video should use RGB24
969  c->pix_fmt = PIX_FMT_RGB24;
970  }
971  }
972 
974  ZmqLogger::Instance()->AppendDebugMethod("FFmpegWriter::add_video_stream (" + (string)fmt->name + " : " + (string)av_get_pix_fmt_name(c->pix_fmt) + ")", "c->codec_id", c->codec_id, "c->bit_rate", c->bit_rate, "c->pix_fmt", c->pix_fmt, "oc->oformat->flags", oc->oformat->flags, "", -1, "", -1);
975 
976  return st;
977 }
978 
// Open the audio codec for the given stream: locate the encoder, open it,
// size the sample/packet buffers, and copy stream metadata.
// @param oc  The output format context being written to
// @param st  The audio stream created earlier by add_audio_stream
// @throws InvalidCodec if the encoder cannot be found or opened
void FFmpegWriter::open_audio(AVFormatContext *oc, AVStream *st)
{
	AVCodec *codec;
	// Bind the member 'audio_codec' context to this stream (project macro,
	// abstracts the codec/codecpar API differences across FFmpeg versions)
	AV_GET_CODEC_FROM_STREAM(st, audio_codec)

	// Set number of threads equal to number of processors (not to exceed 16)
	audio_codec->thread_count = min(OPEN_MP_NUM_PROCESSORS, 16);

	// Find the audio encoder: prefer the user-requested codec name,
	// fall back to whatever codec id the stream was created with
	codec = avcodec_find_encoder_by_name(info.acodec.c_str());
	if (!codec)
		codec = avcodec_find_encoder(audio_codec->codec_id);
	if (!codec)
		throw InvalidCodec("Could not find codec", path);

	// Init options (allow experimental codecs, e.g. native AAC)
	AVDictionary *opts = NULL;
	av_dict_set(&opts, "strict", "experimental", 0);

	// Open the codec
	if (avcodec_open2(audio_codec, codec, &opts) < 0)
		throw InvalidCodec("Could not open codec", path);
	AV_COPY_PARAMS_FROM_CONTEXT(st, audio_codec);

	// Free options
	av_dict_free(&opts);

	// Calculate the size of the input frame (i..e how many samples per packet), and the output buffer
	// TODO: Ugly hack for PCM codecs (will be removed ASAP with new PCM support to compute the input frame size in samples
	if (audio_codec->frame_size <= 1) {
		// No frame size found... so calculate a rough per-channel estimate
		audio_input_frame_size = 50000 / info.channels;

		int s = AV_FIND_DECODER_CODEC_ID(st);
		switch (s) {
			case AV_CODEC_ID_PCM_S16LE:
			case AV_CODEC_ID_PCM_S16BE:
			case AV_CODEC_ID_PCM_U16LE:
			case AV_CODEC_ID_PCM_U16BE:
				// 16-bit PCM samples are twice the byte size, so halve the count
				audio_input_frame_size >>= 1;
				break;
			default:
				break;
		}
	} else {
		// Set frame size based on the codec
		audio_input_frame_size = audio_codec->frame_size;
	}

	// Set the initial frame size (since it might change during resampling)
	initial_audio_input_frame_size = audio_input_frame_size;

	// Allocate array for samples
	// NOTE(review): 'samples' is allocated with new[] here; confirm the writer
	// cannot be opened twice without closing, or this would leak the old buffer.
	samples = new int16_t[AVCODEC_MAX_AUDIO_FRAME_SIZE];

	// Set audio output buffer (used to store the encoded audio)
	audio_outbuf_size = AVCODEC_MAX_AUDIO_FRAME_SIZE;
	audio_outbuf = new uint8_t[audio_outbuf_size];

	// Set audio packet encoding buffer
	audio_encoder_buffer_size = AUDIO_PACKET_ENCODING_SIZE;
	audio_encoder_buffer = new uint8_t[audio_encoder_buffer_size];

	// Add audio metadata (if any) to the stream's dictionary
	for(std::map<string, string>::iterator iter = info.metadata.begin(); iter != info.metadata.end(); ++iter)
	{
		av_dict_set(&st->metadata, iter->first.c_str(), iter->second.c_str(), 0);
	}

	ZmqLogger::Instance()->AppendDebugMethod("FFmpegWriter::open_audio", "audio_codec->thread_count", audio_codec->thread_count, "audio_input_frame_size", audio_input_frame_size, "buffer_size", AVCODEC_MAX_AUDIO_FRAME_SIZE + AV_INPUT_BUFFER_PADDING_SIZE, "", -1, "", -1, "", -1);

}
1052 
1053 // open video codec
1054 void FFmpegWriter::open_video(AVFormatContext *oc, AVStream *st)
1055 {
1056  AVCodec *codec;
1057  AV_GET_CODEC_FROM_STREAM(st, video_codec)
1058 
1059  // Set number of threads equal to number of processors (not to exceed 16)
1060  video_codec->thread_count = min(OPEN_MP_NUM_PROCESSORS, 16);
1061 
1062  /* find the video encoder */
1063  codec = avcodec_find_encoder_by_name(info.vcodec.c_str());
1064  if (!codec)
1065  codec = avcodec_find_encoder(AV_FIND_DECODER_CODEC_ID(st));
1066  if (!codec)
1067  throw InvalidCodec("Could not find codec", path);
1068 
1069  /* Force max_b_frames to 0 in some cases (i.e. for mjpeg image sequences */
1070  if(video_codec->max_b_frames && video_codec->codec_id != AV_CODEC_ID_MPEG4 && video_codec->codec_id != AV_CODEC_ID_MPEG1VIDEO && video_codec->codec_id != AV_CODEC_ID_MPEG2VIDEO)
1071  video_codec->max_b_frames = 0;
1072 
1073  // Init options
1074  AVDictionary *opts = NULL;
1075  av_dict_set(&opts, "strict", "experimental", 0);
1076 
1077  /* open the codec */
1078  if (avcodec_open2(video_codec, codec, &opts) < 0)
1079  throw InvalidCodec("Could not open codec", path);
1080  AV_COPY_PARAMS_FROM_CONTEXT(st, video_codec);
1081 
1082  // Free options
1083  av_dict_free(&opts);
1084 
1085  // Add video metadata (if any)
1086  for(std::map<string, string>::iterator iter = info.metadata.begin(); iter != info.metadata.end(); ++iter)
1087  {
1088  av_dict_set(&st->metadata, iter->first.c_str(), iter->second.c_str(), 0);
1089  }
1090 
1091  ZmqLogger::Instance()->AppendDebugMethod("FFmpegWriter::open_video", "video_codec->thread_count", video_codec->thread_count, "", -1, "", -1, "", -1, "", -1, "", -1);
1092 
1093 }
1094 
// Write all queued frames' audio to the video file.
// Combines every queued Frame's samples into one interleaved S16 buffer,
// resamples to the output rate/layout/format, slices the result into
// codec-sized packets, and encodes + muxes each packet.
// @param final  When true, flush the remaining partial packet even if it
//               holds fewer samples than a full audio frame.
void FFmpegWriter::write_audio_packets(bool final)
{
	#pragma omp task firstprivate(final)
	{
		// Init audio buffers / variables
		int total_frame_samples = 0;
		int frame_position = 0;
		int channels_in_frame = 0;
		int sample_rate_in_frame = 0;
		int samples_in_frame = 0;
		ChannelLayout channel_layout_in_frame = LAYOUT_MONO; // default channel layout

		// Create a new array (to hold all S16 audio samples, for the current queued frames
		int16_t* all_queued_samples = (int16_t*)av_malloc((sizeof(int16_t)*(queued_audio_frames.size() * AVCODEC_MAX_AUDIO_FRAME_SIZE)));
		int16_t* all_resampled_samples = NULL;
		int16_t* final_samples_planar = NULL;
		int16_t* final_samples = NULL;

		// Loop through each queued audio frame, appending its samples to all_queued_samples
		while (!queued_audio_frames.empty())
		{
			// Get front frame (from the queue)
			std::shared_ptr<Frame> frame = queued_audio_frames.front();

			// Get the audio details from this frame
			// (the last frame's rate/channels/layout win; assumes all queued
			// frames share the same audio parameters — TODO confirm)
			sample_rate_in_frame = frame->SampleRate();
			samples_in_frame = frame->GetAudioSamplesCount();
			channels_in_frame = frame->GetAudioChannelsCount();
			channel_layout_in_frame = frame->ChannelsLayout();


			// Get audio sample array
			float* frame_samples_float = NULL;
			// Get samples interleaved together (c1 c2 c1 c2 c1 c2)
			frame_samples_float = frame->GetInterleavedAudioSamples(sample_rate_in_frame, NULL, &samples_in_frame);


			// Calculate total samples
			total_frame_samples = samples_in_frame * channels_in_frame;

			// Translate audio sample values back to 16 bit integers
			for (int s = 0; s < total_frame_samples; s++, frame_position++)
				// Translate sample value (float in [-1,1]) and copy into buffer
				all_queued_samples[frame_position] = int(frame_samples_float[s] * (1 << 15));


			// Deallocate float array
			delete[] frame_samples_float;

			// Remove front item
			queued_audio_frames.pop_front();

		} // end while


		// Update total samples (since we've combined all queued frames)
		total_frame_samples = frame_position;
		int remaining_frame_samples = total_frame_samples;
		int samples_position = 0;


		ZmqLogger::Instance()->AppendDebugMethod("FFmpegWriter::write_audio_packets", "final", final, "total_frame_samples", total_frame_samples, "channel_layout_in_frame", channel_layout_in_frame, "channels_in_frame", channels_in_frame, "samples_in_frame", samples_in_frame, "LAYOUT_MONO", LAYOUT_MONO);

		// Keep track of the original sample format
		AVSampleFormat output_sample_fmt = audio_codec->sample_fmt;

		AVFrame *audio_frame = NULL;
		if (!final) {
			// Create input frame (and allocate arrays)
			audio_frame = AV_ALLOCATE_FRAME();
			AV_RESET_FRAME(audio_frame);
			audio_frame->nb_samples = total_frame_samples / channels_in_frame;

			// Fill input frame with sample data
			avcodec_fill_audio_frame(audio_frame, channels_in_frame, AV_SAMPLE_FMT_S16, (uint8_t *) all_queued_samples,
				audio_encoder_buffer_size, 0);

			// Do not convert audio to planar format (yet). We need to keep everything interleaved at this point.
			// Map the codec's planar format to its interleaved (packed) counterpart.
			switch (audio_codec->sample_fmt)
			{
				case AV_SAMPLE_FMT_FLTP:
				{
					output_sample_fmt = AV_SAMPLE_FMT_FLT;
					break;
				}
				case AV_SAMPLE_FMT_S32P:
				{
					output_sample_fmt = AV_SAMPLE_FMT_S32;
					break;
				}
				case AV_SAMPLE_FMT_S16P:
				{
					output_sample_fmt = AV_SAMPLE_FMT_S16;
					break;
				}
				case AV_SAMPLE_FMT_U8P:
				{
					output_sample_fmt = AV_SAMPLE_FMT_U8;
					break;
				}
			}

			// Update total samples & input frame size (due to bigger or smaller data types)
			total_frame_samples *= (float(info.sample_rate) / sample_rate_in_frame); // adjust for different byte sizes
			total_frame_samples *= (float(info.channels) / channels_in_frame); // adjust for different # of channels

			// Set remaining samples
			remaining_frame_samples = total_frame_samples;

			// Create output frame (and allocate arrays)
			AVFrame *audio_converted = AV_ALLOCATE_FRAME();
			AV_RESET_FRAME(audio_converted);
			audio_converted->nb_samples = total_frame_samples / channels_in_frame;
			av_samples_alloc(audio_converted->data, audio_converted->linesize, info.channels, audio_converted->nb_samples, output_sample_fmt, 0);

			ZmqLogger::Instance()->AppendDebugMethod("FFmpegWriter::write_audio_packets (1st resampling)", "in_sample_fmt", AV_SAMPLE_FMT_S16, "out_sample_fmt", output_sample_fmt, "in_sample_rate", sample_rate_in_frame, "out_sample_rate", info.sample_rate, "in_channels", channels_in_frame, "out_channels", info.channels);

			// setup resample context (lazily created once, then reused across calls)
			if (!avr) {
				avr = avresample_alloc_context();
				av_opt_set_int(avr, "in_channel_layout", channel_layout_in_frame, 0);
				av_opt_set_int(avr, "out_channel_layout", info.channel_layout, 0);
				av_opt_set_int(avr, "in_sample_fmt", AV_SAMPLE_FMT_S16, 0);
				av_opt_set_int(avr, "out_sample_fmt", output_sample_fmt, 0); // planar not allowed here
				av_opt_set_int(avr, "in_sample_rate", sample_rate_in_frame, 0);
				av_opt_set_int(avr, "out_sample_rate", info.sample_rate, 0);
				av_opt_set_int(avr, "in_channels", channels_in_frame, 0);
				av_opt_set_int(avr, "out_channels", info.channels, 0);
				avresample_open(avr);
			}
			int nb_samples = 0;

			// Convert audio samples
			nb_samples = avresample_convert(avr, // audio resample context
				audio_converted->data, // output data pointers
				audio_converted->linesize[0], // output plane size, in bytes. (0 if unknown)
				audio_converted->nb_samples, // maximum number of samples that the output buffer can hold
				audio_frame->data, // input data pointers
				audio_frame->linesize[0], // input plane size, in bytes (0 if unknown)
				audio_frame->nb_samples); // number of input samples to convert

			// Create a new array (to hold all resampled S16 audio samples)
			all_resampled_samples = (int16_t*)av_malloc(sizeof(int16_t) * nb_samples * info.channels * (av_get_bytes_per_sample(output_sample_fmt) / av_get_bytes_per_sample(AV_SAMPLE_FMT_S16)));

			// Copy audio samples over original samples
			memcpy(all_resampled_samples, audio_converted->data[0], nb_samples * info.channels * av_get_bytes_per_sample(output_sample_fmt));

			// Remove converted audio
			// (freeing audio_frame->data[0] also frees all_queued_samples,
			// since avcodec_fill_audio_frame pointed data[0] at that buffer)
			av_freep(&(audio_frame->data[0]));
			AV_FREE_FRAME(&audio_frame);
			av_freep(&audio_converted->data[0]);
			AV_FREE_FRAME(&audio_converted);
			all_queued_samples = NULL; // this array cleared with above call

			ZmqLogger::Instance()->AppendDebugMethod("FFmpegWriter::write_audio_packets (Successfully completed 1st resampling)", "nb_samples", nb_samples, "remaining_frame_samples", remaining_frame_samples, "", -1, "", -1, "", -1, "", -1);
		}

		// Loop until no more samples
		while (remaining_frame_samples > 0 || final) {
			// Get remaining samples needed for this packet
			int remaining_packet_samples = (audio_input_frame_size * info.channels) - audio_input_position;

			// Determine how many samples we need
			int diff = 0;
			if (remaining_frame_samples >= remaining_packet_samples)
				diff = remaining_packet_samples;
			else if (remaining_frame_samples < remaining_packet_samples)
				diff = remaining_frame_samples;

			// Copy frame samples into the packet samples array
			if (!final)
				//TODO: Make this more sane
				memcpy(samples + (audio_input_position * (av_get_bytes_per_sample(output_sample_fmt) / av_get_bytes_per_sample(AV_SAMPLE_FMT_S16))), all_resampled_samples + samples_position, diff * av_get_bytes_per_sample(output_sample_fmt));

			// Increment counters
			audio_input_position += diff;
			samples_position += diff * (av_get_bytes_per_sample(output_sample_fmt) / av_get_bytes_per_sample(AV_SAMPLE_FMT_S16));
			remaining_frame_samples -= diff;
			remaining_packet_samples -= diff;

			// Do we have enough samples to proceed?
			if (audio_input_position < (audio_input_frame_size * info.channels) && !final)
				// Not enough samples to encode... so wait until the next frame
				break;

			// Convert to planar (if needed by audio codec)
			AVFrame *frame_final = AV_ALLOCATE_FRAME();
			AV_RESET_FRAME(frame_final);
			if (av_sample_fmt_is_planar(audio_codec->sample_fmt))
			{
				ZmqLogger::Instance()->AppendDebugMethod("FFmpegWriter::write_audio_packets (2nd resampling for Planar formats)", "in_sample_fmt", output_sample_fmt, "out_sample_fmt", audio_codec->sample_fmt, "in_sample_rate", info.sample_rate, "out_sample_rate", info.sample_rate, "in_channels", info.channels, "out_channels", info.channels);

				// setup resample context (lazily created once, then reused)
				if (!avr_planar) {
					avr_planar = avresample_alloc_context();
					av_opt_set_int(avr_planar, "in_channel_layout", info.channel_layout, 0);
					av_opt_set_int(avr_planar, "out_channel_layout", info.channel_layout, 0);
					av_opt_set_int(avr_planar, "in_sample_fmt", output_sample_fmt, 0);
					av_opt_set_int(avr_planar, "out_sample_fmt", audio_codec->sample_fmt, 0); // planar not allowed here
					av_opt_set_int(avr_planar, "in_sample_rate", info.sample_rate, 0);
					av_opt_set_int(avr_planar, "out_sample_rate", info.sample_rate, 0);
					av_opt_set_int(avr_planar, "in_channels", info.channels, 0);
					av_opt_set_int(avr_planar, "out_channels", info.channels, 0);
					avresample_open(avr_planar);
				}

				// Create input frame (and allocate arrays)
				audio_frame = AV_ALLOCATE_FRAME();
				AV_RESET_FRAME(audio_frame);
				audio_frame->nb_samples = audio_input_position / info.channels;

				// Create a new array
				final_samples_planar = (int16_t*)av_malloc(sizeof(int16_t) * audio_frame->nb_samples * info.channels * (av_get_bytes_per_sample(output_sample_fmt) / av_get_bytes_per_sample(AV_SAMPLE_FMT_S16)));

				// Copy audio into buffer for frame
				memcpy(final_samples_planar, samples, audio_frame->nb_samples * info.channels * av_get_bytes_per_sample(output_sample_fmt));

				// Fill input frame with sample data
				avcodec_fill_audio_frame(audio_frame, info.channels, output_sample_fmt, (uint8_t *) final_samples_planar,
					audio_encoder_buffer_size, 0);

				// Create output frame (and allocate arrays)
				frame_final->nb_samples = audio_input_frame_size;
				av_samples_alloc(frame_final->data, frame_final->linesize, info.channels, frame_final->nb_samples, audio_codec->sample_fmt, 0);

				// Convert audio samples
				int nb_samples = avresample_convert(avr_planar, // audio resample context
					frame_final->data, // output data pointers
					frame_final->linesize[0], // output plane size, in bytes. (0 if unknown)
					frame_final->nb_samples, // maximum number of samples that the output buffer can hold
					audio_frame->data, // input data pointers
					audio_frame->linesize[0], // input plane size, in bytes (0 if unknown)
					audio_frame->nb_samples); // number of input samples to convert

				// Copy audio samples over original samples
				if (nb_samples > 0)
					memcpy(samples, frame_final->data[0], nb_samples * av_get_bytes_per_sample(audio_codec->sample_fmt) * info.channels);

				// deallocate AVFrame
				av_freep(&(audio_frame->data[0]));
				AV_FREE_FRAME(&audio_frame);
				all_queued_samples = NULL; // this array cleared with above call

				ZmqLogger::Instance()->AppendDebugMethod("FFmpegWriter::write_audio_packets (Successfully completed 2nd resampling for Planar formats)", "nb_samples", nb_samples, "", -1, "", -1, "", -1, "", -1, "", -1);

			} else {
				// Create a new array
				// NOTE(review): final_samples is allocated with new[] but ends up
				// attached to frame_final->data[0], which is later released via
				// av_freep below — allocator mismatch; confirm and fix separately.
				final_samples = new int16_t[audio_input_position * (av_get_bytes_per_sample(audio_codec->sample_fmt) / av_get_bytes_per_sample(AV_SAMPLE_FMT_S16))];

				// Copy audio into buffer for frame
				memcpy(final_samples, samples, audio_input_position * av_get_bytes_per_sample(audio_codec->sample_fmt));

				// Init the nb_samples property
				frame_final->nb_samples = audio_input_frame_size;

				// Fill the final_frame AVFrame with audio (non planar)
				avcodec_fill_audio_frame(frame_final, audio_codec->channels, audio_codec->sample_fmt, (uint8_t *) final_samples,
					audio_encoder_buffer_size, 0);
			}

			// Increment PTS (in samples)
			write_audio_count += FFMIN(audio_input_frame_size, audio_input_position);
			frame_final->pts = write_audio_count; // Set the AVFrame's PTS

			// Init the packet
			AVPacket pkt;
			av_init_packet(&pkt);
			pkt.data = audio_encoder_buffer;
			pkt.size = audio_encoder_buffer_size;

			// Set the packet's PTS prior to encoding
			pkt.pts = pkt.dts = write_audio_count;

			/* encode the audio samples */
			int got_packet_ptr = 0;

	#if IS_FFMPEG_3_2
			// Encode audio (latest version of FFmpeg, send/receive API)
			int error_code;
			int ret = 0;
			int frame_finished = 0;
			error_code = ret = avcodec_send_frame(audio_codec, frame_final);
			if (ret < 0 && ret != AVERROR(EINVAL) && ret != AVERROR_EOF) {
				// Unrecoverable send error: signal EOF to the encoder
				avcodec_send_frame(audio_codec, NULL);
			}
			else {
				if (ret >= 0)
					pkt.size = 0;
				ret = avcodec_receive_packet(audio_codec, &pkt);
				if (ret >= 0)
					frame_finished = 1;
				if(ret == AVERROR(EINVAL) || ret == AVERROR_EOF) {
					avcodec_flush_buffers(audio_codec);
					ret = 0;
				}
				if (ret >= 0) {
					ret = frame_finished;
				}
			}
			if (!pkt.data && !frame_finished)
			{
				ret = -1;
			}
			got_packet_ptr = ret;
	#else
			// Encode audio (older versions of FFmpeg)
			int error_code = avcodec_encode_audio2(audio_codec, &pkt, frame_final, &got_packet_ptr);
	#endif
			/* if zero size, it means the image was buffered */
			if (error_code == 0 && got_packet_ptr) {

				// Since the PTS can change during encoding, set the value again. This seems like a huge hack,
				// but it fixes lots of PTS related issues when I do this.
				pkt.pts = pkt.dts = write_audio_count;

				// Scale the PTS to the audio stream timebase (which is sometimes different than the codec's timebase)
				if (pkt.pts != AV_NOPTS_VALUE)
					pkt.pts = av_rescale_q(pkt.pts, audio_codec->time_base, audio_st->time_base);
				if (pkt.dts != AV_NOPTS_VALUE)
					pkt.dts = av_rescale_q(pkt.dts, audio_codec->time_base, audio_st->time_base);
				if (pkt.duration > 0)
					pkt.duration = av_rescale_q(pkt.duration, audio_codec->time_base, audio_st->time_base);

				// set stream
				pkt.stream_index = audio_st->index;
				pkt.flags |= AV_PKT_FLAG_KEY;

				/* write the compressed frame in the media file */
				// (this inner error_code intentionally shadows the encode result;
				// it reports only the mux error)
				int error_code = av_interleaved_write_frame(oc, &pkt);
				if (error_code < 0)
				{
					ZmqLogger::Instance()->AppendDebugMethod("FFmpegWriter::write_audio_packets ERROR [" + (string)av_err2str(error_code) + "]", "error_code", error_code, "", -1, "", -1, "", -1, "", -1, "", -1);
				}
			}

			// Report an encode-stage error (outer error_code)
			if (error_code < 0)
			{
				ZmqLogger::Instance()->AppendDebugMethod("FFmpegWriter::write_audio_packets ERROR [" + (string)av_err2str(error_code) + "]", "error_code", error_code, "", -1, "", -1, "", -1, "", -1, "", -1);
			}

			// deallocate AVFrame
			av_freep(&(frame_final->data[0]));
			AV_FREE_FRAME(&frame_final);

			// deallocate memory for packet
			AV_FREE_PACKET(&pkt);

			// Reset position
			audio_input_position = 0;
			final = false;
		}

		// Delete arrays (if needed)
		if (all_resampled_samples) {
			av_freep(&all_resampled_samples);
			all_resampled_samples = NULL;
		}
		if (all_queued_samples) {
			av_freep(&all_queued_samples);
			all_queued_samples = NULL;
		}

	} // end task
}
1460 
1461 // Allocate an AVFrame object
1462 AVFrame* FFmpegWriter::allocate_avframe(PixelFormat pix_fmt, int width, int height, int *buffer_size, uint8_t *new_buffer)
1463 {
1464  // Create an RGB AVFrame
1465  AVFrame *new_av_frame = NULL;
1466 
1467  // Allocate an AVFrame structure
1468  new_av_frame = AV_ALLOCATE_FRAME();
1469  if (new_av_frame == NULL)
1470  throw OutOfMemory("Could not allocate AVFrame", path);
1471 
1472  // Determine required buffer size and allocate buffer
1473  *buffer_size = AV_GET_IMAGE_SIZE(pix_fmt, width, height);
1474 
1475  // Create buffer (if not provided)
1476  if (!new_buffer)
1477  {
1478  // New Buffer
1479  new_buffer = (uint8_t*)av_malloc(*buffer_size * sizeof(uint8_t));
1480  // Attach buffer to AVFrame
1481  AV_COPY_PICTURE_DATA(new_av_frame, new_buffer, pix_fmt, width, height);
1482  new_av_frame->width = width;
1483  new_av_frame->height = height;
1484  new_av_frame->format = pix_fmt;
1485  }
1486 
1487  // return AVFrame
1488  return new_av_frame;
1489 }
1490 
// Process a video frame: convert the Frame's RGBA pixels to the output
// pixel format/size on a worker task, and queue the result for writing.
// @param frame  The source frame to rescale and convert
void FFmpegWriter::process_video_packet(std::shared_ptr<Frame> frame)
{
	// Determine the height & width of the source image
	int source_image_width = frame->GetWidth();
	int source_image_height = frame->GetHeight();

	// Do nothing if size is 1x1 (i.e. no image in this frame)
	if (source_image_height == 1 && source_image_width == 1)
		return;

	// Init rescalers (if not initialized yet)
	if (image_rescalers.size() == 0)
		InitScalers(source_image_width, source_image_height);

	// Get a unique rescaler (for this thread), cycling through the pool
	// so concurrent tasks don't share an SwsContext
	SwsContext *scaler = image_rescalers[rescaler_position];
	rescaler_position++;
	if (rescaler_position == num_of_rescalers)
		rescaler_position = 0;

	#pragma omp task firstprivate(frame, scaler, source_image_width, source_image_height)
	{
		// Allocate an RGB frame & final output frame
		int bytes_source = 0;
		int bytes_final = 0;
		AVFrame *frame_source = NULL;
		const uchar *pixels = NULL;

		// Get a list of pixels from source image
		pixels = frame->GetPixels();

		// Init AVFrame for source image & final (converted image)
		// (the source frame wraps the Frame's own pixel buffer; no copy yet)
		frame_source = allocate_avframe(PIX_FMT_RGBA, source_image_width, source_image_height, &bytes_source, (uint8_t*) pixels);
	#if IS_FFMPEG_3_2
		AVFrame *frame_final = allocate_avframe((AVPixelFormat)(video_st->codecpar->format), info.width, info.height, &bytes_final, NULL);
	#else
		AVFrame *frame_final = allocate_avframe(video_codec->pix_fmt, info.width, info.height, &bytes_final, NULL);
	#endif

		// Fill with data (attach the pixel buffer to the source AVFrame's planes)
		AV_COPY_PICTURE_DATA(frame_source, (uint8_t*)pixels, PIX_FMT_RGBA, source_image_width, source_image_height);
		ZmqLogger::Instance()->AppendDebugMethod("FFmpegWriter::process_video_packet", "frame->number", frame->number, "bytes_source", bytes_source, "bytes_final", bytes_final, "", -1, "", -1, "", -1);

		// Resize & convert pixel format
		sws_scale(scaler, frame_source->data, frame_source->linesize, 0,
			source_image_height, frame_final->data, frame_final->linesize);

		// Add resized AVFrame to av_frames map (guarded: the map is shared across tasks)
		#pragma omp critical (av_frames_section)
		add_avframe(frame, frame_final);

		// Deallocate memory (frame_final ownership passed to add_avframe)
		AV_FREE_FRAME(&frame_source);

	} // end task

}
1549 
1550 // write video frame
1551 bool FFmpegWriter::write_video_packet(std::shared_ptr<Frame> frame, AVFrame* frame_final)
1552 {
1553  ZmqLogger::Instance()->AppendDebugMethod("FFmpegWriter::write_video_packet", "frame->number", frame->number, "", -1, "", -1, "", -1, "", -1, "", -1);
1554 
1555  AVPacket pkt;
1556  av_init_packet(&pkt);
1557  pkt.data = NULL;
1558  pkt.size = 0;
1559  pkt.pts = pkt.dts = AV_NOPTS_VALUE;
1560 
1561  // Pointer for video buffer (if using old FFmpeg version)
1562  uint8_t *video_outbuf = NULL;
1563 
1564  // Increment PTS (in frames and scaled to the codec's timebase)
1565  write_video_count += av_rescale_q(1, (AVRational){info.fps.den, info.fps.num}, video_codec->time_base);
1566 
1567  // Assign the initial AVFrame PTS from the frame counter
1568  frame_final->pts = write_video_count;
1569 
1570  /* encode the image */
1571  int got_packet_ptr = 0;
1572  int error_code = 0;
1573  #if IS_FFMPEG_3_2
1574  // Write video packet (latest version of FFmpeg)
1575  int frameFinished = 0;
1576  int ret = avcodec_send_frame(video_codec, frame_final);
1577  error_code = ret;
1578  if (ret < 0 ) {
1579  ZmqLogger::Instance()->AppendDebugMethod("FFmpegWriter::write_video_packet (Frame not sent)", "", -1, "", -1, "", -1, "", -1, "", -1, "", -1);
1580  if (ret == AVERROR(EAGAIN) )
1581  cerr << "Frame EAGAIN" << "\n";
1582  if (ret == AVERROR_EOF )
1583  cerr << "Frame AVERROR_EOF" << "\n";
1584  avcodec_send_frame(video_codec, NULL);
1585  }
1586  else {
1587  while (ret >= 0) {
1588  ret = avcodec_receive_packet(video_codec, &pkt);
1589  if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
1590  avcodec_flush_buffers(video_codec);
1591  got_packet_ptr = 0;
1592  break;
1593  }
1594  if (ret == 0) {
1595  got_packet_ptr = 1;
1596  break;
1597  }
1598  }
1599  }
1600  #else
1601  #if LIBAVFORMAT_VERSION_MAJOR >= 54
1602  // Write video packet (older than FFmpeg 3.2)
1603  error_code = avcodec_encode_video2(video_codec, &pkt, frame_final, &got_packet_ptr);
1604  if (error_code != 0 )
1605  cerr << "Frame AVERROR_EOF" << "\n";
1606  if (got_packet_ptr == 0 )
1607  cerr << "Frame gotpacket error" << "\n";
1608  #else
1609  // Write video packet (even older versions of FFmpeg)
1610  int video_outbuf_size = 200000;
1611  video_outbuf = (uint8_t*) av_malloc(200000);
1612 
1613  /* encode the image */
1614  int out_size = avcodec_encode_video(video_codec, video_outbuf, video_outbuf_size, frame_final);
1615 
1616  /* if zero size, it means the image was buffered */
1617  if (out_size > 0) {
1618  if(video_codec->coded_frame->key_frame)
1619  pkt.flags |= AV_PKT_FLAG_KEY;
1620  pkt.data= video_outbuf;
1621  pkt.size= out_size;
1622 
1623  // got data back (so encode this frame)
1624  got_packet_ptr = 1;
1625  }
1626  #endif
1627  #endif
1628 
1629  /* if zero size, it means the image was buffered */
1630  if (error_code == 0 && got_packet_ptr) {
1631 
1632  // Since the PTS can change during encoding, set the value again. This seems like a huge hack,
1633  // but it fixes lots of PTS related issues when I do this.
1634  //pkt.pts = pkt.dts = write_video_count;
1635 
1636  // set the timestamp
1637  if (pkt.pts != AV_NOPTS_VALUE)
1638  pkt.pts = av_rescale_q(pkt.pts, video_codec->time_base, video_st->time_base);
1639  if (pkt.dts != AV_NOPTS_VALUE)
1640  pkt.dts = av_rescale_q(pkt.dts, video_codec->time_base, video_st->time_base);
1641  if (pkt.duration > 0)
1642  pkt.duration = av_rescale_q(pkt.duration, video_codec->time_base, video_st->time_base);
1643  pkt.stream_index = video_st->index;
1644 
1645  /* write the compressed frame in the media file */
1646  int error_code = av_interleaved_write_frame(oc, &pkt);
1647  if (error_code < 0)
1648  {
1649  ZmqLogger::Instance()->AppendDebugMethod("FFmpegWriter::write_video_packet ERROR [" + (string)av_err2str(error_code) + "]", "error_code", error_code, "", -1, "", -1, "", -1, "", -1, "", -1);
1650  return false;
1651  }
1652  }
1653 
1654  // Deallocate memory (if needed)
1655  if (video_outbuf)
1656  delete[] video_outbuf;
1657 
1658  // Deallocate packet
1659  AV_FREE_PACKET(&pkt);
1660 
1661  // Success
1662  return true;
1663 }
1664 
1665 // Output the ffmpeg info about this format, streams, and codecs (i.e. dump format)
1667 {
1668  // output debug info
1669  av_dump_format(oc, 0, path.c_str(), 1);
1670 }
1671 
1672 // Init a collection of software rescalers (thread safe)
1673 void FFmpegWriter::InitScalers(int source_width, int source_height)
1674 {
1675  // Init software rescalers vector (many of them, one for each thread)
1676  for (int x = 0; x < num_of_rescalers; x++)
1677  {
1678  // Init the software scaler from FFMpeg
1679  img_convert_ctx = sws_getContext(source_width, source_height, PIX_FMT_RGBA, info.width, info.height, AV_GET_CODEC_PIXEL_FORMAT(video_st, video_st->codec), SWS_BILINEAR, NULL, NULL, NULL);
1680 
1681  // Add rescaler to vector
1682  image_rescalers.push_back(img_convert_ctx);
1683  }
1684 }
1685 
1686 // Set audio resample options
1687 void FFmpegWriter::ResampleAudio(int sample_rate, int channels) {
1688  original_sample_rate = sample_rate;
1689  original_channels = channels;
1690 }
1691 
1692 // Remove & deallocate all software scalers
1694 {
1695  // Close all rescalers
1696  for (int x = 0; x < num_of_rescalers; x++)
1697  sws_freeContext(image_rescalers[x]);
1698 
1699  // Clear vector
1700  image_rescalers.clear();
1701 }
#define AV_RESET_FRAME(av_frame)
int channels
The number of audio channels used in the audio stream.
Definition: WriterBase.h:72
A video stream (used to determine which type of stream)
Definition: FFmpegWriter.h:64
#define AV_FREE_FRAME(av_frame)
void SetOption(StreamType stream, string name, string value)
Set custom options (some codecs accept additional params). This must be called after the PrepareStrea...
int num
Numerator for the fraction.
Definition: Fraction.h:44
WriterInfo info
Information about the current media file.
Definition: WriterBase.h:93
void OutputStreamInfo()
Output the ffmpeg info about this format, streams, and codecs (i.e. dump format)
#define AV_FIND_DECODER_CODEC_ID(av_stream)
An audio stream (used to determine which type of stream)
Definition: FFmpegWriter.h:65
int video_bit_rate
The bit rate of the video stream (in bytes)
Definition: WriterBase.h:60
Fraction pixel_ratio
The pixel ratio of the video stream as a fraction (i.e. some pixels are not square) ...
Definition: WriterBase.h:61
Exception when an invalid # of audio channels are detected.
Definition: Exceptions.h:112
#define AV_COPY_PICTURE_DATA(av_frame, buffer, pix_fmt, width, height)
std::map< string, string > metadata
An optional map/dictionary of video & audio metadata.
Definition: WriterBase.h:76
#define PIX_FMT_RGB24
string acodec
The name of the audio codec used to encode / decode the video stream.
Definition: WriterBase.h:69
#define AV_GET_CODEC_PIXEL_FORMAT(av_stream, av_context)
string vcodec
The name of the video codec used to encode / decode the video stream.
Definition: WriterBase.h:63
void Reduce()
Reduce this fraction (i.e. 640/480 = 4/3)
Definition: Fraction.cpp:71
#define AV_GET_CODEC_FROM_STREAM(av_stream, codec_in)
#define AV_OPTION_FIND(priv_data, name)
#define AVCODEC_MAX_AUDIO_FRAME_SIZE
This abstract class is the base class, used by all readers in libopenshot.
Definition: ReaderBase.h:96
int width
The width of the video (in pixels)
Definition: WriterBase.h:57
int audio_bit_rate
The bit rate of the audio stream (in bytes)
Definition: WriterBase.h:70
#define OPEN_MP_NUM_PROCESSORS
void WriteFrame(std::shared_ptr< Frame > frame)
Add a frame to the stack waiting to be encoded.
Exception when encoding audio packet.
Definition: Exceptions.h:101
Exception when an invalid sample rate is detected during encoding.
Definition: Exceptions.h:172
Fraction video_timebase
The video timebase determines how long each frame stays on the screen.
Definition: WriterBase.h:66
void Open()
Open writer.
void SetVideoOptions(bool has_video, string codec, Fraction fps, int width, int height, Fraction pixel_ratio, bool interlaced, bool top_field_first, int bit_rate)
Set video export options.
virtual std::shared_ptr< Frame > GetFrame(int64_t number)=0
#define AV_FREE_CONTEXT(av_context)
Exception when no valid codec is found for a file.
Definition: Exceptions.h:122
Exception when memory could not be allocated.
Definition: Exceptions.h:224
#define AV_GET_CODEC_ATTRIBUTES(av_stream, av_context)
Exception when invalid encoding options are used.
Definition: Exceptions.h:162
#define AV_FREE_PACKET(av_packet)
Exception when no streams are found in the file.
Definition: Exceptions.h:192
void RemoveScalers()
Remove & deallocate all software scalers.
#define AV_ALLOCATE_FRAME()
bool top_field_first
Which interlaced field should be displayed first.
Definition: WriterBase.h:68
Exception for files that can not be found or opened.
Definition: Exceptions.h:132
FFmpegWriter(string path)
Constructor for FFmpegWriter. Throws one of the following exceptions.
void AppendDebugMethod(string method_name, string arg1_name, float arg1_value, string arg2_name, float arg2_value, string arg3_name, float arg3_value, string arg4_name, float arg4_value, string arg5_name, float arg5_value, string arg6_name, float arg6_value)
Append debug information.
Definition: ZmqLogger.cpp:162
This class represents a fraction.
Definition: Fraction.h:42
void ResampleAudio(int sample_rate, int channels)
Set audio resample options.
static bool IsValidCodec(string codec_name)
Determine if codec name is valid.
Fraction display_ratio
The ratio of width to height of the video stream (i.e. 640x480 has a ratio of 4/3) ...
Definition: WriterBase.h:62
ChannelLayout
This enumeration determines the audio channel layout (such as stereo, mono, 5 point surround...
#define PixelFormat
void WriteTrailer()
Write the file trailer (after all frames are written). This is called automatically by the Close() me...
#define av_err2str(errnum)
void WriteHeader()
Write the file header (after the options are set). This method is called automatically by the Open() ...
void Close()
Close the writer.
bool interlaced_frame
Are the contents of this frame interlaced.
Definition: WriterBase.h:67
int sample_rate
The number of audio samples per second (44100 is a common sample rate)
Definition: WriterBase.h:71
#define AV_GET_IMAGE_SIZE(pix_fmt, width, height)
#define AV_OUTPUT_CONTEXT(output_context, path)
bool has_video
Determines if this file has a video stream.
Definition: WriterBase.h:51
Fraction fps
Frames per second, as a fraction (i.e. 24/1 = 24 fps)
Definition: WriterBase.h:59
static ZmqLogger * Instance()
Create or get an instance of this logger singleton (invoke the class with this method) ...
Definition: ZmqLogger.cpp:38
This namespace is the default namespace for all code in the openshot library.
#define AV_GET_CODEC_TYPE(av_stream)
#define AV_OPTION_SET(av_stream, priv_data, name, value, avcodec)
bool has_audio
Determines if this file has an audio stream.
Definition: WriterBase.h:52
#define PIX_FMT_RGBA
void SetAudioOptions(bool has_audio, string codec, int sample_rate, int channels, ChannelLayout channel_layout, int bit_rate)
Set audio export options.
Exception when a writer is closed, and a frame is requested.
Definition: Exceptions.h:264
ChannelLayout channel_layout
The channel layout (mono, stereo, 5 point surround, etc...)
Definition: WriterBase.h:73
void PrepareStreams()
Prepare & initialize streams and open codecs. This method is called automatically by the Open() metho...
int height
The height of the video (in pixels)
Definition: WriterBase.h:56
#define PIX_FMT_NONE
int den
Denominator for the fraction.
Definition: Fraction.h:45
#define AV_FORMAT_NEW_STREAM(oc, st_codec, av_codec, av_st)
Exception when no valid format is found for a file.
Definition: Exceptions.h:142
#define AUDIO_PACKET_ENCODING_SIZE
StreamType
This enumeration designates the type of stream when encoding (video or audio)
Definition: FFmpegWriter.h:62
#define AV_GET_CODEC_PAR_CONTEXT(av_stream, av_codec)
#define AV_COPY_PARAMS_FROM_CONTEXT(av_stream, av_codec)