// Copyright (c) 2008-2019 OpenShot Studios, LLC
//
// SPDX-License-Identifier: LGPL-3.0-or-later

#include "Clip.h"

#include "AudioResampler.h"
#include "Exceptions.h"
#include "FFmpegReader.h"
#include "FrameMapper.h"
#include "QtImageReader.h"
#include "ChunkReader.h"
#include "DummyReader.h"
#include "Timeline.h"
#include "ZmqLogger.h"

#include <algorithm>
#include <cmath>

#ifdef USE_IMAGEMAGICK
    #include "MagickUtilities.h"
    #include "ImageReader.h"
    #include "TextReader.h"
#endif

#include <Qt>

using namespace openshot;
namespace {
    struct CompositeChoice { const char* name; CompositeType value; };
    const CompositeChoice composite_choices[] = {
        {"Normal", COMPOSITE_SOURCE_OVER},

        // Darken group
        {"Darken", COMPOSITE_DARKEN},
        {"Multiply", COMPOSITE_MULTIPLY},
        {"Color Burn", COMPOSITE_COLOR_BURN},

        // Lighten group
        {"Lighten", COMPOSITE_LIGHTEN},
        {"Screen", COMPOSITE_SCREEN},
        {"Color Dodge", COMPOSITE_COLOR_DODGE},
        {"Add", COMPOSITE_PLUS},

        // Contrast group
        {"Overlay", COMPOSITE_OVERLAY},
        {"Soft Light", COMPOSITE_SOFT_LIGHT},
        {"Hard Light", COMPOSITE_HARD_LIGHT},

        // Compare
        {"Difference", COMPOSITE_DIFFERENCE},
        {"Exclusion", COMPOSITE_EXCLUSION},
    };
    const int composite_choices_count = sizeof(composite_choices)/sizeof(CompositeChoice);
}
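
// Illustrative note (not part of the library): the table above drives the
// "Composite" dropdown built in PropertiesJSON() below, and each CompositeType
// value is later fed to QPainter::setCompositionMode(). A hypothetical helper
// resolving a display name back to its enum value could look like:
//
//     CompositeType find_composite(const std::string& name) {
//         for (int i = 0; i < composite_choices_count; ++i)
//             if (name == composite_choices[i].name)
//                 return composite_choices[i].value;
//         return COMPOSITE_SOURCE_OVER; // default blend mode
//     }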

// Init default settings for a clip
void Clip::init_settings()
{
    // Init clip settings
    Position(0.0);
    Layer(0);
    Start(0.0);
    ClipBase::End(0.0);
    gravity = GRAVITY_CENTER;
    scale = SCALE_FIT;
    anchor = ANCHOR_CANVAS;
    display = FRAME_DISPLAY_NONE;
    mixing = VOLUME_MIX_NONE;
    composite = COMPOSITE_SOURCE_OVER;
    waveform = false;
    parentObjectId = "";

    // Init scale curves
    scale_x = Keyframe(1.0);
    scale_y = Keyframe(1.0);

    // Init location curves
    location_x = Keyframe(0.0);
    location_y = Keyframe(0.0);

    // Init alpha
    alpha = Keyframe(1.0);

    // Init time & volume
    time = Keyframe(1.0);
    volume = Keyframe(1.0);

    // Init audio waveform color
    wave_color = Color((unsigned char)0, (unsigned char)123, (unsigned char)255, (unsigned char)255);

    // Init shear and perspective curves
    shear_x = Keyframe(0.0);
    shear_y = Keyframe(0.0);
    origin_x = Keyframe(0.5);
    origin_y = Keyframe(0.5);
    perspective_c1_x = Keyframe(-1.0);
    perspective_c1_y = Keyframe(-1.0);
    perspective_c2_x = Keyframe(-1.0);
    perspective_c2_y = Keyframe(-1.0);
    perspective_c3_x = Keyframe(-1.0);
    perspective_c3_y = Keyframe(-1.0);
    perspective_c4_x = Keyframe(-1.0);
    perspective_c4_y = Keyframe(-1.0);

    // Init audio channel filter and mappings
    channel_filter = Keyframe(-1.0);
    channel_mapping = Keyframe(-1.0);

    // Init audio and video overrides
    has_audio = Keyframe(-1.0);
    has_video = Keyframe(-1.0);

    // Initialize the attached object and attached clip as null pointers
    parentTrackedObject = nullptr;
    parentClipObject = NULL;

    // Init reader info struct
    init_reader_settings();
}

// Init reader info details
void Clip::init_reader_settings() {
    if (reader) {
        // Init rotation (if any)
        init_reader_rotation();

        // Initialize info struct
        info = reader->info;

        // Init cache
        final_cache.SetMaxBytesFromInfo(OPEN_MP_NUM_PROCESSORS, info.width, info.height, info.sample_rate, info.channels);
    }
}

void Clip::init_reader_rotation() {
    // Only apply metadata rotation if clip rotation has not been explicitly set.
    if (rotation.GetCount() > 0 || !reader)
        return;

    const auto rotate_meta = reader->info.metadata.find("rotate");
    if (rotate_meta == reader->info.metadata.end()) {
        // Ensure rotation keyframes always start with a default 0° point.
        rotation = Keyframe(0.0f);
        return;
    }

    float rotate_angle = 0.0f;
    try {
        rotate_angle = strtof(rotate_meta->second.c_str(), nullptr);
    } catch (const std::exception& e) {
        return; // ignore invalid metadata
    }

    rotation = Keyframe(rotate_angle);

    // Do not overwrite user-authored scale curves.
    auto has_default_scale = [](const Keyframe& kf) {
        return kf.GetCount() == 1 && fabs(kf.GetPoint(0).co.Y - 1.0) < 0.00001;
    };
    if (!has_default_scale(scale_x) || !has_default_scale(scale_y))
        return;

    // No need to adjust scaling when the metadata rotation is effectively zero.
    if (fabs(rotate_angle) < 0.0001f)
        return;

    float w = static_cast<float>(reader->info.width);
    float h = static_cast<float>(reader->info.height);
    if (w <= 0.0f || h <= 0.0f)
        return;

    float rad = rotate_angle * static_cast<float>(M_PI) / 180.0f;

    float new_width = fabs(w * cos(rad)) + fabs(h * sin(rad));
    float new_height = fabs(w * sin(rad)) + fabs(h * cos(rad));
    if (new_width <= 0.0f || new_height <= 0.0f)
        return;

    float uniform_scale = std::min(w / new_width, h / new_height);

    scale_x = Keyframe(uniform_scale);
    scale_y = Keyframe(uniform_scale);
}
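
// Worked example (illustrative): a 1920x1080 source with "rotate=90" metadata
// yields a rotated bounding box of 1080x1920. The uniform scale becomes
// min(1920/1080, 1080/1920) = 0.5625, so the rotated image (607.5x1080 after
// scaling) still fits inside the original 1920x1080 canvas.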

// Default Constructor for a clip
Clip::Clip() : resampler(NULL), reader(NULL), allocated_reader(NULL), is_open(false)
{
    // Init all default settings
    init_settings();
}

// Constructor with reader
Clip::Clip(ReaderBase* new_reader) : resampler(NULL), reader(new_reader), allocated_reader(NULL), is_open(false)
{
    // Init all default settings
    init_settings();

    // Open and Close the reader (to set the duration of the clip)
    Open();
    Close();

    // Update duration and set parent
    if (reader) {
        ClipBase::End(reader->info.duration);
        reader->ParentClip(this);
        // Init reader info struct
        init_reader_settings();
    }
}

// Constructor with filepath
Clip::Clip(std::string path) : resampler(NULL), reader(NULL), allocated_reader(NULL), is_open(false)
{
    // Init all default settings
    init_settings();

    // Get file extension (and convert to lower case)
    std::string ext = get_file_extension(path);
    std::transform(ext.begin(), ext.end(), ext.begin(), ::tolower);

    // Determine if common video formats (or image sequences)
    if (ext=="avi" || ext=="mov" || ext=="mkv" || ext=="mpg" || ext=="mpeg" || ext=="mp3" || ext=="mp4" || ext=="mts" ||
        ext=="ogg" || ext=="wav" || ext=="wmv" || ext=="webm" || ext=="vob" || ext=="gif" || path.find("%") != std::string::npos)
    {
        try
        {
            // Open common video format
            reader = new openshot::FFmpegReader(path);

        } catch(...) { }
    }
    if (ext=="osp")
    {
        try
        {
            // Open OpenShot project file (Timeline)
            reader = new openshot::Timeline(path, true);

        } catch(...) { }
    }

    // If no reader was found yet, try each remaining reader type
    if (!reader)
    {
        try
        {
            // Try an image reader
            reader = new openshot::QtImageReader(path);

        } catch(...) {
            try
            {
                // Try a video reader
                reader = new openshot::FFmpegReader(path);

            } catch(...) { }
        }
    }

    // Update duration and set parent
    if (reader) {
        ClipBase::End(reader->info.duration);
        reader->ParentClip(this);
        allocated_reader = reader;
        // Init reader info struct
        init_reader_settings();
    }
}
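
// Usage sketch (illustrative, not part of the library): the path constructor
// probes readers automatically. Assumes a local file "video.mp4" exists.
//
//     openshot::Clip clip("video.mp4");
//     clip.Open();
//     std::shared_ptr<openshot::Frame> first = clip.GetFrame(1);
//     clip.Close();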

// Destructor
Clip::~Clip()
{
    // Delete the reader if clip created it
    if (allocated_reader) {
        delete allocated_reader;
        allocated_reader = NULL;
        reader = NULL;
    }

    // Close the resampler
    if (resampler) {
        delete resampler;
        resampler = NULL;
    }

    // Close clip
    Close();
}

// Attach clip to bounding box
void Clip::AttachToObject(std::string object_id)
{
    // Search for the tracked object on the timeline
    Timeline* parentTimeline = static_cast<Timeline *>(ParentTimeline());

    if (parentTimeline) {
        // Create a smart pointer to the tracked object from the timeline
        std::shared_ptr<openshot::TrackedObjectBase> trackedObject = parentTimeline->GetTrackedObject(object_id);
        Clip* clipObject = parentTimeline->GetClip(object_id);

        // Check for valid tracked object
        if (trackedObject){
            SetAttachedObject(trackedObject);
            parentClipObject = NULL;
        }
        else if (clipObject) {
            SetAttachedClip(clipObject);
            parentTrackedObject = nullptr;
        }
    }
}

// Set the pointer to the trackedObject this clip is attached to
void Clip::SetAttachedObject(std::shared_ptr<openshot::TrackedObjectBase> trackedObject){
    parentTrackedObject = trackedObject;
}

// Set the pointer to the clip this clip is attached to
void Clip::SetAttachedClip(Clip* clipObject){
    parentClipObject = clipObject;
}

// Set the current reader
void Clip::Reader(ReaderBase* new_reader)
{
    // Delete previously allocated reader (if not related to new reader)
    // FrameMappers that point to the same allocated reader are ignored
    bool is_same_reader = false;
    if (new_reader && allocated_reader) {
        if (new_reader->Name() == "FrameMapper") {
            // Determine if FrameMapper is pointing at the same allocated reader
            FrameMapper* clip_mapped_reader = static_cast<FrameMapper*>(new_reader);
            if (allocated_reader == clip_mapped_reader->Reader()) {
                is_same_reader = true;
            }
        }
    }
    // Clear existing allocated reader (if different)
    if (allocated_reader && !is_same_reader) {
        reader->Close();
        allocated_reader->Close();
        delete allocated_reader;
        reader = NULL;
        allocated_reader = NULL;
    }

    // Set reader pointer
    reader = new_reader;

    // Set parent
    if (reader) {
        reader->ParentClip(this);

        // Init reader info struct
        init_reader_settings();
    }
}
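
// Usage sketch (illustrative): wrapping the clip's reader in a FrameMapper to
// retime it is a common pattern; the same-reader check above keeps the
// underlying allocated reader alive in that case. Assumes a 30 fps,
// 44.1 kHz stereo target.
//
//     openshot::FrameMapper* mapper = new openshot::FrameMapper(
//         clip.Reader(), openshot::Fraction(30, 1), openshot::PULLDOWN_NONE,
//         44100, 2, openshot::LAYOUT_STEREO);
//     clip.Reader(mapper);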

// Get the current reader
ReaderBase* Clip::Reader()
{
    if (reader)
        return reader;
    else
        // Throw error if reader not initialized
        throw ReaderClosed("No Reader has been initialized for this Clip. Call Reader(*reader) before calling this method.");
}
380 
381 // Open the internal reader
383 {
384  if (reader)
385  {
386  // Open the reader
387  reader->Open();
388  is_open = true;
389 
390  // Copy Reader info to Clip
391  info = reader->info;
392 
393  // Set some clip properties from the file reader
394  if (end == 0.0)
395  ClipBase::End(reader->info.duration);
396  }
397  else
398  // Throw error if reader not initialized
399  throw ReaderClosed("No Reader has been initialized for this Clip. Call Reader(*reader) before calling this method.");
400 }
401 
402 // Close the internal reader
404 {
405  if (is_open && reader) {
406  ZmqLogger::Instance()->AppendDebugMethod("Clip::Close");
407 
408  // Close the reader
409  reader->Close();
410  }
411 
412  // Clear cache
413  final_cache.Clear();
414  is_open = false;
415 }

// Get end position of clip (trim end of video), which can be affected by the time curve.
float Clip::End() const
{
    // If a time curve is present, use its length
    if (time.GetCount() > 1)
    {
        // Determine the FPS of this clip
        float fps = 24.0;
        if (reader)
            // file reader
            fps = reader->info.fps.ToFloat();
        else
            // Throw error if reader not initialized
            throw ReaderClosed("No Reader has been initialized for this Clip. Call Reader(*reader) before calling this method.");

        return float(time.GetLength()) / fps;
    }
    else
        // Just use the duration (as detected by the reader)
        return end;
}

// Override End() position
void Clip::End(float value) {
    ClipBase::End(value);
}
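
// Worked example (illustrative): with a time curve spanning 240 output frames
// and a 24 fps reader, End() returns 240 / 24 = 10.0 seconds, regardless of
// the reader's original duration.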

// Set associated Timeline pointer
void Clip::ParentTimeline(openshot::TimelineBase* new_timeline) {
    timeline = new_timeline;

    // Clear cache (it might have changed)
    final_cache.Clear();
}

// Create an openshot::Frame object for a specific frame number of this reader.
std::shared_ptr<Frame> Clip::GetFrame(int64_t clip_frame_number)
{
    // Call override of GetFrame
    return GetFrame(NULL, clip_frame_number, NULL);
}

// Create an openshot::Frame object for a specific frame number of this reader.
// NOTE: background_frame is ignored in this method (this method is only used by Effect classes)
std::shared_ptr<Frame> Clip::GetFrame(std::shared_ptr<openshot::Frame> background_frame, int64_t clip_frame_number)
{
    // Call override of GetFrame
    return GetFrame(background_frame, clip_frame_number, NULL);
}

// Use an existing openshot::Frame object and draw this Clip's frame onto it
std::shared_ptr<Frame> Clip::GetFrame(std::shared_ptr<openshot::Frame> background_frame, int64_t clip_frame_number, openshot::TimelineInfoStruct* options)
{
    // Check for open reader (or throw exception)
    if (!is_open)
        throw ReaderClosed("The Clip is closed. Call Open() before calling this method.");

    if (reader)
    {
        // Get frame object
        std::shared_ptr<Frame> frame = NULL;

        // Check cache
        frame = final_cache.GetFrame(clip_frame_number);
        if (!frame) {
            // Generate clip frame
            frame = GetOrCreateFrame(clip_frame_number);

            // Get frame size and frame #
            int64_t timeline_frame_number = clip_frame_number;
            QSize timeline_size(frame->GetWidth(), frame->GetHeight());
            if (background_frame) {
                // If a background frame is provided, use it instead
                timeline_frame_number = background_frame->number;
                timeline_size.setWidth(background_frame->GetWidth());
                timeline_size.setHeight(background_frame->GetHeight());
            }

            // Get time mapped frame object (used to increase speed, change direction, etc...)
            apply_timemapping(frame);

            // Apply waveform image (if any)
            apply_waveform(frame, timeline_size);

            // Apply effects BEFORE applying keyframes (if any local or global effects are used)
            apply_effects(frame, timeline_frame_number, options, true);

            // Apply keyframe / transforms to current clip image
            apply_keyframes(frame, timeline_size);

            // Apply effects AFTER applying keyframes (if any local or global effects are used)
            apply_effects(frame, timeline_frame_number, options, false);

            // Add final frame to cache (before flattening into background_frame)
            final_cache.Add(frame);
        }

        if (!background_frame) {
            // Create missing background_frame w/ transparent color (if needed)
            background_frame = std::make_shared<Frame>(frame->number, frame->GetWidth(), frame->GetHeight(),
                                                       "#00000000", frame->GetAudioSamplesCount(),
                                                       frame->GetAudioChannelsCount());
        }

        // Apply background canvas (i.e. flatten this image onto previous layer image)
        apply_background(frame, background_frame);

        // Return processed 'frame'
        return frame;
    }
    else
        // Throw error if reader not initialized
        throw ReaderClosed("No Reader has been initialized for this Clip. Call Reader(*reader) before calling this method.");
}
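
// Note (illustrative summary): the per-frame pipeline above runs, in order:
// time-mapping -> waveform -> pre-keyframe effects -> keyframes/transforms ->
// post-keyframe effects -> cache -> composite onto the background canvas.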

// Look up an effect by ID
openshot::EffectBase* Clip::GetEffect(const std::string& id)
{
    // Find the matching effect (if any)
    for (const auto& effect : effects) {
        if (effect->Id() == id) {
            return effect;
        }
    }
    return nullptr;
}

// Return the associated ParentClip (if any)
openshot::Clip* Clip::GetParentClip() {
    if (!parentObjectId.empty() && (!parentClipObject && !parentTrackedObject)) {
        // Attach parent clip OR object to this clip
        AttachToObject(parentObjectId);
    }
    return parentClipObject;
}

// Return the associated Parent Tracked Object (if any)
std::shared_ptr<openshot::TrackedObjectBase> Clip::GetParentTrackedObject() {
    if (!parentObjectId.empty() && (!parentClipObject && !parentTrackedObject)) {
        // Attach parent clip OR object to this clip
        AttachToObject(parentObjectId);
    }
    return parentTrackedObject;
}

// Get file extension
std::string Clip::get_file_extension(std::string path)
{
    // Return last part of path safely (handle filenames without a dot)
    const auto dot_pos = path.find_last_of('.');
    if (dot_pos == std::string::npos || dot_pos + 1 >= path.size()) {
        return std::string();
    }

    return path.substr(dot_pos + 1);
}
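
// Example (illustrative): "clips/video.MP4" -> "MP4" (the path constructor
// above lower-cases the result to "mp4"); "README" or "archive." -> ""
// (no usable extension).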

// Adjust the audio and image of a time mapped frame
void Clip::apply_timemapping(std::shared_ptr<Frame> frame)
{
    // Check for valid reader
    if (!reader)
        // Throw error if reader not initialized
        throw ReaderClosed("No Reader has been initialized for this Clip. Call Reader(*reader) before calling this method.");

    // Check for a valid time map curve
    if (time.GetLength() > 1)
    {
        const std::lock_guard<std::recursive_mutex> lock(getFrameMutex);

        int64_t clip_frame_number = frame->number;
        int64_t new_frame_number = adjust_frame_number_minimum(time.GetLong(clip_frame_number));

        // Create buffer
        juce::AudioBuffer<float> *source_samples = nullptr;

        // Get delta (difference from this frame to the next time mapped frame: Y value)
        double delta = time.GetDelta(clip_frame_number + 1);
        const bool prev_is_increasing = time.IsIncreasing(clip_frame_number);
        const bool is_increasing = time.IsIncreasing(clip_frame_number + 1);

        // Determine length of source audio (in samples)
        // A delta of 1.0 == normal expected samples
        // A delta of 0.5 == 50% of normal expected samples
        // A delta of 2.0 == 200% of normal expected samples
        int target_sample_count = Frame::GetSamplesPerFrame(adjust_timeline_framenumber(clip_frame_number), Reader()->info.fps,
                                                            Reader()->info.sample_rate,
                                                            Reader()->info.channels);
        int source_sample_count = round(target_sample_count * fabs(delta));

        // Determine starting audio location
        AudioLocation location;
        if (previous_location.frame == 0 || abs(new_frame_number - previous_location.frame) > 2 || prev_is_increasing != is_increasing) {
            // No previous location OR gap detected
            location.frame = new_frame_number;
            location.sample_start = 0;

            // Create / Reset resampler
            // We don't want to interpolate between unrelated audio data
            if (resampler) {
                delete resampler;
                resampler = nullptr;
            }
            // Init resampler with # channels from Reader (should match the timeline)
            resampler = new AudioResampler(Reader()->info.channels);

            // Allocate a buffer of silence to initialize some data inside the resampler,
            // to prevent it from becoming input limited
            juce::AudioBuffer<float> init_samples(Reader()->info.channels, 64);
            init_samples.clear();
            resampler->SetBuffer(&init_samples, 1.0);
            resampler->GetResampledBuffer();

        } else {
            // Use previous location
            location = previous_location;
        }

        if (source_sample_count <= 0) {
            // Add silence and bail (we don't need any samples)
            frame->AddAudioSilence(target_sample_count);
            return;
        }

        // Allocate a new sample buffer for these delta frames
        source_samples = new juce::AudioBuffer<float>(Reader()->info.channels, source_sample_count);
        source_samples->clear();

        // Determine ending audio location
        int remaining_samples = source_sample_count;
        int source_pos = 0;
        while (remaining_samples > 0) {
            std::shared_ptr<Frame> source_frame = GetOrCreateFrame(location.frame, false);
            int frame_sample_count = source_frame->GetAudioSamplesCount() - location.sample_start;

            // Inform FrameMapper of the direction for THIS mapper frame
            if (auto *fm = dynamic_cast<FrameMapper*>(reader)) {
                fm->SetDirectionHint(is_increasing);
            }
            source_frame->SetAudioDirection(is_increasing);

            if (frame_sample_count == 0) {
                // No samples found in source frame (fill with silence)
                if (is_increasing) {
                    location.frame++;
                } else {
                    location.frame--;
                }
                location.sample_start = 0;
                break;
            }
            if (remaining_samples - frame_sample_count >= 0) {
                // Use all frame samples & increment location
                for (int channel = 0; channel < source_frame->GetAudioChannelsCount(); channel++) {
                    source_samples->addFrom(channel, source_pos, source_frame->GetAudioSamples(channel) + location.sample_start, frame_sample_count, 1.0f);
                }
                if (is_increasing) {
                    location.frame++;
                } else {
                    location.frame--;
                }
                location.sample_start = 0;
                remaining_samples -= frame_sample_count;
                source_pos += frame_sample_count;

            } else {
                // Use just what is needed
                for (int channel = 0; channel < source_frame->GetAudioChannelsCount(); channel++) {
                    source_samples->addFrom(channel, source_pos, source_frame->GetAudioSamples(channel) + location.sample_start, remaining_samples, 1.0f);
                }
                location.sample_start += remaining_samples;
                source_pos += remaining_samples;
                remaining_samples = 0;
            }

        }

        // Resize audio for current frame object + fill with silence
        // (we are about to overwrite this with actual, possibly resampled, audio data)
        frame->AddAudioSilence(target_sample_count);

        if (source_sample_count != target_sample_count) {
            // Resample audio (if needed)
            double resample_ratio = double(source_sample_count) / double(target_sample_count);
            resampler->SetBuffer(source_samples, resample_ratio);

            // Resample the data
            juce::AudioBuffer<float> *resampled_buffer = resampler->GetResampledBuffer();

            // Fill the frame with resampled data
            for (int channel = 0; channel < Reader()->info.channels; channel++) {
                // Add the resampled samples to the frame object
                frame->AddAudio(true, channel, 0, resampled_buffer->getReadPointer(channel, 0), std::min(resampled_buffer->getNumSamples(), target_sample_count), 1.0f);
            }
        } else {
            // Fill the frame
            for (int channel = 0; channel < Reader()->info.channels; channel++) {
                // Add the source samples to the frame object
                frame->AddAudio(true, channel, 0, source_samples->getReadPointer(channel, 0), target_sample_count, 1.0f);
            }
        }

        // Clean up
        delete source_samples;

        // Set previous location
        previous_location = location;
    }
}
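
// Worked example (illustrative): at 44100 Hz / 24 fps a frame expects about
// 1838 samples. A time-curve delta of 2.0 (2x speed) gathers
// round(1838 * 2.0) = 3676 source samples, which the resampler compresses
// back to 1838 with ratio 3676/1838 = 2.0; a delta of 0.5 instead gathers
// 919 samples and stretches them with ratio 0.5.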

// Adjust frame number minimum value
int64_t Clip::adjust_frame_number_minimum(int64_t frame_number)
{
    // Never return a frame number 0 or below
    if (frame_number < 1)
        return 1;
    else
        return frame_number;
}

// Get or generate a blank frame
std::shared_ptr<Frame> Clip::GetOrCreateFrame(int64_t number, bool enable_time)
{
    try {
        // Init to requested frame
        int64_t clip_frame_number = adjust_frame_number_minimum(number);
        bool is_increasing = true;

        // Adjust for time-mapping (if any)
        if (enable_time && time.GetLength() > 1) {
            is_increasing = time.IsIncreasing(clip_frame_number + 1);
            const int64_t time_frame_number = adjust_frame_number_minimum(time.GetLong(clip_frame_number));
            if (auto *fm = dynamic_cast<FrameMapper*>(reader)) {
                // Inform FrameMapper which direction this mapper frame is being requested
                fm->SetDirectionHint(is_increasing);
            }
            clip_frame_number = time_frame_number;
        }

        // Debug output
        ZmqLogger::Instance()->AppendDebugMethod(
            "Clip::GetOrCreateFrame (from reader)",
            "number", number, "clip_frame_number", clip_frame_number);

        // Attempt to get a frame (but this could fail if a reader has just been closed)
        auto reader_frame = reader->GetFrame(clip_frame_number);
        if (reader_frame) {
            // Override frame # (since time-mapping might change it)
            reader_frame->number = number;
            reader_frame->SetAudioDirection(is_increasing);

            // Return a new copy of the reader frame. This allows a clip to modify
            // the pixels and audio of this frame without changing the underlying
            // reader's frame data.
            auto reader_copy = std::make_shared<Frame>(*reader_frame.get());
            if (has_video.GetInt(number) == 0) {
                // No video, so add transparent pixels
                reader_copy->AddColor(QColor(Qt::transparent));
            }
            if (has_audio.GetInt(number) == 0 || number > reader->info.video_length) {
                // No audio, so include silence (also, mute audio if past end of reader)
                reader_copy->AddAudioSilence(reader_copy->GetAudioSamplesCount());
            }
            return reader_copy;
        }

    } catch (const ReaderClosed & e) {
        // ...
    } catch (const OutOfBoundsFrame & e) {
        // ...
    }

    // Estimate # of samples needed for this frame
    int estimated_samples_in_frame = Frame::GetSamplesPerFrame(number, reader->info.fps, reader->info.sample_rate, reader->info.channels);

    // Debug output
    ZmqLogger::Instance()->AppendDebugMethod(
        "Clip::GetOrCreateFrame (create blank)",
        "number", number,
        "estimated_samples_in_frame", estimated_samples_in_frame);

    // Create blank frame
    auto new_frame = std::make_shared<Frame>(
        number, reader->info.width, reader->info.height,
        "#000000", estimated_samples_in_frame, reader->info.channels);
    new_frame->SampleRate(reader->info.sample_rate);
    new_frame->ChannelsLayout(reader->info.channel_layout);
    new_frame->AddAudioSilence(estimated_samples_in_frame);
    return new_frame;
}

// Generate JSON string of this object
std::string Clip::Json() const {

    // Return formatted string
    return JsonValue().toStyledString();
}

// Get all properties for a specific frame
std::string Clip::PropertiesJSON(int64_t requested_frame) const {

    // Generate JSON properties list
    Json::Value root;
    root["id"] = add_property_json("ID", 0.0, "string", Id(), NULL, -1, -1, true, requested_frame);
    root["position"] = add_property_json("Position", Position(), "float", "", NULL, 0, 30 * 60 * 60 * 48, false, requested_frame);
    root["layer"] = add_property_json("Track", Layer(), "int", "", NULL, 0, 20, false, requested_frame);
    root["start"] = add_property_json("Start", Start(), "float", "", NULL, 0, 30 * 60 * 60 * 48, false, requested_frame);
    root["end"] = add_property_json("End", End(), "float", "", NULL, 0, 30 * 60 * 60 * 48, false, requested_frame);
    root["duration"] = add_property_json("Duration", Duration(), "float", "", NULL, 0, 30 * 60 * 60 * 48, true, requested_frame);
    root["gravity"] = add_property_json("Gravity", gravity, "int", "", NULL, 0, 8, false, requested_frame);
    root["scale"] = add_property_json("Scale", scale, "int", "", NULL, 0, 3, false, requested_frame);
    root["display"] = add_property_json("Frame Number", display, "int", "", NULL, 0, 3, false, requested_frame);
    root["mixing"] = add_property_json("Volume Mixing", mixing, "int", "", NULL, 0, 2, false, requested_frame);
    root["composite"] = add_property_json("Composite", composite, "int", "", NULL, 0, composite_choices_count - 1, false, requested_frame);
    root["waveform"] = add_property_json("Waveform", waveform, "int", "", NULL, 0, 1, false, requested_frame);
    root["parentObjectId"] = add_property_json("Parent", 0.0, "string", parentObjectId, NULL, -1, -1, false, requested_frame);

    // Add gravity choices (dropdown style)
    root["gravity"]["choices"].append(add_property_choice_json("Top Left", GRAVITY_TOP_LEFT, gravity));
    root["gravity"]["choices"].append(add_property_choice_json("Top Center", GRAVITY_TOP, gravity));
    root["gravity"]["choices"].append(add_property_choice_json("Top Right", GRAVITY_TOP_RIGHT, gravity));
    root["gravity"]["choices"].append(add_property_choice_json("Left", GRAVITY_LEFT, gravity));
    root["gravity"]["choices"].append(add_property_choice_json("Center", GRAVITY_CENTER, gravity));
    root["gravity"]["choices"].append(add_property_choice_json("Right", GRAVITY_RIGHT, gravity));
    root["gravity"]["choices"].append(add_property_choice_json("Bottom Left", GRAVITY_BOTTOM_LEFT, gravity));
    root["gravity"]["choices"].append(add_property_choice_json("Bottom Center", GRAVITY_BOTTOM, gravity));
    root["gravity"]["choices"].append(add_property_choice_json("Bottom Right", GRAVITY_BOTTOM_RIGHT, gravity));

    // Add scale choices (dropdown style)
    root["scale"]["choices"].append(add_property_choice_json("Crop", SCALE_CROP, scale));
    root["scale"]["choices"].append(add_property_choice_json("Best Fit", SCALE_FIT, scale));
    root["scale"]["choices"].append(add_property_choice_json("Stretch", SCALE_STRETCH, scale));
    root["scale"]["choices"].append(add_property_choice_json("None", SCALE_NONE, scale));

    // Add frame number display choices (dropdown style)
    root["display"]["choices"].append(add_property_choice_json("None", FRAME_DISPLAY_NONE, display));
    root["display"]["choices"].append(add_property_choice_json("Clip", FRAME_DISPLAY_CLIP, display));
    root["display"]["choices"].append(add_property_choice_json("Timeline", FRAME_DISPLAY_TIMELINE, display));
    root["display"]["choices"].append(add_property_choice_json("Both", FRAME_DISPLAY_BOTH, display));

    // Add volume mixing choices (dropdown style)
    root["mixing"]["choices"].append(add_property_choice_json("None", VOLUME_MIX_NONE, mixing));
    root["mixing"]["choices"].append(add_property_choice_json("Average", VOLUME_MIX_AVERAGE, mixing));
    root["mixing"]["choices"].append(add_property_choice_json("Reduce", VOLUME_MIX_REDUCE, mixing));

    // Add composite choices (dropdown style)
    for (int i = 0; i < composite_choices_count; ++i)
        root["composite"]["choices"].append(add_property_choice_json(composite_choices[i].name, composite_choices[i].value, composite));

    // Add waveform choices (dropdown style)
    root["waveform"]["choices"].append(add_property_choice_json("Yes", true, waveform));
    root["waveform"]["choices"].append(add_property_choice_json("No", false, waveform));

    // Add the parentClipObject's properties
    if (parentClipObject)
    {
        // Convert Clip's frame position to Timeline's frame position
        long clip_start_position = round(Position() * info.fps.ToDouble()) + 1;
        long clip_start_frame = (Start() * info.fps.ToDouble()) + 1;
        double timeline_frame_number = requested_frame + clip_start_position - clip_start_frame;

        // Correct the parent Clip Object properties by the clip's reference system
        float parentObject_location_x = parentClipObject->location_x.GetValue(timeline_frame_number);
        float parentObject_location_y = parentClipObject->location_y.GetValue(timeline_frame_number);
        float parentObject_scale_x = parentClipObject->scale_x.GetValue(timeline_frame_number);
        float parentObject_scale_y = parentClipObject->scale_y.GetValue(timeline_frame_number);
        float parentObject_shear_x = parentClipObject->shear_x.GetValue(timeline_frame_number);
        float parentObject_shear_y = parentClipObject->shear_y.GetValue(timeline_frame_number);
        float parentObject_rotation = parentClipObject->rotation.GetValue(timeline_frame_number);

        // Add the parent Clip Object properties to JSON
        root["location_x"] = add_property_json("Location X", parentObject_location_x, "float", "", &location_x, -1.0, 1.0, false, requested_frame);
        root["location_y"] = add_property_json("Location Y", parentObject_location_y, "float", "", &location_y, -1.0, 1.0, false, requested_frame);
        root["scale_x"] = add_property_json("Scale X", parentObject_scale_x, "float", "", &scale_x, 0.0, 1.0, false, requested_frame);
        root["scale_y"] = add_property_json("Scale Y", parentObject_scale_y, "float", "", &scale_y, 0.0, 1.0, false, requested_frame);
        root["rotation"] = add_property_json("Rotation", parentObject_rotation, "float", "", &rotation, -360, 360, false, requested_frame);
        root["shear_x"] = add_property_json("Shear X", parentObject_shear_x, "float", "", &shear_x, -1.0, 1.0, false, requested_frame);
        root["shear_y"] = add_property_json("Shear Y", parentObject_shear_y, "float", "", &shear_y, -1.0, 1.0, false, requested_frame);
    }
    else
    {
        // Add this clip's own properties to JSON
        root["location_x"] = add_property_json("Location X", location_x.GetValue(requested_frame), "float", "", &location_x, -1.0, 1.0, false, requested_frame);
        root["location_y"] = add_property_json("Location Y", location_y.GetValue(requested_frame), "float", "", &location_y, -1.0, 1.0, false, requested_frame);
        root["scale_x"] = add_property_json("Scale X", scale_x.GetValue(requested_frame), "float", "", &scale_x, 0.0, 1.0, false, requested_frame);
        root["scale_y"] = add_property_json("Scale Y", scale_y.GetValue(requested_frame), "float", "", &scale_y, 0.0, 1.0, false, requested_frame);
        root["rotation"] = add_property_json("Rotation", rotation.GetValue(requested_frame), "float", "", &rotation, -360, 360, false, requested_frame);
        root["shear_x"] = add_property_json("Shear X", shear_x.GetValue(requested_frame), "float", "", &shear_x, -1.0, 1.0, false, requested_frame);
        root["shear_y"] = add_property_json("Shear Y", shear_y.GetValue(requested_frame), "float", "", &shear_y, -1.0, 1.0, false, requested_frame);
    }

    // Keyframes
    root["alpha"] = add_property_json("Alpha", alpha.GetValue(requested_frame), "float", "", &alpha, 0.0, 1.0, false, requested_frame);
    root["origin_x"] = add_property_json("Origin X", origin_x.GetValue(requested_frame), "float", "", &origin_x, 0.0, 1.0, false, requested_frame);
    root["origin_y"] = add_property_json("Origin Y", origin_y.GetValue(requested_frame), "float", "", &origin_y, 0.0, 1.0, false, requested_frame);
    root["volume"] = add_property_json("Volume", volume.GetValue(requested_frame), "float", "", &volume, 0.0, 1.0, false, requested_frame);
    root["time"] = add_property_json("Time", time.GetValue(requested_frame), "float", "", &time, 0.0, 30 * 60 * 60 * 48, false, requested_frame);
    root["channel_filter"] = add_property_json("Channel Filter", channel_filter.GetValue(requested_frame), "int", "", &channel_filter, -1, 10, false, requested_frame);
    root["channel_mapping"] = add_property_json("Channel Mapping", channel_mapping.GetValue(requested_frame), "int", "", &channel_mapping, -1, 10, false, requested_frame);
    root["has_audio"] = add_property_json("Enable Audio", has_audio.GetValue(requested_frame), "int", "", &has_audio, -1, 1.0, false, requested_frame);
    root["has_video"] = add_property_json("Enable Video", has_video.GetValue(requested_frame), "int", "", &has_video, -1, 1.0, false, requested_frame);

    // Add enable audio/video choices (dropdown style)
    root["has_audio"]["choices"].append(add_property_choice_json("Auto", -1, has_audio.GetValue(requested_frame)));
    root["has_audio"]["choices"].append(add_property_choice_json("Off", 0, has_audio.GetValue(requested_frame)));
    root["has_audio"]["choices"].append(add_property_choice_json("On", 1, has_audio.GetValue(requested_frame)));
    root["has_video"]["choices"].append(add_property_choice_json("Auto", -1, has_video.GetValue(requested_frame)));
    root["has_video"]["choices"].append(add_property_choice_json("Off", 0, has_video.GetValue(requested_frame)));
    root["has_video"]["choices"].append(add_property_choice_json("On", 1, has_video.GetValue(requested_frame)));

    root["wave_color"] = add_property_json("Wave Color", 0.0, "color", "", &wave_color.red, 0, 255, false, requested_frame);
    root["wave_color"]["red"] = add_property_json("Red", wave_color.red.GetValue(requested_frame), "float", "", &wave_color.red, 0, 255, false, requested_frame);
    root["wave_color"]["blue"] = add_property_json("Blue", wave_color.blue.GetValue(requested_frame), "float", "", &wave_color.blue, 0, 255, false, requested_frame);
    root["wave_color"]["green"] = add_property_json("Green", wave_color.green.GetValue(requested_frame), "float", "", &wave_color.green, 0, 255, false, requested_frame);
    root["wave_color"]["alpha"] = add_property_json("Alpha", wave_color.alpha.GetValue(requested_frame), "float", "", &wave_color.alpha, 0, 255, false, requested_frame);

    // Return formatted string
    return root.toStyledString();
}

// Generate Json::Value for this object
Json::Value Clip::JsonValue() const {

    // Create root json object
    Json::Value root = ClipBase::JsonValue(); // get parent properties
    root["parentObjectId"] = parentObjectId;
    root["gravity"] = gravity;
    root["scale"] = scale;
    root["anchor"] = anchor;
    root["display"] = display;
    root["mixing"] = mixing;
    root["composite"] = composite;
    root["waveform"] = waveform;
    root["scale_x"] = scale_x.JsonValue();
    root["scale_y"] = scale_y.JsonValue();
    root["location_x"] = location_x.JsonValue();
    root["location_y"] = location_y.JsonValue();
    root["alpha"] = alpha.JsonValue();
    root["rotation"] = rotation.JsonValue();
    root["time"] = time.JsonValue();
    root["volume"] = volume.JsonValue();
    root["wave_color"] = wave_color.JsonValue();
    root["shear_x"] = shear_x.JsonValue();
    root["shear_y"] = shear_y.JsonValue();
    root["origin_x"] = origin_x.JsonValue();
    root["origin_y"] = origin_y.JsonValue();
    root["channel_filter"] = channel_filter.JsonValue();
    root["channel_mapping"] = channel_mapping.JsonValue();
    root["has_audio"] = has_audio.JsonValue();
    root["has_video"] = has_video.JsonValue();
    root["perspective_c1_x"] = perspective_c1_x.JsonValue();
    root["perspective_c1_y"] = perspective_c1_y.JsonValue();
    root["perspective_c2_x"] = perspective_c2_x.JsonValue();
    root["perspective_c2_y"] = perspective_c2_y.JsonValue();
    root["perspective_c3_x"] = perspective_c3_x.JsonValue();
    root["perspective_c3_y"] = perspective_c3_y.JsonValue();
    root["perspective_c4_x"] = perspective_c4_x.JsonValue();
    root["perspective_c4_y"] = perspective_c4_y.JsonValue();

    // Add array of effects
    root["effects"] = Json::Value(Json::arrayValue);

    // Loop through effects
    for (auto existing_effect : effects)
    {
        root["effects"].append(existing_effect->JsonValue());
    }

    if (reader)
        root["reader"] = reader->JsonValue();
    else
        root["reader"] = Json::Value(Json::objectValue);

    // Return JsonValue
    return root;
}

// Load JSON string into this object
void Clip::SetJson(const std::string value) {

    // Parse JSON string into JSON objects
    try
    {
        const Json::Value root = openshot::stringToJson(value);
        // Set all values that match
        SetJsonValue(root);
    }
    catch (const std::exception& e)
    {
        // Error parsing JSON (or missing keys)
        throw InvalidJSON("JSON is invalid (missing keys or invalid data types)");
    }
}
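
// Usage sketch (illustrative): Json() and SetJson() round-trip a clip's
// state, which is how projects are saved and restored.
//
//     std::string state = clip.Json();   // serialize
//     openshot::Clip restored;
//     restored.SetJson(state);           // deserialize into a new clip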

// Load Json::Value into this object
void Clip::SetJsonValue(const Json::Value root) {

    // Set parent data
    ClipBase::SetJsonValue(root);

    // Set data from Json (if key is found)
    if (!root["parentObjectId"].isNull()){
        parentObjectId = root["parentObjectId"].asString();
        if (!parentObjectId.empty()){
            AttachToObject(parentObjectId);
        } else {
            parentTrackedObject = nullptr;
            parentClipObject = NULL;
        }
    }
    if (!root["gravity"].isNull())
        gravity = (GravityType) root["gravity"].asInt();
    if (!root["scale"].isNull())
        scale = (ScaleType) root["scale"].asInt();
    if (!root["anchor"].isNull())
        anchor = (AnchorType) root["anchor"].asInt();
    if (!root["display"].isNull())
        display = (FrameDisplayType) root["display"].asInt();
    if (!root["mixing"].isNull())
        mixing = (VolumeMixType) root["mixing"].asInt();
    if (!root["composite"].isNull())
        composite = (CompositeType) root["composite"].asInt();
    if (!root["waveform"].isNull())
        waveform = root["waveform"].asBool();
    if (!root["scale_x"].isNull())
        scale_x.SetJsonValue(root["scale_x"]);
    if (!root["scale_y"].isNull())
        scale_y.SetJsonValue(root["scale_y"]);
    if (!root["location_x"].isNull())
        location_x.SetJsonValue(root["location_x"]);
    if (!root["location_y"].isNull())
        location_y.SetJsonValue(root["location_y"]);
    if (!root["alpha"].isNull())
        alpha.SetJsonValue(root["alpha"]);
    if (!root["rotation"].isNull())
        rotation.SetJsonValue(root["rotation"]);
    if (!root["time"].isNull())
        time.SetJsonValue(root["time"]);
    if (!root["volume"].isNull())
        volume.SetJsonValue(root["volume"]);
    if (!root["wave_color"].isNull())
        wave_color.SetJsonValue(root["wave_color"]);
    if (!root["shear_x"].isNull())
        shear_x.SetJsonValue(root["shear_x"]);
    if (!root["shear_y"].isNull())
        shear_y.SetJsonValue(root["shear_y"]);
    if (!root["origin_x"].isNull())
        origin_x.SetJsonValue(root["origin_x"]);
    if (!root["origin_y"].isNull())
        origin_y.SetJsonValue(root["origin_y"]);
    if (!root["channel_filter"].isNull())
        channel_filter.SetJsonValue(root["channel_filter"]);
    if (!root["channel_mapping"].isNull())
        channel_mapping.SetJsonValue(root["channel_mapping"]);
    if (!root["has_audio"].isNull())
        has_audio.SetJsonValue(root["has_audio"]);
    if (!root["has_video"].isNull())
        has_video.SetJsonValue(root["has_video"]);
    if (!root["perspective_c1_x"].isNull())
        perspective_c1_x.SetJsonValue(root["perspective_c1_x"]);
    if (!root["perspective_c1_y"].isNull())
        perspective_c1_y.SetJsonValue(root["perspective_c1_y"]);
    if (!root["perspective_c2_x"].isNull())
        perspective_c2_x.SetJsonValue(root["perspective_c2_x"]);
    if (!root["perspective_c2_y"].isNull())
        perspective_c2_y.SetJsonValue(root["perspective_c2_y"]);
    if (!root["perspective_c3_x"].isNull())
        perspective_c3_x.SetJsonValue(root["perspective_c3_x"]);
    if (!root["perspective_c3_y"].isNull())
        perspective_c3_y.SetJsonValue(root["perspective_c3_y"]);
    if (!root["perspective_c4_x"].isNull())
        perspective_c4_x.SetJsonValue(root["perspective_c4_x"]);
    if (!root["perspective_c4_y"].isNull())
        perspective_c4_y.SetJsonValue(root["perspective_c4_y"]);
    if (!root["effects"].isNull()) {

        // Clear existing effects
        effects.clear();

        // Loop through effects
        for (const auto& existing_effect : root["effects"]) {
            // Skip NULL nodes
            if (existing_effect.isNull()) {
                continue;
            }

            // Create Effect
            EffectBase *e = NULL;
            if (!existing_effect["type"].isNull()) {

                // Create instance of effect
                if ( (e = EffectInfo().CreateEffect(existing_effect["type"].asString()))) {

                    // Load Json into Effect
                    e->SetJsonValue(existing_effect);

                    // Add Effect to Clip
                    AddEffect(e);
                }
            }
        }
    }
    if (!root["reader"].isNull()) // does Json contain a reader?
    {
        if (!root["reader"]["type"].isNull()) // does the reader Json contain a 'type'?
        {
            // Close previous reader (if any)
            bool already_open = false;
            if (reader)
            {
                // Track if reader was open
                already_open = reader->IsOpen();

                // Close and delete existing allocated reader (if any)
                Reader(NULL);
            }

            // Create new reader (and load properties)
            std::string type = root["reader"]["type"].asString();

            if (type == "FFmpegReader") {

                // Create new reader
                reader = new openshot::FFmpegReader(root["reader"]["path"].asString(), false);
                reader->SetJsonValue(root["reader"]);

            } else if (type == "QtImageReader") {

                // Create new reader
                reader = new openshot::QtImageReader(root["reader"]["path"].asString(), false);
                reader->SetJsonValue(root["reader"]);

#ifdef USE_IMAGEMAGICK
            } else if (type == "ImageReader") {

                // Create new reader
                reader = new ImageReader(root["reader"]["path"].asString(), false);
                reader->SetJsonValue(root["reader"]);

            } else if (type == "TextReader") {

                // Create new reader
                reader = new TextReader();
                reader->SetJsonValue(root["reader"]);
#endif

            } else if (type == "ChunkReader") {

                // Create new reader
                reader = new openshot::ChunkReader(root["reader"]["path"].asString(), (ChunkVersion) root["reader"]["chunk_version"].asInt());
                reader->SetJsonValue(root["reader"]);

            } else if (type == "DummyReader") {

                // Create new reader
                reader = new openshot::DummyReader();
                reader->SetJsonValue(root["reader"]);

            } else if (type == "Timeline") {

                // Create new reader (always load from file again)
                // This prevents FrameMappers from being loaded by accident
                reader = new openshot::Timeline(root["reader"]["path"].asString(), true);
            }

            // Mark as managed reader and set parent
            if (reader) {
                reader->ParentClip(this);
                allocated_reader = reader;
            }

            // Re-Open reader (if needed)
            if (already_open && reader) {
                reader->Open();
            }
        }
    }

    // Clear cache (it might have changed)
    final_cache.Clear();
}

// Sort effects by order
void Clip::sort_effects()
{
    // Sort effects
    effects.sort(CompareClipEffects());
}

// Add an effect to the clip
void Clip::AddEffect(EffectBase* effect)
{
    // Set parent clip pointer
    effect->ParentClip(this);

    // Add effect to list
    effects.push_back(effect);

    // Sort effects
    sort_effects();

    // Get the parent timeline of this clip
    Timeline* parentTimeline = static_cast<Timeline *>(ParentTimeline());

    if (parentTimeline)
        effect->ParentTimeline(parentTimeline);

#ifdef USE_OPENCV
    // Add Tracked Object to Timeline
    if (effect->info.has_tracked_object){

        // Check if this clip has a parent timeline
        if (parentTimeline){

            effect->ParentTimeline(parentTimeline);

            // Iterate through effect's vector of Tracked Objects
            for (auto const& trackedObject : effect->trackedObjects){

                // Cast the Tracked Object as TrackedObjectBBox
                std::shared_ptr<TrackedObjectBBox> trackedObjectBBox = std::static_pointer_cast<TrackedObjectBBox>(trackedObject.second);

                // Set the Tracked Object's parent clip to this
                trackedObjectBBox->ParentClip(this);

                // Add the Tracked Object to the timeline
                parentTimeline->AddTrackedObject(trackedObjectBBox);
            }
        }
    }
#endif

    // Clear cache (it might have changed)
    final_cache.Clear();
}

// Remove an effect from the clip
void Clip::RemoveEffect(EffectBase* effect)
{
    effects.remove(effect);

    // Clear cache (it might have changed)
    final_cache.Clear();
}
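
// Usage sketch (illustrative; assumes the "Blur" effect is available in this
// build):
//
//     openshot::EffectBase* blur = openshot::EffectInfo().CreateEffect("Blur");
//     clip.AddEffect(blur);                // attaches, sorts, clears cache
//     openshot::EffectBase* found = clip.GetEffect(blur->Id());
//     clip.RemoveEffect(blur);             // detaches and clears cache again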

// Apply background image to the current clip image (i.e. flatten this image onto previous layer)
void Clip::apply_background(std::shared_ptr<openshot::Frame> frame, std::shared_ptr<openshot::Frame> background_frame) {
    // Add background canvas
    std::shared_ptr<QImage> background_canvas = background_frame->GetImage();
    QPainter painter(background_canvas.get());

    // Composite a new layer onto the image
    painter.setCompositionMode(static_cast<QPainter::CompositionMode>(composite));
    painter.drawImage(0, 0, *frame->GetImage());
    painter.end();

    // Add new QImage to frame
    frame->AddImage(background_canvas);
}

// Apply effects to the source frame (if any)
void Clip::apply_effects(std::shared_ptr<Frame> frame, int64_t timeline_frame_number, TimelineInfoStruct* options, bool before_keyframes)
{
    for (auto effect : effects)
    {
        // Apply the effect to this frame
        if (effect->info.apply_before_clip && before_keyframes) {
            effect->GetFrame(frame, frame->number);
        } else if (!effect->info.apply_before_clip && !before_keyframes) {
            effect->GetFrame(frame, frame->number);
        }
    }

    if (timeline != NULL && options != NULL) {
        // Apply global timeline effects (i.e. transitions & masks... if any)
        Timeline* timeline_instance = static_cast<Timeline*>(timeline);
        options->is_before_clip_keyframes = before_keyframes;
        timeline_instance->apply_effects(frame, timeline_frame_number, Layer(), options);
    }
}

// Compare 2 floating point numbers for equality
bool Clip::isNear(double a, double b)
{
    return fabs(a - b) < 0.000001;
}

// Apply keyframes to the source frame (if any)
void Clip::apply_keyframes(std::shared_ptr<Frame> frame, QSize timeline_size) {
    // Skip if video is disabled or this is an audio-only frame (no waveform visualisation in use)
    if (!frame->has_image_data) {
        // Skip the rest of the image processing for performance reasons
        return;
    }

    // Get image from clip, and create transparent background image
    std::shared_ptr<QImage> source_image = frame->GetImage();
    std::shared_ptr<QImage> background_canvas = std::make_shared<QImage>(timeline_size.width(),
                                                                         timeline_size.height(),
                                                                         QImage::Format_RGBA8888_Premultiplied);
    background_canvas->fill(QColor(Qt::transparent));

    // Get transform from clip's keyframes
    QTransform transform = get_transform(frame, background_canvas->width(), background_canvas->height());

    // Load timeline's new frame image into a QPainter
    QPainter painter(background_canvas.get());
    painter.setRenderHint(QPainter::TextAntialiasing, true);
    if (!transform.isIdentity()) {
        painter.setRenderHint(QPainter::SmoothPixmapTransform, true);
    }
    // Apply transform (translate, rotate, scale)
    painter.setTransform(transform);

    // Composite a new layer onto the image
    painter.setCompositionMode(static_cast<QPainter::CompositionMode>(composite));

    // Apply opacity via painter instead of per-pixel alpha manipulation
    const float alpha_value = alpha.GetValue(frame->number);
    if (alpha_value != 1.0f) {
        painter.setOpacity(alpha_value);
        painter.drawImage(0, 0, *source_image);
        // Reset so any subsequent drawing (e.g., overlays) isn't faded
        painter.setOpacity(1.0);
    } else {
        painter.drawImage(0, 0, *source_image);
    }

    if (timeline) {
        Timeline *t = static_cast<Timeline *>(timeline);

        // Draw frame #'s on top of image (if needed)
        if (display != FRAME_DISPLAY_NONE) {
            std::stringstream frame_number_str;
            switch (display) {
                case (FRAME_DISPLAY_NONE):
                    // This is only here to prevent unused-enum warnings
                    break;

                case (FRAME_DISPLAY_CLIP):
                    frame_number_str << frame->number;
                    break;

                case (FRAME_DISPLAY_TIMELINE):
                    frame_number_str << round((Position() - Start()) * t->info.fps.ToFloat()) + frame->number;
                    break;

                case (FRAME_DISPLAY_BOTH):
                    frame_number_str << round((Position() - Start()) * t->info.fps.ToFloat()) + frame->number << " (" << frame->number << ")";
                    break;
            }

            // Draw frame number on top of image
            painter.setPen(QColor("#ffffff"));
            painter.drawText(20, 20, QString(frame_number_str.str().c_str()));
        }
    }
    painter.end();

    // Add new QImage to frame
    frame->AddImage(background_canvas);
}

// Apply waveform image to the source frame (if any)
void Clip::apply_waveform(std::shared_ptr<Frame> frame, QSize timeline_size) {

    if (!Waveform()) {
        // Exit if no waveform is needed
        return;
    }

    // Get image from clip
    std::shared_ptr<QImage> source_image = frame->GetImage();

    // Debug output
    ZmqLogger::Instance()->AppendDebugMethod("Clip::apply_waveform (Generate Waveform Image)",
                                             "frame->number", frame->number,
                                             "Waveform()", Waveform(),
                                             "width", timeline_size.width(),
                                             "height", timeline_size.height());

    // Get the color of the waveform
    int red = wave_color.red.GetInt(frame->number);
    int green = wave_color.green.GetInt(frame->number);
    int blue = wave_color.blue.GetInt(frame->number);
    int alpha = wave_color.alpha.GetInt(frame->number);

    // Generate Waveform Dynamically (the size of the timeline)
    source_image = frame->GetWaveform(timeline_size.width(), timeline_size.height(), red, green, blue, alpha);
    frame->AddImage(source_image);
}

// Scale a source size to a target size (given a specific scale-type)
QSize Clip::scale_size(QSize source_size, ScaleType source_scale, int target_width, int target_height) {
    switch (source_scale)
    {
        case (SCALE_FIT): {
            source_size.scale(target_width, target_height, Qt::KeepAspectRatio);
            break;
        }
        case (SCALE_STRETCH): {
            source_size.scale(target_width, target_height, Qt::IgnoreAspectRatio);
            break;
        }
        case (SCALE_CROP): {
            source_size.scale(target_width, target_height, Qt::KeepAspectRatioByExpanding);
            break;
        }
    }

    return source_size;
}
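
// Worked example (illustrative): scaling a 1920x1080 source into a 1280x1280
// target: SCALE_FIT keeps the aspect ratio -> 1280x720; SCALE_STRETCH ignores
// it -> 1280x1280; SCALE_CROP expands to cover the target -> ~2276x1280 (the
// overflow is later cropped by the canvas); SCALE_NONE leaves the size
// unchanged.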
1431 
1432 // Get QTransform from keyframes
1433 QTransform Clip::get_transform(std::shared_ptr<Frame> frame, int width, int height)
1434 {
1435  // Get image from clip
1436  std::shared_ptr<QImage> source_image = frame->GetImage();
1437 
1438  /* RESIZE SOURCE IMAGE - based on scale type */
1439  QSize source_size = scale_size(source_image->size(), scale, width, height);
1440 
1441  // Initialize parent object's properties (Clip or Tracked Object)
1442  float parentObject_location_x = 0.0;
1443  float parentObject_location_y = 0.0;
1444  float parentObject_scale_x = 1.0;
1445  float parentObject_scale_y = 1.0;
1446  float parentObject_shear_x = 0.0;
1447  float parentObject_shear_y = 0.0;
1448  float parentObject_rotation = 0.0;
1449 
1450  // Get the parentClipObject properties
1451  if (GetParentClip()){
1452  // Get the start trim position of the parent clip
1453  long parent_start_offset = parentClipObject->Start() * info.fps.ToDouble();
1454  long parent_frame_number = frame->number + parent_start_offset;
1455 
1456  // Get parent object's properties (Clip)
1457  parentObject_location_x = parentClipObject->location_x.GetValue(parent_frame_number);
1458  parentObject_location_y = parentClipObject->location_y.GetValue(parent_frame_number);
1459  parentObject_scale_x = parentClipObject->scale_x.GetValue(parent_frame_number);
1460  parentObject_scale_y = parentClipObject->scale_y.GetValue(parent_frame_number);
1461  parentObject_shear_x = parentClipObject->shear_x.GetValue(parent_frame_number);
1462  parentObject_shear_y = parentClipObject->shear_y.GetValue(parent_frame_number);
1463  parentObject_rotation = parentClipObject->rotation.GetValue(parent_frame_number);
1464  }
1465 
1466  // Get the parentTrackedObject properties
1467  if (GetParentTrackedObject()){
1468  // Get the attached object's parent clip's properties
1469  Clip* parentClip = (Clip*) parentTrackedObject->ParentClip();
1470  if (parentClip)
1471  {
1472  // Get the start trim position of the parent clip
1473  long parent_start_offset = parentClip->Start() * info.fps.ToDouble();
1474  long parent_frame_number = frame->number + parent_start_offset;
1475 
1476  // Access the parentTrackedObject's properties
1477  std::map<std::string, float> trackedObjectProperties = parentTrackedObject->GetBoxValues(parent_frame_number);
1478 
1479  // Get actual scaled parent size
1480  QSize parent_size = scale_size(QSize(parentClip->info.width, parentClip->info.height),
1481  parentClip->scale, width, height);
1482 
1483  // Get actual scaled tracked object size
1484  int trackedWidth = trackedObjectProperties["w"] * trackedObjectProperties["sx"] * parent_size.width() *
1485  parentClip->scale_x.GetValue(parent_frame_number);
1486  int trackedHeight = trackedObjectProperties["h"] * trackedObjectProperties["sy"] * parent_size.height() *
1487  parentClip->scale_y.GetValue(parent_frame_number);
1488 
1489  // Scale the clip source_size based on the actual tracked object size
1490  source_size = scale_size(source_size, scale, trackedWidth, trackedHeight);
1491 
1492  // Update parentObject's properties based on the tracked object's properties and parent clip's scale
1493  parentObject_location_x = parentClip->location_x.GetValue(parent_frame_number) + ((trackedObjectProperties["cx"] - 0.5) * parentClip->scale_x.GetValue(parent_frame_number));
1494  parentObject_location_y = parentClip->location_y.GetValue(parent_frame_number) + ((trackedObjectProperties["cy"] - 0.5) * parentClip->scale_y.GetValue(parent_frame_number));
1495  parentObject_rotation = trackedObjectProperties["r"] + parentClip->rotation.GetValue(parent_frame_number);
1496  }
1497  }
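// Illustrative numbers (not part of Clip.cpp; assumes the box values are
// normalized fractions, as the (cx - 0.5) centering above suggests): a
// tracked box with w = 0.25 and sx = 1.0 on a parent whose scaled size is
// 1920x1080 (parent scale_x = 1.0) gives
// trackedWidth = 0.25 * 1.0 * 1920 * 1.0 = 480, and source_size is then
// re-fit into that 480-pixel-wide box by the scale_size() call above.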
1498 
1499  /* GRAVITY LOCATION - Initialize X & Y to the correct values (before applying location curves) */
1500  float x = 0.0; // left
1501  float y = 0.0; // top
1502 
1503  // Adjust size for scale x and scale y
1504  float sx = scale_x.GetValue(frame->number); // percentage X scale
1505  float sy = scale_y.GetValue(frame->number); // percentage Y scale
1506 
 1507  // Combine the clip's scale with the parent object's scale (when non-zero)
 1508  if (parentObject_scale_x != 0.0 && parentObject_scale_y != 0.0) {
 1509  sx *= parentObject_scale_x;
 1510  sy *= parentObject_scale_y;
 1511  }
1512 
1513  float scaled_source_width = source_size.width() * sx;
1514  float scaled_source_height = source_size.height() * sy;
1515 
1516  switch (gravity)
1517  {
1518  case (GRAVITY_TOP_LEFT):
1519  // This is only here to prevent unused-enum warnings
1520  break;
1521  case (GRAVITY_TOP):
1522  x = (width - scaled_source_width) / 2.0; // center
1523  break;
1524  case (GRAVITY_TOP_RIGHT):
1525  x = width - scaled_source_width; // right
1526  break;
1527  case (GRAVITY_LEFT):
1528  y = (height - scaled_source_height) / 2.0; // center
1529  break;
1530  case (GRAVITY_CENTER):
1531  x = (width - scaled_source_width) / 2.0; // center
1532  y = (height - scaled_source_height) / 2.0; // center
1533  break;
1534  case (GRAVITY_RIGHT):
1535  x = width - scaled_source_width; // right
1536  y = (height - scaled_source_height) / 2.0; // center
1537  break;
1538  case (GRAVITY_BOTTOM_LEFT):
1539  y = (height - scaled_source_height); // bottom
1540  break;
1541  case (GRAVITY_BOTTOM):
1542  x = (width - scaled_source_width) / 2.0; // center
1543  y = (height - scaled_source_height); // bottom
1544  break;
1545  case (GRAVITY_BOTTOM_RIGHT):
1546  x = width - scaled_source_width; // right
1547  y = (height - scaled_source_height); // bottom
1548  break;
1549  }
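// Worked example (not part of Clip.cpp): with GRAVITY_CENTER, a 1920-pixel
// canvas and a scaled source width of 1280, x = (1920 - 1280) / 2.0 = 320.
// Gravity only seeds x and y; the location_x/location_y curves are added below.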
1550 
 1551  // Debug output
 1552  ZmqLogger::Instance()->AppendDebugMethod(
 1553  "Clip::get_transform (Gravity)",
1554  "frame->number", frame->number,
1555  "source_clip->gravity", gravity,
1556  "scaled_source_width", scaled_source_width,
1557  "scaled_source_height", scaled_source_height);
1558 
1559  QTransform transform;
1560 
1561  /* LOCATION, ROTATION, AND SCALE */
1562  float r = rotation.GetValue(frame->number) + parentObject_rotation; // rotate in degrees
1563  x += width * (location_x.GetValue(frame->number) + parentObject_location_x); // move in percentage of final width
1564  y += height * (location_y.GetValue(frame->number) + parentObject_location_y); // move in percentage of final height
1565  float shear_x_value = shear_x.GetValue(frame->number) + parentObject_shear_x;
1566  float shear_y_value = shear_y.GetValue(frame->number) + parentObject_shear_y;
1567  float origin_x_value = origin_x.GetValue(frame->number);
1568  float origin_y_value = origin_y.GetValue(frame->number);
1569 
 1570  // Transform source image (if needed)
 1571  ZmqLogger::Instance()->AppendDebugMethod(
 1572  "Clip::get_transform (Build QTransform - if needed)",
1573  "frame->number", frame->number,
1574  "x", x, "y", y,
1575  "r", r,
1576  "sx", sx, "sy", sy);
1577 
1578  if (!isNear(x, 0) || !isNear(y, 0)) {
1579  // TRANSLATE/MOVE CLIP
1580  transform.translate(x, y);
1581  }
1582  if (!isNear(r, 0) || !isNear(shear_x_value, 0) || !isNear(shear_y_value, 0)) {
1583  // ROTATE CLIP (around origin_x, origin_y)
1584  float origin_x_offset = (scaled_source_width * origin_x_value);
1585  float origin_y_offset = (scaled_source_height * origin_y_value);
1586  transform.translate(origin_x_offset, origin_y_offset);
1587  transform.rotate(r);
1588  transform.shear(shear_x_value, shear_y_value);
1589  transform.translate(-origin_x_offset,-origin_y_offset);
1590  }
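// Note (not part of Clip.cpp): QTransform applies the most recently appended
// operation to source points first, so the net effect of the block above is:
// shift a pixel so the rotation origin sits at (0, 0), shear and rotate it
// there, shift it back, then move it by the (x, y) translation.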
1591  // SCALE CLIP (if needed)
1592  float source_width_scale = (float(source_size.width()) / float(source_image->width())) * sx;
1593  float source_height_scale = (float(source_size.height()) / float(source_image->height())) * sy;
1594  if (!isNear(source_width_scale, 1.0) || !isNear(source_height_scale, 1.0)) {
1595  transform.scale(source_width_scale, source_height_scale);
1596  }
1597 
1598  return transform;
1599 }
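// Usage sketch (hypothetical, not part of Clip.cpp; `canvas`, `canvas_width`
// and `canvas_height` are assumed caller-side names, and get_transform is an
// internal helper): conceptually, the compositor applies the returned
// transform before painting the frame onto the canvas:
//
//   QPainter painter(&canvas);
//   painter.setTransform(get_transform(frame, canvas_width, canvas_height));
//   painter.setOpacity(alpha.GetValue(frame->number));
//   painter.drawImage(QPoint(0, 0), *frame->GetImage());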
1600 
1601 // Adjust frame number for Clip position and start (which can result in a different number)
1602 int64_t Clip::adjust_timeline_framenumber(int64_t clip_frame_number) {
1603 
1604  // Get clip position from parent clip (if any)
1605  float position = 0.0;
1606  float start = 0.0;
1607  Clip *parent = static_cast<Clip *>(ParentClip());
1608  if (parent) {
1609  position = parent->Position();
1610  start = parent->Start();
1611  }
1612 
 1613  // Adjust start frame and position based on the parent clip.
 1614  // This ensures the same frame # is used by mapped readers and clips
 1615  // when calculating samples per frame, which prevents gaps and
 1616  // mismatches in the # of samples.
1617  int64_t clip_start_frame = (start * info.fps.ToDouble()) + 1;
1618  int64_t clip_start_position = round(position * info.fps.ToDouble()) + 1;
1619  int64_t frame_number = clip_frame_number + clip_start_position - clip_start_frame;
1620 
1621  return frame_number;
1622 }
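// Worked example (not part of Clip.cpp): with a parent position of 3.0s, a
// parent start of 1.0s, and 30 fps, clip_start_frame = (1.0 * 30) + 1 = 31
// and clip_start_position = round(3.0 * 30) + 1 = 91, so clip frame 1 maps
// to frame 1 + 91 - 31 = 61.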
openshot::ClipBase::add_property_json
Json::Value add_property_json(std::string name, float value, std::string type, std::string memo, const Keyframe *keyframe, float min_value, float max_value, bool readonly, int64_t requested_frame) const
Generate JSON for a property.
Definition: ClipBase.cpp:96
openshot::stringToJson
const Json::Value stringToJson(const std::string value)
Definition: Json.cpp:16
openshot::CacheMemory::Clear
void Clear()
Clear the cache of all frames.
Definition: CacheMemory.cpp:238
openshot::Clip::Open
void Open() override
Open the internal reader.
Definition: Clip.cpp:382
openshot::Keyframe::IsIncreasing
bool IsIncreasing(int index) const
Get the direction of the curve at a specific index (increasing or decreasing)
Definition: KeyFrame.cpp:292
openshot::ReaderInfo::sample_rate
int sample_rate
The number of audio samples per second (44100 is a common sample rate)
Definition: ReaderBase.h:60
openshot::ClipBase::timeline
openshot::TimelineBase * timeline
Pointer to the parent timeline instance (if any)
Definition: ClipBase.h:40
openshot::EffectInfo
This class returns a listing of all effects supported by libopenshot.
Definition: EffectInfo.h:28
openshot::FRAME_DISPLAY_BOTH
@ FRAME_DISPLAY_BOTH
Display both the clip's and timeline's frame number.
Definition: Enums.h:56
openshot::Fraction::ToFloat
float ToFloat()
Return this fraction as a float (i.e. 1/2 = 0.5)
Definition: Fraction.cpp:35
openshot::EffectBase
This abstract class is the base class, used by all effects in libopenshot.
Definition: EffectBase.h:53
openshot::EffectBase::info
EffectInfoStruct info
Information about the current effect.
Definition: EffectBase.h:69
openshot::ReaderBase::JsonValue
virtual Json::Value JsonValue() const =0
Generate Json::Value for this object.
Definition: ReaderBase.cpp:106
openshot::COMPOSITE_MULTIPLY
@ COMPOSITE_MULTIPLY
Definition: Enums.h:91
openshot::Clip::anchor
openshot::AnchorType anchor
The anchor determines what parent a clip should snap to.
Definition: Clip.h:178
Clip.h
Header file for Clip class.
openshot::Keyframe::GetLong
int64_t GetLong(int64_t index) const
Get the rounded LONG value at a specific index.
Definition: KeyFrame.cpp:287
openshot::CacheMemory::GetFrame
std::shared_ptr< openshot::Frame > GetFrame(int64_t frame_number)
Get a frame from the cache.
Definition: CacheMemory.cpp:81
openshot::ChunkReader
This class reads a special chunk-formatted file, which can be easily shared in a distributed environm...
Definition: ChunkReader.h:78
openshot::CacheMemory::Add
void Add(std::shared_ptr< openshot::Frame > frame)
Add a Frame to the cache.
Definition: CacheMemory.cpp:47
openshot::ReaderBase::GetFrame
virtual std::shared_ptr< openshot::Frame > GetFrame(int64_t number)=0
openshot::Clip::previous_location
AudioLocation previous_location
Previous time-mapped audio location.
Definition: Clip.h:95
openshot::FRAME_DISPLAY_CLIP
@ FRAME_DISPLAY_CLIP
Display the clip's internal frame number.
Definition: Enums.h:54
openshot::FRAME_DISPLAY_TIMELINE
@ FRAME_DISPLAY_TIMELINE
Display the timeline's frame number.
Definition: Enums.h:55
openshot::ReaderBase::SetJsonValue
virtual void SetJsonValue(const Json::Value root)=0
Load Json::Value into this object.
Definition: ReaderBase.cpp:157
openshot::Clip::GetEffect
openshot::EffectBase * GetEffect(const std::string &id)
Look up an effect by ID.
Definition: Clip.cpp:533
openshot::ClipBase::End
virtual void End(float value)
Set end position (in seconds) of clip (trim end of video)
Definition: ClipBase.cpp:53
openshot
This namespace is the default namespace for all code in the openshot library.
Definition: Compressor.h:28
openshot::Clip::scale_y
openshot::Keyframe scale_y
Curve representing the vertical scaling in percent (0 to 1)
Definition: Clip.h:317
openshot::Clip::PropertiesJSON
std::string PropertiesJSON(int64_t requested_frame) const override
Definition: Clip.cpp:818
openshot::EffectBase::ParentClip
openshot::ClipBase * ParentClip()
Parent clip object of this effect (which can be unparented and NULL)
Definition: EffectBase.cpp:201
openshot::AudioLocation
This struct holds the associated video frame and starting sample # for an audio packet.
Definition: AudioLocation.h:25
openshot::Keyframe::GetDelta
double GetDelta(int64_t index) const
Get the change in Y value (from the previous Y value)
Definition: KeyFrame.cpp:399
TextReader.h
Header file for TextReader class.
openshot::Clip::time
openshot::Keyframe time
Curve representing the frames over time to play (used for speed and direction of video)
Definition: Clip.h:330
openshot::CompositeType
CompositeType
This enumeration determines how clips are composited onto lower layers.
Definition: Enums.h:75
openshot::ClipBase::add_property_choice_json
Json::Value add_property_choice_json(std::string name, int value, int selected_value) const
Generate JSON choice for a property (dropdown properties)
Definition: ClipBase.cpp:132
openshot::AudioLocation::frame
int64_t frame
Definition: AudioLocation.h:26
openshot::Clip
This class represents a clip (used to arrange readers on the timeline)
Definition: Clip.h:89
juce::AudioBuffer< float >
openshot::AudioLocation::sample_start
int sample_start
Definition: AudioLocation.h:27
openshot::Clip::alpha
openshot::Keyframe alpha
Curve representing the alpha (1 to 0)
Definition: Clip.h:320
openshot::Clip::End
float End() const override
Get end position (in seconds) of clip (trim end of video), which can be affected by the time curve.
Definition: Clip.cpp:418
openshot::ReaderBase::info
openshot::ReaderInfo info
Information about the current media file.
Definition: ReaderBase.h:88
openshot::COMPOSITE_SCREEN
@ COMPOSITE_SCREEN
Definition: Enums.h:92
openshot::GRAVITY_TOP_LEFT
@ GRAVITY_TOP_LEFT
Align clip to the top left of its parent.
Definition: Enums.h:23
Timeline.h
Header file for Timeline class.
openshot::Clip::origin_x
openshot::Keyframe origin_x
Curve representing X origin point (0.0=0% (left), 1.0=100% (right))
Definition: Clip.h:326
openshot::Clip::ParentTimeline
void ParentTimeline(openshot::TimelineBase *new_timeline) override
Set associated Timeline pointer.
Definition: Clip.cpp:445
openshot::Clip::GetFrame
std::shared_ptr< openshot::Frame > GetFrame(int64_t clip_frame_number) override
Get an openshot::Frame object for a specific frame number of this clip. The image size and number of ...
Definition: Clip.cpp:453
openshot::Clip::Close
void Close() override
Close the internal reader.
Definition: Clip.cpp:403
AudioResampler.h
Header file for AudioResampler class.
openshot::Clip::location_y
openshot::Keyframe location_y
Curve representing the relative Y position in percent based on the gravity (-1 to 1)
Definition: Clip.h:319
openshot::DummyReader
This class is used as a simple, dummy reader, which can be very useful when writing unit tests....
Definition: DummyReader.h:85
openshot::GRAVITY_TOP_RIGHT
@ GRAVITY_TOP_RIGHT
Align clip to the top right of its parent.
Definition: Enums.h:25
openshot::Keyframe::SetJsonValue
void SetJsonValue(const Json::Value root)
Load Json::Value into this object.
Definition: KeyFrame.cpp:372
openshot::GravityType
GravityType
This enumeration determines how clips are aligned to their parent container.
Definition: Enums.h:21
openshot::Clip::origin_y
openshot::Keyframe origin_y
Curve representing Y origin point (0.0=0% (top), 1.0=100% (bottom))
Definition: Clip.h:327
openshot::Clip::GetParentTrackedObject
std::shared_ptr< openshot::TrackedObjectBase > GetParentTrackedObject()
Return the associated Parent Tracked Object (if any)
Definition: Clip.cpp:554
openshot::ReaderInfo::duration
float duration
Length of time (in seconds)
Definition: ReaderBase.h:43
openshot::EffectBase::trackedObjects
std::map< int, std::shared_ptr< openshot::TrackedObjectBase > > trackedObjects
Map of Tracked Objects by their indices (used by Effects that track objects on clips)
Definition: EffectBase.h:66
openshot::Clip::channel_mapping
openshot::Keyframe channel_mapping
A number representing an audio channel to output (only works when filtering a channel)
Definition: Clip.h:348
openshot::Clip::AddEffect
void AddEffect(openshot::EffectBase *effect)
Add an effect to the clip.
Definition: Clip.cpp:1209
openshot::ReaderBase::Name
virtual std::string Name()=0
Return the type name of the class.
openshot::Clip::~Clip
virtual ~Clip()
Destructor.
Definition: Clip.cpp:283
openshot::ReaderInfo::width
int width
The width of the video (in pixels)
Definition: ReaderBase.h:46
openshot::Clip::Json
std::string Json() const override
Generate JSON string of this object.
Definition: Clip.cpp:811
openshot::ClipBase::Position
void Position(float value)
Set the position of this clip on the timeline (in seconds)
Definition: ClipBase.cpp:19
openshot::GRAVITY_RIGHT
@ GRAVITY_RIGHT
Align clip to the right of its parent (middle aligned)
Definition: Enums.h:28
openshot::FRAME_DISPLAY_NONE
@ FRAME_DISPLAY_NONE
Do not display the frame number.
Definition: Enums.h:53
openshot::COMPOSITE_SOFT_LIGHT
@ COMPOSITE_SOFT_LIGHT
Definition: Enums.h:99
openshot::CompareClipEffects
Definition: Clip.h:48
openshot::Clip::SetJsonValue
void SetJsonValue(const Json::Value root) override
Load Json::Value into this object.
Definition: Clip.cpp:1014
openshot::OutOfBoundsFrame
Exception for frames that are out of bounds.
Definition: Exceptions.h:300
openshot::Fraction::ToDouble
double ToDouble() const
Return this fraction as a double (i.e. 1/2 = 0.5)
Definition: Fraction.cpp:40
openshot::Timeline::apply_effects
std::shared_ptr< openshot::Frame > apply_effects(std::shared_ptr< openshot::Frame > frame, int64_t timeline_frame_number, int layer, TimelineInfoStruct *options)
Apply global/timeline effects to the source frame (if any)
Definition: Timeline.cpp:546
openshot::Keyframe::JsonValue
Json::Value JsonValue() const
Generate Json::Value for this object.
Definition: KeyFrame.cpp:339
openshot::COMPOSITE_LIGHTEN
@ COMPOSITE_LIGHTEN
Definition: Enums.h:95
FrameMapper.h
Header file for the FrameMapper class.
openshot::GRAVITY_TOP
@ GRAVITY_TOP
Align clip to the top center of its parent.
Definition: Enums.h:24
openshot::COMPOSITE_OVERLAY
@ COMPOSITE_OVERLAY
Definition: Enums.h:93
openshot::CacheBase::SetMaxBytesFromInfo
void SetMaxBytesFromInfo(int64_t number_of_frames, int width, int height, int sample_rate, int channels)
Set maximum bytes to a different amount based on a ReaderInfo struct.
Definition: CacheBase.cpp:28
openshot::Color
This class represents a color (used on the timeline and clips)
Definition: Color.h:27
openshot::Clip::display
openshot::FrameDisplayType display
The format to display the frame number (if any)
Definition: Clip.h:179
openshot::ClipBase::SetJsonValue
virtual void SetJsonValue(const Json::Value root)=0
Load Json::Value into this object.
Definition: ClipBase.cpp:80
openshot::Clip::perspective_c2_y
openshot::Keyframe perspective_c2_y
Curves representing Y for coordinate 2.
Definition: Clip.h:340
openshot::Clip::scale_x
openshot::Keyframe scale_x
Curve representing the horizontal scaling in percent (0 to 1)
Definition: Clip.h:316
openshot::QtImageReader
This class uses the Qt library, to open image files, and return openshot::Frame objects containing th...
Definition: QtImageReader.h:74
openshot::ClipBase::JsonValue
virtual Json::Value JsonValue() const =0
Generate Json::Value for this object.
Definition: ClipBase.cpp:64
openshot::ReaderInfo::video_length
int64_t video_length
The number of frames in the video stream.
Definition: ReaderBase.h:53
openshot::AudioResampler
This class is used to resample audio data for many sequential frames.
Definition: AudioResampler.h:30
openshot::AudioResampler::SetBuffer
void SetBuffer(juce::AudioBuffer< float > *new_buffer, double sample_rate, double new_sample_rate)
Sets the audio buffer and key settings.
Definition: AudioResampler.cpp:60
openshot::ReaderInfo::height
int height
The height of the video (in pixels)
Definition: ReaderBase.h:45
openshot::VOLUME_MIX_REDUCE
@ VOLUME_MIX_REDUCE
Reduce volume by about 25%, and then mix (louder, but could cause pops if the sum exceeds 100%)
Definition: Enums.h:71
openshot::ClipBase::position
float position
The position on the timeline where this clip should start playing.
Definition: ClipBase.h:35
openshot::Timeline::GetClip
openshot::Clip * GetClip(const std::string &id)
Look up a single clip by ID.
Definition: Timeline.cpp:413
openshot::Clip::perspective_c3_y
openshot::Keyframe perspective_c3_y
Curves representing Y for coordinate 3.
Definition: Clip.h:342
openshot::Clip::perspective_c4_y
openshot::Keyframe perspective_c4_y
Curves representing Y for coordinate 4.
Definition: Clip.h:344
ZmqLogger.h
Header file for ZeroMQ-based Logger class.
openshot::Clip::has_video
openshot::Keyframe has_video
An optional override to determine if this clip has video (-1=undefined, 0=no, 1=yes)
Definition: Clip.h:352
openshot::Keyframe
A Keyframe is a collection of Point instances, which is used to vary a number or property over time.
Definition: KeyFrame.h:53
openshot::Clip::gravity
openshot::GravityType gravity
The gravity of a clip determines where it snaps to its parent.
Definition: Clip.h:176
openshot::Color::SetJsonValue
void SetJsonValue(const Json::Value root)
Load Json::Value into this object.
Definition: Color.cpp:117
openshot::TimelineInfoStruct::is_before_clip_keyframes
bool is_before_clip_keyframes
Is this before clip keyframes are applied.
Definition: TimelineBase.h:35
openshot::ReaderBase::Open
virtual void Open()=0
Open the reader (and start consuming resources, such as images or video files)
openshot::GRAVITY_BOTTOM
@ GRAVITY_BOTTOM
Align clip to the bottom center of its parent.
Definition: Enums.h:30
openshot::ReaderBase::IsOpen
virtual bool IsOpen()=0
Determine if reader is open or closed.
openshot::InvalidJSON
Exception for invalid JSON.
Definition: Exceptions.h:217
openshot::Timeline
This class represents a timeline.
Definition: Timeline.h:154
openshot::ImageReader
This class uses the ImageMagick++ libraries, to open image files, and return openshot::Frame objects ...
Definition: ImageReader.h:55
openshot::Clip::composite
openshot::CompositeType composite
How this clip is composited onto lower layers.
Definition: Clip.h:181
openshot::Clip::perspective_c1_x
openshot::Keyframe perspective_c1_x
Curves representing X for coordinate 1.
Definition: Clip.h:337
openshot::SCALE_CROP
@ SCALE_CROP
Scale the clip until both height and width fill the canvas (cropping the overlap)
Definition: Enums.h:37
openshot::Color::green
openshot::Keyframe green
Curve representing the green value (0 - 255)
Definition: Color.h:31
openshot::COMPOSITE_HARD_LIGHT
@ COMPOSITE_HARD_LIGHT
Definition: Enums.h:98
openshot::Clip::init_settings
void init_settings()
Init default settings for a clip.
Definition: Clip.cpp:67
openshot::TimelineInfoStruct
This struct contains info about the current Timeline clip instance.
Definition: TimelineBase.h:32
openshot::EffectInfoStruct::has_tracked_object
bool has_tracked_object
Determines if this effect tracks objects through the clip.
Definition: EffectBase.h:42
openshot::ReaderInfo::metadata
std::map< std::string, std::string > metadata
An optional map/dictionary of metadata for this reader.
Definition: ReaderBase.h:65
openshot::ClipBase::end
float end
The position in seconds to end playing (used to trim the ending of a clip)
Definition: ClipBase.h:38
openshot::ClipBase::Start
void Start(float value)
Set start position (in seconds) of clip (trim start of video)
Definition: ClipBase.cpp:42
openshot::FFmpegReader
This class uses the FFmpeg libraries, to open video files and audio files, and return openshot::Frame...
Definition: FFmpegReader.h:103
path
path
Definition: FFmpegWriter.cpp:1468
openshot::COMPOSITE_PLUS
@ COMPOSITE_PLUS
Definition: Enums.h:90
openshot::FrameMapper
This class creates a mapping between 2 different frame rates, applying a specific pull-down technique...
Definition: FrameMapper.h:193
openshot::Frame::GetSamplesPerFrame
int GetSamplesPerFrame(openshot::Fraction fps, int sample_rate, int channels)
Calculate the # of samples per video frame (for the current frame number)
Definition: Frame.cpp:484
ChunkReader.h
Header file for ChunkReader class.
openshot::COMPOSITE_DIFFERENCE
@ COMPOSITE_DIFFERENCE
Definition: Enums.h:100
openshot::ZmqLogger::Instance
static ZmqLogger * Instance()
Create or get an instance of this logger singleton (invoke the class with this method)
Definition: ZmqLogger.cpp:35
openshot::ClipBase::start
float start
The position in seconds to start playing (used to trim the beginning of a clip)
Definition: ClipBase.h:37
openshot::Clip::Reader
openshot::ReaderBase * Reader()
Get the current reader.
Definition: Clip.cpp:372
openshot::SCALE_FIT
@ SCALE_FIT
Scale the clip until either height or width fills the canvas (with no cropping)
Definition: Enums.h:38
openshot::GRAVITY_BOTTOM_LEFT
@ GRAVITY_BOTTOM_LEFT
Align clip to the bottom left of its parent.
Definition: Enums.h:29
openshot::Clip::perspective_c2_x
openshot::Keyframe perspective_c2_x
Curves representing X for coordinate 2.
Definition: Clip.h:339
openshot::ZmqLogger::AppendDebugMethod
void AppendDebugMethod(std::string method_name, std::string arg1_name="", float arg1_value=-1.0, std::string arg2_name="", float arg2_value=-1.0, std::string arg3_name="", float arg3_value=-1.0, std::string arg4_name="", float arg4_value=-1.0, std::string arg5_name="", float arg5_value=-1.0, std::string arg6_name="", float arg6_value=-1.0)
Append debug information.
Definition: ZmqLogger.cpp:178
openshot::Clip::volume
openshot::Keyframe volume
Curve representing the volume (0 to 1)
Definition: Clip.h:331
openshot::COMPOSITE_DARKEN
@ COMPOSITE_DARKEN
Definition: Enums.h:94
openshot::Timeline::AddTrackedObject
void AddTrackedObject(std::shared_ptr< openshot::TrackedObjectBase > trackedObject)
Add a pointer to a tracked object (TrackedObjectBBox) to the tracked_objects map.
Definition: Timeline.cpp:224
openshot::Clip::SetJson
void SetJson(const std::string value) override
Load JSON string into this object.
Definition: Clip.cpp:997
openshot::GRAVITY_BOTTOM_RIGHT
@ GRAVITY_BOTTOM_RIGHT
Align clip to the bottom right of its parent.
Definition: Enums.h:31
openshot::Color::JsonValue
Json::Value JsonValue() const
Generate Json::Value for this object.
Definition: Color.cpp:86
openshot::Keyframe::GetLength
int64_t GetLength() const
Definition: KeyFrame.cpp:417
openshot::Keyframe::GetInt
int GetInt(int64_t index) const
Get the rounded INT value at a specific index.
Definition: KeyFrame.cpp:282
openshot::ANCHOR_CANVAS
@ ANCHOR_CANVAS
Anchor the clip to the canvas.
Definition: Enums.h:46
openshot::Clip::SetAttachedClip
void SetAttachedClip(Clip *clipObject)
Set the pointer to the clip this clip is attached to.
Definition: Clip.cpp:331
openshot::Clip::perspective_c4_x
openshot::Keyframe perspective_c4_x
Curves representing X for coordinate 4.
Definition: Clip.h:343
openshot::ReaderClosed
Exception when a reader is closed, and a frame is requested.
Definition: Exceptions.h:363
openshot::ReaderInfo::channel_layout
openshot::ChannelLayout channel_layout
The channel layout (mono, stereo, 5 point surround, etc...)
Definition: ReaderBase.h:62
openshot::Clip::perspective_c1_y
openshot::Keyframe perspective_c1_y
Curves representing Y for coordinate 1.
Definition: Clip.h:338
openshot::COMPOSITE_EXCLUSION
@ COMPOSITE_EXCLUSION
Definition: Enums.h:101
openshot::Clip::channel_filter
openshot::Keyframe channel_filter
A number representing an audio channel to filter (clears all other channels)
Definition: Clip.h:347
openshot::Clip::init_reader_rotation
void init_reader_rotation()
Update default rotation from reader.
Definition: Clip.cpp:146
openshot::ClipBase::Id
void Id(std::string value)
Definition: ClipBase.h:94
openshot::Clip::init_reader_settings
void init_reader_settings()
Init reader info details.
Definition: Clip.cpp:133
openshot::TimelineBase
This class represents a timeline (used for building generic timeline implementations)
Definition: TimelineBase.h:41
MagickUtilities.h
Header file for MagickUtilities (IM6/IM7 compatibility overlay)
openshot::GRAVITY_LEFT
@ GRAVITY_LEFT
Align clip to the left of its parent (middle aligned)
Definition: Enums.h:26
openshot::Keyframe::GetCount
int64_t GetCount() const
Get the number of points in this keyframe.
Definition: KeyFrame.cpp:424
openshot::ReaderInfo::fps
openshot::Fraction fps
Frames per second, as a fraction (i.e. 24/1 = 24 fps)
Definition: ReaderBase.h:48
openshot::Clip::Clip
Clip()
Default Constructor.
Definition: Clip.cpp:197
openshot::ReaderBase
This abstract class is the base class, used by all readers in libopenshot.
Definition: ReaderBase.h:75
openshot::Clip::SetAttachedObject
void SetAttachedObject(std::shared_ptr< openshot::TrackedObjectBase > trackedObject)
Set the pointer to the trackedObject this clip is attached to.
Definition: Clip.cpp:326
openshot::ClipBase::previous_properties
std::string previous_properties
This string contains the previous JSON properties.
Definition: ClipBase.h:39
openshot::Clip::scale
openshot::ScaleType scale
The scale determines how a clip should be resized to fit its parent.
Definition: Clip.h:177
openshot::COMPOSITE_SOURCE_OVER
@ COMPOSITE_SOURCE_OVER
Definition: Enums.h:76
openshot::AudioResampler::GetResampledBuffer
juce::AudioBuffer< float > * GetResampledBuffer()
Get the resampled audio buffer.
Definition: AudioResampler.cpp:106
openshot::VOLUME_MIX_AVERAGE
@ VOLUME_MIX_AVERAGE
Evenly divide the overlapping clips' volume keyframes, so that the sum does not exceed 100%.
Definition: Enums.h:70
openshot::ReaderBase::Close
virtual void Close()=0
Close the reader (and any resources it was consuming)
openshot::AnchorType
AnchorType
This enumeration determines what parent a clip should be aligned to.
Definition: Enums.h:44
openshot::ScaleType
ScaleType
This enumeration determines how clips are scaled to fit their parent container.
Definition: Enums.h:35
openshot::Clip::AttachToObject
void AttachToObject(std::string object_id)
Attach clip to Tracked Object or to another Clip.
Definition: Clip.cpp:303
openshot::Color::alpha
openshot::Keyframe alpha
Curve representing the alpha value (0 - 255)
Definition: Color.h:33
openshot::Clip::has_audio
openshot::Keyframe has_audio
An optional override to determine if this clip has audio (-1=undefined, 0=no, 1=yes)
Definition: Clip.h:351
openshot::Clip::GetParentClip
openshot::Clip * GetParentClip()
Return the associated ParentClip (if any)
Definition: Clip.cpp:545
openshot::Clip::rotation
openshot::Keyframe rotation
Curve representing the rotation (0 to 360)
Definition: Clip.h:323
openshot::SCALE_NONE
@ SCALE_NONE
Do not scale the clip.
Definition: Enums.h:40
openshot::TextReader
This class uses the ImageMagick++ libraries, to create frames with "Text", and return openshot::Frame...
Definition: TextReader.h:62
QtImageReader.h
Header file for QtImageReader class.
openshot::GRAVITY_CENTER
@ GRAVITY_CENTER
Align clip to the center of its parent (middle aligned)
Definition: Enums.h:27
openshot::Clip::JsonValue
Json::Value JsonValue() const override
Generate Json::Value for this object.
Definition: Clip.cpp:940
openshot::Color::red
openshot::Keyframe red
Curve representing the red value (0 - 255)
Definition: Color.h:30
openshot::SCALE_STRETCH
@ SCALE_STRETCH
Scale the clip until both height and width fill the canvas (distort to fit)
Definition: Enums.h:39
ImageReader.h
Header file for ImageReader class.
openshot::FrameMapper::Reader
ReaderBase * Reader()
Get the current reader.
Definition: FrameMapper.cpp:65
openshot::Clip::perspective_c3_x
openshot::Keyframe perspective_c3_x
Curves representing X for coordinate 3.
Definition: Clip.h:341
openshot::COMPOSITE_COLOR_BURN
@ COMPOSITE_COLOR_BURN
Definition: Enums.h:97
openshot::VOLUME_MIX_NONE
@ VOLUME_MIX_NONE
Do not apply any volume mixing adjustments. Just add the samples together.
Definition: Enums.h:69
openshot::ChunkVersion
ChunkVersion
This enumeration allows the user to choose which version of the chunk they would like (low,...
Definition: ChunkReader.h:49
openshot::ClipBase::Layer
void Layer(int value)
Set layer of clip on timeline (lower number is covered by higher numbers)
Definition: ClipBase.cpp:31
openshot::ReaderInfo::channels
int channels
The number of audio channels used in the audio stream.
Definition: ReaderBase.h:61
openshot::VolumeMixType
VolumeMixType
This enumeration determines the strategy when mixing audio with other clips.
Definition: Enums.h:67
openshot::Clip::wave_color
openshot::Color wave_color
Curve representing the color of the audio wave form.
Definition: Clip.h:334
openshot::Clip::shear_y
openshot::Keyframe shear_y
Curve representing Y shear angle in degrees (-45.0=down, 45.0=up)
Definition: Clip.h:325
openshot::Clip::RemoveEffect
void RemoveEffect(openshot::EffectBase *effect)
Remove an effect from the clip.
Definition: Clip.cpp:1256
DummyReader.h
Header file for DummyReader class.
openshot::Color::blue
openshot::Keyframe blue
Curve representing the blue value (0 - 255)
Definition: Color.h:32
openshot::Timeline::GetTrackedObject
std::shared_ptr< openshot::TrackedObjectBase > GetTrackedObject(std::string id) const
Return tracked object pointer by its id.
Definition: Timeline.cpp:242
Exceptions.h
Header file for all Exception classes.
openshot::Clip::mixing
openshot::VolumeMixType mixing
What strategy should be followed when mixing audio with other clips.
Definition: Clip.h:180
FFmpegReader.h
Header file for FFmpegReader class.
openshot::COMPOSITE_COLOR_DODGE
@ COMPOSITE_COLOR_DODGE
Definition: Enums.h:96
openshot::Clip::shear_x
openshot::Keyframe shear_x
Curve representing X shear angle in degrees (-45.0=left, 45.0=right)
Definition: Clip.h:324
openshot::EffectBase::SetJsonValue
virtual void SetJsonValue(const Json::Value root)
Load Json::Value into this object.
Definition: EffectBase.cpp:115
openshot::Keyframe::GetValue
double GetValue(int64_t index) const
Get the value at a specific index.
Definition: KeyFrame.cpp:258
openshot::Clip::location_x
openshot::Keyframe location_x
Curve representing the relative X position in percent based on the gravity (-1 to 1)
Definition: Clip.h:318
openshot::Clip::getFrameMutex
std::recursive_mutex getFrameMutex
Mutex for multiple threads.
Definition: Clip.h:92
openshot::FrameDisplayType
FrameDisplayType
This enumeration determines the display format of the clip's frame number (if any)....
Definition: Enums.h:51
openshot::ReaderBase::ParentClip
openshot::ClipBase * ParentClip()
Parent clip object of this reader (which can be unparented and NULL)
Definition: ReaderBase.cpp:240