OpenShot Library | libopenshot  0.2.5
Clip.cpp
Go to the documentation of this file.
1 /**
2  * @file
3  * @brief Source file for Clip class
4  * @author Jonathan Thomas <jonathan@openshot.org>
5  *
6  * @ref License
7  */
8 
9 /* LICENSE
10  *
11  * Copyright (c) 2008-2019 OpenShot Studios, LLC
12  * <http://www.openshotstudios.com/>. This file is part of
13  * OpenShot Library (libopenshot), an open-source project dedicated to
14  * delivering high quality video editing and animation solutions to the
15  * world. For more information visit <http://www.openshot.org/>.
16  *
17  * OpenShot Library (libopenshot) is free software: you can redistribute it
18  * and/or modify it under the terms of the GNU Lesser General Public License
19  * as published by the Free Software Foundation, either version 3 of the
20  * License, or (at your option) any later version.
21  *
22  * OpenShot Library (libopenshot) is distributed in the hope that it will be
23  * useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
24  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
25  * GNU Lesser General Public License for more details.
26  *
27  * You should have received a copy of the GNU Lesser General Public License
28  * along with OpenShot Library. If not, see <http://www.gnu.org/licenses/>.
29  */
30 
31 #include "../include/Clip.h"
32 #include "../include/FFmpegReader.h"
33 #include "../include/FrameMapper.h"
34 #ifdef USE_IMAGEMAGICK
35  #include "../include/ImageReader.h"
36  #include "../include/TextReader.h"
37 #endif
38 #include "../include/QtImageReader.h"
39 #include "../include/ChunkReader.h"
40 #include "../include/DummyReader.h"
41 
42 using namespace openshot;
43 
44 // Init default settings for a clip
45 void Clip::init_settings()
46 {
47  // Init clip settings
48  Position(0.0);
49  Layer(0);
50  Start(0.0);
51  End(0.0);
53  scale = SCALE_FIT;
57  waveform = false;
59 
60  // Init scale curves
61  scale_x = Keyframe(1.0);
62  scale_y = Keyframe(1.0);
63 
64  // Init location curves
65  location_x = Keyframe(0.0);
66  location_y = Keyframe(0.0);
67 
68  // Init alpha
69  alpha = Keyframe(1.0);
70 
71  // Init rotation
72  init_reader_rotation();
73 
74  // Init time & volume
75  time = Keyframe(1.0);
76  volume = Keyframe(1.0);
77 
78  // Init audio waveform color
79  wave_color = Color((unsigned char)0, (unsigned char)123, (unsigned char)255, (unsigned char)255);
80 
81  // Init crop settings
83  crop_width = Keyframe(1.0);
84  crop_height = Keyframe(1.0);
85  crop_x = Keyframe(0.0);
86  crop_y = Keyframe(0.0);
87 
88  // Init shear and perspective curves
89  shear_x = Keyframe(0.0);
90  shear_y = Keyframe(0.0);
91  perspective_c1_x = Keyframe(-1.0);
92  perspective_c1_y = Keyframe(-1.0);
93  perspective_c2_x = Keyframe(-1.0);
94  perspective_c2_y = Keyframe(-1.0);
95  perspective_c3_x = Keyframe(-1.0);
96  perspective_c3_y = Keyframe(-1.0);
97  perspective_c4_x = Keyframe(-1.0);
98  perspective_c4_y = Keyframe(-1.0);
99 
100  // Init audio channel filter and mappings
101  channel_filter = Keyframe(-1.0);
102  channel_mapping = Keyframe(-1.0);
103 
104  // Init audio and video overrides
105  has_audio = Keyframe(-1.0);
106  has_video = Keyframe(-1.0);
107 }
108 
109 // Init reader's rotation (if any)
110 void Clip::init_reader_rotation() {
111  // Only init rotation from reader when needed
112  if (rotation.GetCount() > 1)
113  // Do nothing if more than 1 rotation Point
114  return;
115  else if (rotation.GetCount() == 1 && rotation.GetValue(1) != 0.0)
116  // Do nothing if 1 Point, and it's not the default value
117  return;
118 
119  // Init rotation
120  if (reader && reader->info.metadata.count("rotate") > 0) {
121  // Use reader metadata rotation (if any)
122  // This is typical with cell phone videos filmed in different orientations
123  try {
124  float rotate_metadata = strtof(reader->info.metadata["rotate"].c_str(), 0);
125  rotation = Keyframe(rotate_metadata);
126  } catch (const std::exception& e) {}
127  }
128  else
129  // Default no rotation
130  rotation = Keyframe(0.0);
131 }
132 
133 // Default Constructor for a clip
134 Clip::Clip() : resampler(NULL), audio_cache(NULL), reader(NULL), allocated_reader(NULL)
135 {
136  // Init all default settings
137  init_settings();
138 }
139 
140 // Constructor with reader
141 Clip::Clip(ReaderBase* new_reader) : resampler(NULL), audio_cache(NULL), reader(new_reader), allocated_reader(NULL)
142 {
143  // Init all default settings
144  init_settings();
145 
146  // Open and Close the reader (to set the duration of the clip)
147  Open();
148  Close();
149 
150  // Update duration
151  End(reader->info.duration);
152 }
153 
154 // Constructor with filepath
155 Clip::Clip(std::string path) : resampler(NULL), audio_cache(NULL), reader(NULL), allocated_reader(NULL)
156 {
157  // Init all default settings
158  init_settings();
159 
160  // Get file extension (and convert to lower case)
161  std::string ext = get_file_extension(path);
162  transform(ext.begin(), ext.end(), ext.begin(), ::tolower);
163 
164  // Determine if common video formats
165  if (ext=="avi" || ext=="mov" || ext=="mkv" || ext=="mpg" || ext=="mpeg" || ext=="mp3" || ext=="mp4" || ext=="mts" ||
166  ext=="ogg" || ext=="wav" || ext=="wmv" || ext=="webm" || ext=="vob")
167  {
168  try
169  {
170  // Open common video format
171  reader = new FFmpegReader(path);
172 
173  } catch(...) { }
174  }
175 
176  // If no video found, try each reader
177  if (!reader)
178  {
179  try
180  {
181  // Try an image reader
182  reader = new QtImageReader(path);
183 
184  } catch(...) {
185  try
186  {
187  // Try a video reader
188  reader = new FFmpegReader(path);
189 
190  } catch(...) { }
191  }
192  }
193 
194  // Update duration
195  if (reader) {
196  End(reader->info.duration);
197  allocated_reader = reader;
198  init_reader_rotation();
199  }
200 }
201 
202 // Destructor
204 {
205  // Delete the reader if clip created it
206  if (allocated_reader) {
207  delete allocated_reader;
208  allocated_reader = NULL;
209  }
210 
211  // Close the resampler
212  if (resampler) {
213  delete resampler;
214  resampler = NULL;
215  }
216 }
217 
218 /// Set the current reader
219 void Clip::Reader(ReaderBase* new_reader)
220 {
221  // set reader pointer
222  reader = new_reader;
223 
224  // set parent
225  reader->SetClip(this);
226 
227  // Init rotation (if any)
228  init_reader_rotation();
229 }
230 
231 /// Get the current reader
233 {
234  if (reader)
235  return reader;
236  else
237  // Throw error if reader not initialized
238  throw ReaderClosed("No Reader has been initialized for this Clip. Call Reader(*reader) before calling this method.");
239 }
240 
241 // Open the internal reader
243 {
244  if (reader)
245  {
246  // Open the reader
247  reader->Open();
248 
249  // Set some clip properties from the file reader
250  if (end == 0.0)
251  End(reader->info.duration);
252  }
253  else
254  // Throw error if reader not initialized
255  throw ReaderClosed("No Reader has been initialized for this Clip. Call Reader(*reader) before calling this method.");
256 }
257 
258 // Close the internal reader
260 {
261  if (reader) {
262  ZmqLogger::Instance()->AppendDebugMethod("Clip::Close");
263 
264  // Close the reader
265  reader->Close();
266  }
267  else
268  // Throw error if reader not initialized
269  throw ReaderClosed("No Reader has been initialized for this Clip. Call Reader(*reader) before calling this method.");
270 }
271 
272 // Get end position of clip (trim end of video), which can be affected by the time curve.
273 float Clip::End() const
274 {
275  // if a time curve is present, use its length
276  if (time.GetCount() > 1)
277  {
278  // Determine the FPS fo this clip
279  float fps = 24.0;
280  if (reader)
281  // file reader
282  fps = reader->info.fps.ToFloat();
283  else
284  // Throw error if reader not initialized
285  throw ReaderClosed("No Reader has been initialized for this Clip. Call Reader(*reader) before calling this method.");
286 
287  return float(time.GetLength()) / fps;
288  }
289  else
290  // just use the duration (as detected by the reader)
291  return end;
292 }
293 
294 // Get an openshot::Frame object for a specific frame number of this reader.
295 std::shared_ptr<Frame> Clip::GetFrame(int64_t requested_frame)
296 {
297  if (reader)
298  {
299  // Adjust out of bounds frame number
300  requested_frame = adjust_frame_number_minimum(requested_frame);
301 
302  // Adjust has_video and has_audio overrides
303  int enabled_audio = has_audio.GetInt(requested_frame);
304  if (enabled_audio == -1 && reader && reader->info.has_audio)
305  enabled_audio = 1;
306  else if (enabled_audio == -1 && reader && !reader->info.has_audio)
307  enabled_audio = 0;
308  int enabled_video = has_video.GetInt(requested_frame);
309  if (enabled_video == -1 && reader && reader->info.has_video)
310  enabled_video = 1;
311  else if (enabled_video == -1 && reader && !reader->info.has_audio)
312  enabled_video = 0;
313 
314  // Is a time map detected
315  int64_t new_frame_number = requested_frame;
316  int64_t time_mapped_number = adjust_frame_number_minimum(time.GetLong(requested_frame));
317  if (time.GetLength() > 1)
318  new_frame_number = time_mapped_number;
319 
320  // Now that we have re-mapped what frame number is needed, go and get the frame pointer
321  std::shared_ptr<Frame> original_frame;
322  #pragma omp critical (Clip_GetFrame)
323  original_frame = GetOrCreateFrame(new_frame_number);
324 
325  // Create a new frame
326  std::shared_ptr<Frame> frame(new Frame(new_frame_number, 1, 1, "#000000", original_frame->GetAudioSamplesCount(), original_frame->GetAudioChannelsCount()));
327  #pragma omp critical (Clip_GetFrame)
328  {
329  frame->SampleRate(original_frame->SampleRate());
330  frame->ChannelsLayout(original_frame->ChannelsLayout());
331  }
332 
333  // Copy the image from the odd field
334  if (enabled_video)
335  frame->AddImage(std::shared_ptr<QImage>(new QImage(*original_frame->GetImage())));
336 
337  // Loop through each channel, add audio
338  if (enabled_audio && reader->info.has_audio)
339  for (int channel = 0; channel < original_frame->GetAudioChannelsCount(); channel++)
340  frame->AddAudio(true, channel, 0, original_frame->GetAudioSamples(channel), original_frame->GetAudioSamplesCount(), 1.0);
341 
342  // Get time mapped frame number (used to increase speed, change direction, etc...)
343  get_time_mapped_frame(frame, requested_frame);
344 
345  // Apply effects to the frame (if any)
346  apply_effects(frame);
347 
348  // Return processed 'frame'
349  return frame;
350  }
351  else
352  // Throw error if reader not initialized
353  throw ReaderClosed("No Reader has been initialized for this Clip. Call Reader(*reader) before calling this method.");
354 }
355 
356 // Get file extension
357 std::string Clip::get_file_extension(std::string path)
358 {
359  // return last part of path
360  return path.substr(path.find_last_of(".") + 1);
361 }
362 
363 // Reverse an audio buffer
364 void Clip::reverse_buffer(juce::AudioSampleBuffer* buffer)
365 {
366  int number_of_samples = buffer->getNumSamples();
367  int channels = buffer->getNumChannels();
368 
369  // Reverse array (create new buffer to hold the reversed version)
370  juce::AudioSampleBuffer *reversed = new juce::AudioSampleBuffer(channels, number_of_samples);
371  reversed->clear();
372 
373  for (int channel = 0; channel < channels; channel++)
374  {
375  int n=0;
376  for (int s = number_of_samples - 1; s >= 0; s--, n++)
377  reversed->getWritePointer(channel)[n] = buffer->getWritePointer(channel)[s];
378  }
379 
380  // Copy the samples back to the original array
381  buffer->clear();
382  // Loop through channels, and get audio samples
383  for (int channel = 0; channel < channels; channel++)
384  // Get the audio samples for this channel
385  buffer->addFrom(channel, 0, reversed->getReadPointer(channel), number_of_samples, 1.0f);
386 
387  delete reversed;
388  reversed = NULL;
389 }
390 
// Adjust the audio and image of a time mapped frame
//
// When the "time" keyframe has more than one point, the clip is remapped in
// time (sped up, slowed down, and/or reversed). This method rebuilds the
// audio of 'frame' so it matches the playback speed and direction at the
// original (pre-mapping) position 'frame_number'. Three cases are handled:
//   1. repeat-fraction denominator > 1  -> slowing down (stretch audio)
//   2. 1 < |delta| < 100                -> speeding up (merge several frames)
//   3. otherwise                        -> same speed, possibly reversed
void Clip::get_time_mapped_frame(std::shared_ptr<Frame> frame, int64_t frame_number)
{
	// Check for valid reader
	if (!reader)
		// Throw error if reader not initialized
		throw ReaderClosed("No Reader has been initialized for this Clip. Call Reader(*reader) before calling this method.");

	// Check for a valid time map curve (a single point means no remapping)
	if (time.GetLength() > 1)
	{
		// Serialize all time-mapped audio work (shares this->resampler)
		const GenericScopedLock<juce::CriticalSection> lock(getFrameCriticalSection);

		// create buffer and resampler (resampler is lazily created and reused across calls)
		juce::AudioSampleBuffer *samples = NULL;
		if (!resampler)
			resampler = new AudioResampler();

		// Get new frame number (the already time-mapped number stored in 'frame')
		int new_frame_number = frame->number;

		// Get delta (difference in previous Y value); |delta| > 1 means the curve
		// skips source frames (speed-up), negative delta means playing backwards
		int delta = int(round(time.GetDelta(frame_number)));

		// Init audio vars
		int sample_rate = reader->info.sample_rate;	// NOTE(review): unused below
		int channels = reader->info.channels;
		int number_of_samples = GetOrCreateFrame(new_frame_number)->GetAudioSamplesCount();

		// Only resample audio if needed
		if (reader->info.has_audio) {
			// Determine if we are speeding up or slowing down
			if (time.GetRepeatFraction(frame_number).den > 1) {
				// SLOWING DOWN AUDIO
				// Resample data, and return new buffer pointer
				juce::AudioSampleBuffer *resampled_buffer = NULL;
				int resampled_buffer_size = 0;

				// SLOW DOWN audio (split audio)
				samples = new juce::AudioSampleBuffer(channels, number_of_samples);
				samples->clear();

				// Loop through channels, and get audio samples
				for (int channel = 0; channel < channels; channel++)
					// Get the audio samples for this channel
					samples->addFrom(channel, 0, GetOrCreateFrame(new_frame_number)->GetAudioSamples(channel),
							number_of_samples, 1.0f);

				// Reverse the samples (if needed)
				if (!time.IsIncreasing(frame_number))
					reverse_buffer(samples);

				// Resample audio to be X times slower (where X is the denominator of the repeat fraction)
				resampler->SetBuffer(samples, 1.0 / time.GetRepeatFraction(frame_number).den);

				// Resample the data (since it's the 1st slice)
				resampled_buffer = resampler->GetResampledBuffer();

				// Get the length of the resampled buffer (if one exists)
				resampled_buffer_size = resampled_buffer->getNumSamples();	// NOTE(review): stored but never read

				// Just take the samples we need for the requested frame
				// (the repeat-fraction numerator selects which slice of the
				// stretched audio belongs to this repeated frame)
				int start = (number_of_samples * (time.GetRepeatFraction(frame_number).num - 1));
				if (start > 0)
					start -= 1;
				for (int channel = 0; channel < channels; channel++)
					// Add new (slower) samples, to the frame object
					frame->AddAudio(true, channel, 0, resampled_buffer->getReadPointer(channel, start),
							number_of_samples, 1.0f);

				// Clean up: only un-reference the pointer; the buffer appears to be
				// owned by the resampler (it is never deleted here) -- TODO confirm
				resampled_buffer = NULL;

			}
			else if (abs(delta) > 1 && abs(delta) < 100) {
				int start = 0;
				if (delta > 0) {
					// SPEED UP (multiple frames of audio), as long as it's not more than X frames
					// First pass: count the total samples across all skipped frames
					int total_delta_samples = 0;
					for (int delta_frame = new_frame_number - (delta - 1);
						 delta_frame <= new_frame_number; delta_frame++)
						total_delta_samples += Frame::GetSamplesPerFrame(delta_frame, reader->info.fps,
												 reader->info.sample_rate,
												 reader->info.channels);

					// Allocate a new sample buffer for these delta frames
					samples = new juce::AudioSampleBuffer(channels, total_delta_samples);
					samples->clear();

					// Second pass: concatenate each skipped frame's audio into 'samples'
					for (int delta_frame = new_frame_number - (delta - 1);
						 delta_frame <= new_frame_number; delta_frame++) {
						// buffer to hold delta samples
						int number_of_delta_samples = GetOrCreateFrame(delta_frame)->GetAudioSamplesCount();
						juce::AudioSampleBuffer *delta_samples = new juce::AudioSampleBuffer(channels,
												 number_of_delta_samples);
						delta_samples->clear();

						for (int channel = 0; channel < channels; channel++)
							delta_samples->addFrom(channel, 0, GetOrCreateFrame(delta_frame)->GetAudioSamples(channel),
									number_of_delta_samples, 1.0f);

						// Reverse the samples (if needed)
						if (!time.IsIncreasing(frame_number))
							reverse_buffer(delta_samples);

						// Copy the samples into the combined buffer at 'start'
						for (int channel = 0; channel < channels; channel++)
							// Get the audio samples for this channel
							samples->addFrom(channel, start, delta_samples->getReadPointer(channel),
									number_of_delta_samples, 1.0f);

						// Clean up
						delete delta_samples;
						delta_samples = NULL;

						// Increment start position
						start += number_of_delta_samples;
					}
				}
				else {
					// SPEED UP in reverse: same as above but iterating frames backwards
					int total_delta_samples = 0;
					for (int delta_frame = new_frame_number - (delta + 1);
						 delta_frame >= new_frame_number; delta_frame--)
						total_delta_samples += Frame::GetSamplesPerFrame(delta_frame, reader->info.fps,
												 reader->info.sample_rate,
												 reader->info.channels);

					// Allocate a new sample buffer for these delta frames
					samples = new juce::AudioSampleBuffer(channels, total_delta_samples);
					samples->clear();

					// Loop through each frame in this delta (descending order)
					for (int delta_frame = new_frame_number - (delta + 1);
						 delta_frame >= new_frame_number; delta_frame--) {
						// buffer to hold delta samples
						int number_of_delta_samples = GetOrCreateFrame(delta_frame)->GetAudioSamplesCount();
						juce::AudioSampleBuffer *delta_samples = new juce::AudioSampleBuffer(channels,
												 number_of_delta_samples);
						delta_samples->clear();

						for (int channel = 0; channel < channels; channel++)
							delta_samples->addFrom(channel, 0, GetOrCreateFrame(delta_frame)->GetAudioSamples(channel),
									number_of_delta_samples, 1.0f);

						// Reverse the samples (if needed)
						if (!time.IsIncreasing(frame_number))
							reverse_buffer(delta_samples);

						// Copy the samples into the combined buffer at 'start'
						for (int channel = 0; channel < channels; channel++)
							// Get the audio samples for this channel
							samples->addFrom(channel, start, delta_samples->getReadPointer(channel),
									number_of_delta_samples, 1.0f);

						// Clean up
						delete delta_samples;
						delta_samples = NULL;

						// Increment start position
						start += number_of_delta_samples;
					}
				}

				// Resample audio to be X times faster (where X is the delta of the repeat fraction)
				resampler->SetBuffer(samples, float(start) / float(number_of_samples));

				// Resample data, and return new buffer pointer
				juce::AudioSampleBuffer *buffer = resampler->GetResampledBuffer();
				int resampled_buffer_size = buffer->getNumSamples();	// NOTE(review): stored but never read

				// Add the newly resized audio samples to the current frame
				for (int channel = 0; channel < channels; channel++)
					// Add new (slower) samples, to the frame object
					frame->AddAudio(true, channel, 0, buffer->getReadPointer(channel), number_of_samples, 1.0f);

				// Clean up: un-reference only; buffer presumably owned by the resampler -- TODO confirm
				buffer = NULL;
			}
			else {
				// Same speed: use the samples on this frame (but maybe reverse them if needed)
				samples = new juce::AudioSampleBuffer(channels, number_of_samples);
				samples->clear();

				// Loop through channels, and get audio samples
				for (int channel = 0; channel < channels; channel++)
					// Get the audio samples for this channel
					samples->addFrom(channel, 0, frame->GetAudioSamples(channel), number_of_samples, 1.0f);

				// reverse the samples
				if (!time.IsIncreasing(frame_number))
					reverse_buffer(samples);

				// Add reversed samples to the frame object
				for (int channel = 0; channel < channels; channel++)
					frame->AddAudio(true, channel, 0, samples->getReadPointer(channel), number_of_samples, 1.0f);


			}

			delete samples;
			samples = NULL;
		}
	}
}
597 
598 // Adjust frame number minimum value
599 int64_t Clip::adjust_frame_number_minimum(int64_t frame_number)
600 {
601  // Never return a frame number 0 or below
602  if (frame_number < 1)
603  return 1;
604  else
605  return frame_number;
606 
607 }
608 
609 // Get or generate a blank frame
610 std::shared_ptr<Frame> Clip::GetOrCreateFrame(int64_t number)
611 {
612  std::shared_ptr<Frame> new_frame;
613 
614  // Init some basic properties about this frame
615  int samples_in_frame = Frame::GetSamplesPerFrame(number, reader->info.fps, reader->info.sample_rate, reader->info.channels);
616 
617  try {
618  // Debug output
619  ZmqLogger::Instance()->AppendDebugMethod("Clip::GetOrCreateFrame (from reader)", "number", number, "samples_in_frame", samples_in_frame);
620 
621  // Attempt to get a frame (but this could fail if a reader has just been closed)
622  new_frame = reader->GetFrame(number);
623 
624  // Return real frame
625  if (new_frame)
626  return new_frame;
627 
628  } catch (const ReaderClosed & e) {
629  // ...
630  } catch (const TooManySeeks & e) {
631  // ...
632  } catch (const OutOfBoundsFrame & e) {
633  // ...
634  }
635 
636  // Debug output
637  ZmqLogger::Instance()->AppendDebugMethod("Clip::GetOrCreateFrame (create blank)", "number", number, "samples_in_frame", samples_in_frame);
638 
639  // Create blank frame
640  new_frame = std::make_shared<Frame>(number, reader->info.width, reader->info.height, "#000000", samples_in_frame, reader->info.channels);
641  new_frame->SampleRate(reader->info.sample_rate);
642  new_frame->ChannelsLayout(reader->info.channel_layout);
643  new_frame->AddAudioSilence(samples_in_frame);
644  return new_frame;
645 }
646 
647 // Generate JSON string of this object
648 std::string Clip::Json() const {
649 
650  // Return formatted string
651  return JsonValue().toStyledString();
652 }
653 
// Get all properties for a specific frame
//
// Builds the UI-facing property list (name, value, min/max, read-only flag,
// dropdown choices) evaluated at 'requested_frame'. This is distinct from
// JsonValue(): it describes properties for an editor, not for serialization.
std::string Clip::PropertiesJSON(int64_t requested_frame) const {

	// Generate JSON properties list
	// (add_property_json args: label, value, type, memo, keyframe*, min, max, readonly, frame)
	Json::Value root;
	root["id"] = add_property_json("ID", 0.0, "string", Id(), NULL, -1, -1, true, requested_frame);
	root["position"] = add_property_json("Position", Position(), "float", "", NULL, 0, 30 * 60 * 60 * 48, false, requested_frame);
	root["layer"] = add_property_json("Track", Layer(), "int", "", NULL, 0, 20, false, requested_frame);
	root["start"] = add_property_json("Start", Start(), "float", "", NULL, 0, 30 * 60 * 60 * 48, false, requested_frame);
	root["end"] = add_property_json("End", End(), "float", "", NULL, 0, 30 * 60 * 60 * 48, false, requested_frame);
	root["duration"] = add_property_json("Duration", Duration(), "float", "", NULL, 0, 30 * 60 * 60 * 48, true, requested_frame);
	root["gravity"] = add_property_json("Gravity", gravity, "int", "", NULL, 0, 8, false, requested_frame);
	root["scale"] = add_property_json("Scale", scale, "int", "", NULL, 0, 3, false, requested_frame);
	root["display"] = add_property_json("Frame Number", display, "int", "", NULL, 0, 3, false, requested_frame);
	root["mixing"] = add_property_json("Volume Mixing", mixing, "int", "", NULL, 0, 2, false, requested_frame);
	root["waveform"] = add_property_json("Waveform", waveform, "int", "", NULL, 0, 1, false, requested_frame);

	// Add gravity choices (dropdown style)
	root["gravity"]["choices"].append(add_property_choice_json("Top Left", GRAVITY_TOP_LEFT, gravity));
	root["gravity"]["choices"].append(add_property_choice_json("Top Center", GRAVITY_TOP, gravity));
	root["gravity"]["choices"].append(add_property_choice_json("Top Right", GRAVITY_TOP_RIGHT, gravity));
	root["gravity"]["choices"].append(add_property_choice_json("Left", GRAVITY_LEFT, gravity));
	root["gravity"]["choices"].append(add_property_choice_json("Center", GRAVITY_CENTER, gravity));
	root["gravity"]["choices"].append(add_property_choice_json("Right", GRAVITY_RIGHT, gravity));
	root["gravity"]["choices"].append(add_property_choice_json("Bottom Left", GRAVITY_BOTTOM_LEFT, gravity));
	root["gravity"]["choices"].append(add_property_choice_json("Bottom Center", GRAVITY_BOTTOM, gravity));
	root["gravity"]["choices"].append(add_property_choice_json("Bottom Right", GRAVITY_BOTTOM_RIGHT, gravity));

	// Add scale choices (dropdown style)
	root["scale"]["choices"].append(add_property_choice_json("Crop", SCALE_CROP, scale));
	root["scale"]["choices"].append(add_property_choice_json("Best Fit", SCALE_FIT, scale));
	root["scale"]["choices"].append(add_property_choice_json("Stretch", SCALE_STRETCH, scale));
	root["scale"]["choices"].append(add_property_choice_json("None", SCALE_NONE, scale));

	// Add frame number display choices (dropdown style)
	root["display"]["choices"].append(add_property_choice_json("None", FRAME_DISPLAY_NONE, display));
	root["display"]["choices"].append(add_property_choice_json("Clip", FRAME_DISPLAY_CLIP, display));
	root["display"]["choices"].append(add_property_choice_json("Timeline", FRAME_DISPLAY_TIMELINE, display));
	root["display"]["choices"].append(add_property_choice_json("Both", FRAME_DISPLAY_BOTH, display));

	// Add volume mixing choices (dropdown style)
	root["mixing"]["choices"].append(add_property_choice_json("None", VOLUME_MIX_NONE, mixing));
	root["mixing"]["choices"].append(add_property_choice_json("Average", VOLUME_MIX_AVERAGE, mixing));
	root["mixing"]["choices"].append(add_property_choice_json("Reduce", VOLUME_MIX_REDUCE, mixing));

	// Add waveform choices (dropdown style)
	root["waveform"]["choices"].append(add_property_choice_json("Yes", true, waveform));
	root["waveform"]["choices"].append(add_property_choice_json("No", false, waveform));

	// Keyframes (animatable properties, evaluated at requested_frame)
	root["location_x"] = add_property_json("Location X", location_x.GetValue(requested_frame), "float", "", &location_x, -1.0, 1.0, false, requested_frame);
	root["location_y"] = add_property_json("Location Y", location_y.GetValue(requested_frame), "float", "", &location_y, -1.0, 1.0, false, requested_frame);
	root["scale_x"] = add_property_json("Scale X", scale_x.GetValue(requested_frame), "float", "", &scale_x, 0.0, 1.0, false, requested_frame);
	root["scale_y"] = add_property_json("Scale Y", scale_y.GetValue(requested_frame), "float", "", &scale_y, 0.0, 1.0, false, requested_frame);
	root["alpha"] = add_property_json("Alpha", alpha.GetValue(requested_frame), "float", "", &alpha, 0.0, 1.0, false, requested_frame);
	root["shear_x"] = add_property_json("Shear X", shear_x.GetValue(requested_frame), "float", "", &shear_x, -1.0, 1.0, false, requested_frame);
	root["shear_y"] = add_property_json("Shear Y", shear_y.GetValue(requested_frame), "float", "", &shear_y, -1.0, 1.0, false, requested_frame);
	root["rotation"] = add_property_json("Rotation", rotation.GetValue(requested_frame), "float", "", &rotation, -360, 360, false, requested_frame);
	root["volume"] = add_property_json("Volume", volume.GetValue(requested_frame), "float", "", &volume, 0.0, 1.0, false, requested_frame);
	root["time"] = add_property_json("Time", time.GetValue(requested_frame), "float", "", &time, 0.0, 30 * 60 * 60 * 48, false, requested_frame);
	root["channel_filter"] = add_property_json("Channel Filter", channel_filter.GetValue(requested_frame), "int", "", &channel_filter, -1, 10, false, requested_frame);
	root["channel_mapping"] = add_property_json("Channel Mapping", channel_mapping.GetValue(requested_frame), "int", "", &channel_mapping, -1, 10, false, requested_frame);
	root["has_audio"] = add_property_json("Enable Audio", has_audio.GetValue(requested_frame), "int", "", &has_audio, -1, 1.0, false, requested_frame);
	root["has_video"] = add_property_json("Enable Video", has_video.GetValue(requested_frame), "int", "", &has_video, -1, 1.0, false, requested_frame);

	// Add enable audio/video choices (dropdown style; -1 means "auto")
	root["has_audio"]["choices"].append(add_property_choice_json("Auto", -1, has_audio.GetValue(requested_frame)));
	root["has_audio"]["choices"].append(add_property_choice_json("Off", 0, has_audio.GetValue(requested_frame)));
	root["has_audio"]["choices"].append(add_property_choice_json("On", 1, has_audio.GetValue(requested_frame)));
	root["has_video"]["choices"].append(add_property_choice_json("Auto", -1, has_video.GetValue(requested_frame)));
	root["has_video"]["choices"].append(add_property_choice_json("Off", 0, has_video.GetValue(requested_frame)));
	root["has_video"]["choices"].append(add_property_choice_json("On", 1, has_video.GetValue(requested_frame)));

	root["crop_x"] = add_property_json("Crop X", crop_x.GetValue(requested_frame), "float", "", &crop_x, -1.0, 1.0, false, requested_frame);
	root["crop_y"] = add_property_json("Crop Y", crop_y.GetValue(requested_frame), "float", "", &crop_y, -1.0, 1.0, false, requested_frame);
	root["crop_width"] = add_property_json("Crop Width", crop_width.GetValue(requested_frame), "float", "", &crop_width, 0.0, 1.0, false, requested_frame);
	root["crop_height"] = add_property_json("Crop Height", crop_height.GetValue(requested_frame), "float", "", &crop_height, 0.0, 1.0, false, requested_frame);

	// Waveform color (a parent entry plus one child entry per channel)
	root["wave_color"] = add_property_json("Wave Color", 0.0, "color", "", &wave_color.red, 0, 255, false, requested_frame);
	root["wave_color"]["red"] = add_property_json("Red", wave_color.red.GetValue(requested_frame), "float", "", &wave_color.red, 0, 255, false, requested_frame);
	root["wave_color"]["blue"] = add_property_json("Blue", wave_color.blue.GetValue(requested_frame), "float", "", &wave_color.blue, 0, 255, false, requested_frame);
	root["wave_color"]["green"] = add_property_json("Green", wave_color.green.GetValue(requested_frame), "float", "", &wave_color.green, 0, 255, false, requested_frame);


	// Return formatted string
	return root.toStyledString();
}
741 
// Generate Json::Value for this object
//
// Serializes every clip property (including all keyframes, effects, and the
// reader, when one exists) on top of the parent ClipBase properties. This is
// the inverse of SetJsonValue().
Json::Value Clip::JsonValue() const {

	// Create root json object
	Json::Value root = ClipBase::JsonValue(); // get parent properties
	root["gravity"] = gravity;
	root["scale"] = scale;
	root["anchor"] = anchor;
	root["display"] = display;
	root["mixing"] = mixing;
	root["waveform"] = waveform;
	root["scale_x"] = scale_x.JsonValue();
	root["scale_y"] = scale_y.JsonValue();
	root["location_x"] = location_x.JsonValue();
	root["location_y"] = location_y.JsonValue();
	root["alpha"] = alpha.JsonValue();
	root["rotation"] = rotation.JsonValue();
	root["time"] = time.JsonValue();
	root["volume"] = volume.JsonValue();
	root["wave_color"] = wave_color.JsonValue();
	root["crop_width"] = crop_width.JsonValue();
	root["crop_height"] = crop_height.JsonValue();
	root["crop_x"] = crop_x.JsonValue();
	root["crop_y"] = crop_y.JsonValue();
	root["shear_x"] = shear_x.JsonValue();
	root["shear_y"] = shear_y.JsonValue();
	root["channel_filter"] = channel_filter.JsonValue();
	root["channel_mapping"] = channel_mapping.JsonValue();
	root["has_audio"] = has_audio.JsonValue();
	root["has_video"] = has_video.JsonValue();
	root["perspective_c1_x"] = perspective_c1_x.JsonValue();
	root["perspective_c1_y"] = perspective_c1_y.JsonValue();
	root["perspective_c2_x"] = perspective_c2_x.JsonValue();
	root["perspective_c2_y"] = perspective_c2_y.JsonValue();
	root["perspective_c3_x"] = perspective_c3_x.JsonValue();
	root["perspective_c3_y"] = perspective_c3_y.JsonValue();
	root["perspective_c4_x"] = perspective_c4_x.JsonValue();
	root["perspective_c4_y"] = perspective_c4_y.JsonValue();

	// Add array of effects
	root["effects"] = Json::Value(Json::arrayValue);

	// loop through effects, serializing each one
	for (auto existing_effect : effects)
	{
		root["effects"].append(existing_effect->JsonValue());
	}

	// Include the reader's full description when one is attached
	if (reader)
		root["reader"] = reader->JsonValue();

	// return JsonValue
	return root;
}
796 
797 // Load JSON string into this object
798 void Clip::SetJson(const std::string value) {
799 
800  // Parse JSON string into JSON objects
801  try
802  {
803  const Json::Value root = openshot::stringToJson(value);
804  // Set all values that match
805  SetJsonValue(root);
806  }
807  catch (const std::exception& e)
808  {
809  // Error parsing JSON (or missing keys)
810  throw InvalidJSON("JSON is invalid (missing keys or invalid data types)");
811  }
812 }
813 
814 // Load Json::Value into this object
815 void Clip::SetJsonValue(const Json::Value root) {
816 
817  // Set parent data
819 
820  // Set data from Json (if key is found)
821  if (!root["gravity"].isNull())
822  gravity = (GravityType) root["gravity"].asInt();
823  if (!root["scale"].isNull())
824  scale = (ScaleType) root["scale"].asInt();
825  if (!root["anchor"].isNull())
826  anchor = (AnchorType) root["anchor"].asInt();
827  if (!root["display"].isNull())
828  display = (FrameDisplayType) root["display"].asInt();
829  if (!root["mixing"].isNull())
830  mixing = (VolumeMixType) root["mixing"].asInt();
831  if (!root["waveform"].isNull())
832  waveform = root["waveform"].asBool();
833  if (!root["scale_x"].isNull())
834  scale_x.SetJsonValue(root["scale_x"]);
835  if (!root["scale_y"].isNull())
836  scale_y.SetJsonValue(root["scale_y"]);
837  if (!root["location_x"].isNull())
838  location_x.SetJsonValue(root["location_x"]);
839  if (!root["location_y"].isNull())
840  location_y.SetJsonValue(root["location_y"]);
841  if (!root["alpha"].isNull())
842  alpha.SetJsonValue(root["alpha"]);
843  if (!root["rotation"].isNull())
844  rotation.SetJsonValue(root["rotation"]);
845  if (!root["time"].isNull())
846  time.SetJsonValue(root["time"]);
847  if (!root["volume"].isNull())
848  volume.SetJsonValue(root["volume"]);
849  if (!root["wave_color"].isNull())
850  wave_color.SetJsonValue(root["wave_color"]);
851  if (!root["crop_width"].isNull())
852  crop_width.SetJsonValue(root["crop_width"]);
853  if (!root["crop_height"].isNull())
854  crop_height.SetJsonValue(root["crop_height"]);
855  if (!root["crop_x"].isNull())
856  crop_x.SetJsonValue(root["crop_x"]);
857  if (!root["crop_y"].isNull())
858  crop_y.SetJsonValue(root["crop_y"]);
859  if (!root["shear_x"].isNull())
860  shear_x.SetJsonValue(root["shear_x"]);
861  if (!root["shear_y"].isNull())
862  shear_y.SetJsonValue(root["shear_y"]);
863  if (!root["channel_filter"].isNull())
864  channel_filter.SetJsonValue(root["channel_filter"]);
865  if (!root["channel_mapping"].isNull())
866  channel_mapping.SetJsonValue(root["channel_mapping"]);
867  if (!root["has_audio"].isNull())
868  has_audio.SetJsonValue(root["has_audio"]);
869  if (!root["has_video"].isNull())
870  has_video.SetJsonValue(root["has_video"]);
871  if (!root["perspective_c1_x"].isNull())
872  perspective_c1_x.SetJsonValue(root["perspective_c1_x"]);
873  if (!root["perspective_c1_y"].isNull())
874  perspective_c1_y.SetJsonValue(root["perspective_c1_y"]);
875  if (!root["perspective_c2_x"].isNull())
876  perspective_c2_x.SetJsonValue(root["perspective_c2_x"]);
877  if (!root["perspective_c2_y"].isNull())
878  perspective_c2_y.SetJsonValue(root["perspective_c2_y"]);
879  if (!root["perspective_c3_x"].isNull())
880  perspective_c3_x.SetJsonValue(root["perspective_c3_x"]);
881  if (!root["perspective_c3_y"].isNull())
882  perspective_c3_y.SetJsonValue(root["perspective_c3_y"]);
883  if (!root["perspective_c4_x"].isNull())
884  perspective_c4_x.SetJsonValue(root["perspective_c4_x"]);
885  if (!root["perspective_c4_y"].isNull())
886  perspective_c4_y.SetJsonValue(root["perspective_c4_y"]);
887  if (!root["effects"].isNull()) {
888 
889  // Clear existing effects
890  effects.clear();
891 
892  // loop through effects
893  for (const auto existing_effect : root["effects"]) {
894  // Create Effect
895  EffectBase *e = NULL;
896 
897  if (!existing_effect["type"].isNull()) {
898  // Create instance of effect
899  if ( (e = EffectInfo().CreateEffect(existing_effect["type"].asString())) ) {
900 
901  // Load Json into Effect
902  e->SetJsonValue(existing_effect);
903 
904  // Add Effect to Timeline
905  AddEffect(e);
906  }
907  }
908  }
909  }
910  if (!root["reader"].isNull()) // does Json contain a reader?
911  {
912  if (!root["reader"]["type"].isNull()) // does the reader Json contain a 'type'?
913  {
914  // Close previous reader (if any)
915  bool already_open = false;
916  if (reader)
917  {
918  // Track if reader was open
919  already_open = reader->IsOpen();
920 
921  // Close and delete existing reader (if any)
922  reader->Close();
923  delete reader;
924  reader = NULL;
925  }
926 
927  // Create new reader (and load properties)
928  std::string type = root["reader"]["type"].asString();
929 
930  if (type == "FFmpegReader") {
931 
932  // Create new reader
933  reader = new FFmpegReader(root["reader"]["path"].asString(), false);
934  reader->SetJsonValue(root["reader"]);
935 
936  } else if (type == "QtImageReader") {
937 
938  // Create new reader
939  reader = new QtImageReader(root["reader"]["path"].asString(), false);
940  reader->SetJsonValue(root["reader"]);
941 
942 #ifdef USE_IMAGEMAGICK
943  } else if (type == "ImageReader") {
944 
945  // Create new reader
946  reader = new ImageReader(root["reader"]["path"].asString(), false);
947  reader->SetJsonValue(root["reader"]);
948 
949  } else if (type == "TextReader") {
950 
951  // Create new reader
952  reader = new TextReader();
953  reader->SetJsonValue(root["reader"]);
954 #endif
955 
956  } else if (type == "ChunkReader") {
957 
958  // Create new reader
959  reader = new ChunkReader(root["reader"]["path"].asString(), (ChunkVersion) root["reader"]["chunk_version"].asInt());
960  reader->SetJsonValue(root["reader"]);
961 
962  } else if (type == "DummyReader") {
963 
964  // Create new reader
965  reader = new DummyReader();
966  reader->SetJsonValue(root["reader"]);
967  }
968 
969  // mark as managed reader and set parent
970  if (reader) {
971  reader->SetClip(this);
972  allocated_reader = reader;
973  }
974 
975  // Re-Open reader (if needed)
976  if (already_open)
977  reader->Open();
978 
979  }
980  }
981 }
982 
983 // Sort effects by order
984 void Clip::sort_effects()
985 {
986  // sort clips
987  effects.sort(CompareClipEffects());
988 }
989 
990 // Add an effect to the clip
992 {
993  // Add effect to list
994  effects.push_back(effect);
995 
996  // Sort effects
997  sort_effects();
998 }
999 
1000 // Remove an effect from the clip
1002 {
1003  effects.remove(effect);
1004 }
1005 
1006 // Apply effects to the source frame (if any)
1007 std::shared_ptr<Frame> Clip::apply_effects(std::shared_ptr<Frame> frame)
1008 {
1009  // Find Effects at this position and layer
1010  for (auto effect : effects)
1011  {
1012  // Apply the effect to this frame
1013  frame = effect->GetFrame(frame, frame->number);
1014 
1015  } // end effect loop
1016 
1017  // Return modified frame
1018  return frame;
1019 }
openshot::ClipBase::add_property_json
Json::Value add_property_json(std::string name, float value, std::string type, std::string memo, const Keyframe *keyframe, float min_value, float max_value, bool readonly, int64_t requested_frame) const
Generate JSON for a property.
Definition: ClipBase.cpp:68
openshot::stringToJson
const Json::Value stringToJson(const std::string value)
Definition: Json.cpp:33
openshot::Keyframe::IsIncreasing
bool IsIncreasing(int index) const
Get the direction of the curve at a specific index (increasing or decreasing)
Definition: KeyFrame.cpp:296
openshot::Clip::crop_width
openshot::Keyframe crop_width
Curve representing width in percent (0.0=0%, 1.0=100%)
Definition: Clip.h:230
openshot::Frame::SampleRate
int SampleRate()
Get the original sample rate of this frame's audio data.
Definition: Frame.cpp:565
openshot::ReaderInfo::sample_rate
int sample_rate
The number of audio samples per second (44100 is a common sample rate)
Definition: ReaderBase.h:82
openshot::EffectInfo
This class returns a listing of all effects supported by libopenshot.
Definition: EffectInfo.h:46
openshot::FRAME_DISPLAY_BOTH
@ FRAME_DISPLAY_BOTH
Display both the clip's and timeline's frame number.
Definition: Enums.h:73
openshot::Fraction::ToFloat
float ToFloat()
Return this fraction as a float (i.e. 1/2 = 0.5)
Definition: Fraction.cpp:44
openshot::Clip::crop_height
openshot::Keyframe crop_height
Curve representing height in percent (0.0=0%, 1.0=100%)
Definition: Clip.h:231
openshot::EffectBase
This abstract class is the base class, used by all effects in libopenshot.
Definition: EffectBase.h:66
openshot::ReaderBase::JsonValue
virtual Json::Value JsonValue() const =0
Generate Json::Value for this object.
Definition: ReaderBase.cpp:116
openshot::Clip::Open
void Open()
Open the internal reader.
Definition: Clip.cpp:242
openshot::Clip::anchor
openshot::AnchorType anchor
The anchor determines what parent a clip should snap to.
Definition: Clip.h:145
openshot::Keyframe::GetLong
int64_t GetLong(int64_t index) const
Get the rounded LONG value at a specific index.
Definition: KeyFrame.cpp:291
openshot::Clip::getFrameCriticalSection
juce::CriticalSection getFrameCriticalSection
Section lock for multiple threads.
Definition: Clip.h:98
openshot::Frame::GetAudioSamples
float * GetAudioSamples(int channel)
Get an array of sample data.
Definition: Frame.cpp:334
openshot::ChunkReader
This class reads a special chunk-formatted file, which can be easily shared in a distributed environm...
Definition: ChunkReader.h:103
openshot::Clip::crop_y
openshot::Keyframe crop_y
Curve representing Y offset in percent (-1.0=-100%, 0.0=0%, 1.0=100%)
Definition: Clip.h:233
openshot::ReaderBase::GetFrame
virtual std::shared_ptr< openshot::Frame > GetFrame(int64_t number)=0
openshot::Clip::Close
void Close()
Close the internal reader.
Definition: Clip.cpp:259
openshot::FRAME_DISPLAY_CLIP
@ FRAME_DISPLAY_CLIP
Display the clip's internal frame number.
Definition: Enums.h:71
openshot::FRAME_DISPLAY_TIMELINE
@ FRAME_DISPLAY_TIMELINE
Display the timeline's frame number.
Definition: Enums.h:72
openshot::ReaderBase::SetJsonValue
virtual void SetJsonValue(const Json::Value root)=0
Load Json::Value into this object.
Definition: ReaderBase.cpp:171
openshot::Clip::crop_x
openshot::Keyframe crop_x
Curve representing X offset in percent (-1.0=-100%, 0.0=0%, 1.0=100%)
Definition: Clip.h:232
openshot
This namespace is the default namespace for all code in the openshot library.
Definition: AudioBufferSource.h:38
openshot::Clip::scale_y
openshot::Keyframe scale_y
Curve representing the vertical scaling in percent (0 to 1)
Definition: Clip.h:213
openshot::Clip::PropertiesJSON
std::string PropertiesJSON(int64_t requested_frame) const override
Definition: Clip.cpp:655
openshot::Keyframe::GetDelta
double GetDelta(int64_t index) const
Get the change in Y value (from the previous Y value)
Definition: KeyFrame.cpp:485
openshot::Clip::time
openshot::Keyframe time
Curve representing the frames over time to play (used for speed and direction of video)
Definition: Clip.h:222
openshot::ClipBase::add_property_choice_json
Json::Value add_property_choice_json(std::string name, int value, int selected_value) const
Generate JSON choice for a property (dropdown properties)
Definition: ClipBase.cpp:104
openshot::Clip::alpha
openshot::Keyframe alpha
Curve representing the alpha (1 to 0)
Definition: Clip.h:218
openshot::ReaderBase::info
openshot::ReaderInfo info
Information about the current media file.
Definition: ReaderBase.h:111
openshot::GRAVITY_TOP_LEFT
@ GRAVITY_TOP_LEFT
Align clip to the top left of its parent.
Definition: Enums.h:40
openshot::Clip::location_y
openshot::Keyframe location_y
Curve representing the relative Y position in percent based on the gravity (-1 to 1)
Definition: Clip.h:215
openshot::DummyReader
This class is used as a simple, dummy reader, which always returns a blank frame.
Definition: DummyReader.h:54
openshot::GRAVITY_TOP_RIGHT
@ GRAVITY_TOP_RIGHT
Align clip to the top right of its parent.
Definition: Enums.h:42
openshot::Keyframe::SetJsonValue
void SetJsonValue(const Json::Value root)
Load Json::Value into this object.
Definition: KeyFrame.cpp:362
openshot::GravityType
GravityType
This enumeration determines how clips are aligned to their parent container.
Definition: Enums.h:38
openshot::ReaderInfo::duration
float duration
Length of time (in seconds)
Definition: ReaderBase.h:65
openshot::EffectBase::SetJsonValue
virtual void SetJsonValue(const Json::Value root)=0
Load Json::Value into this object.
Definition: EffectBase.cpp:117
openshot::Frame
This class represents a single frame of video (i.e. image & audio data)
Definition: Frame.h:106
openshot::Clip::channel_mapping
openshot::Keyframe channel_mapping
A number representing an audio channel to output (only works when filtering a channel)
Definition: Clip.h:249
openshot::Clip::AddEffect
void AddEffect(openshot::EffectBase *effect)
Add an effect to the clip.
Definition: Clip.cpp:991
openshot::Clip::~Clip
virtual ~Clip()
Destructor.
Definition: Clip.cpp:203
openshot::ReaderInfo::has_video
bool has_video
Determines if this file has a video stream.
Definition: ReaderBase.h:62
openshot::ReaderInfo::width
int width
The width of the video (in pixels)
Definition: ReaderBase.h:68
openshot::Clip::Json
std::string Json() const override
Get and Set JSON methods.
Definition: Clip.cpp:648
openshot::TooManySeeks
Exception when too many seek attempts happen.
Definition: Exceptions.h:369
openshot::GRAVITY_RIGHT
@ GRAVITY_RIGHT
Align clip to the right of its parent (middle aligned)
Definition: Enums.h:45
openshot::FRAME_DISPLAY_NONE
@ FRAME_DISPLAY_NONE
Do not display the frame number.
Definition: Enums.h:70
openshot::CompareClipEffects
Definition: Clip.h:54
openshot::Keyframe::GetRepeatFraction
Fraction GetRepeatFraction(int64_t index) const
Get the fraction that represents how many times this value is repeated in the curve.
Definition: KeyFrame.cpp:382
openshot::Frame::AddAudioSilence
void AddAudioSilence(int numSamples)
Add audio silence.
Definition: Frame.cpp:1056
openshot::OutOfBoundsFrame
Exception for frames that are out of bounds.
Definition: Exceptions.h:285
openshot::Keyframe::JsonValue
Json::Value JsonValue() const
Generate Json::Value for this object.
Definition: KeyFrame.cpp:329
openshot::GRAVITY_TOP
@ GRAVITY_TOP
Align clip to the top center of its parent.
Definition: Enums.h:41
openshot::Color
This class represents a color (used on the timeline and clips)
Definition: Color.h:45
openshot::Clip::display
openshot::FrameDisplayType display
The format to display the frame number (if any)
Definition: Clip.h:146
openshot::ClipBase::SetJsonValue
virtual void SetJsonValue(const Json::Value root)=0
Load Json::Value into this object.
Definition: ClipBase.cpp:52
openshot::Clip::perspective_c2_y
openshot::Keyframe perspective_c2_y
Curves representing Y for coordinate 2.
Definition: Clip.h:241
openshot::Clip::scale_x
openshot::Keyframe scale_x
Curve representing the horizontal scaling in percent (0 to 1)
Definition: Clip.h:212
openshot::QtImageReader
This class uses the Qt library, to open image files, and return openshot::Frame objects containing th...
Definition: QtImageReader.h:65
openshot::ClipBase::JsonValue
virtual Json::Value JsonValue() const =0
Generate Json::Value for this object.
Definition: ClipBase.cpp:36
openshot::AudioResampler
This class is used to resample audio data for many sequential frames.
Definition: AudioResampler.h:46
openshot::ReaderInfo::height
int height
The height of the video (in pixels)
Definition: ReaderBase.h:67
openshot::VOLUME_MIX_REDUCE
@ VOLUME_MIX_REDUCE
Reduce volume by about %25, and then mix (louder, but could cause pops if the sum exceeds 100%)
Definition: Enums.h:81
openshot::Fraction::num
int num
Numerator for the fraction.
Definition: Fraction.h:47
openshot::Clip::perspective_c3_y
openshot::Keyframe perspective_c3_y
Curves representing Y for coordinate 3.
Definition: Clip.h:243
openshot::Clip::perspective_c4_y
openshot::Keyframe perspective_c4_y
Curves representing Y for coordinate 4.
Definition: Clip.h:245
openshot::Fraction::den
int den
Denominator for the fraction.
Definition: Fraction.h:48
openshot::Clip::has_video
openshot::Keyframe has_video
An optional override to determine if this clip has video (-1=undefined, 0=no, 1=yes)
Definition: Clip.h:253
openshot::Keyframe
A Keyframe is a collection of Point instances, which is used to vary a number or property over time.
Definition: KeyFrame.h:64
openshot::Clip::gravity
openshot::GravityType gravity
The gravity of a clip determines where it snaps to its parent.
Definition: Clip.h:143
openshot::Color::SetJsonValue
void SetJsonValue(const Json::Value root)
Load Json::Value into this object.
Definition: Color.cpp:126
openshot::ReaderBase::Open
virtual void Open()=0
Open the reader (and start consuming resources, such as images or video files)
openshot::Clip::SetJsonValue
void SetJsonValue(const Json::Value root)
Load Json::Value into this object.
Definition: Clip.cpp:815
openshot::GRAVITY_BOTTOM
@ GRAVITY_BOTTOM
Align clip to the bottom center of its parent.
Definition: Enums.h:47
openshot::ReaderInfo::has_audio
bool has_audio
Determines if this file has an audio stream.
Definition: ReaderBase.h:63
openshot::ReaderBase::IsOpen
virtual bool IsOpen()=0
Determine if reader is open or closed.
openshot::Clip::End
float End() const
Override End() method.
Definition: Clip.cpp:273
openshot::InvalidJSON
Exception for invalid JSON.
Definition: Exceptions.h:205
openshot::Clip::GetFrame
std::shared_ptr< openshot::Frame > GetFrame(int64_t requested_frame)
Get an openshot::Frame object for a specific frame number of this timeline.
Definition: Clip.cpp:295
openshot::Frame::AddImage
void AddImage(int new_width, int new_height, int bytes_per_pixel, QImage::Format type, const unsigned char *pixels_)
Add (or replace) pixel data to the frame.
Definition: Frame.cpp:754
openshot::ImageReader
This class uses the ImageMagick++ libraries, to open image files, and return openshot::Frame objects ...
Definition: ImageReader.h:71
openshot::Clip::perspective_c1_x
openshot::Keyframe perspective_c1_x
Curves representing X for coordinate 1.
Definition: Clip.h:238
openshot::SCALE_CROP
@ SCALE_CROP
Scale the clip until both height and width fill the canvas (cropping the overlap)
Definition: Enums.h:54
openshot::Color::green
openshot::Keyframe green
Curve representing the green value (0 - 255)
Definition: Color.h:49
openshot::ReaderBase::SetClip
void SetClip(openshot::ClipBase *clip)
Set parent clip object of this reader.
Definition: ReaderBase.cpp:259
openshot::ReaderInfo::metadata
std::map< std::string, std::string > metadata
An optional map/dictionary of metadata for this reader.
Definition: ReaderBase.h:87
openshot::Frame::GetAudioChannelsCount
int GetAudioChannelsCount()
Get number of audio channels.
Definition: Frame.cpp:432
openshot::Clip::crop_gravity
openshot::GravityType crop_gravity
Cropping needs to have a gravity to determine what side we are cropping.
Definition: Clip.h:229
openshot::ClipBase::end
float end
The position in seconds to end playing (used to trim the ending of a clip)
Definition: ClipBase.h:55
openshot::FFmpegReader
This class uses the FFmpeg libraries, to open video files and audio files, and return openshot::Frame...
Definition: FFmpegReader.h:94
path
path
Definition: FFmpegWriter.cpp:1410
openshot::Frame::GetSamplesPerFrame
int GetSamplesPerFrame(openshot::Fraction fps, int sample_rate, int channels)
Calculate the # of samples per video frame (for the current frame number)
Definition: Frame.cpp:547
openshot::ZmqLogger::Instance
static ZmqLogger * Instance()
Create or get an instance of this logger singleton (invoke the class with this method)
Definition: ZmqLogger.cpp:45
openshot::ClipBase::start
float start
The position in seconds to start playing (used to trim the beginning of a clip)
Definition: ClipBase.h:54
openshot::Clip::Reader
openshot::ReaderBase * Reader()
Get the current reader.
Definition: Clip.cpp:232
openshot::SCALE_FIT
@ SCALE_FIT
Scale the clip until either height or width fills the canvas (with no cropping)
Definition: Enums.h:55
openshot::GRAVITY_BOTTOM_LEFT
@ GRAVITY_BOTTOM_LEFT
Align clip to the bottom left of its parent.
Definition: Enums.h:46
openshot::Clip::perspective_c2_x
openshot::Keyframe perspective_c2_x
Curves representing X for coordinate 2.
Definition: Clip.h:240
openshot::ZmqLogger::AppendDebugMethod
void AppendDebugMethod(std::string method_name, std::string arg1_name="", float arg1_value=-1.0, std::string arg2_name="", float arg2_value=-1.0, std::string arg3_name="", float arg3_value=-1.0, std::string arg4_name="", float arg4_value=-1.0, std::string arg5_name="", float arg5_value=-1.0, std::string arg6_name="", float arg6_value=-1.0)
Append debug information.
Definition: ZmqLogger.cpp:179
openshot::Clip::volume
openshot::Keyframe volume
Curve representing the volume (0 to 1)
Definition: Clip.h:223
openshot::GRAVITY_BOTTOM_RIGHT
@ GRAVITY_BOTTOM_RIGHT
Align clip to the bottom right of its parent.
Definition: Enums.h:48
openshot::Color::JsonValue
Json::Value JsonValue() const
Generate Json::Value for this object.
Definition: Color.cpp:95
openshot::Keyframe::GetLength
int64_t GetLength() const
Definition: KeyFrame.cpp:503
openshot::Keyframe::GetInt
int GetInt(int64_t index) const
Get the rounded INT value at a specific index.
Definition: KeyFrame.cpp:286
openshot::ANCHOR_CANVAS
@ ANCHOR_CANVAS
Anchor the clip to the canvas.
Definition: Enums.h:63
openshot::Frame::AddAudio
void AddAudio(bool replaceSamples, int destChannel, int destStartSample, const float *source, int numSamples, float gainToApplyToSource)
Add audio samples to a specific channel.
Definition: Frame.cpp:874
openshot::Clip::perspective_c4_x
openshot::Keyframe perspective_c4_x
Curves representing X for coordinate 4.
Definition: Clip.h:244
openshot::ReaderClosed
Exception when a reader is closed, and a frame is requested.
Definition: Exceptions.h:337
openshot::ReaderInfo::channel_layout
openshot::ChannelLayout channel_layout
The channel layout (mono, stereo, 5 point surround, etc...)
Definition: ReaderBase.h:84
openshot::Clip::perspective_c1_y
openshot::Keyframe perspective_c1_y
Curves representing Y for coordinate 1.
Definition: Clip.h:239
openshot::Clip::channel_filter
openshot::Keyframe channel_filter
Audio channel filter and mappings.
Definition: Clip.h:248
openshot::ClipBase::Id
void Id(std::string value)
Set basic properties.
Definition: ClipBase.h:84
openshot::Frame::GetImage
std::shared_ptr< QImage > GetImage()
Get pointer to Qt QImage image object.
Definition: Frame.cpp:913
openshot::GRAVITY_LEFT
@ GRAVITY_LEFT
Align clip to the left of its parent (middle aligned)
Definition: Enums.h:43
openshot::Keyframe::GetCount
int64_t GetCount() const
Get the number of points (i.e. # of points)
Definition: KeyFrame.cpp:510
openshot::AudioResampler::GetResampledBuffer
juce::AudioSampleBuffer * GetResampledBuffer()
Get the resampled audio buffer.
Definition: AudioResampler.cpp:123
openshot::ReaderInfo::fps
openshot::Fraction fps
Frames per second, as a fraction (i.e. 24/1 = 24 fps)
Definition: ReaderBase.h:70
openshot::Clip::Clip
Clip()
Default Constructor.
Definition: Clip.cpp:134
openshot::ReaderBase
This abstract class is the base class, used by all readers in libopenshot.
Definition: ReaderBase.h:97
openshot::ClipBase::previous_properties
std::string previous_properties
This string contains the previous JSON properties.
Definition: ClipBase.h:56
openshot::Clip::scale
openshot::ScaleType scale
The scale determines how a clip should be resized to fit its parent.
Definition: Clip.h:144
openshot::Frame::ChannelsLayout
openshot::ChannelLayout ChannelsLayout()
Definition: Frame.cpp:571
openshot::VOLUME_MIX_AVERAGE
@ VOLUME_MIX_AVERAGE
Evenly divide the overlapping clips volume keyframes, so that the sum does not exceed 100%.
Definition: Enums.h:80
openshot::ReaderBase::Close
virtual void Close()=0
Close the reader (and any resources it was consuming)
openshot::AnchorType
AnchorType
This enumeration determines what parent a clip should be aligned to.
Definition: Enums.h:61
openshot::ScaleType
ScaleType
This enumeration determines how clips are scaled to fit their parent container.
Definition: Enums.h:52
openshot::Clip::has_audio
openshot::Keyframe has_audio
Override has_video and has_audio properties of clip (and their readers)
Definition: Clip.h:252
openshot::Clip::rotation
openshot::Keyframe rotation
Curve representing the rotation (0 to 360)
Definition: Clip.h:219
openshot::SCALE_NONE
@ SCALE_NONE
Do not scale the clip.
Definition: Enums.h:57
openshot::TextReader
This class uses the ImageMagick++ libraries, to create frames with "Text", and return openshot::Frame...
Definition: TextReader.h:85
openshot::GRAVITY_CENTER
@ GRAVITY_CENTER
Align clip to the center of its parent (middle aligned)
Definition: Enums.h:44
openshot::Clip::SetJson
void SetJson(const std::string value)
Load JSON string into this object.
Definition: Clip.cpp:798
openshot::Frame::number
int64_t number
This is the frame number (starting at 1)
Definition: Frame.h:129
openshot::Clip::JsonValue
Json::Value JsonValue() const override
Generate Json::Value for this object.
Definition: Clip.cpp:743
openshot::Color::red
openshot::Keyframe red
Curve representing the red value (0 - 255)
Definition: Color.h:48
openshot::SCALE_STRETCH
@ SCALE_STRETCH
Scale the clip until both height and width fill the canvas (distort to fit)
Definition: Enums.h:56
openshot::Clip::perspective_c3_x
openshot::Keyframe perspective_c3_x
Curves representing X for coordinate 3.
Definition: Clip.h:242
openshot::VOLUME_MIX_NONE
@ VOLUME_MIX_NONE
Do not apply any volume mixing adjustments. Just add the samples together.
Definition: Enums.h:79
openshot::ChunkVersion
ChunkVersion
This enumeration allows the user to choose which version of the chunk they would like (low,...
Definition: ChunkReader.h:74
openshot::Frame::GetAudioSamplesCount
int GetAudioSamplesCount()
Get number of audio samples.
Definition: Frame.cpp:442
openshot::ReaderInfo::channels
int channels
The number of audio channels used in the audio stream.
Definition: ReaderBase.h:83
openshot::VolumeMixType
VolumeMixType
This enumeration determines the strategy when mixing audio with other clips.
Definition: Enums.h:77
openshot::Clip::wave_color
openshot::Color wave_color
Curve representing the color of the audio wave form.
Definition: Clip.h:226
openshot::Clip::shear_y
openshot::Keyframe shear_y
Curve representing Y shear angle in degrees (-45.0=down, 45.0=up)
Definition: Clip.h:237
openshot::Clip::RemoveEffect
void RemoveEffect(openshot::EffectBase *effect)
Remove an effect from the clip.
Definition: Clip.cpp:1001
openshot::Color::blue
openshot::Keyframe blue
Curve representing the blue value (0 - 255)
Definition: Color.h:50
openshot::AudioResampler::SetBuffer
void SetBuffer(juce::AudioSampleBuffer *new_buffer, double sample_rate, double new_sample_rate)
Sets the audio buffer and key settings.
Definition: AudioResampler.cpp:77
openshot::Clip::mixing
openshot::VolumeMixType mixing
What strategy should be followed when mixing audio with other clips.
Definition: Clip.h:147
openshot::Clip::shear_x
openshot::Keyframe shear_x
Curve representing X shear angle in degrees (-45.0=left, 45.0=right)
Definition: Clip.h:236
openshot::Keyframe::GetValue
double GetValue(int64_t index) const
Get the value at a specific index.
Definition: KeyFrame.cpp:262
openshot::Clip::location_x
openshot::Keyframe location_x
Curve representing the relative X position in percent based on the gravity (-1 to 1)
Definition: Clip.h:214
openshot::FrameDisplayType
FrameDisplayType
This enumeration determines the display format of the clip's frame number (if any)....
Definition: Enums.h:68