OpenShot Library | libopenshot  0.1.8
Clip.cpp
Go to the documentation of this file.
1 /**
2  * @file
3  * @brief Source file for Clip class
4  * @author Jonathan Thomas <jonathan@openshot.org>
5  *
6  * @section LICENSE
7  *
8  * Copyright (c) 2008-2014 OpenShot Studios, LLC
9  * <http://www.openshotstudios.com/>. This file is part of
10  * OpenShot Library (libopenshot), an open-source project dedicated to
11  * delivering high quality video editing and animation solutions to the
12  * world. For more information visit <http://www.openshot.org/>.
13  *
14  * OpenShot Library (libopenshot) is free software: you can redistribute it
15  * and/or modify it under the terms of the GNU Lesser General Public License
16  * as published by the Free Software Foundation, either version 3 of the
17  * License, or (at your option) any later version.
18  *
19  * OpenShot Library (libopenshot) is distributed in the hope that it will be
20  * useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
21  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
22  * GNU Lesser General Public License for more details.
23  *
24  * You should have received a copy of the GNU Lesser General Public License
25  * along with OpenShot Library. If not, see <http://www.gnu.org/licenses/>.
26  */
27 
28 #include "../include/Clip.h"
29 
30 using namespace openshot;
31 
32 // Init default settings for a clip
33 void Clip::init_settings()
34 {
35  // Init clip settings
36  Position(0.0);
37  Layer(0);
38  Start(0.0);
39  End(0.0);
41  scale = SCALE_FIT;
44  waveform = false;
46 
47  // Init scale curves
48  scale_x = Keyframe(1.0);
49  scale_y = Keyframe(1.0);
50 
51  // Init location curves
52  location_x = Keyframe(0.0);
53  location_y = Keyframe(0.0);
54 
55  // Init alpha & rotation
56  alpha = Keyframe(1.0);
57  rotation = Keyframe(0.0);
58 
59  // Init time & volume
60  time = Keyframe(1.0);
61  volume = Keyframe(1.0);
62 
63  // Init audio waveform color
64  wave_color = Color((unsigned char)0, (unsigned char)123, (unsigned char)255, (unsigned char)255);
65 
66  // Init crop settings
68  crop_width = Keyframe(-1.0);
69  crop_height = Keyframe(-1.0);
70  crop_x = Keyframe(0.0);
71  crop_y = Keyframe(0.0);
72 
73  // Init shear and perspective curves
74  shear_x = Keyframe(0.0);
75  shear_y = Keyframe(0.0);
76  perspective_c1_x = Keyframe(-1.0);
77  perspective_c1_y = Keyframe(-1.0);
78  perspective_c2_x = Keyframe(-1.0);
79  perspective_c2_y = Keyframe(-1.0);
80  perspective_c3_x = Keyframe(-1.0);
81  perspective_c3_y = Keyframe(-1.0);
82  perspective_c4_x = Keyframe(-1.0);
83  perspective_c4_y = Keyframe(-1.0);
84 
85  // Init audio channel filter and mappings
86  channel_filter = Keyframe(-1.0);
87  channel_mapping = Keyframe(-1.0);
88 
89  // Init audio and video overrides
90  has_audio = Keyframe(-1.0);
91  has_video = Keyframe(-1.0);
92 
93  // Default pointers
94  reader = NULL;
95  resampler = NULL;
96  audio_cache = NULL;
97  manage_reader = false;
98 }
99 
100 // Default Constructor for a clip
102 {
103  // Init all default settings
104  init_settings();
105 }
106 
// Constructor with reader
// NOTE: the clip does NOT take ownership of new_reader here
// (manage_reader stays false), so the caller must keep it alive
// and delete it — contrast with the filepath constructor below.
Clip::Clip(ReaderBase* new_reader)
{
	// Init all default settings
	init_settings();

	// Set the reader
	reader = new_reader;

	// Open and Close the reader (to set the duration of the clip)
	Open();
	Close();

	// Update duration (End position defaults to the full media length)
	End(reader->info.duration);
}
123 
// Constructor with filepath
// Probes readers in order: FFmpegReader for well-known media extensions,
// then QtImageReader, then FFmpegReader as a last resort. If every probe
// fails, 'reader' remains NULL and later method calls throw ReaderClosed.
Clip::Clip(string path)
{
	// Init all default settings
	init_settings();

	// Get file extension (and convert to lower case)
	string ext = get_file_extension(path);
	transform(ext.begin(), ext.end(), ext.begin(), ::tolower);

	// Determine if common video formats
	if (ext=="avi" || ext=="mov" || ext=="mkv" || ext=="mpg" || ext=="mpeg" || ext=="mp3" || ext=="mp4" || ext=="mts" ||
		ext=="ogg" || ext=="wav" || ext=="wmv" || ext=="webm" || ext=="vob")
	{
		try
		{
			// Open common video format
			reader = new FFmpegReader(path);

		} catch(...) { }  // best-effort: fall through to the generic probing below
	}

	// If no video found, try each reader
	if (!reader)
	{
		try
		{
			// Try an image reader
			reader = new QtImageReader(path);

		} catch(...) {
			try
			{
				// Try a video reader
				reader = new FFmpegReader(path);

			} catch(...) { }  // reader stays NULL if every attempt fails
		}
	}

	// Update duration, and take ownership of the reader we allocated
	// (the destructor deletes it because manage_reader is true)
	if (reader) {
		End(reader->info.duration);
		manage_reader = true;
	}
}
170 
171 // Destructor
173 {
174  // Delete the reader if clip created it
175  if (manage_reader && reader) {
176  delete reader;
177  reader = NULL;
178  }
179 
180  // Close the resampler
181  if (resampler) {
182  delete resampler;
183  resampler = NULL;
184  }
185 }
186 
187 /// Set the current reader
188 void Clip::Reader(ReaderBase* new_reader)
189 {
190  // set reader pointer
191  reader = new_reader;
192 }
193 
194 /// Get the current reader
196 {
197  if (reader)
198  return reader;
199  else
200  // Throw error if reader not initialized
201  throw ReaderClosed("No Reader has been initialized for this Clip. Call Reader(*reader) before calling this method.", "");
202 }
203 
204 // Open the internal reader
206 {
207  if (reader)
208  {
209  // Open the reader
210  reader->Open();
211 
212  // Set some clip properties from the file reader
213  if (end == 0.0)
214  End(reader->info.duration);
215  }
216  else
217  // Throw error if reader not initialized
218  throw ReaderClosed("No Reader has been initialized for this Clip. Call Reader(*reader) before calling this method.", "");
219 }
220 
221 // Close the internal reader
223 {
224  if (reader) {
225  ZmqLogger::Instance()->AppendDebugMethod("Clip::Close", "", -1, "", -1, "", -1, "", -1, "", -1, "", -1);
226 
227  // Close the reader
228  reader->Close();
229  }
230  else
231  // Throw error if reader not initialized
232  throw ReaderClosed("No Reader has been initialized for this Clip. Call Reader(*reader) before calling this method.", "");
233 }
234 
// Get end position of clip (trim end of video), which can be affected by the time curve.
// Returns seconds. With a multi-point time curve, the effective length is the
// curve's frame count divided by the clip's FPS; otherwise the stored 'end'.
float Clip::End() throw(ReaderClosed)
{
	// if a time curve is present, use its length
	if (time.Points.size() > 1)
	{
		// Determine the FPS of this clip
		float fps = 24.0;  // fallback default (in practice unreachable: the else branch throws)
		if (reader)
			// file reader
			fps = reader->info.fps.ToFloat();
		else
			// Throw error if reader not initialized
			throw ReaderClosed("No Reader has been initialized for this Clip.  Call Reader(*reader) before calling this method.", "");

		// Convert the time curve's length (in frames) into seconds
		return float(time.GetLength()) / fps;
	}
	else
		// just use the duration (as detected by the reader)
		return end;
}
256 
257 // Get an openshot::Frame object for a specific frame number of this reader.
258 std::shared_ptr<Frame> Clip::GetFrame(long int requested_frame) throw(ReaderClosed)
259 {
260  if (reader)
261  {
262  // Adjust out of bounds frame number
263  requested_frame = adjust_frame_number_minimum(requested_frame);
264 
265  // Adjust has_video and has_audio overrides
266  int enabled_audio = has_audio.GetInt(requested_frame);
267  if (enabled_audio == -1 && reader && reader->info.has_audio)
268  enabled_audio = 1;
269  else if (enabled_audio == -1 && reader && !reader->info.has_audio)
270  enabled_audio = 0;
271  int enabled_video = has_video.GetInt(requested_frame);
272  if (enabled_video == -1 && reader && reader->info.has_video)
273  enabled_video = 1;
274  else if (enabled_video == -1 && reader && !reader->info.has_audio)
275  enabled_video = 0;
276 
277  // Is a time map detected
278  long int new_frame_number = requested_frame;
279  long int time_mapped_number = adjust_frame_number_minimum(time.GetLong(requested_frame));
280  if (time.Values.size() > 1)
281  new_frame_number = time_mapped_number;
282 
283  // Now that we have re-mapped what frame number is needed, go and get the frame pointer
284  std::shared_ptr<Frame> original_frame = GetOrCreateFrame(new_frame_number);
285 
286  // Create a new frame
287  std::shared_ptr<Frame> frame(new Frame(new_frame_number, 1, 1, "#000000", original_frame->GetAudioSamplesCount(), original_frame->GetAudioChannelsCount()));
288  frame->SampleRate(original_frame->SampleRate());
289  frame->ChannelsLayout(original_frame->ChannelsLayout());
290 
291  // Copy the image from the odd field
292  if (enabled_video)
293  frame->AddImage(std::shared_ptr<QImage>(new QImage(*original_frame->GetImage())));
294 
295  // Loop through each channel, add audio
296  if (enabled_audio && reader->info.has_audio)
297  for (int channel = 0; channel < original_frame->GetAudioChannelsCount(); channel++)
298  frame->AddAudio(true, channel, 0, original_frame->GetAudioSamples(channel), original_frame->GetAudioSamplesCount(), 1.0);
299 
300  // Get time mapped frame number (used to increase speed, change direction, etc...)
301  std::shared_ptr<Frame> new_frame = get_time_mapped_frame(frame, requested_frame);
302 
303  // Apply effects to the frame (if any)
304  apply_effects(new_frame);
305 
306  // Return processed 'frame'
307  return new_frame;
308  }
309  else
310  // Throw error if reader not initialized
311  throw ReaderClosed("No Reader has been initialized for this Clip. Call Reader(*reader) before calling this method.", "");
312 }
313 
314 // Get file extension
315 string Clip::get_file_extension(string path)
316 {
317  // return last part of path
318  return path.substr(path.find_last_of(".") + 1);
319 }
320 
321 // Reverse an audio buffer
322 void Clip::reverse_buffer(juce::AudioSampleBuffer* buffer)
323 {
324  int number_of_samples = buffer->getNumSamples();
325  int channels = buffer->getNumChannels();
326 
327  // Reverse array (create new buffer to hold the reversed version)
328  AudioSampleBuffer *reversed = new juce::AudioSampleBuffer(channels, number_of_samples);
329  reversed->clear();
330 
331  for (int channel = 0; channel < channels; channel++)
332  {
333  int n=0;
334  for (int s = number_of_samples - 1; s >= 0; s--, n++)
335  reversed->getWritePointer(channel)[n] = buffer->getWritePointer(channel)[s];
336  }
337 
338  // Copy the samples back to the original array
339  buffer->clear();
340  // Loop through channels, and get audio samples
341  for (int channel = 0; channel < channels; channel++)
342  // Get the audio samples for this channel
343  buffer->addFrom(channel, 0, reversed->getReadPointer(channel), number_of_samples, 1.0f);
344 
345  delete reversed;
346  reversed = NULL;
347 }
348 
// Adjust the audio and image of a time mapped frame.
// When the 'time' keyframe has more than one point, this remaps the frame's
// image to the time-curve position and stretches/compresses/reverses its
// audio to match the playback speed at 'frame_number'. Otherwise the input
// frame is returned untouched.
//
// Three audio paths:
//   1. repeat fraction den > 1  -> SLOW DOWN (one source frame spans several
//      output frames; resample slower and slice out this frame's portion)
//   2. 1 < |delta| < 100        -> SPEED UP (several source frames collapse
//      into one output frame; concatenate then resample faster)
//   3. otherwise                -> ~normal speed (copy, possibly reversed)
std::shared_ptr<Frame> Clip::get_time_mapped_frame(std::shared_ptr<Frame> frame, long int frame_number) throw(ReaderClosed)
{
	// Check for valid reader
	if (!reader)
		// Throw error if reader not initialized
		throw ReaderClosed("No Reader has been initialized for this Clip.  Call Reader(*reader) before calling this method.", "");

	// Check for a valid time map curve
	if (time.Values.size() > 1)
	{
		// Serialize access: the shared AudioResampler below is stateful
		const GenericScopedLock<CriticalSection> lock(getFrameCriticalSection);
		std::shared_ptr<Frame> new_frame;

		// create buffer and resampler (lazily; reused across calls)
		juce::AudioSampleBuffer *samples = NULL;
		if (!resampler)
			resampler = new AudioResampler();

		// Get new frame number (the source frame the time curve maps to)
		int new_frame_number = adjust_frame_number_minimum(round(time.GetValue(frame_number)));

		// Create a new frame sized for the remapped position
		int samples_in_frame = Frame::GetSamplesPerFrame(new_frame_number, reader->info.fps, reader->info.sample_rate, frame->GetAudioChannelsCount());
		new_frame = std::make_shared<Frame>(new_frame_number, 1, 1, "#000000", samples_in_frame, frame->GetAudioChannelsCount());

		// Copy the image from the remapped source frame
		new_frame->AddImage(GetOrCreateFrame(new_frame_number)->GetImage());


		// Get delta (difference from the previous frame's Y value on the
		// time curve — i.e. how many source frames we jumped)
		int delta = int(round(time.GetDelta(frame_number)));

		// Init audio vars
		int sample_rate = reader->info.sample_rate;
		int channels = reader->info.channels;
		int number_of_samples = GetOrCreateFrame(new_frame_number)->GetAudioSamplesCount();

		// Only resample audio if needed
		if (reader->info.has_audio) {
			// Determine if we are speeding up or slowing down
			if (time.GetRepeatFraction(frame_number).den > 1) {
				// SLOWING DOWN AUDIO
				// Resample data, and return new buffer pointer
				AudioSampleBuffer *resampled_buffer = NULL;
				int resampled_buffer_size = 0;

				// SLOW DOWN audio (split audio)
				samples = new juce::AudioSampleBuffer(channels, number_of_samples);
				samples->clear();

				// Loop through channels, and get audio samples
				for (int channel = 0; channel < channels; channel++)
					// Get the audio samples for this channel
					samples->addFrom(channel, 0, GetOrCreateFrame(new_frame_number)->GetAudioSamples(channel),
							number_of_samples, 1.0f);

				// Reverse the samples (if the curve is playing backwards here)
				if (!time.IsIncreasing(frame_number))
					reverse_buffer(samples);

				// Resample audio to be X times slower (where X is the denominator of the repeat fraction)
				resampler->SetBuffer(samples, 1.0 / time.GetRepeatFraction(frame_number).den);

				// Resample the data (since it's the 1st slice)
				resampled_buffer = resampler->GetResampledBuffer();

				// Get the length of the resampled buffer (if one exists)
				resampled_buffer_size = resampled_buffer->getNumSamples();

				// Just take the samples we need for the requested frame:
				// the repeat fraction's numerator selects which slice of the
				// stretched audio belongs to this output frame
				int start = (number_of_samples * (time.GetRepeatFraction(frame_number).num - 1));
				if (start > 0)
					start -= 1;
				for (int channel = 0; channel < channels; channel++)
					// Add new (slower) samples, to the frame object
					new_frame->AddAudio(true, channel, 0, resampled_buffer->getReadPointer(channel, start),
							number_of_samples, 1.0f);

				// Clean up
				// NOTE(review): the buffer is only detached, not deleted —
				// presumably it is owned by the resampler; confirm against
				// AudioResampler::GetResampledBuffer() before changing.
				resampled_buffer = NULL;

			}
			else if (abs(delta) > 1 && abs(delta) < 100) {
				int start = 0;
				if (delta > 0) {
					// SPEED UP (multiple frames of audio), as long as it's not more than X frames
					// First pass: count the total samples across the skipped frames
					int total_delta_samples = 0;
					for (int delta_frame = new_frame_number - (delta - 1);
						 delta_frame <= new_frame_number; delta_frame++)
						total_delta_samples += Frame::GetSamplesPerFrame(delta_frame, reader->info.fps,
																		 reader->info.sample_rate,
																		 reader->info.channels);

					// Allocate a new sample buffer for these delta frames
					samples = new juce::AudioSampleBuffer(channels, total_delta_samples);
					samples->clear();

					// Second pass: concatenate each skipped frame's audio
					for (int delta_frame = new_frame_number - (delta - 1);
						 delta_frame <= new_frame_number; delta_frame++) {
						// buffer to hold delta samples
						int number_of_delta_samples = GetOrCreateFrame(delta_frame)->GetAudioSamplesCount();
						AudioSampleBuffer *delta_samples = new juce::AudioSampleBuffer(channels,
																					   number_of_delta_samples);
						delta_samples->clear();

						for (int channel = 0; channel < channels; channel++)
							delta_samples->addFrom(channel, 0, GetOrCreateFrame(delta_frame)->GetAudioSamples(channel),
												   number_of_delta_samples, 1.0f);

						// Reverse the samples (if needed)
						if (!time.IsIncreasing(frame_number))
							reverse_buffer(delta_samples);

						// Copy the samples into the combined buffer at 'start'
						for (int channel = 0; channel < channels; channel++)
							// Get the audio samples for this channel
							samples->addFrom(channel, start, delta_samples->getReadPointer(channel),
											 number_of_delta_samples, 1.0f);

						// Clean up
						delete delta_samples;
						delta_samples = NULL;

						// Increment start position
						start += number_of_delta_samples;
					}
				}
				else {
					// SPEED UP in reverse: same as above but iterating the
					// delta frames backwards (delta is negative here)
					int total_delta_samples = 0;
					for (int delta_frame = new_frame_number - (delta + 1);
						 delta_frame >= new_frame_number; delta_frame--)
						total_delta_samples += Frame::GetSamplesPerFrame(delta_frame, reader->info.fps,
																		 reader->info.sample_rate,
																		 reader->info.channels);

					// Allocate a new sample buffer for these delta frames
					samples = new juce::AudioSampleBuffer(channels, total_delta_samples);
					samples->clear();

					// Loop through each frame in this delta
					for (int delta_frame = new_frame_number - (delta + 1);
						 delta_frame >= new_frame_number; delta_frame--) {
						// buffer to hold delta samples
						int number_of_delta_samples = GetOrCreateFrame(delta_frame)->GetAudioSamplesCount();
						AudioSampleBuffer *delta_samples = new juce::AudioSampleBuffer(channels,
																					   number_of_delta_samples);
						delta_samples->clear();

						for (int channel = 0; channel < channels; channel++)
							delta_samples->addFrom(channel, 0, GetOrCreateFrame(delta_frame)->GetAudioSamples(channel),
												   number_of_delta_samples, 1.0f);

						// Reverse the samples (if needed)
						if (!time.IsIncreasing(frame_number))
							reverse_buffer(delta_samples);

						// Copy the samples into the combined buffer at 'start'
						for (int channel = 0; channel < channels; channel++)
							// Get the audio samples for this channel
							samples->addFrom(channel, start, delta_samples->getReadPointer(channel),
											 number_of_delta_samples, 1.0f);

						// Clean up
						delete delta_samples;
						delta_samples = NULL;

						// Increment start position
						start += number_of_delta_samples;
					}
				}

				// Resample audio to be X times faster (where X is the delta of the repeat fraction)
				resampler->SetBuffer(samples, float(start) / float(number_of_samples));

				// Resample data, and return new buffer pointer
				AudioSampleBuffer *buffer = resampler->GetResampledBuffer();
				int resampled_buffer_size = buffer->getNumSamples();

				// Add the newly resized audio samples to the current frame
				for (int channel = 0; channel < channels; channel++)
					// Add new (faster) samples, to the frame object
					new_frame->AddAudio(true, channel, 0, buffer->getReadPointer(channel), number_of_samples, 1.0f);

				// Clean up (detach only — see ownership note above)
				buffer = NULL;
			}
			else {
				// Normal speed: use the samples on this frame (but maybe reverse them if needed)
				samples = new juce::AudioSampleBuffer(channels, number_of_samples);
				samples->clear();

				// Loop through channels, and get audio samples
				for (int channel = 0; channel < channels; channel++)
					// Get the audio samples for this channel
					samples->addFrom(channel, 0, frame->GetAudioSamples(channel), number_of_samples, 1.0f);

				// reverse the samples
				if (!time.IsIncreasing(frame_number))
					reverse_buffer(samples);

				// Add reversed samples to the frame object
				for (int channel = 0; channel < channels; channel++)
					new_frame->AddAudio(true, channel, 0, samples->getReadPointer(channel), number_of_samples, 1.0f);


			}

			// Free the working buffer allocated by whichever path ran
			delete samples;
			samples = NULL;
		}

		// Return new time mapped frame
		return new_frame;

	} else
		// Use original frame
		return frame;
}
570 
571 // Adjust frame number minimum value
572 long int Clip::adjust_frame_number_minimum(long int frame_number)
573 {
574  // Never return a frame number 0 or below
575  if (frame_number < 1)
576  return 1;
577  else
578  return frame_number;
579 
580 }
581 
// Get or generate a blank frame.
// Configures the reader's max decode size based on the scale mode (a
// performance optimization), then asks the reader for the frame. If the
// reader fails (closed, seek limits, out of bounds), a silent black frame
// of the correct dimensions is returned instead — this method never throws.
std::shared_ptr<Frame> Clip::GetOrCreateFrame(long int number)
{
	std::shared_ptr<Frame> new_frame;

	// Init some basic properties about this frame
	int samples_in_frame = Frame::GetSamplesPerFrame(number, reader->info.fps, reader->info.sample_rate, reader->info.channels);

	try {
		// Debug output
		ZmqLogger::Instance()->AppendDebugMethod("Clip::GetOrCreateFrame (from reader)", "number", number, "samples_in_frame", samples_in_frame, "", -1, "", -1, "", -1, "", -1);

		// Determine the max size of this clips source image (based on the timeline's size, the scaling mode,
		// and the scaling keyframes). This is a performance improvement, to keep the images as small as possible,
		// without losing quality. NOTE: We cannot go smaller than the timeline itself, or the add_layer timeline
		// method will scale it back to timeline size before scaling it smaller again. This needs to be fixed in
		// the future.
		if (scale == SCALE_FIT || scale == SCALE_STRETCH) {
			// Best fit or Stretch scaling (based on max timeline size * scaling keyframes)
			float max_scale_x = scale_x.GetMaxPoint().co.Y;
			float max_scale_y = scale_y.GetMaxPoint().co.Y;
			reader->SetMaxSize(max(float(max_width), max_width * max_scale_x), max(float(max_height), max_height * max_scale_y));

		} else if (scale == SCALE_CROP) {
			// Cropping scale mode (based on max timeline size * cropped size * scaling keyframes)
			float max_scale_x = scale_x.GetMaxPoint().co.Y;
			float max_scale_y = scale_y.GetMaxPoint().co.Y;
			QSize width_size(max_width * max_scale_x, round(max_width / (float(reader->info.width) / float(reader->info.height))));
			QSize height_size(round(max_height / (float(reader->info.height) / float(reader->info.width))), max_height * max_scale_y);

			// respect aspect ratio: pick whichever candidate covers the timeline
			if (width_size.width() >= max_width && width_size.height() >= max_height)
				reader->SetMaxSize(max(max_width, width_size.width()), max(max_height, width_size.height()));
			else
				reader->SetMaxSize(max(max_width, height_size.width()), max(max_height, height_size.height()));

		} else {
			// No scaling, use original image size (slower)
			reader->SetMaxSize(0, 0);
		}

		// Attempt to get a frame (but this could fail if a reader has just been closed)
		new_frame = reader->GetFrame(number);

		// Return real frame
		if (new_frame)
			return new_frame;

	} catch (const ReaderClosed & e) {
		// Swallowed deliberately: fall through to the blank-frame fallback
	} catch (const TooManySeeks & e) {
		// Swallowed deliberately: fall through to the blank-frame fallback
	} catch (const OutOfBoundsFrame & e) {
		// Swallowed deliberately: fall through to the blank-frame fallback
	}

	// Debug output
	ZmqLogger::Instance()->AppendDebugMethod("Clip::GetOrCreateFrame (create blank)", "number", number, "samples_in_frame", samples_in_frame, "", -1, "", -1, "", -1, "", -1);

	// Create blank frame (silent black image matching the reader's geometry)
	new_frame = std::make_shared<Frame>(number, reader->info.width, reader->info.height, "#000000", samples_in_frame, reader->info.channels);
	new_frame->SampleRate(reader->info.sample_rate);
	new_frame->ChannelsLayout(reader->info.channel_layout);
	return new_frame;
}
647 
648 // Generate JSON string of this object
649 string Clip::Json() {
650 
651  // Return formatted string
652  return JsonValue().toStyledString();
653 }
654 
// Get all properties for a specific frame.
// Builds a JSON description of every user-editable property (value, type,
// min/max, read-only flag, and keyframe interpolation where applicable),
// evaluated at 'requested_frame'. Enum-style properties also carry a
// "choices" array for dropdown UIs. Returns the styled JSON text.
string Clip::PropertiesJSON(long int requested_frame) {

	// Generate JSON properties list
	Json::Value root;
	root["id"] = add_property_json("ID", 0.0, "string", Id(), NULL, -1, -1, true, requested_frame);
	root["position"] = add_property_json("Position", Position(), "float", "", NULL, 0, 30 * 60 * 60 * 48, false, requested_frame);
	root["layer"] = add_property_json("Track", Layer(), "int", "", NULL, 0, 20, false, requested_frame);
	root["start"] = add_property_json("Start", Start(), "float", "", NULL, 0, 30 * 60 * 60 * 48, false, requested_frame);
	root["end"] = add_property_json("End", End(), "float", "", NULL, 0, 30 * 60 * 60 * 48, false, requested_frame);
	root["duration"] = add_property_json("Duration", Duration(), "float", "", NULL, 0, 30 * 60 * 60 * 48, true, requested_frame);
	root["gravity"] = add_property_json("Gravity", gravity, "int", "", NULL, 0, 8, false, requested_frame);
	root["scale"] = add_property_json("Scale", scale, "int", "", NULL, 0, 3, false, requested_frame);
	root["anchor"] = add_property_json("Anchor", anchor, "int", "", NULL, 0, 1, false, requested_frame);
	root["display"] = add_property_json("Frame Number", display, "int", "", NULL, 0, 3, false, requested_frame);
	root["waveform"] = add_property_json("Waveform", waveform, "int", "", NULL, 0, 1, false, requested_frame);

	// Add gravity choices (dropdown style)
	root["gravity"]["choices"].append(add_property_choice_json("Top Left", GRAVITY_TOP_LEFT, gravity));
	root["gravity"]["choices"].append(add_property_choice_json("Top Center", GRAVITY_TOP, gravity));
	root["gravity"]["choices"].append(add_property_choice_json("Top Right", GRAVITY_TOP_RIGHT, gravity));
	root["gravity"]["choices"].append(add_property_choice_json("Left", GRAVITY_LEFT, gravity));
	root["gravity"]["choices"].append(add_property_choice_json("Center", GRAVITY_CENTER, gravity));
	root["gravity"]["choices"].append(add_property_choice_json("Right", GRAVITY_RIGHT, gravity));
	root["gravity"]["choices"].append(add_property_choice_json("Bottom Left", GRAVITY_BOTTOM_LEFT, gravity));
	root["gravity"]["choices"].append(add_property_choice_json("Bottom Center", GRAVITY_BOTTOM, gravity));
	root["gravity"]["choices"].append(add_property_choice_json("Bottom Right", GRAVITY_BOTTOM_RIGHT, gravity));

	// Add scale choices (dropdown style)
	root["scale"]["choices"].append(add_property_choice_json("Crop", SCALE_CROP, scale));
	root["scale"]["choices"].append(add_property_choice_json("Best Fit", SCALE_FIT, scale));
	root["scale"]["choices"].append(add_property_choice_json("Stretch", SCALE_STRETCH, scale));
	root["scale"]["choices"].append(add_property_choice_json("None", SCALE_NONE, scale));

	// Add anchor choices (dropdown style)
	root["anchor"]["choices"].append(add_property_choice_json("Canvas", ANCHOR_CANVAS, anchor));
	root["anchor"]["choices"].append(add_property_choice_json("Viewport", ANCHOR_VIEWPORT, anchor));

	// Add frame number display choices (dropdown style)
	root["display"]["choices"].append(add_property_choice_json("None", FRAME_DISPLAY_NONE, display));
	root["display"]["choices"].append(add_property_choice_json("Clip", FRAME_DISPLAY_CLIP, display));
	root["display"]["choices"].append(add_property_choice_json("Timeline", FRAME_DISPLAY_TIMELINE, display));
	root["display"]["choices"].append(add_property_choice_json("Both", FRAME_DISPLAY_BOTH, display));

	// Add waveform choices (dropdown style)
	root["waveform"]["choices"].append(add_property_choice_json("Yes", true, waveform));
	root["waveform"]["choices"].append(add_property_choice_json("No", false, waveform));

	// Keyframes (animatable properties, evaluated at requested_frame)
	root["location_x"] = add_property_json("Location X", location_x.GetValue(requested_frame), "float", "", &location_x, -1.0, 1.0, false, requested_frame);
	root["location_y"] = add_property_json("Location Y", location_y.GetValue(requested_frame), "float", "", &location_y, -1.0, 1.0, false, requested_frame);
	root["scale_x"] = add_property_json("Scale X", scale_x.GetValue(requested_frame), "float", "", &scale_x, 0.0, 1.0, false, requested_frame);
	root["scale_y"] = add_property_json("Scale Y", scale_y.GetValue(requested_frame), "float", "", &scale_y, 0.0, 1.0, false, requested_frame);
	root["alpha"] = add_property_json("Alpha", alpha.GetValue(requested_frame), "float", "", &alpha, 0.0, 1.0, false, requested_frame);
	root["shear_x"] = add_property_json("Shear X", shear_x.GetValue(requested_frame), "float", "", &shear_x, -1.0, 1.0, false, requested_frame);
	root["shear_y"] = add_property_json("Shear Y", shear_y.GetValue(requested_frame), "float", "", &shear_y, -1.0, 1.0, false, requested_frame);
	root["rotation"] = add_property_json("Rotation", rotation.GetValue(requested_frame), "float", "", &rotation, -360, 360, false, requested_frame);
	root["volume"] = add_property_json("Volume", volume.GetValue(requested_frame), "float", "", &volume, 0.0, 1.0, false, requested_frame);
	root["time"] = add_property_json("Time", time.GetValue(requested_frame), "float", "", &time, 0.0, 30 * 60 * 60 * 48, false, requested_frame);
	root["channel_filter"] = add_property_json("Channel Filter", channel_filter.GetValue(requested_frame), "int", "", &channel_filter, -1, 10, false, requested_frame);
	root["channel_mapping"] = add_property_json("Channel Mapping", channel_mapping.GetValue(requested_frame), "int", "", &channel_mapping, -1, 10, false, requested_frame);
	root["has_audio"] = add_property_json("Enable Audio", has_audio.GetValue(requested_frame), "int", "", &has_audio, -1, 1.0, false, requested_frame);
	root["has_video"] = add_property_json("Enable Video", has_video.GetValue(requested_frame), "int", "", &has_video, -1, 1.0, false, requested_frame);

	// Waveform color: a parent entry plus one keyframed child per channel
	root["wave_color"] = add_property_json("Wave Color", 0.0, "color", "", &wave_color.red, 0, 255, false, requested_frame);
	root["wave_color"]["red"] = add_property_json("Red", wave_color.red.GetValue(requested_frame), "float", "", &wave_color.red, 0, 255, false, requested_frame);
	root["wave_color"]["blue"] = add_property_json("Blue", wave_color.blue.GetValue(requested_frame), "float", "", &wave_color.blue, 0, 255, false, requested_frame);
	root["wave_color"]["green"] = add_property_json("Green", wave_color.green.GetValue(requested_frame), "float", "", &wave_color.green, 0, 255, false, requested_frame);


	// Return formatted string
	return root.toStyledString();
}
728 
729 // Generate Json::JsonValue for this object
730 Json::Value Clip::JsonValue() {
731 
732  // Create root json object
733  Json::Value root = ClipBase::JsonValue(); // get parent properties
734  root["gravity"] = gravity;
735  root["scale"] = scale;
736  root["anchor"] = anchor;
737  root["display"] = display;
738  root["waveform"] = waveform;
739  root["scale_x"] = scale_x.JsonValue();
740  root["scale_y"] = scale_y.JsonValue();
741  root["location_x"] = location_x.JsonValue();
742  root["location_y"] = location_y.JsonValue();
743  root["alpha"] = alpha.JsonValue();
744  root["rotation"] = rotation.JsonValue();
745  root["time"] = time.JsonValue();
746  root["volume"] = volume.JsonValue();
747  root["wave_color"] = wave_color.JsonValue();
748  root["crop_width"] = crop_width.JsonValue();
749  root["crop_height"] = crop_height.JsonValue();
750  root["crop_x"] = crop_x.JsonValue();
751  root["crop_y"] = crop_y.JsonValue();
752  root["shear_x"] = shear_x.JsonValue();
753  root["shear_y"] = shear_y.JsonValue();
754  root["channel_filter"] = channel_filter.JsonValue();
755  root["channel_mapping"] = channel_mapping.JsonValue();
756  root["has_audio"] = has_audio.JsonValue();
757  root["has_video"] = has_video.JsonValue();
758  root["perspective_c1_x"] = perspective_c1_x.JsonValue();
759  root["perspective_c1_y"] = perspective_c1_y.JsonValue();
760  root["perspective_c2_x"] = perspective_c2_x.JsonValue();
761  root["perspective_c2_y"] = perspective_c2_y.JsonValue();
762  root["perspective_c3_x"] = perspective_c3_x.JsonValue();
763  root["perspective_c3_y"] = perspective_c3_y.JsonValue();
764  root["perspective_c4_x"] = perspective_c4_x.JsonValue();
765  root["perspective_c4_y"] = perspective_c4_y.JsonValue();
766 
767  // Add array of effects
768  root["effects"] = Json::Value(Json::arrayValue);
769 
770  // loop through effects
771  list<EffectBase*>::iterator effect_itr;
772  for (effect_itr=effects.begin(); effect_itr != effects.end(); ++effect_itr)
773  {
774  // Get clip object from the iterator
775  EffectBase *existing_effect = (*effect_itr);
776  root["effects"].append(existing_effect->JsonValue());
777  }
778 
779  if (reader)
780  root["reader"] = reader->JsonValue();
781 
782  // return JsonValue
783  return root;
784 }
785 
786 // Load JSON string into this object
787 void Clip::SetJson(string value) throw(InvalidJSON) {
788 
789  // Parse JSON string into JSON objects
790  Json::Value root;
791  Json::Reader reader;
792  bool success = reader.parse( value, root );
793  if (!success)
794  // Raise exception
795  throw InvalidJSON("JSON could not be parsed (or is invalid)", "");
796 
797  try
798  {
799  // Set all values that match
800  SetJsonValue(root);
801  }
802  catch (exception e)
803  {
804  // Error parsing JSON (or missing keys)
805  throw InvalidJSON("JSON is invalid (missing keys or invalid data types)", "");
806  }
807 }
808 
809 // Load Json::JsonValue into this object
810 void Clip::SetJsonValue(Json::Value root) {
811 
812  // Set parent data
814 
815  // Set data from Json (if key is found)
816  if (!root["gravity"].isNull())
817  gravity = (GravityType) root["gravity"].asInt();
818  if (!root["scale"].isNull())
819  scale = (ScaleType) root["scale"].asInt();
820  if (!root["anchor"].isNull())
821  anchor = (AnchorType) root["anchor"].asInt();
822  if (!root["display"].isNull())
823  display = (FrameDisplayType) root["display"].asInt();
824  if (!root["waveform"].isNull())
825  waveform = root["waveform"].asBool();
826  if (!root["scale_x"].isNull())
827  scale_x.SetJsonValue(root["scale_x"]);
828  if (!root["scale_y"].isNull())
829  scale_y.SetJsonValue(root["scale_y"]);
830  if (!root["location_x"].isNull())
831  location_x.SetJsonValue(root["location_x"]);
832  if (!root["location_y"].isNull())
833  location_y.SetJsonValue(root["location_y"]);
834  if (!root["alpha"].isNull())
835  alpha.SetJsonValue(root["alpha"]);
836  if (!root["rotation"].isNull())
837  rotation.SetJsonValue(root["rotation"]);
838  if (!root["time"].isNull())
839  time.SetJsonValue(root["time"]);
840  if (!root["volume"].isNull())
841  volume.SetJsonValue(root["volume"]);
842  if (!root["wave_color"].isNull())
843  wave_color.SetJsonValue(root["wave_color"]);
844  if (!root["crop_width"].isNull())
845  crop_width.SetJsonValue(root["crop_width"]);
846  if (!root["crop_height"].isNull())
847  crop_height.SetJsonValue(root["crop_height"]);
848  if (!root["crop_x"].isNull())
849  crop_x.SetJsonValue(root["crop_x"]);
850  if (!root["crop_y"].isNull())
851  crop_y.SetJsonValue(root["crop_y"]);
852  if (!root["shear_x"].isNull())
853  shear_x.SetJsonValue(root["shear_x"]);
854  if (!root["shear_y"].isNull())
855  shear_y.SetJsonValue(root["shear_y"]);
856  if (!root["channel_filter"].isNull())
857  channel_filter.SetJsonValue(root["channel_filter"]);
858  if (!root["channel_mapping"].isNull())
859  channel_mapping.SetJsonValue(root["channel_mapping"]);
860  if (!root["has_audio"].isNull())
861  has_audio.SetJsonValue(root["has_audio"]);
862  if (!root["has_video"].isNull())
863  has_video.SetJsonValue(root["has_video"]);
864  if (!root["perspective_c1_x"].isNull())
865  perspective_c1_x.SetJsonValue(root["perspective_c1_x"]);
866  if (!root["perspective_c1_y"].isNull())
867  perspective_c1_y.SetJsonValue(root["perspective_c1_y"]);
868  if (!root["perspective_c2_x"].isNull())
869  perspective_c2_x.SetJsonValue(root["perspective_c2_x"]);
870  if (!root["perspective_c2_y"].isNull())
871  perspective_c2_y.SetJsonValue(root["perspective_c2_y"]);
872  if (!root["perspective_c3_x"].isNull())
873  perspective_c3_x.SetJsonValue(root["perspective_c3_x"]);
874  if (!root["perspective_c3_y"].isNull())
875  perspective_c3_y.SetJsonValue(root["perspective_c3_y"]);
876  if (!root["perspective_c4_x"].isNull())
877  perspective_c4_x.SetJsonValue(root["perspective_c4_x"]);
878  if (!root["perspective_c4_y"].isNull())
879  perspective_c4_y.SetJsonValue(root["perspective_c4_y"]);
880  if (!root["effects"].isNull()) {
881 
882  // Clear existing effects
883  effects.clear();
884 
885  // loop through effects
886  for (int x = 0; x < root["effects"].size(); x++) {
887  // Get each effect
888  Json::Value existing_effect = root["effects"][x];
889 
890  // Create Effect
891  EffectBase *e = NULL;
892 
893  if (!existing_effect["type"].isNull()) {
894  // Create instance of effect
895  e = EffectInfo().CreateEffect(existing_effect["type"].asString());
896 
897  // Load Json into Effect
898  e->SetJsonValue(existing_effect);
899 
900  // Add Effect to Timeline
901  AddEffect(e);
902  }
903  }
904  }
905  if (!root["reader"].isNull()) // does Json contain a reader?
906  {
907  if (!root["reader"]["type"].isNull()) // does the reader Json contain a 'type'?
908  {
909  // Close previous reader (if any)
910  bool already_open = false;
911  if (reader)
912  {
913  // Track if reader was open
914  already_open = reader->IsOpen();
915 
916  // Close and delete existing reader (if any)
917  reader->Close();
918  delete reader;
919  reader = NULL;
920  }
921 
922  // Create new reader (and load properties)
923  string type = root["reader"]["type"].asString();
924 
925  if (type == "FFmpegReader") {
926 
927  // Create new reader
928  reader = new FFmpegReader(root["reader"]["path"].asString(), false);
929  reader->SetJsonValue(root["reader"]);
930 
931  } else if (type == "QtImageReader") {
932 
933  // Create new reader
934  reader = new QtImageReader(root["reader"]["path"].asString(), false);
935  reader->SetJsonValue(root["reader"]);
936 
937 #ifdef USE_IMAGEMAGICK
938  } else if (type == "ImageReader") {
939 
940  // Create new reader
941  reader = new ImageReader(root["reader"]["path"].asString(), false);
942  reader->SetJsonValue(root["reader"]);
943 
944  } else if (type == "TextReader") {
945 
946  // Create new reader
947  reader = new TextReader();
948  reader->SetJsonValue(root["reader"]);
949 #endif
950 
951  } else if (type == "ChunkReader") {
952 
953  // Create new reader
954  reader = new ChunkReader(root["reader"]["path"].asString(), (ChunkVersion) root["reader"]["chunk_version"].asInt());
955  reader->SetJsonValue(root["reader"]);
956 
957  } else if (type == "DummyReader") {
958 
959  // Create new reader
960  reader = new DummyReader();
961  reader->SetJsonValue(root["reader"]);
962  }
963 
964  // mark as managed reader
965  if (reader)
966  manage_reader = true;
967 
968  // Re-Open reader (if needed)
969  if (already_open)
970  reader->Open();
971 
972  }
973  }
974 }
975 
976 // Sort effects by order
977 void Clip::sort_effects()
978 {
979  // sort clips
980  effects.sort(CompareClipEffects());
981 }
982 
983 // Add an effect to the clip
985 {
986  // Add effect to list
987  effects.push_back(effect);
988 
989  // Sort effects
990  sort_effects();
991 }
992 
993 // Remove an effect from the clip
995 {
996  effects.remove(effect);
997 }
998 
999 // Apply effects to the source frame (if any)
1000 std::shared_ptr<Frame> Clip::apply_effects(std::shared_ptr<Frame> frame)
1001 {
1002  // Find Effects at this position and layer
1003  list<EffectBase*>::iterator effect_itr;
1004  for (effect_itr=effects.begin(); effect_itr != effects.end(); ++effect_itr)
1005  {
1006  // Get clip object from the iterator
1007  EffectBase *effect = (*effect_itr);
1008 
1009  // Apply the effect to this frame
1010  frame = effect->GetFrame(frame, frame->number);
1011 
1012  } // end effect loop
1013 
1014  // Return modified frame
1015  return frame;
1016 }
vector< Coordinate > Values
Vector of all Values (i.e. the processed coordinates from the curve)
Definition: KeyFrame.h:93
int max_height
The maximum image height needed by this clip (used for optimizations)
Definition: ClipBase.h:62
This class reads a special chunk-formatted file, which can be easily shared in a distributed environm...
Definition: ChunkReader.h:104
Keyframe perspective_c3_x
Curves representing X for coordinate 3.
Definition: Clip.h:249
Display the timeline's frame number.
Definition: Enums.h:69
Point GetMaxPoint()
Get max point (by Y coordinate)
Definition: KeyFrame.cpp:207
void SetBuffer(AudioSampleBuffer *new_buffer, double sample_rate, double new_sample_rate)
Sets the audio buffer and key settings.
void Close()
Close the internal reader.
Definition: Clip.cpp:222
Json::Value JsonValue()
Generate Json::JsonValue for this object.
Definition: Color.cpp:92
int num
Numerator for the fraction.
Definition: Fraction.h:44
Keyframe scale_y
Curve representing the vertical scaling in percent (0 to 1)
Definition: Clip.h:220
Keyframe perspective_c4_x
Curves representing X for coordinate 4.
Definition: Clip.h:251
This abstract class is the base class, used by all effects in libopenshot.
Definition: EffectBase.h:66
EffectBase * CreateEffect(string effect_type)
Definition: EffectInfo.cpp:42
Align clip to the right of its parent (middle aligned)
Definition: Enums.h:42
Keyframe perspective_c1_x
Curves representing X for coordinate 1.
Definition: Clip.h:245
Keyframe green
Curve representing the green value (0 - 255)
Definition: Color.h:46
Keyframe perspective_c2_x
Curves representing X for coordinate 2.
Definition: Clip.h:247
Keyframe crop_x
Curve representing X offset in percent (-1.0=-100%, 0.0=0%, 1.0=100%)
Definition: Clip.h:239
string previous_properties
This string contains the previous JSON properties.
Definition: ClipBase.h:60
string PropertiesJSON(long int requested_frame)
Definition: Clip.cpp:656
float End()
Override End() method.
Definition: Clip.cpp:236
Json::Value add_property_json(string name, float value, string type, string memo, Keyframe *keyframe, float min_value, float max_value, bool readonly, long int requested_frame)
Generate JSON for a property.
Definition: ClipBase.cpp:65
Keyframe perspective_c3_y
Curves representing Y for coordinate 3.
Definition: Clip.h:250
Align clip to the bottom right of its parent.
Definition: Enums.h:45
virtual std::shared_ptr< Frame > GetFrame(std::shared_ptr< Frame > frame, long int frame_number)=0
This method is required for all derived classes of EffectBase, and returns a modified openshot::Frame...
Json::Value JsonValue()
Generate Json::JsonValue for this object.
Definition: KeyFrame.cpp:321
ChannelLayout channel_layout
The channel layout (mono, stereo, 5 point surround, etc...)
Definition: ReaderBase.h:83
GravityType gravity
The gravity of a clip determines where it snaps to its parent.
Definition: Clip.h:151
int width
The width of the video (in pixels)
Definition: ReaderBase.h:67
Keyframe volume
Curve representing the volume (0 to 1)
Definition: Clip.h:230
int max_width
The maximum image width needed by this clip (used for optimizations)
Definition: ClipBase.h:61
Do not scale the clip.
Definition: Enums.h:54
This class represents a single frame of video (i.e. image & audio data)
Definition: Frame.h:115
This class is used as a simple, dummy reader, which always returns a blank frame. ...
Definition: DummyReader.h:53
float ToFloat()
Return this fraction as a float (i.e. 1/2 = 0.5)
Definition: Fraction.cpp:41
Keyframe red
Curve representing the red value (0 - 255)
Definition: Color.h:45
float duration
Length of time (in seconds)
Definition: ReaderBase.h:64
double GetValue(long int index)
Get the value at a specific index.
Definition: KeyFrame.cpp:226
Scale the clip until both height and width fill the canvas (cropping the overlap) ...
Definition: Enums.h:51
Keyframe time
Curve representing the frames over time to play (used for speed and direction of video) ...
Definition: Clip.h:229
ScaleType
This enumeration determines how clips are scaled to fit their parent container.
Definition: Enums.h:49
void AddEffect(EffectBase *effect)
Add an effect to the clip.
Definition: Clip.cpp:984
virtual void Close()=0
Close the reader (and any resources it was consuming)
This abstract class is the base class, used by all readers in libopenshot.
Definition: ReaderBase.h:95
int Layer()
Get layer of clip on timeline (lower number is covered by higher numbers)
Definition: ClipBase.h:84
Keyframe has_audio
Override has_video and has_audio properties of clip (and their readers)
Definition: Clip.h:259
Exception when a reader is closed, and a frame is requested.
Definition: Exceptions.h:234
bool has_video
Determines if this file has a video stream.
Definition: ReaderBase.h:61
~Clip()
Destructor.
Definition: Clip.cpp:172
Keyframe has_video
An optional override to determine if this clip has video (-1=undefined, 0=no, 1=yes) ...
Definition: Clip.h:260
Do not display the frame number.
Definition: Enums.h:67
Color wave_color
Curve representing the color of the audio wave form.
Definition: Clip.h:233
Align clip to the top right of its parent.
Definition: Enums.h:39
virtual Json::Value JsonValue()=0
Generate Json::JsonValue for this object.
Definition: EffectBase.cpp:69
Align clip to the bottom left of its parent.
Definition: Enums.h:43
void SetJsonValue(Json::Value root)
Load Json::JsonValue into this object.
Definition: Clip.cpp:810
void SetJsonValue(Json::Value root)
Load Json::JsonValue into this object.
Definition: KeyFrame.cpp:362
Keyframe crop_width
Curve representing width in percent (0.0=0%, 1.0=100%)
Definition: Clip.h:237
This class uses the ImageMagick++ libraries, to open image files, and return openshot::Frame objects ...
Definition: ImageReader.h:67
Keyframe location_x
Curve representing the relative X position in percent based on the gravity (-1 to 1) ...
Definition: Clip.h:221
Keyframe location_y
Curve representing the relative Y position in percent based on the gravity (-1 to 1) ...
Definition: Clip.h:222
void SetMaxSize(int width, int height)
Set Max Image Size (used for performance optimization)
Definition: ReaderBase.h:143
bool has_audio
Determines if this file has an audio stream.
Definition: ReaderBase.h:62
This class uses the FFmpeg libraries, to open video files and audio files, and return openshot::Frame...
Definition: FFmpegReader.h:92
virtual void SetJsonValue(Json::Value root)=0
Load Json::JsonValue into this object.
Definition: ClipBase.cpp:49
double Y
The Y value of the coordinate (usually representing the value of the property being animated) ...
Definition: Coordinate.h:62
Keyframe perspective_c1_y
Curves representing Y for coordinate 1.
Definition: Clip.h:246
Keyframe blue
Curve representing the blue value (0 - 255)
Definition: Color.h:47
Keyframe crop_y
Curve representing Y offset in percent (-1.0=-100%, 0.0=0%, 1.0=100%)
Definition: Clip.h:240
Keyframe shear_x
Curve representing X shear angle in degrees (-45.0=left, 45.0=right)
Definition: Clip.h:243
ScaleType scale
The scale determines how a clip should be resized to fit its parent.
Definition: Clip.h:152
int height
The height of the video (in pixels)
Definition: ReaderBase.h:66
Align clip to the bottom center of its parent.
Definition: Enums.h:44
Align clip to the top left of its parent.
Definition: Enums.h:37
Json::Value add_property_choice_json(string name, int value, int selected_value)
Generate JSON choice for a property (dropdown properties)
Definition: ClipBase.cpp:101
Exception for files that can not be found or opened.
Definition: Exceptions.h:132
string Id()
Get basic properties.
Definition: ClipBase.h:82
FrameDisplayType
This enumeration determines the display format of the clip&#39;s frame number (if any). Useful for debugging.
Definition: Enums.h:65
Keyframe channel_filter
Audio channel filter and mappings.
Definition: Clip.h:255
float Position()
Get position on timeline (in seconds)
Definition: ClipBase.h:83
bool IsIncreasing(int index)
Get the direction of the curve at a specific index (increasing or decreasing)
Definition: KeyFrame.cpp:292
void AppendDebugMethod(string method_name, string arg1_name, float arg1_value, string arg2_name, float arg2_value, string arg3_name, float arg3_value, string arg4_name, float arg4_value, string arg5_name, float arg5_value, string arg6_name, float arg6_value)
Append debug information.
Definition: ZmqLogger.cpp:162
FrameDisplayType display
The format to display the frame number (if any)
Definition: Clip.h:154
void SetJson(string value)
Load JSON string into this object.
Definition: Clip.cpp:787
Keyframe channel_mapping
A number representing an audio channel to output (only works when filtering a channel) ...
Definition: Clip.h:256
float start
The position in seconds to start playing (used to trim the beginning of a clip)
Definition: ClipBase.h:58
Align clip to the left of its parent (middle aligned)
Definition: Enums.h:40
virtual Json::Value JsonValue()=0
Generate Json::JsonValue for this object.
Definition: ReaderBase.cpp:106
virtual void SetJsonValue(Json::Value root)=0
Load Json::JsonValue into this object.
Definition: EffectBase.cpp:109
ChunkVersion
This enumeration allows the user to choose which version of the chunk they would like (low...
Definition: ChunkReader.h:75
Keyframe rotation
Curve representing the rotation (0 to 360)
Definition: Clip.h:226
virtual void SetJsonValue(Json::Value root)=0
Load Json::JsonValue into this object.
Definition: ReaderBase.cpp:155
std::shared_ptr< Frame > GetFrame(long int requested_frame)
Get an openshot::Frame object for a specific frame number of this timeline.
Definition: Clip.cpp:258
Scale the clip until both height and width fill the canvas (distort to fit)
Definition: Enums.h:53
Display the clip's internal frame number.
Definition: Enums.h:68
vector< Point > Points
Vector of all Points.
Definition: KeyFrame.h:92
ReaderInfo info
Information about the current media file.
Definition: ReaderBase.h:111
Keyframe shear_y
Curve representing Y shear angle in degrees (-45.0=down, 45.0=up)
Definition: Clip.h:244
Clip()
Default Constructor.
Definition: Clip.cpp:101
Anchor the clip to the viewport (which can be moved / animated around the canvas) ...
Definition: Enums.h:61
Fraction fps
Frames per second, as a fraction (i.e. 24/1 = 24 fps)
Definition: ReaderBase.h:69
AnchorType
This enumeration determines what parent a clip should be aligned to.
Definition: Enums.h:58
float end
The position in seconds to end playing (used to trim the ending of a clip)
Definition: ClipBase.h:59
Exception for frames that are out of bounds.
Definition: Exceptions.h:202
void Open()
Open the internal reader.
Definition: Clip.cpp:205
static ZmqLogger * Instance()
Create or get an instance of this logger singleton (invoke the class with this method) ...
Definition: ZmqLogger.cpp:38
This class represents a color (used on the timeline and clips)
Definition: Color.h:42
int GetInt(long int index)
Get the rounded INT value at a specific index.
Definition: KeyFrame.cpp:248
Align clip to the center of its parent (middle aligned)
Definition: Enums.h:41
GravityType crop_gravity
Cropping needs to have a gravity to determine what side we are cropping.
Definition: Clip.h:236
double GetDelta(long int index)
Get the change in Y value (from the previous Y value)
Definition: KeyFrame.cpp:410
Display both the clip's and timeline's frame number.
Definition: Enums.h:70
void RemoveEffect(EffectBase *effect)
Remove an effect from the clip.
Definition: Clip.cpp:994
This namespace is the default namespace for all code in the openshot library.
Coordinate co
This is the primary coordinate.
Definition: Point.h:83
AnchorType anchor
The anchor determines what parent a clip should snap to.
Definition: Clip.h:153
Exception for invalid JSON.
Definition: Exceptions.h:152
Keyframe alpha
Curve representing the alpha (1 to 0)
Definition: Clip.h:225
void SetJsonValue(Json::Value root)
Load Json::JsonValue into this object.
Definition: Color.cpp:129
Keyframe scale_x
Curve representing the horizontal scaling in percent (0 to 1)
Definition: Clip.h:219
Keyframe perspective_c2_y
Curves representing Y for coordinate 2.
Definition: Clip.h:248
CriticalSection getFrameCriticalSection
Section lock for multiple threads.
Definition: Clip.h:112
AudioSampleBuffer * GetResampledBuffer()
Get the resampled audio buffer.
virtual Json::Value JsonValue()=0
Generate Json::JsonValue for this object.
Definition: ClipBase.cpp:33
This class uses the ImageMagick++ libraries, to create frames with "Text", and return openshot::Frame...
Definition: TextReader.h:81
This class uses the Qt library, to open image files, and return openshot::Frame objects containing th...
Definition: QtImageReader.h:69
This class returns a listing of all effects supported by libopenshot.
Definition: EffectInfo.h:45
Align clip to the top center of its parent.
Definition: Enums.h:38
int den
Denominator for the fraction.
Definition: Fraction.h:45
int channels
The number of audio channels used in the audio stream.
Definition: ReaderBase.h:82
A Keyframe is a collection of Point instances, which is used to vary a number or property over time...
Definition: KeyFrame.h:64
virtual std::shared_ptr< Frame > GetFrame(long int number)=0
Scale the clip until either height or width fills the canvas (with no cropping)
Definition: Enums.h:52
Keyframe perspective_c4_y
Curves representing Y for coordinate 4.
Definition: Clip.h:252
int GetSamplesPerFrame(Fraction fps, int sample_rate, int channels)
Calculate the # of samples per video frame (for the current frame number)
Definition: Frame.cpp:505
Json::Value JsonValue()
Generate Json::JsonValue for this object.
Definition: Clip.cpp:730
long int GetLong(long int index)
Get the rounded LONG value at a specific index.
Definition: KeyFrame.cpp:270
float Duration()
Get the length of this clip (in seconds)
Definition: ClipBase.h:87
long int GetLength()
Definition: KeyFrame.cpp:442
GravityType
This enumeration determines how clips are aligned to their parent container.
Definition: Enums.h:35
Anchor the clip to the canvas.
Definition: Enums.h:60
string Json()
Get and Set JSON methods.
Definition: Clip.cpp:649
float Start()
Get start position (in seconds) of clip (trim start of video)
Definition: ClipBase.h:85
virtual void Open()=0
Open the reader (and start consuming resources, such as images or video files)
int sample_rate
The number of audio samples per second (44100 is a common sample rate)
Definition: ReaderBase.h:81
Exception when too many seek attempts happen.
Definition: Exceptions.h:254
Fraction GetRepeatFraction(long int index)
Get the fraction that represents how many times this value is repeated in the curve.
Definition: KeyFrame.cpp:388
ReaderBase * Reader()
Get the current reader.
Definition: Clip.cpp:195
virtual bool IsOpen()=0
Determine if reader is open or closed.
This class is used to resample audio data for many sequential frames.
Keyframe crop_height
Curve representing height in percent (0.0=0%, 1.0=100%)
Definition: Clip.h:238