OpenShot Library | libopenshot  0.1.8
Timeline.cpp
/**
 * @file
 * @brief Source file for Timeline class
 * @author Jonathan Thomas <jonathan@openshot.org>
 *
 * @section LICENSE
 *
 * Copyright (c) 2008-2014 OpenShot Studios, LLC
 * <http://www.openshotstudios.com/>. This file is part of
 * OpenShot Library (libopenshot), an open-source project dedicated to
 * delivering high quality video editing and animation solutions to the
 * world. For more information visit <http://www.openshot.org/>.
 *
 * OpenShot Library (libopenshot) is free software: you can redistribute it
 * and/or modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation, either version 3 of the
 * License, or (at your option) any later version.
 *
 * OpenShot Library (libopenshot) is distributed in the hope that it will be
 * useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with OpenShot Library. If not, see <http://www.gnu.org/licenses/>.
 */

#include "../include/Timeline.h"

using namespace openshot;

// Default Constructor for the timeline (which sets the canvas width and height)
Timeline::Timeline(int width, int height, Fraction fps, int sample_rate, int channels, ChannelLayout channel_layout) :
    is_open(false), auto_map_clips(true)
{
    // Create CrashHandler and attach it (in case of errors)
    CrashHandler::Instance();

    // Init viewport size (curve based, because it can be animated)
    viewport_scale = Keyframe(100.0);
    viewport_x = Keyframe(0.0);
    viewport_y = Keyframe(0.0);

    // Init background color
    color.red = Keyframe(0.0);
    color.green = Keyframe(0.0);
    color.blue = Keyframe(0.0);

    // Init FileInfo struct (clear all values)
    info.width = width;
    info.height = height;
    info.fps = fps;
    info.sample_rate = sample_rate;
    info.channels = channels;
    info.channel_layout = channel_layout;
    info.video_timebase = fps.Reciprocal();
    info.duration = 60 * 30; // 30 minute default duration
    info.has_audio = true;
    info.has_video = true;
    info.video_length = info.fps.ToFloat() * info.duration;

    // Init max image size
    SetMaxSize(info.width, info.height);

    // Init cache
    final_cache = new CacheMemory();
    final_cache->SetMaxBytesFromInfo(OPEN_MP_NUM_PROCESSORS * 2, info.width, info.height, info.sample_rate, info.channels);
}
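
/* Example usage (illustrative only, not part of this file; "video.mp4" is a
 * hypothetical path):
 *
 *     Timeline t(1920, 1080, Fraction(30, 1), 44100, 2, LAYOUT_STEREO);
 *     Clip* c = new Clip("video.mp4");
 *     t.AddClip(c);   // automatically frame-mapped to 30 fps (auto_map_clips)
 *     t.Open();
 *     std::shared_ptr<Frame> f = t.GetFrame(1);
 */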

// Add an openshot::Clip to the timeline
void Timeline::AddClip(Clip* clip)
{
    // All clips should be converted to the frame rate of this timeline
    if (auto_map_clips)
        // Apply framemapper (or update existing framemapper)
        apply_mapper_to_clip(clip);

    // Add clip to list
    clips.push_back(clip);

    // Sort clips
    sort_clips();
}

// Add an effect to the timeline
void Timeline::AddEffect(EffectBase* effect)
{
    // Add effect to list
    effects.push_back(effect);

    // Sort effects
    sort_effects();
}

// Remove an effect from the timeline
void Timeline::RemoveEffect(EffectBase* effect)
{
    effects.remove(effect);
}

// Remove an openshot::Clip from the timeline
void Timeline::RemoveClip(Clip* clip)
{
    clips.remove(clip);
}

// Apply a FrameMapper to a clip which matches the settings of this timeline
void Timeline::apply_mapper_to_clip(Clip* clip)
{
    // Get lock (prevent getting frames while this happens)
    const GenericScopedLock<CriticalSection> lock(getFrameCriticalSection);

    // Determine type of reader
    ReaderBase* clip_reader = NULL;
    if (clip->Reader()->Name() == "FrameMapper")
    {
        // Get the existing reader
        clip_reader = (ReaderBase*) clip->Reader();

    } else {

        // Create a new FrameMapper to wrap the current reader
        clip_reader = (ReaderBase*) new FrameMapper(clip->Reader(), info.fps, PULLDOWN_NONE, info.sample_rate, info.channels, info.channel_layout);
    }

    // Update the mapping
    FrameMapper* clip_mapped_reader = (FrameMapper*) clip_reader;
    clip_mapped_reader->ChangeMapping(info.fps, PULLDOWN_NONE, info.sample_rate, info.channels, info.channel_layout);

    // Update timeline offset
    float time_diff = 0 - clip->Position() + clip->Start();
    int clip_offset = -round(time_diff * info.fps.ToFloat());

    if (clip_offset != 0)
        // Adjust the offset by 1 (since we want to avoid frame 0)
        clip_offset += 1;

    clip_mapped_reader->SetTimelineFrameOffset(clip_offset);

    // Update clip reader
    clip->Reader(clip_reader);
}
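
/* Worked example (illustrative): at 30 fps, a clip with Position() = 2.0s and
 * Start() = 0.5s gives time_diff = -2.0 + 0.5 = -1.5s, so
 * clip_offset = -round(-1.5 * 30) = 45, adjusted to 46 to avoid frame 0. */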

// Apply the timeline's framerate and samplerate to all clips
void Timeline::ApplyMapperToClips()
{
    // Clear all cached frames
    ClearAllCache();

    // Loop through all clips
    list<Clip*>::iterator clip_itr;
    for (clip_itr=clips.begin(); clip_itr != clips.end(); ++clip_itr)
    {
        // Get clip object from the iterator
        Clip *clip = (*clip_itr);

        // Apply framemapper (or update existing framemapper)
        apply_mapper_to_clip(clip);
    }
}

// Calculate time of a frame number, based on a framerate
double Timeline::calculate_time(long int number, Fraction rate)
{
    // Get float version of fps fraction
    double raw_fps = rate.ToFloat();

    // Return the time (in seconds) of this frame
    return double(number - 1) / raw_fps;
}
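
/* Worked example (illustrative): at 30 fps, frame 31 maps to (31 - 1) / 30.0
 * = 1.0 second, and frame 1 always maps to 0.0 seconds. */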

// Apply effects to the source frame (if any)
std::shared_ptr<Frame> Timeline::apply_effects(std::shared_ptr<Frame> frame, long int timeline_frame_number, int layer)
{
    // Debug output
    ZmqLogger::Instance()->AppendDebugMethod("Timeline::apply_effects", "frame->number", frame->number, "timeline_frame_number", timeline_frame_number, "layer", layer, "", -1, "", -1, "", -1);

    // Find Effects at this position and layer
    list<EffectBase*>::iterator effect_itr;
    for (effect_itr=effects.begin(); effect_itr != effects.end(); ++effect_itr)
    {
        // Get effect object from the iterator
        EffectBase *effect = (*effect_itr);

        // Does the effect intersect the current requested time
        long effect_start_position = round(effect->Position() * info.fps.ToDouble()) + 1;
        long effect_end_position = round((effect->Position() + (effect->Duration())) * info.fps.ToDouble()) + 1;

        bool does_effect_intersect = (effect_start_position <= timeline_frame_number && effect_end_position >= timeline_frame_number && effect->Layer() == layer);

        // Debug output
        ZmqLogger::Instance()->AppendDebugMethod("Timeline::apply_effects (Does effect intersect)", "effect->Position()", effect->Position(), "does_effect_intersect", does_effect_intersect, "timeline_frame_number", timeline_frame_number, "layer", layer, "", -1, "", -1);

        // Effect is visible
        if (does_effect_intersect)
        {
            // Determine the frame needed for this effect (based on the position on the timeline)
            long effect_start_frame = (effect->Start() * info.fps.ToDouble()) + 1;
            long effect_frame_number = timeline_frame_number - effect_start_position + effect_start_frame;

            // Debug output
            ZmqLogger::Instance()->AppendDebugMethod("Timeline::apply_effects (Process Effect)", "effect_frame_number", effect_frame_number, "does_effect_intersect", does_effect_intersect, "", -1, "", -1, "", -1, "", -1);

            // Apply the effect to this frame
            frame = effect->GetFrame(frame, effect_frame_number);
        }

    } // end effect loop

    // Return modified frame
    return frame;
}
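
/* Worked example (illustrative): an effect at Position() = 2.0s with
 * Start() = 0s on a 30 fps timeline starts at timeline frame 61, so timeline
 * frame 75 is processed as effect frame 75 - 61 + 1 = 15. */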

// Get or generate a blank frame
std::shared_ptr<Frame> Timeline::GetOrCreateFrame(Clip* clip, long int number)
{
    std::shared_ptr<Frame> new_frame;

    // Init some basic properties about this frame
    int samples_in_frame = Frame::GetSamplesPerFrame(number, info.fps, info.sample_rate, info.channels);

    try {
        // Debug output
        ZmqLogger::Instance()->AppendDebugMethod("Timeline::GetOrCreateFrame (from reader)", "number", number, "samples_in_frame", samples_in_frame, "", -1, "", -1, "", -1, "", -1);

        // Set max image size (used for performance optimization)
        clip->SetMaxSize(info.width, info.height);

        // Attempt to get a frame (but this could fail if a reader has just been closed)
        new_frame = std::shared_ptr<Frame>(clip->GetFrame(number));

        // Return real frame
        return new_frame;

    } catch (const ReaderClosed & e) {
        // ...
    } catch (const TooManySeeks & e) {
        // ...
    } catch (const OutOfBoundsFrame & e) {
        // ...
    }

    // Debug output
    ZmqLogger::Instance()->AppendDebugMethod("Timeline::GetOrCreateFrame (create blank)", "number", number, "samples_in_frame", samples_in_frame, "", -1, "", -1, "", -1, "", -1);

    // Create blank frame
    new_frame = std::make_shared<Frame>(number, max_width, max_height, "#000000", samples_in_frame, info.channels);
    new_frame->SampleRate(info.sample_rate);
    new_frame->ChannelsLayout(info.channel_layout);
    return new_frame;
}
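
/* Worked example (illustrative): at 30 fps and 44100 Hz, each frame carries
 * 44100 / 30 = 1470 samples; for rates that do not divide evenly (e.g. 29.97
 * fps), GetSamplesPerFrame() varies the per-frame count so no samples are
 * lost over time. */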

// Process a new layer of video or audio
void Timeline::add_layer(std::shared_ptr<Frame> new_frame, Clip* source_clip, long int clip_frame_number, long int timeline_frame_number, bool is_top_clip)
{
    // Get the clip's frame & image
    std::shared_ptr<Frame> source_frame = GetOrCreateFrame(source_clip, clip_frame_number);

    // No frame found... so bail
    if (!source_frame)
        return;

    // Debug output
    ZmqLogger::Instance()->AppendDebugMethod("Timeline::add_layer", "new_frame->number", new_frame->number, "clip_frame_number", clip_frame_number, "timeline_frame_number", timeline_frame_number, "", -1, "", -1, "", -1);

    /* REPLACE IMAGE WITH WAVEFORM IMAGE (IF NEEDED) */
    if (source_clip->Waveform())
    {
        // Debug output
        ZmqLogger::Instance()->AppendDebugMethod("Timeline::add_layer (Generate Waveform Image)", "source_frame->number", source_frame->number, "source_clip->Waveform()", source_clip->Waveform(), "clip_frame_number", clip_frame_number, "", -1, "", -1, "", -1);

        // Get the color of the waveform
        int red = source_clip->wave_color.red.GetInt(clip_frame_number);
        int green = source_clip->wave_color.green.GetInt(clip_frame_number);
        int blue = source_clip->wave_color.blue.GetInt(clip_frame_number);
        int alpha = source_clip->wave_color.alpha.GetInt(clip_frame_number);

        // Generate Waveform Dynamically (the size of the timeline)
        std::shared_ptr<QImage> source_image = source_frame->GetWaveform(max_width, max_height, red, green, blue, alpha);
        source_frame->AddImage(std::shared_ptr<QImage>(source_image));
    }

    /* Apply effects to the source frame (if any). If multiple clips are overlapping, only process the
     * effects on the top clip. */
    if (is_top_clip && source_frame)
        source_frame = apply_effects(source_frame, timeline_frame_number, source_clip->Layer());

    // Declare an image to hold the source frame's image
    std::shared_ptr<QImage> source_image;

    /* COPY AUDIO - with correct volume */
    if (source_clip->Reader()->info.has_audio) {

        // Debug output
        ZmqLogger::Instance()->AppendDebugMethod("Timeline::add_layer (Copy Audio)", "source_clip->Reader()->info.has_audio", source_clip->Reader()->info.has_audio, "source_frame->GetAudioChannelsCount()", source_frame->GetAudioChannelsCount(), "info.channels", info.channels, "clip_frame_number", clip_frame_number, "timeline_frame_number", timeline_frame_number, "", -1);

        if (source_frame->GetAudioChannelsCount() == info.channels)
            for (int channel = 0; channel < source_frame->GetAudioChannelsCount(); channel++)
            {
                float initial_volume = 1.0f;
                float previous_volume = source_clip->volume.GetValue(clip_frame_number - 1); // previous frame's percentage of volume (0 to 1)
                float volume = source_clip->volume.GetValue(clip_frame_number); // percentage of volume (0 to 1)
                int channel_filter = source_clip->channel_filter.GetInt(clip_frame_number); // optional channel to filter (if not -1)
                int channel_mapping = source_clip->channel_mapping.GetInt(clip_frame_number); // optional channel to map this channel to (if not -1)

                // If channel filter enabled, check for correct channel (and skip non-matching channels)
                if (channel_filter != -1 && channel_filter != channel)
                    continue; // skip to next channel

                // If channel mapping disabled, just use the current channel
                if (channel_mapping == -1)
                    channel_mapping = channel;

                // If no ramp needed, set initial volume = clip's volume
                if (isEqual(previous_volume, volume))
                    initial_volume = volume;

                // Apply ramp to source frame (if needed)
                if (!isEqual(previous_volume, volume))
                    source_frame->ApplyGainRamp(channel_mapping, 0, source_frame->GetAudioSamplesCount(), previous_volume, volume);

                // TODO: Improve FrameMapper (or Timeline) to always get the correct number of samples per frame.
                // Currently, the ResampleContext sometimes leaves behind a few samples for the next call, and the
                // number of samples returned is variable... and does not match the number expected.
                // This is a crude solution at best. =)
                if (new_frame->GetAudioSamplesCount() != source_frame->GetAudioSamplesCount())
                    // Force timeline frame to match the source frame
                    new_frame->ResizeAudio(info.channels, source_frame->GetAudioSamplesCount(), info.sample_rate, info.channel_layout);

                // Copy audio samples (and set initial volume). Mix samples with existing audio samples. The gains
                // are added together, so be sure to set the gains correctly, or the sum may exceed 1.0 (and audio
                // distortion will occur).
                new_frame->AddAudio(false, channel_mapping, 0, source_frame->GetAudioSamples(channel), source_frame->GetAudioSamplesCount(), initial_volume);

            }
        else
            // Debug output
            ZmqLogger::Instance()->AppendDebugMethod("Timeline::add_layer (No Audio Copied - Wrong # of Channels)", "source_clip->Reader()->info.has_audio", source_clip->Reader()->info.has_audio, "source_frame->GetAudioChannelsCount()", source_frame->GetAudioChannelsCount(), "info.channels", info.channels, "clip_frame_number", clip_frame_number, "timeline_frame_number", timeline_frame_number, "", -1);

    }

    // Skip out if only an audio frame
    if (!source_clip->Waveform() && !source_clip->Reader()->info.has_video)
        // Skip the rest of the image processing for performance reasons
        return;

    // Debug output
    ZmqLogger::Instance()->AppendDebugMethod("Timeline::add_layer (Get Source Image)", "source_frame->number", source_frame->number, "source_clip->Waveform()", source_clip->Waveform(), "clip_frame_number", clip_frame_number, "", -1, "", -1, "", -1);

    // Get actual frame image data
    source_image = source_frame->GetImage();

    // Get some basic image properties
    int source_width = source_image->width();
    int source_height = source_image->height();

    /* ALPHA & OPACITY */
    if (source_clip->alpha.GetValue(clip_frame_number) != 1.0)
    {
        float alpha = source_clip->alpha.GetValue(clip_frame_number);

        // Get source image's pixels
        unsigned char *pixels = (unsigned char *) source_image->bits();

        // Loop through pixels
        for (int pixel = 0, byte_index = 0; pixel < source_image->width() * source_image->height(); pixel++, byte_index += 4)
            // Multiply the pixel's alpha channel (byte 4 of each RGBA pixel) by the clip's alpha value
            pixels[byte_index + 3] *= alpha;

        // Debug output
        ZmqLogger::Instance()->AppendDebugMethod("Timeline::add_layer (Set Alpha & Opacity)", "alpha", alpha, "source_frame->number", source_frame->number, "clip_frame_number", clip_frame_number, "", -1, "", -1, "", -1);
    }

    /* RESIZE SOURCE IMAGE - based on scale type */
    switch (source_clip->scale)
    {
    case (SCALE_FIT):
        // keep aspect ratio
        source_image = std::shared_ptr<QImage>(new QImage(source_image->scaled(max_width, max_height, Qt::KeepAspectRatio, Qt::SmoothTransformation)));
        source_width = source_image->width();
        source_height = source_image->height();

        // Debug output
        ZmqLogger::Instance()->AppendDebugMethod("Timeline::add_layer (Scale: SCALE_FIT)", "source_frame->number", source_frame->number, "source_width", source_width, "source_height", source_height, "", -1, "", -1, "", -1);
        break;

    case (SCALE_STRETCH):
        // ignore aspect ratio
        source_image = std::shared_ptr<QImage>(new QImage(source_image->scaled(max_width, max_height, Qt::IgnoreAspectRatio, Qt::SmoothTransformation)));
        source_width = source_image->width();
        source_height = source_image->height();

        // Debug output
        ZmqLogger::Instance()->AppendDebugMethod("Timeline::add_layer (Scale: SCALE_STRETCH)", "source_frame->number", source_frame->number, "source_width", source_width, "source_height", source_height, "", -1, "", -1, "", -1);
        break;

    case (SCALE_CROP): {
        QSize width_size(max_width, round(max_width / (float(source_width) / float(source_height))));
        QSize height_size(round(max_height / (float(source_height) / float(source_width))), max_height);

        // respect aspect ratio
        if (width_size.width() >= max_width && width_size.height() >= max_height)
            source_image = std::shared_ptr<QImage>(new QImage(source_image->scaled(width_size.width(), width_size.height(), Qt::KeepAspectRatio, Qt::SmoothTransformation)));
        else
            source_image = std::shared_ptr<QImage>(new QImage(source_image->scaled(height_size.width(), height_size.height(), Qt::KeepAspectRatio, Qt::SmoothTransformation))); // height is larger, so resize to it
        source_width = source_image->width();
        source_height = source_image->height();

        // Debug output
        ZmqLogger::Instance()->AppendDebugMethod("Timeline::add_layer (Scale: SCALE_CROP)", "source_frame->number", source_frame->number, "source_width", source_width, "source_height", source_height, "", -1, "", -1, "", -1);
        break;
    }
    }

    /* GRAVITY LOCATION - Initialize X & Y to the correct values (before applying location curves) */
    float x = 0.0; // left
    float y = 0.0; // top

    // Adjust size for scale x and scale y
    float sx = source_clip->scale_x.GetValue(clip_frame_number); // percentage X scale
    float sy = source_clip->scale_y.GetValue(clip_frame_number); // percentage Y scale
    float scaled_source_width = source_width * sx;
    float scaled_source_height = source_height * sy;

    switch (source_clip->gravity)
    {
    case (GRAVITY_TOP):
        x = (max_width - scaled_source_width) / 2.0; // center
        break;
    case (GRAVITY_TOP_RIGHT):
        x = max_width - scaled_source_width; // right
        break;
    case (GRAVITY_LEFT):
        y = (max_height - scaled_source_height) / 2.0; // center
        break;
    case (GRAVITY_CENTER):
        x = (max_width - scaled_source_width) / 2.0; // center
        y = (max_height - scaled_source_height) / 2.0; // center
        break;
    case (GRAVITY_RIGHT):
        x = max_width - scaled_source_width; // right
        y = (max_height - scaled_source_height) / 2.0; // center
        break;
    case (GRAVITY_BOTTOM_LEFT):
        y = (max_height - scaled_source_height); // bottom
        break;
    case (GRAVITY_BOTTOM):
        x = (max_width - scaled_source_width) / 2.0; // center
        y = (max_height - scaled_source_height); // bottom
        break;
    case (GRAVITY_BOTTOM_RIGHT):
        x = max_width - scaled_source_width; // right
        y = (max_height - scaled_source_height); // bottom
        break;
    }
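
    /* Worked example (illustrative): on a 1920x1080 timeline with a scaled
     * source of 960x540 (sx = sy = 0.5), GRAVITY_CENTER yields
     * x = (1920 - 960) / 2 = 480 and y = (1080 - 540) / 2 = 270. */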

    // Debug output
    ZmqLogger::Instance()->AppendDebugMethod("Timeline::add_layer (Gravity)", "source_frame->number", source_frame->number, "source_clip->gravity", source_clip->gravity, "info.width", info.width, "source_width", source_width, "info.height", info.height, "source_height", source_height);

    /* LOCATION, ROTATION, AND SCALE */
    float r = source_clip->rotation.GetValue(clip_frame_number); // rotate in degrees
    x += (max_width * source_clip->location_x.GetValue(clip_frame_number)); // move in percentage of final width
    y += (max_height * source_clip->location_y.GetValue(clip_frame_number)); // move in percentage of final height
    bool is_x_animated = source_clip->location_x.Points.size() > 1;
    bool is_y_animated = source_clip->location_y.Points.size() > 1;
    float shear_x = source_clip->shear_x.GetValue(clip_frame_number);
    float shear_y = source_clip->shear_y.GetValue(clip_frame_number);

    int offset_x = -1;
    int offset_y = -1;
    bool transformed = false;
    QTransform transform;

    // Transform source image (if needed)
    ZmqLogger::Instance()->AppendDebugMethod("Timeline::add_layer (Build QTransform - if needed)", "source_frame->number", source_frame->number, "x", x, "y", y, "r", r, "sx", sx, "sy", sy);

    if (!isEqual(x, 0) || !isEqual(y, 0)) {
        // TRANSLATE/MOVE CLIP
        transform.translate(x, y);
        transformed = true;
    }

    if (!isEqual(sx, 1.0) || !isEqual(sy, 1.0)) {
        // SCALE CLIP (a scale of 1.0 is the identity, so only add the operation when the scale differs)
        transform.scale(sx, sy);
        transformed = true;
    }

    if (!isEqual(shear_x, 0) || !isEqual(shear_y, 0)) {
        // SHEAR HEIGHT/WIDTH
        transform.shear(shear_x, shear_y);
        transformed = true;
    }

    if (!isEqual(r, 0)) {
        // ROTATE CLIP (around the center of the scaled image)
        float origin_x = x + ((source_width * sx) / 2.0);
        float origin_y = y + ((source_height * sy) / 2.0);
        transform.translate(origin_x, origin_y);
        transform.rotate(r);
        transform.translate(-origin_x, -origin_y);
        transformed = true;
    }

    // Debug output
    ZmqLogger::Instance()->AppendDebugMethod("Timeline::add_layer (Transform: Composite Image Layer: Prepare)", "source_frame->number", source_frame->number, "offset_x", offset_x, "offset_y", offset_y, "new_frame->GetImage()->width()", new_frame->GetImage()->width(), "transformed", transformed, "", -1);

    /* COMPOSITE SOURCE IMAGE (LAYER) ONTO FINAL IMAGE */
    std::shared_ptr<QImage> new_image = new_frame->GetImage();

    // Load timeline's new frame image into a QPainter
    QPainter painter(new_image.get());
    painter.setRenderHints(QPainter::Antialiasing | QPainter::SmoothPixmapTransform | QPainter::TextAntialiasing, true);

    // Apply transform (translate, rotate, scale)... if any
    if (transformed)
        painter.setTransform(transform);

    // Composite a new layer onto the image
    painter.setCompositionMode(QPainter::CompositionMode_SourceOver);
    painter.drawImage(0, 0, *source_image);

    // Draw frame #'s on top of image (if needed)
    if (source_clip->display != FRAME_DISPLAY_NONE) {
        stringstream frame_number_str;
        switch (source_clip->display)
        {
        case (FRAME_DISPLAY_CLIP):
            frame_number_str << clip_frame_number;
            break;

        case (FRAME_DISPLAY_TIMELINE):
            frame_number_str << timeline_frame_number;
            break;

        case (FRAME_DISPLAY_BOTH):
            frame_number_str << timeline_frame_number << " (" << clip_frame_number << ")";
            break;
        }

        // Draw frame number on top of image
        painter.setPen(QColor("#ffffff"));
        painter.drawText(20, 20, QString(frame_number_str.str().c_str()));
    }

    painter.end();

    // Debug output
    ZmqLogger::Instance()->AppendDebugMethod("Timeline::add_layer (Transform: Composite Image Layer: Completed)", "source_frame->number", source_frame->number, "offset_x", offset_x, "offset_y", offset_y, "new_frame->GetImage()->width()", new_frame->GetImage()->width(), "transformed", transformed, "", -1);
}

// Update the list of 'opened' clips
void Timeline::update_open_clips(Clip *clip, bool does_clip_intersect)
{
    ZmqLogger::Instance()->AppendDebugMethod("Timeline::update_open_clips (before)", "does_clip_intersect", does_clip_intersect, "closing_clips.size()", closing_clips.size(), "open_clips.size()", open_clips.size(), "", -1, "", -1, "", -1);

    // is clip already in list?
    bool clip_found = open_clips.count(clip);

    if (clip_found && !does_clip_intersect)
    {
        // Remove clip from 'opened' list, because it's closed now
        open_clips.erase(clip);

        // Close clip
        clip->Close();
    }
    else if (!clip_found && does_clip_intersect)
    {
        // Add clip to 'opened' list, because it's missing
        open_clips[clip] = clip;

        // Open the clip
        clip->Open();
    }

    // Debug output
    ZmqLogger::Instance()->AppendDebugMethod("Timeline::update_open_clips (after)", "does_clip_intersect", does_clip_intersect, "clip_found", clip_found, "closing_clips.size()", closing_clips.size(), "open_clips.size()", open_clips.size(), "", -1, "", -1);
}

// Sort clips by position on the timeline
void Timeline::sort_clips()
{
    // Debug output
    ZmqLogger::Instance()->AppendDebugMethod("Timeline::SortClips", "clips.size()", clips.size(), "", -1, "", -1, "", -1, "", -1, "", -1);

    // sort clips
    clips.sort(CompareClips());
}

// Sort effects by position on the timeline
void Timeline::sort_effects()
{
    // sort effects
    effects.sort(CompareEffects());
}

// Close the reader (and any resources it was consuming)
void Timeline::Close()
{
    ZmqLogger::Instance()->AppendDebugMethod("Timeline::Close", "", -1, "", -1, "", -1, "", -1, "", -1, "", -1);

    // Close all open clips
    list<Clip*>::iterator clip_itr;
    for (clip_itr=clips.begin(); clip_itr != clips.end(); ++clip_itr)
    {
        // Get clip object from the iterator
        Clip *clip = (*clip_itr);

        // Open or Close this clip, based on if it's intersecting or not
        update_open_clips(clip, false);
    }

    // Mark timeline as closed
    is_open = false;

    // Clear cache
    final_cache->Clear();
}

// Open the reader (and start consuming resources)
void Timeline::Open()
{
    is_open = true;
}

// Compare 2 floating point numbers for equality
bool Timeline::isEqual(double a, double b)
{
    return fabs(a - b) < 0.000001;
}

// Get an openshot::Frame object for a specific frame number of this reader.
std::shared_ptr<Frame> Timeline::GetFrame(long int requested_frame) throw(ReaderClosed, OutOfBoundsFrame)
{
    // Adjust out of bounds frame number
    if (requested_frame < 1)
        requested_frame = 1;

    // Check cache
    std::shared_ptr<Frame> frame = final_cache->GetFrame(requested_frame);
    if (frame) {
        // Debug output
        ZmqLogger::Instance()->AppendDebugMethod("Timeline::GetFrame (Cached frame found)", "requested_frame", requested_frame, "", -1, "", -1, "", -1, "", -1, "", -1);

        // Return cached frame
        return frame;
    }
    else
    {
        // Create a scoped lock, allowing only a single thread to run the following code at one time
        const GenericScopedLock<CriticalSection> lock(getFrameCriticalSection);

        // Check for open reader (or throw exception)
        if (!is_open)
            throw ReaderClosed("The Timeline is closed. Call Open() before calling this method.", "");

        // Check cache again (due to locking)
        frame = final_cache->GetFrame(requested_frame);
        if (frame) {
            // Debug output
            ZmqLogger::Instance()->AppendDebugMethod("Timeline::GetFrame (Cached frame found on 2nd look)", "requested_frame", requested_frame, "", -1, "", -1, "", -1, "", -1, "", -1);

            // Return cached frame
            return frame;
        }

        // Minimum number of frames to process (for performance reasons)
        int minimum_frames = OPEN_MP_NUM_PROCESSORS;

        // Get a list of clips that intersect with the requested section of timeline
        // This also opens the readers for intersecting clips, and marks non-intersecting clips as 'needs closing'
        vector<Clip*> nearby_clips = find_intersecting_clips(requested_frame, minimum_frames, true);

        omp_set_num_threads(OPEN_MP_NUM_PROCESSORS);
        // Allow nested OpenMP sections
        omp_set_nested(true);

        // Debug output
        ZmqLogger::Instance()->AppendDebugMethod("Timeline::GetFrame", "requested_frame", requested_frame, "minimum_frames", minimum_frames, "OPEN_MP_NUM_PROCESSORS", OPEN_MP_NUM_PROCESSORS, "", -1, "", -1, "", -1);

        // GENERATE CACHE FOR CLIPS (IN FRAME # SEQUENCE)
        // Determine all clip frames, and request them in order (to keep resampled audio in sequence)
        for (long int frame_number = requested_frame; frame_number < requested_frame + minimum_frames; frame_number++)
        {
            // Loop through clips
            for (int clip_index = 0; clip_index < nearby_clips.size(); clip_index++)
            {
                // Get clip object from the vector
                Clip *clip = nearby_clips[clip_index];
                long clip_start_position = round(clip->Position() * info.fps.ToDouble()) + 1;
                long clip_end_position = round((clip->Position() + clip->Duration()) * info.fps.ToDouble()) + 1;

                bool does_clip_intersect = (clip_start_position <= frame_number && clip_end_position >= frame_number);
                if (does_clip_intersect)
                {
                    // Get clip frame #
                    long clip_start_frame = (clip->Start() * info.fps.ToDouble()) + 1;
                    long clip_frame_number = frame_number - clip_start_position + clip_start_frame;
                    // Cache clip object
                    clip->GetFrame(clip_frame_number);
                }
            }
        }

        #pragma omp parallel
        {
            // Loop through all requested frames
            #pragma omp for ordered firstprivate(nearby_clips, requested_frame, minimum_frames)
            for (long int frame_number = requested_frame; frame_number < requested_frame + minimum_frames; frame_number++)
            {
                // Debug output
                ZmqLogger::Instance()->AppendDebugMethod("Timeline::GetFrame (processing frame)", "frame_number", frame_number, "omp_get_thread_num()", omp_get_thread_num(), "", -1, "", -1, "", -1, "", -1);

                // Init some basic properties about this frame
                int samples_in_frame = Frame::GetSamplesPerFrame(frame_number, info.fps, info.sample_rate, info.channels);

                // Create blank frame (which will become the requested frame)
                std::shared_ptr<Frame> new_frame(std::make_shared<Frame>(frame_number, max_width, max_height, "#000000", samples_in_frame, info.channels));
                new_frame->AddAudioSilence(samples_in_frame);
                new_frame->SampleRate(info.sample_rate);
                new_frame->ChannelsLayout(info.channel_layout);

                // Debug output
                ZmqLogger::Instance()->AppendDebugMethod("Timeline::GetFrame (Adding solid color)", "frame_number", frame_number, "info.width", info.width, "info.height", info.height, "", -1, "", -1, "", -1);

                // Add Background Color to 1st layer (if animated or not black)
                if ((color.red.Points.size() > 1 || color.green.Points.size() > 1 || color.blue.Points.size() > 1) ||
                    (color.red.GetValue(frame_number) != 0.0 || color.green.GetValue(frame_number) != 0.0 || color.blue.GetValue(frame_number) != 0.0))
                    new_frame->AddColor(max_width, max_height, color.GetColorHex(frame_number));

                // Debug output
                ZmqLogger::Instance()->AppendDebugMethod("Timeline::GetFrame (Loop through clips)", "frame_number", frame_number, "clips.size()", clips.size(), "nearby_clips.size()", nearby_clips.size(), "", -1, "", -1, "", -1);

                // Find Clips near this time
                for (int clip_index = 0; clip_index < nearby_clips.size(); clip_index++)
                {
                    // Get clip object from the vector
                    Clip *clip = nearby_clips[clip_index];
                    long clip_start_position = round(clip->Position() * info.fps.ToDouble()) + 1;
                    long clip_end_position = round((clip->Position() + clip->Duration()) * info.fps.ToDouble()) + 1;

                    bool does_clip_intersect = (clip_start_position <= frame_number && clip_end_position >= frame_number);

                    // Debug output
                    ZmqLogger::Instance()->AppendDebugMethod("Timeline::GetFrame (Does clip intersect)", "frame_number", frame_number, "clip->Position()", clip->Position(), "clip->Duration()", clip->Duration(), "does_clip_intersect", does_clip_intersect, "", -1, "", -1);

                    // Clip is visible
                    if (does_clip_intersect)
                    {
                        // Determine if clip is "top" clip on this layer (only happens when multiple clips are overlapping)
                        bool is_top_clip = true;
                        for (int top_clip_index = 0; top_clip_index < nearby_clips.size(); top_clip_index++)
                        {
                            Clip *nearby_clip = nearby_clips[top_clip_index];
                            long nearby_clip_start_position = round(nearby_clip->Position() * info.fps.ToDouble()) + 1;
                            long nearby_clip_end_position = round((nearby_clip->Position() + nearby_clip->Duration()) * info.fps.ToDouble()) + 1;

                            if (clip->Id() != nearby_clip->Id() && clip->Layer() == nearby_clip->Layer() &&
                                nearby_clip_start_position <= frame_number && nearby_clip_end_position >= frame_number &&
                                nearby_clip_start_position > clip_start_position) {
                                is_top_clip = false;
                                break;
                            }
                        }

                        // Determine the frame needed for this clip (based on the position on the timeline)
                        long clip_start_frame = (clip->Start() * info.fps.ToDouble()) + 1;
                        long clip_frame_number = frame_number - clip_start_position + clip_start_frame;

                        // Debug output
                        ZmqLogger::Instance()->AppendDebugMethod("Timeline::GetFrame (Calculate clip's frame #)", "clip->Position()", clip->Position(), "clip->Start()", clip->Start(), "info.fps.ToFloat()", info.fps.ToFloat(), "clip_frame_number", clip_frame_number, "", -1, "", -1);

                        // Add clip's frame as layer
                        add_layer(new_frame, clip, clip_frame_number, frame_number, is_top_clip);

                    } else
                        // Debug output
                        ZmqLogger::Instance()->AppendDebugMethod("Timeline::GetFrame (clip does not intersect)", "frame_number", frame_number, "does_clip_intersect", does_clip_intersect, "", -1, "", -1, "", -1, "", -1);

                } // end clip loop

                // Debug output
                ZmqLogger::Instance()->AppendDebugMethod("Timeline::GetFrame (Add frame to cache)", "frame_number", frame_number, "info.width", info.width, "info.height", info.height, "", -1, "", -1, "", -1);

                // Set frame # on mapped frame
                new_frame->SetFrameNumber(frame_number);

                // Add final frame to cache
                final_cache->Add(new_frame);

            } // end frame loop
        } // end parallel

        // Debug output
        ZmqLogger::Instance()->AppendDebugMethod("Timeline::GetFrame (end parallel region)", "requested_frame", requested_frame, "omp_get_thread_num()", omp_get_thread_num(), "", -1, "", -1, "", -1, "", -1);

        // Return frame (or blank frame)
        return final_cache->GetFrame(requested_frame);
    }
}
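
/* Example usage (illustrative only, not part of this file):
 *
 *     timeline.Open();
 *     for (long int frame = 1; frame <= 100; frame++)
 *         std::shared_ptr<Frame> f = timeline.GetFrame(frame); // frames are rendered and cached in batches
 *     timeline.Close();
 */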


// Find intersecting clips (or non intersecting clips)
vector<Clip*> Timeline::find_intersecting_clips(long int requested_frame, int number_of_frames, bool include)
{
    // Find matching clips
    vector<Clip*> matching_clips;

    // Calculate the frame range being requested
    float min_requested_frame = requested_frame;
    float max_requested_frame = requested_frame + (number_of_frames - 1);

    // Re-Sort Clips (since they likely changed)
    sort_clips();

    // Find Clips at this time
    list<Clip*>::iterator clip_itr;
    for (clip_itr=clips.begin(); clip_itr != clips.end(); ++clip_itr)
    {
        // Get clip object from the iterator
        Clip *clip = (*clip_itr);

        // Does clip intersect the current requested time
        long clip_start_position = round(clip->Position() * info.fps.ToDouble()) + 1;
        long clip_end_position = round((clip->Position() + clip->Duration()) * info.fps.ToDouble()) + 1;

        bool does_clip_intersect =
            (clip_start_position <= min_requested_frame || clip_start_position <= max_requested_frame) &&
            (clip_end_position >= min_requested_frame || clip_end_position >= max_requested_frame);

        // Debug output
        ZmqLogger::Instance()->AppendDebugMethod("Timeline::find_intersecting_clips (Is clip near or intersecting)", "requested_frame", requested_frame, "min_requested_frame", min_requested_frame, "max_requested_frame", max_requested_frame, "clip->Position()", clip->Position(), "does_clip_intersect", does_clip_intersect, "", -1);

        // Open (or schedule for closing) this clip, based on if it's intersecting or not
        #pragma omp critical (reader_lock)
        update_open_clips(clip, does_clip_intersect);

        // Clip is visible
        if (does_clip_intersect && include)
            // Add the intersecting clip
            matching_clips.push_back(clip);

        else if (!does_clip_intersect && !include)
            // Add the non-intersecting clip
            matching_clips.push_back(clip);

    } // end clip loop

    // return list
    return matching_clips;
}

// Set the cache object used by this reader
void Timeline::SetCache(CacheBase* new_cache) {
    // Set new cache
    final_cache = new_cache;
}
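
/* Example usage (illustrative; assumes libopenshot's optional CacheDisk
 * backend and a hypothetical cache path - verify the CacheDisk constructor
 * signature for your version):
 *
 *     CacheDisk* disk_cache = new CacheDisk("/tmp/openshot-cache", "PPM", 1.0, 0.25);
 *     timeline.SetCache(disk_cache); // final frames are now cached on disk
 */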

// Generate JSON string of this object
string Timeline::Json() {

    // Return formatted string
    return JsonValue().toStyledString();
}

// Generate Json::Value for this object
Json::Value Timeline::JsonValue() {

    // Create root json object
    Json::Value root = ReaderBase::JsonValue(); // get parent properties
    root["type"] = "Timeline";
    root["viewport_scale"] = viewport_scale.JsonValue();
    root["viewport_x"] = viewport_x.JsonValue();
    root["viewport_y"] = viewport_y.JsonValue();
    root["color"] = color.JsonValue();

    // Add array of clips
    root["clips"] = Json::Value(Json::arrayValue);

    // Loop through clips
    list<Clip*>::iterator clip_itr;
    for (clip_itr=clips.begin(); clip_itr != clips.end(); ++clip_itr)
    {
        // Get clip object from the iterator
        Clip *existing_clip = (*clip_itr);
        root["clips"].append(existing_clip->JsonValue());
    }

    // Add array of effects
    root["effects"] = Json::Value(Json::arrayValue);

    // Loop through effects
    list<EffectBase*>::iterator effect_itr;
    for (effect_itr=effects.begin(); effect_itr != effects.end(); ++effect_itr)
    {
        // Get effect object from the iterator
        EffectBase *existing_effect = (*effect_itr);
        root["effects"].append(existing_effect->JsonValue());
    }

    // return JsonValue
    return root;
}

// Load JSON string into this object
void Timeline::SetJson(string value) throw(InvalidJSON) {

    // Get lock (prevent getting frames while this happens)
    const GenericScopedLock<CriticalSection> lock(getFrameCriticalSection);

    // Parse JSON string into JSON objects
    Json::Value root;
    Json::Reader reader;
    bool success = reader.parse( value, root );
    if (!success)
        // Raise exception
        throw InvalidJSON("JSON could not be parsed (or is invalid)", "");

    try
    {
        // Set all values that match
        SetJsonValue(root);
    }
    catch (const exception& e)
    {
        // Error parsing JSON (or missing keys)
        throw InvalidJSON("JSON is invalid (missing keys or invalid data types)", "");
    }
}
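
/* Example JSON (illustrative; the id is hypothetical and the reader object is
 * elided):
 *
 *     {"duration": 300.0,
 *      "clips": [{"id": "CLIP1", "position": 0.0, "reader": { ... }}],
 *      "effects": []}
 */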

// Load Json::Value into this object
void Timeline::SetJsonValue(Json::Value root) throw(InvalidFile, ReaderClosed) {

    // Close timeline before we do anything (this also removes all open and closing clips)
    bool was_open = is_open;
    Close();

    // Set parent data
    ReaderBase::SetJsonValue(root);

    if (!root["clips"].isNull()) {
        // Clear existing clips
        clips.clear();

        // loop through clips
        for (int x = 0; x < root["clips"].size(); x++) {
            // Get each clip
            Json::Value existing_clip = root["clips"][x];

            // Create Clip
            Clip *c = new Clip();

            // Load Json into Clip
            c->SetJsonValue(existing_clip);

            // Add Clip to Timeline
            AddClip(c);
        }
    }

    if (!root["effects"].isNull()) {
        // Clear existing effects
        effects.clear();

        // loop through effects
        for (int x = 0; x < root["effects"].size(); x++) {
            // Get each effect
            Json::Value existing_effect = root["effects"][x];

            // Create Effect
            EffectBase *e = NULL;

            if (!existing_effect["type"].isNull()) {
                // Create instance of effect
                e = EffectInfo().CreateEffect(existing_effect["type"].asString());

                // Load Json into Effect
                e->SetJsonValue(existing_effect);

                // Add Effect to Timeline
                AddEffect(e);
            }
        }
    }

    if (!root["duration"].isNull()) {
        // Update duration of timeline (and resize the cache accordingly)
        info.duration = root["duration"].asDouble();
        final_cache->SetMaxBytesFromInfo(OPEN_MP_NUM_PROCESSORS * 2, info.width, info.height, info.sample_rate, info.channels);
    }

    // Re-open if needed
    if (was_open)
        Open();
}

// Apply a special formatted JSON object, which represents a change to the timeline (insert, update, delete)
void Timeline::ApplyJsonDiff(string value) throw(InvalidJSON, InvalidJSONKey, InvalidFile) {

    // Get lock (prevent getting frames while this happens)
    const GenericScopedLock<CriticalSection> lock(getFrameCriticalSection);

    // Parse JSON string into JSON objects
    Json::Value root;
    Json::Reader reader;
    bool success = reader.parse( value, root );
    if (!success || !root.isArray())
        // Raise exception
        throw InvalidJSON("JSON could not be parsed (or is invalid).", "");

    try
    {
        // Process the JSON change array, loop through each item
        for (int x = 0; x < root.size(); x++) {
            // Get each change
            Json::Value change = root[x];
            string root_key = change["key"][(uint)0].asString();

            // Process each type of change
            if (root_key == "clips")
                // Apply to CLIPS
                apply_json_to_clips(change);

            else if (root_key == "effects")
                // Apply to EFFECTS
                apply_json_to_effects(change);

            else
                // Apply to TIMELINE
                apply_json_to_timeline(change);

        }
    }
    catch (const exception& e)
    {
        // Error parsing JSON (or missing keys)
        throw InvalidJSON("JSON is invalid (missing keys or invalid data types)", "");
    }
}
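
/* Example change array (illustrative; the id is hypothetical):
 *
 *     [{"type": "update",
 *       "key": ["clips", {"id": "CLIP1"}],
 *       "value": {"id": "CLIP1", "position": 5.0, "start": 0.0, "end": 10.0}}]
 */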

// Apply JSON diff to clips
void Timeline::apply_json_to_clips(Json::Value change) throw(InvalidJSONKey) {

    // Get key and type of change
    string change_type = change["type"].asString();
    string clip_id = "";
    Clip *existing_clip = NULL;

    // Find id of clip (if any)
    for (int x = 0; x < change["key"].size(); x++) {
        // Get each change
        Json::Value key_part = change["key"][x];

        if (key_part.isObject()) {
            // Check for id
            if (!key_part["id"].isNull()) {
                // Set the id
                clip_id = key_part["id"].asString();

                // Find matching clip in timeline (if any)
                list<Clip*>::iterator clip_itr;
                for (clip_itr=clips.begin(); clip_itr != clips.end(); ++clip_itr)
                {
                    // Get clip object from the iterator
                    Clip *c = (*clip_itr);
                    if (c->Id() == clip_id) {
                        existing_clip = c;
                        break; // clip found, exit loop
                    }
                }
                break; // id found, exit loop
            }
        }
    }

    // Check for a more specific key (targeting this clip's effects)
    // For example: ["clips", {"id":123}, "effects", {"id":432}]
    if (existing_clip && change["key"].size() == 4 && change["key"][2] == "effects")
    {
        // This change is actually targeting a specific effect under a clip (and not the clip itself)
        Json::Value key_part = change["key"][3];

        if (key_part.isObject()) {
            // Check for id
            if (!key_part["id"].isNull())
            {
                // Set the id
                string effect_id = key_part["id"].asString();

                // Find matching effect in timeline (if any)
                list<EffectBase*> effect_list = existing_clip->Effects();
                list<EffectBase*>::iterator effect_itr;
                for (effect_itr=effect_list.begin(); effect_itr != effect_list.end(); ++effect_itr)
                {
                    // Get effect object from the iterator
                    EffectBase *e = (*effect_itr);
                    if (e->Id() == effect_id) {
                        // Apply the change to the effect directly
                        apply_json_to_effects(change, e);

                        // Calculate start and end frames that this impacts, and remove those frames from the cache
                        long int new_starting_frame = (existing_clip->Position() * info.fps.ToDouble()) + 1;
                        long int new_ending_frame = ((existing_clip->Position() + existing_clip->Duration()) * info.fps.ToDouble()) + 1;
                        final_cache->Remove(new_starting_frame - 8, new_ending_frame + 8);

                        return; // effect found, don't update clip
                    }
                }
            }
        }
    }

    // Calculate start and end frames that this impacts, and remove those frames from the cache
    if (!change["value"].isArray() && !change["value"]["position"].isNull()) {
        long int new_starting_frame = (change["value"]["position"].asDouble() * info.fps.ToDouble()) + 1;
        long int new_ending_frame = ((change["value"]["position"].asDouble() + change["value"]["end"].asDouble() - change["value"]["start"].asDouble()) * info.fps.ToDouble()) + 1;
        final_cache->Remove(new_starting_frame - 8, new_ending_frame + 8);
    }

    // Determine type of change operation
    if (change_type == "insert") {

        // Create new clip
        Clip *clip = new Clip();
        clip->SetJsonValue(change["value"]); // Set properties of new clip from JSON
        AddClip(clip); // Add clip to timeline

    } else if (change_type == "update") {

        // Update existing clip
        if (existing_clip) {

            // Calculate start and end frames that this impacts, and remove those frames from the cache
            long int old_starting_frame = (existing_clip->Position() * info.fps.ToDouble()) + 1;
            long int old_ending_frame = ((existing_clip->Position() + existing_clip->Duration()) * info.fps.ToDouble()) + 1;
            final_cache->Remove(old_starting_frame - 8, old_ending_frame + 8);

            // Remove cache on clip's Reader (if found)
            if (existing_clip->Reader() && existing_clip->Reader()->GetCache())
                existing_clip->Reader()->GetCache()->Remove(old_starting_frame - 8, old_ending_frame + 8);

            // Update clip properties from JSON
            existing_clip->SetJsonValue(change["value"]);

            // Clear any cached image sizes (since the size might have changed)
            existing_clip->SetMaxSize(0, 0); // force clearing of cached image size
            if (existing_clip->Reader()) {
                existing_clip->Reader()->SetMaxSize(0, 0);
                if (existing_clip->Reader()->Name() == "FrameMapper") {
                    FrameMapper *nested_reader = (FrameMapper *) existing_clip->Reader();
                    if (nested_reader->Reader())
                        nested_reader->Reader()->SetMaxSize(0, 0);
                }
            }
        }

    } else if (change_type == "delete") {

        // Remove existing clip
        if (existing_clip) {

            // Calculate start and end frames that this impacts, and remove those frames from the cache
            long int old_starting_frame = (existing_clip->Position() * info.fps.ToDouble()) + 1;
            long int old_ending_frame = ((existing_clip->Position() + existing_clip->Duration()) * info.fps.ToDouble()) + 1;
            final_cache->Remove(old_starting_frame - 8, old_ending_frame + 8);

            // Remove clip from timeline
            RemoveClip(existing_clip);
        }

    }

}

// Apply JSON diff to effects
void Timeline::apply_json_to_effects(Json::Value change) throw(InvalidJSONKey) {

    // Get key and type of change
    string change_type = change["type"].asString();
    EffectBase *existing_effect = NULL;

    // Find id of an effect (if any)
    for (int x = 0; x < change["key"].size(); x++) {
        // Get each change
        Json::Value key_part = change["key"][x];

        if (key_part.isObject()) {
            // Check for id
            if (!key_part["id"].isNull())
            {
                // Set the id
                string effect_id = key_part["id"].asString();

                // Find matching effect in timeline (if any)
                list<EffectBase*>::iterator effect_itr;
                for (effect_itr=effects.begin(); effect_itr != effects.end(); ++effect_itr)
                {
                    // Get effect object from the iterator
                    EffectBase *e = (*effect_itr);
                    if (e->Id() == effect_id) {
                        existing_effect = e;
                        break; // effect found, exit loop
                    }
                }
                break; // id found, exit loop
            }
        }
    }

    // Now that we found the effect, apply the change to it
    if (existing_effect || change_type == "insert")
        // Apply change to effect
        apply_json_to_effects(change, existing_effect);
}

// Apply JSON diff to effects (if you already know which effect needs to be updated)
void Timeline::apply_json_to_effects(Json::Value change, EffectBase* existing_effect) throw(InvalidJSONKey) {

    // Get key and type of change
    string change_type = change["type"].asString();

    // Calculate start and end frames that this impacts, and remove those frames from the cache
    if (!change["value"].isArray() && !change["value"]["position"].isNull()) {
        long int new_starting_frame = (change["value"]["position"].asDouble() * info.fps.ToDouble()) + 1;
        long int new_ending_frame = ((change["value"]["position"].asDouble() + change["value"]["end"].asDouble() - change["value"]["start"].asDouble()) * info.fps.ToDouble()) + 1;
        final_cache->Remove(new_starting_frame - 8, new_ending_frame + 8);
    }

    // Determine type of change operation
    if (change_type == "insert") {

        // Determine type of effect
        string effect_type = change["value"]["type"].asString();

        // Create Effect
        EffectBase *e = NULL;

        // Init the matching effect object
        e = EffectInfo().CreateEffect(effect_type);

        // Load Json into Effect
        e->SetJsonValue(change["value"]);

        // Add Effect to Timeline
        AddEffect(e);

    } else if (change_type == "update") {

        // Update existing effect
        if (existing_effect) {

            // Calculate start and end frames that this impacts, and remove those frames from the cache
            long int old_starting_frame = (existing_effect->Position() * info.fps.ToDouble()) + 1;
            long int old_ending_frame = ((existing_effect->Position() + existing_effect->Duration()) * info.fps.ToDouble()) + 1;
            final_cache->Remove(old_starting_frame - 8, old_ending_frame + 8);

            // Update effect properties from JSON
            existing_effect->SetJsonValue(change["value"]);
        }

    } else if (change_type == "delete") {

        // Remove existing effect
        if (existing_effect) {

            // Calculate start and end frames that this impacts, and remove those frames from the cache
            long int old_starting_frame = (existing_effect->Position() * info.fps.ToDouble()) + 1;
            long int old_ending_frame = ((existing_effect->Position() + existing_effect->Duration()) * info.fps.ToDouble()) + 1;
            final_cache->Remove(old_starting_frame - 8, old_ending_frame + 8);

            // Remove effect from timeline
            RemoveEffect(existing_effect);
        }

    }
}

// Apply JSON diff to timeline properties
void Timeline::apply_json_to_timeline(Json::Value change) throw(InvalidJSONKey) {

    // Get key and type of change
    string change_type = change["type"].asString();
    string root_key = change["key"][(uint)0].asString();
    string sub_key = "";
    if (change["key"].size() >= 2)
        sub_key = change["key"][(uint)1].asString();

    // Clear entire cache
    final_cache->Clear();

    // Determine type of change operation
    if (change_type == "insert" || change_type == "update") {

        // INSERT / UPDATE
        // Check for valid property
        if (root_key == "color")
            // Set color
            color.SetJsonValue(change["value"]);
        else if (root_key == "viewport_scale")
            // Set viewport scale
            viewport_scale.SetJsonValue(change["value"]);
        else if (root_key == "viewport_x")
            // Set viewport x offset
            viewport_x.SetJsonValue(change["value"]);
        else if (root_key == "viewport_y")
            // Set viewport y offset
            viewport_y.SetJsonValue(change["value"]);
        else if (root_key == "duration") {
            // Update duration of timeline (and resize the cache accordingly)
            info.duration = change["value"].asDouble();
            final_cache->SetMaxBytesFromInfo(OPEN_MP_NUM_PROCESSORS * 2, info.width, info.height, info.sample_rate, info.channels);
        }
        else if (root_key == "width")
            // Set width
            info.width = change["value"].asInt();
        else if (root_key == "height")
            // Set height
            info.height = change["value"].asInt();
        else if (root_key == "fps" && sub_key == "" && change["value"].isObject()) {
            // Set fps fraction
            if (!change["value"]["num"].isNull())
                info.fps.num = change["value"]["num"].asInt();
            if (!change["value"]["den"].isNull())
                info.fps.den = change["value"]["den"].asInt();
        }
        else if (root_key == "fps" && sub_key == "num")
            // Set fps.num
            info.fps.num = change["value"].asInt();
        else if (root_key == "fps" && sub_key == "den")
            // Set fps.den
            info.fps.den = change["value"].asInt();
        else if (root_key == "sample_rate")
            // Set sample rate
            info.sample_rate = change["value"].asInt();
        else if (root_key == "channels")
            // Set channels
            info.channels = change["value"].asInt();
        else if (root_key == "channel_layout")
            // Set channel layout
            info.channel_layout = (ChannelLayout) change["value"].asInt();
        else
            // Error parsing JSON (or missing keys)
            throw InvalidJSONKey("JSON change key is invalid", change.toStyledString());

    } else if (change["type"].asString() == "delete") {

        // DELETE / RESET
        // Reset the following properties (since we can't delete them)
        if (root_key == "color") {
            color = Color();
            color.red = Keyframe(0.0);
            color.green = Keyframe(0.0);
            color.blue = Keyframe(0.0);
        }
        else if (root_key == "viewport_scale")
            viewport_scale = Keyframe(1.0);
        else if (root_key == "viewport_x")
            viewport_x = Keyframe(0.0);
        else if (root_key == "viewport_y")
            viewport_y = Keyframe(0.0);
        else
            // Error parsing JSON (or missing keys)
            throw InvalidJSONKey("JSON change key is invalid", change.toStyledString());

    }

}
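
/* Example change (illustrative): {"type": "update", "key": ["height"], "value": 720}
 * updates info.height and clears the entire final cache. */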

// Clear all caches
void Timeline::ClearAllCache() {

    // Get lock (prevent getting frames while this happens)
    const GenericScopedLock<CriticalSection> lock(getFrameCriticalSection);

    // Clear primary cache
    final_cache->Clear();

    // Loop through all clips
    list<Clip*>::iterator clip_itr;
    for (clip_itr=clips.begin(); clip_itr != clips.end(); ++clip_itr)
    {
        // Get clip object from the iterator
        Clip *clip = (*clip_itr);

        // Clear cache on clip
        clip->Reader()->GetCache()->Clear();

        // Clear nested Reader (if any)
        if (clip->Reader()->Name() == "FrameMapper") {
            FrameMapper* nested_reader = (FrameMapper*) clip->Reader();
            if (nested_reader->Reader() && nested_reader->Reader()->GetCache())
                nested_reader->Reader()->GetCache()->Clear();
        }

    }
}
Definition: Clip.h:243
bool Waveform()
Waveform property.
Definition: Clip.h:215
void SetMaxSize(int width, int height)
Set Max Image Size (used for performance optimization)
Definition: ClipBase.h:97
ScaleType scale
The scale determines how a clip should be resized to fit it&#39;s parent.
Definition: Clip.h:152
int height
The height of the video (in pixels)
Definition: ReaderBase.h:66
Align clip to the bottom center of its parent.
Definition: Enums.h:44
Exception for files that can not be found or opened.
Definition: Exceptions.h:132
string Id()
Get basic properties.
Definition: ClipBase.h:82
Keyframe channel_filter
Audio channel filter and mappings.
Definition: Clip.h:255
void ClearAllCache()
Clear all cache for this timeline instance, and all clips, mappers, and readers under it...
Definition: Timeline.cpp:1377
float Position()
Get position on timeline (in seconds)
Definition: ClipBase.h:83
static CrashHandler * Instance()
void ApplyMapperToClips()
Apply the timeline&#39;s framerate and samplerate to all clips.
Definition: Timeline.cpp:145
void Reader(ReaderBase *new_reader)
Set the current reader.
Definition: Clip.cpp:188
list< EffectBase * > Effects()
Return the list of effects on the timeline.
Definition: Clip.h:178
void AppendDebugMethod(string method_name, string arg1_name, float arg1_value, string arg2_name, float arg2_value, string arg3_name, float arg3_value, string arg4_name, float arg4_value, string arg5_name, float arg5_value, string arg6_name, float arg6_value)
Append debug information.
Definition: ZmqLogger.cpp:162
FrameDisplayType display
The format to display the frame number (if any)
Definition: Clip.h:154
This class represents a fraction.
Definition: Fraction.h:42
All cache managers in libopenshot are based on this CacheBase class.
Definition: CacheBase.h:45
Keyframe channel_mapping
A number representing an audio channel to output (only works when filtering a channel) ...
Definition: Clip.h:256
virtual void Add(std::shared_ptr< Frame > frame)=0
Add a Frame to the cache.
ChannelLayout
This enumeration determines the audio channel layout (such as stereo, mono, 5 point surround...
Align clip to the left of its parent (middle aligned)
Definition: Enums.h:40
void AddClip(Clip *clip)
Add an openshot::Clip to the timeline.
Definition: Timeline.cpp:71
virtual Json::Value JsonValue()=0
Generate Json::JsonValue for this object.
Definition: ReaderBase.cpp:106
virtual void SetJsonValue(Json::Value root)=0
Load Json::JsonValue into this object.
Definition: EffectBase.cpp:109
void Close()
Close the timeline reader (and any resources it was consuming)
Definition: Timeline.cpp:602
Keyframe rotation
Curve representing the rotation (0 to 360)
Definition: Clip.h:226
virtual void SetJsonValue(Json::Value root)=0
Load Json::JsonValue into this object.
Definition: ReaderBase.cpp:155
void SetTimelineFrameOffset(long int offset)
std::shared_ptr< Frame > GetFrame(long int requested_frame)
Get an openshot::Frame object for a specific frame number of this timeline.
Definition: Clip.cpp:258
Scale the clip until both height and width fill the canvas (distort to fit)
Definition: Enums.h:53
Display the clip&#39;s internal frame number.
Definition: Enums.h:68
vector< Point > Points
Vector of all Points.
Definition: KeyFrame.h:92
ReaderInfo info
Information about the current media file.
Definition: ReaderBase.h:111
Keyframe shear_y
Curve representing Y shear angle in degrees (-45.0=down, 45.0=up)
Definition: Clip.h:244
Fraction fps
Frames per second, as a fraction (i.e. 24/1 = 24 fps)
Definition: ReaderBase.h:69
Fraction video_timebase
The video timebase determines how long each frame stays on the screen.
Definition: ReaderBase.h:76
Exception for frames that are out of bounds.
Definition: Exceptions.h:202
This class creates a mapping between 2 different frame rates, applying a specific pull-down technique...
Definition: FrameMapper.h:139
void Open()
Open the internal reader.
Definition: Clip.cpp:205
This class represents a color (used on the timeline and clips)
Definition: Color.h:42
static ZmqLogger * Instance()
Create or get an instance of this logger singleton (invoke the class with this method) ...
Definition: ZmqLogger.cpp:38
int GetInt(long int index)
Get the rounded INT value at a specific index.
Definition: KeyFrame.cpp:248
Align clip to the center of its parent (middle aligned)
Definition: Enums.h:41
void Open()
Open the reader (and start consuming resources)
Definition: Timeline.cpp:625
void ApplyJsonDiff(string value)
Apply a special formatted JSON object, which represents a change to the timeline (add, update, delete) This is primarily designed to keep the timeline (and its child objects... such as clips and effects) in sync with another application... such as OpenShot Video Editor (http://www.openshot.org).
Definition: Timeline.cpp:1002
Display both the clip&#39;s and timeline&#39;s frame number.
Definition: Enums.h:70
This namespace is the default namespace for all code in the openshot library.
Do not apply pull-down techniques, just repeat or skip entire frames.
Definition: FrameMapper.h:64
virtual void Clear()=0
Clear the cache of all frames.
void RemoveClip(Clip *clip)
Remove an openshot::Clip from the timeline.
Definition: Timeline.cpp:102
void RemoveEffect(EffectBase *effect)
Remove an effect from the timeline.
Definition: Timeline.cpp:96
Exception for invalid JSON.
Definition: Exceptions.h:152
Keyframe alpha
Curve representing the alpha (1 to 0)
Definition: Clip.h:225
virtual CacheBase * GetCache()=0
Get the cache object used by this reader (note: not all readers use cache)
Keyframe viewport_x
Curve representing the x coordinate for the viewport.
Definition: Timeline.h:250
void SetJsonValue(Json::Value root)
Load Json::JsonValue into this object.
Definition: Color.cpp:129
Keyframe scale_x
Curve representing the horizontal scaling in percent (0 to 1)
Definition: Clip.h:219
string GetColorHex(long int frame_number)
Get the HEX value of a color at a specific frame.
Definition: Color.cpp:64
Color color
Background color of timeline canvas.
Definition: Timeline.h:254
virtual void Remove(long int frame_number)=0
Remove a specific frame.
Timeline(int width, int height, Fraction fps, int sample_rate, int channels, ChannelLayout channel_layout)
Default Constructor for the timeline (which sets the canvas width and height and FPS) ...
Definition: Timeline.cpp:33
This class returns a listing of all effects supported by libopenshot.
Definition: EffectInfo.h:45
Align clip to the top center of its parent.
Definition: Enums.h:38
void SetJson(string value)
Load JSON string into this object.
Definition: Timeline.cpp:910
int den
Denominator for the fraction.
Definition: Fraction.h:45
int channels
The number of audio channels used in the audio stream.
Definition: ReaderBase.h:82
A Keyframe is a collection of Point instances, which is used to vary a number or property over time...
Definition: KeyFrame.h:64
Scale the clip until either height or width fills the canvas (with no cropping)
Definition: Enums.h:52
long int video_length
The number of frames in the video stream.
Definition: ReaderBase.h:74
void AddEffect(EffectBase *effect)
Add an effect to the timeline.
Definition: Timeline.cpp:86
int max_width
The maximum image width needed by this clip (used for optimizations)
Definition: ReaderBase.h:102
int GetSamplesPerFrame(Fraction fps, int sample_rate, int channels)
Calculate the # of samples per video frame (for the current frame number)
Definition: Frame.cpp:505
Json::Value JsonValue()
Generate Json::JsonValue for this object.
Definition: Clip.cpp:730
float Duration()
Get the length of this clip (in seconds)
Definition: ClipBase.h:87
This class is a memory-based cache manager for Frame objects.
Definition: CacheMemory.h:48
float Start()
Get start position (in seconds) of clip (trim start of video)
Definition: ClipBase.h:85
std::shared_ptr< Frame > GetFrame(long int requested_frame)
Definition: Timeline.cpp:637
double ToDouble()
Return this fraction as a double (i.e. 1/2 = 0.5)
Definition: Fraction.cpp:46
int sample_rate
The number of audio samples per second (44100 is a common sample rate)
Definition: ReaderBase.h:81
Exception when too many seek attempts happen.
Definition: Exceptions.h:254