28 #include "../include/Timeline.h" 34 is_open(false), auto_map_clips(true)
76 apply_mapper_to_clip(clip);
79 clips.push_back(clip);
89 effects.push_back(effect);
98 effects.remove(effect);
108 void Timeline::apply_mapper_to_clip(
Clip* clip)
115 if (clip->
Reader()->Name() ==
"FrameMapper")
134 if (clip_offset != 0)
141 clip->
Reader(clip_reader);
151 list<Clip*>::iterator clip_itr;
152 for (clip_itr=clips.begin(); clip_itr != clips.end(); ++clip_itr)
155 Clip *clip = (*clip_itr);
158 apply_mapper_to_clip(clip);
163 double Timeline::calculate_time(
long int number,
Fraction rate)
166 double raw_fps = rate.
ToFloat();
169 return double(number - 1) / raw_fps;
173 std::shared_ptr<Frame> Timeline::apply_effects(std::shared_ptr<Frame> frame,
long int timeline_frame_number,
int layer)
176 ZmqLogger::Instance()->
AppendDebugMethod(
"Timeline::apply_effects",
"frame->number", frame->number,
"timeline_frame_number", timeline_frame_number,
"layer", layer,
"", -1,
"", -1,
"", -1);
179 list<EffectBase*>::iterator effect_itr;
180 for (effect_itr=effects.begin(); effect_itr != effects.end(); ++effect_itr)
189 bool does_effect_intersect = (effect_start_position <= timeline_frame_number && effect_end_position >= timeline_frame_number && effect->
Layer() == layer);
192 ZmqLogger::Instance()->
AppendDebugMethod(
"Timeline::apply_effects (Does effect intersect)",
"effect->Position()", effect->
Position(),
"does_effect_intersect", does_effect_intersect,
"timeline_frame_number", timeline_frame_number,
"layer", layer,
"", -1,
"", -1);
195 if (does_effect_intersect)
199 long effect_frame_number = timeline_frame_number - effect_start_position + effect_start_frame;
202 ZmqLogger::Instance()->
AppendDebugMethod(
"Timeline::apply_effects (Process Effect)",
"effect_frame_number", effect_frame_number,
"does_effect_intersect", does_effect_intersect,
"", -1,
"", -1,
"", -1,
"", -1);
205 frame = effect->
GetFrame(frame, effect_frame_number);
215 std::shared_ptr<Frame> Timeline::GetOrCreateFrame(
Clip* clip,
long int number)
217 std::shared_ptr<Frame> new_frame;
224 ZmqLogger::Instance()->
AppendDebugMethod(
"Timeline::GetOrCreateFrame (from reader)",
"number", number,
"samples_in_frame", samples_in_frame,
"", -1,
"", -1,
"", -1,
"", -1);
230 new_frame = std::shared_ptr<Frame>(clip->
GetFrame(number));
244 ZmqLogger::Instance()->
AppendDebugMethod(
"Timeline::GetOrCreateFrame (create blank)",
"number", number,
"samples_in_frame", samples_in_frame,
"", -1,
"", -1,
"", -1,
"", -1);
254 void Timeline::add_layer(std::shared_ptr<Frame> new_frame,
Clip* source_clip,
long int clip_frame_number,
long int timeline_frame_number,
bool is_top_clip)
257 std::shared_ptr<Frame> source_frame = GetOrCreateFrame(source_clip, clip_frame_number);
264 ZmqLogger::Instance()->
AppendDebugMethod(
"Timeline::add_layer",
"new_frame->number", new_frame->number,
"clip_frame_number", clip_frame_number,
"timeline_frame_number", timeline_frame_number,
"", -1,
"", -1,
"", -1);
270 ZmqLogger::Instance()->
AppendDebugMethod(
"Timeline::add_layer (Generate Waveform Image)",
"source_frame->number", source_frame->number,
"source_clip->Waveform()", source_clip->
Waveform(),
"clip_frame_number", clip_frame_number,
"", -1,
"", -1,
"", -1);
279 std::shared_ptr<QImage> source_image = source_frame->GetWaveform(
max_width,
max_height, red, green, blue, alpha);
280 source_frame->AddImage(std::shared_ptr<QImage>(source_image));
285 if (is_top_clip && source_frame)
286 source_frame = apply_effects(source_frame, timeline_frame_number, source_clip->
Layer());
289 std::shared_ptr<QImage> source_image;
292 if (source_clip->
Reader()->info.has_audio) {
295 ZmqLogger::Instance()->
AppendDebugMethod(
"Timeline::add_layer (Copy Audio)",
"source_clip->Reader()->info.has_audio", source_clip->
Reader()->info.has_audio,
"source_frame->GetAudioChannelsCount()", source_frame->GetAudioChannelsCount(),
"info.channels",
info.
channels,
"clip_frame_number", clip_frame_number,
"timeline_frame_number", timeline_frame_number,
"", -1);
297 if (source_frame->GetAudioChannelsCount() ==
info.
channels)
298 for (
int channel = 0; channel < source_frame->GetAudioChannelsCount(); channel++)
300 float initial_volume = 1.0f;
301 float previous_volume = source_clip->
volume.
GetValue(clip_frame_number - 1);
307 if (channel_filter != -1 && channel_filter != channel)
311 if (channel_mapping == -1)
312 channel_mapping = channel;
315 if (isEqual(previous_volume, volume))
316 initial_volume = volume;
319 if (!isEqual(previous_volume, volume))
320 source_frame->ApplyGainRamp(channel_mapping, 0, source_frame->GetAudioSamplesCount(), previous_volume, volume);
326 if (new_frame->GetAudioSamplesCount() != source_frame->GetAudioSamplesCount())
332 new_frame->AddAudio(
false, channel_mapping, 0, source_frame->GetAudioSamples(channel), source_frame->GetAudioSamplesCount(), initial_volume);
337 ZmqLogger::Instance()->
AppendDebugMethod(
"Timeline::add_layer (No Audio Copied - Wrong # of Channels)",
"source_clip->Reader()->info.has_audio", source_clip->
Reader()->info.has_audio,
"source_frame->GetAudioChannelsCount()", source_frame->GetAudioChannelsCount(),
"info.channels",
info.
channels,
"clip_frame_number", clip_frame_number,
"timeline_frame_number", timeline_frame_number,
"", -1);
342 if (!source_clip->
Waveform() && !source_clip->
Reader()->info.has_video)
347 ZmqLogger::Instance()->
AppendDebugMethod(
"Timeline::add_layer (Get Source Image)",
"source_frame->number", source_frame->number,
"source_clip->Waveform()", source_clip->
Waveform(),
"clip_frame_number", clip_frame_number,
"", -1,
"", -1,
"", -1);
350 source_image = source_frame->GetImage();
353 int source_width = source_image->width();
354 int source_height = source_image->height();
359 float alpha = source_clip->
alpha.
GetValue(clip_frame_number);
362 unsigned char *pixels = (
unsigned char *) source_image->bits();
365 for (
int pixel = 0, byte_index=0; pixel < source_image->width() * source_image->height(); pixel++, byte_index+=4)
368 int A = pixels[byte_index + 3];
371 pixels[byte_index + 3] *= alpha;
375 ZmqLogger::Instance()->
AppendDebugMethod(
"Timeline::add_layer (Set Alpha & Opacity)",
"alpha", alpha,
"source_frame->number", source_frame->number,
"clip_frame_number", clip_frame_number,
"", -1,
"", -1,
"", -1);
379 switch (source_clip->
scale)
383 source_image = std::shared_ptr<QImage>(
new QImage(source_image->scaled(
max_width,
max_height, Qt::KeepAspectRatio, Qt::SmoothTransformation)));
384 source_width = source_image->width();
385 source_height = source_image->height();
388 ZmqLogger::Instance()->
AppendDebugMethod(
"Timeline::add_layer (Scale: SCALE_FIT)",
"source_frame->number", source_frame->number,
"source_width", source_width,
"source_height", source_height,
"", -1,
"", -1,
"", -1);
393 source_image = std::shared_ptr<QImage>(
new QImage(source_image->scaled(
max_width,
max_height, Qt::IgnoreAspectRatio, Qt::SmoothTransformation)));
394 source_width = source_image->width();
395 source_height = source_image->height();
398 ZmqLogger::Instance()->
AppendDebugMethod(
"Timeline::add_layer (Scale: SCALE_STRETCH)",
"source_frame->number", source_frame->number,
"source_width", source_width,
"source_height", source_height,
"", -1,
"", -1,
"", -1);
402 QSize width_size(
max_width, round(
max_width / (
float(source_width) /
float(source_height))));
403 QSize height_size(round(
max_height / (
float(source_height) /
float(source_width))),
max_height);
407 source_image = std::shared_ptr<QImage>(
new QImage(source_image->scaled(width_size.width(), width_size.height(), Qt::KeepAspectRatio, Qt::SmoothTransformation)));
409 source_image = std::shared_ptr<QImage>(
new QImage(source_image->scaled(height_size.width(), height_size.height(), Qt::KeepAspectRatio, Qt::SmoothTransformation)));
410 source_width = source_image->width();
411 source_height = source_image->height();
414 ZmqLogger::Instance()->
AppendDebugMethod(
"Timeline::add_layer (Scale: SCALE_CROP)",
"source_frame->number", source_frame->number,
"source_width", source_width,
"source_height", source_height,
"", -1,
"", -1,
"", -1);
425 float scaled_source_width = source_width * sx;
426 float scaled_source_height = source_height * sy;
431 x = (
max_width - scaled_source_width) / 2.0;
437 y = (
max_height - scaled_source_height) / 2.0;
440 x = (
max_width - scaled_source_width) / 2.0;
441 y = (
max_height - scaled_source_height) / 2.0;
445 y = (
max_height - scaled_source_height) / 2.0;
451 x = (
max_width - scaled_source_width) / 2.0;
461 ZmqLogger::Instance()->
AppendDebugMethod(
"Timeline::add_layer (Gravity)",
"source_frame->number", source_frame->number,
"source_clip->gravity", source_clip->
gravity,
"info.width",
info.
width,
"source_width", source_width,
"info.height",
info.
height,
"source_height", source_height);
474 bool transformed =
false;
475 QTransform transform;
478 ZmqLogger::Instance()->
AppendDebugMethod(
"Timeline::add_layer (Build QTransform - if needed)",
"source_frame->number", source_frame->number,
"x", x,
"y", y,
"r", r,
"sx", sx,
"sy", sy);
480 if (!isEqual(x, 0) || !isEqual(y, 0)) {
482 transform.translate(x, y);
486 if (!isEqual(sx, 0) || !isEqual(sy, 0)) {
488 transform.scale(sx, sy);
492 if (!isEqual(shear_x, 0) || !isEqual(shear_y, 0)) {
494 transform.shear(shear_x, shear_y);
498 if (!isEqual(r, 0)) {
500 float origin_x = x + ((source_width * sx) / 2.0);
501 float origin_y = y + ((source_height * sy) / 2.0);
502 transform.translate(origin_x, origin_y);
504 transform.translate(-origin_x,-origin_y);
509 ZmqLogger::Instance()->
AppendDebugMethod(
"Timeline::add_layer (Transform: Composite Image Layer: Prepare)",
"source_frame->number", source_frame->number,
"offset_x", offset_x,
"offset_y", offset_y,
"new_frame->GetImage()->width()", new_frame->GetImage()->width(),
"transformed", transformed,
"", -1);
512 std::shared_ptr<QImage> new_image = new_frame->GetImage();
515 QPainter painter(new_image.get());
516 painter.setRenderHints(QPainter::Antialiasing | QPainter::SmoothPixmapTransform | QPainter::TextAntialiasing,
true);
520 painter.setTransform(transform);
523 painter.setCompositionMode(QPainter::CompositionMode_SourceOver);
524 painter.drawImage(0, 0, *source_image);
528 stringstream frame_number_str;
532 frame_number_str << clip_frame_number;
536 frame_number_str << timeline_frame_number;
540 frame_number_str << timeline_frame_number <<
" (" << clip_frame_number <<
")";
545 painter.setPen(QColor(
"#ffffff"));
546 painter.drawText(20, 20, QString(frame_number_str.str().c_str()));
552 ZmqLogger::Instance()->
AppendDebugMethod(
"Timeline::add_layer (Transform: Composite Image Layer: Completed)",
"source_frame->number", source_frame->number,
"offset_x", offset_x,
"offset_y", offset_y,
"new_frame->GetImage()->width()", new_frame->GetImage()->width(),
"transformed", transformed,
"", -1);
556 void Timeline::update_open_clips(
Clip *clip,
bool does_clip_intersect)
558 ZmqLogger::Instance()->
AppendDebugMethod(
"Timeline::update_open_clips (before)",
"does_clip_intersect", does_clip_intersect,
"closing_clips.size()", closing_clips.size(),
"open_clips.size()", open_clips.size(),
"", -1,
"", -1,
"", -1);
561 bool clip_found = open_clips.count(clip);
563 if (clip_found && !does_clip_intersect)
566 open_clips.erase(clip);
571 else if (!clip_found && does_clip_intersect)
574 open_clips[clip] = clip;
581 ZmqLogger::Instance()->
AppendDebugMethod(
"Timeline::update_open_clips (after)",
"does_clip_intersect", does_clip_intersect,
"clip_found", clip_found,
"closing_clips.size()", closing_clips.size(),
"open_clips.size()", open_clips.size(),
"", -1,
"", -1);
585 void Timeline::sort_clips()
588 ZmqLogger::Instance()->
AppendDebugMethod(
"Timeline::SortClips",
"clips.size()", clips.size(),
"", -1,
"", -1,
"", -1,
"", -1,
"", -1);
595 void Timeline::sort_effects()
604 ZmqLogger::Instance()->
AppendDebugMethod(
"Timeline::Close",
"", -1,
"", -1,
"", -1,
"", -1,
"", -1,
"", -1);
607 list<Clip*>::iterator clip_itr;
608 for (clip_itr=clips.begin(); clip_itr != clips.end(); ++clip_itr)
611 Clip *clip = (*clip_itr);
614 update_open_clips(clip,
false);
621 final_cache->
Clear();
631 bool Timeline::isEqual(
double a,
double b)
633 return fabs(a - b) < 0.000001;
640 if (requested_frame < 1)
644 std::shared_ptr<Frame> frame = final_cache->
GetFrame(requested_frame);
647 ZmqLogger::Instance()->
AppendDebugMethod(
"Timeline::GetFrame (Cached frame found)",
"requested_frame", requested_frame,
"", -1,
"", -1,
"", -1,
"", -1,
"", -1);
659 throw ReaderClosed(
"The Timeline is closed. Call Open() before calling this method.",
"");
662 frame = final_cache->
GetFrame(requested_frame);
665 ZmqLogger::Instance()->
AppendDebugMethod(
"Timeline::GetFrame (Cached frame found on 2nd look)",
"requested_frame", requested_frame,
"", -1,
"", -1,
"", -1,
"", -1,
"", -1);
676 vector<Clip*> nearby_clips = find_intersecting_clips(requested_frame, minimum_frames,
true);
680 omp_set_nested(
true);
683 ZmqLogger::Instance()->
AppendDebugMethod(
"Timeline::GetFrame",
"requested_frame", requested_frame,
"minimum_frames", minimum_frames,
"OPEN_MP_NUM_PROCESSORS",
OPEN_MP_NUM_PROCESSORS,
"", -1,
"", -1,
"", -1);
687 for (
long int frame_number = requested_frame; frame_number < requested_frame + minimum_frames; frame_number++)
690 for (
int clip_index = 0; clip_index < nearby_clips.size(); clip_index++)
693 Clip *clip = nearby_clips[clip_index];
697 bool does_clip_intersect = (clip_start_position <= frame_number && clip_end_position >= frame_number);
698 if (does_clip_intersect)
702 long clip_frame_number = frame_number - clip_start_position + clip_start_frame;
712 #pragma omp for ordered firstprivate(nearby_clips, requested_frame, minimum_frames) 713 for (
long int frame_number = requested_frame; frame_number < requested_frame + minimum_frames; frame_number++)
716 ZmqLogger::Instance()->
AppendDebugMethod(
"Timeline::GetFrame (processing frame)",
"frame_number", frame_number,
"omp_get_thread_num()", omp_get_thread_num(),
"", -1,
"", -1,
"", -1,
"", -1);
723 new_frame->AddAudioSilence(samples_in_frame);
728 ZmqLogger::Instance()->
AppendDebugMethod(
"Timeline::GetFrame (Adding solid color)",
"frame_number", frame_number,
"info.width",
info.
width,
"info.height",
info.
height,
"", -1,
"", -1,
"", -1);
736 ZmqLogger::Instance()->
AppendDebugMethod(
"Timeline::GetFrame (Loop through clips)",
"frame_number", frame_number,
"clips.size()", clips.size(),
"nearby_clips.size()", nearby_clips.size(),
"", -1,
"", -1,
"", -1);
739 for (
int clip_index = 0; clip_index < nearby_clips.size(); clip_index++)
742 Clip *clip = nearby_clips[clip_index];
746 bool does_clip_intersect = (clip_start_position <= frame_number && clip_end_position >= frame_number);
749 ZmqLogger::Instance()->
AppendDebugMethod(
"Timeline::GetFrame (Does clip intersect)",
"frame_number", frame_number,
"clip->Position()", clip->
Position(),
"clip->Duration()", clip->
Duration(),
"does_clip_intersect", does_clip_intersect,
"", -1,
"", -1);
752 if (does_clip_intersect)
755 bool is_top_clip =
true;
756 for (
int top_clip_index = 0; top_clip_index < nearby_clips.size(); top_clip_index++)
758 Clip *nearby_clip = nearby_clips[top_clip_index];
762 if (clip->
Id() != nearby_clip->
Id() && clip->
Layer() == nearby_clip->
Layer() &&
763 nearby_clip_start_position <= frame_number && nearby_clip_end_position >= frame_number &&
764 nearby_clip_start_position > clip_start_position) {
772 long clip_frame_number = frame_number - clip_start_position + clip_start_frame;
775 ZmqLogger::Instance()->
AppendDebugMethod(
"Timeline::GetFrame (Calculate clip's frame #)",
"clip->Position()", clip->
Position(),
"clip->Start()", clip->
Start(),
"info.fps.ToFloat()",
info.
fps.
ToFloat(),
"clip_frame_number", clip_frame_number,
"", -1,
"", -1);
778 add_layer(new_frame, clip, clip_frame_number, frame_number, is_top_clip);
782 ZmqLogger::Instance()->
AppendDebugMethod(
"Timeline::GetFrame (clip does not intersect)",
"frame_number", frame_number,
"does_clip_intersect", does_clip_intersect,
"", -1,
"", -1,
"", -1,
"", -1);
787 ZmqLogger::Instance()->
AppendDebugMethod(
"Timeline::GetFrame (Add frame to cache)",
"frame_number", frame_number,
"info.width",
info.
width,
"info.height",
info.
height,
"", -1,
"", -1,
"", -1);
790 new_frame->SetFrameNumber(frame_number);
793 final_cache->
Add(new_frame);
799 ZmqLogger::Instance()->
AppendDebugMethod(
"Timeline::GetFrame (end parallel region)",
"requested_frame", requested_frame,
"omp_get_thread_num()", omp_get_thread_num(),
"", -1,
"", -1,
"", -1,
"", -1);
802 return final_cache->
GetFrame(requested_frame);
808 vector<Clip*> Timeline::find_intersecting_clips(
long int requested_frame,
int number_of_frames,
bool include)
811 vector<Clip*> matching_clips;
814 float min_requested_frame = requested_frame;
815 float max_requested_frame = requested_frame + (number_of_frames - 1);
821 list<Clip*>::iterator clip_itr;
822 for (clip_itr=clips.begin(); clip_itr != clips.end(); ++clip_itr)
825 Clip *clip = (*clip_itr);
831 bool does_clip_intersect =
832 (clip_start_position <= min_requested_frame || clip_start_position <= max_requested_frame) &&
833 (clip_end_position >= min_requested_frame || clip_end_position >= max_requested_frame);
836 ZmqLogger::Instance()->
AppendDebugMethod(
"Timeline::find_intersecting_clips (Is clip near or intersecting)",
"requested_frame", requested_frame,
"min_requested_frame", min_requested_frame,
"max_requested_frame", max_requested_frame,
"clip->Position()", clip->
Position(),
"does_clip_intersect", does_clip_intersect,
"", -1);
839 #pragma omp critical (reader_lock) 840 update_open_clips(clip, does_clip_intersect);
843 if (does_clip_intersect && include)
845 matching_clips.push_back(clip);
847 else if (!does_clip_intersect && !include)
849 matching_clips.push_back(clip);
854 return matching_clips;
860 final_cache = new_cache;
875 root[
"type"] =
"Timeline";
882 root[
"clips"] = Json::Value(Json::arrayValue);
885 list<Clip*>::iterator clip_itr;
886 for (clip_itr=clips.begin(); clip_itr != clips.end(); ++clip_itr)
889 Clip *existing_clip = (*clip_itr);
890 root[
"clips"].append(existing_clip->
JsonValue());
894 root[
"effects"] = Json::Value(Json::arrayValue);
897 list<EffectBase*>::iterator effect_itr;
898 for (effect_itr=effects.begin(); effect_itr != effects.end(); ++effect_itr)
902 root[
"effects"].append(existing_effect->
JsonValue());
918 bool success = reader.parse( value, root );
921 throw InvalidJSON(
"JSON could not be parsed (or is invalid)",
"");
931 throw InvalidJSON(
"JSON is invalid (missing keys or invalid data types)",
"");
939 bool was_open = is_open;
945 if (!root[
"clips"].isNull()) {
950 for (
int x = 0; x < root[
"clips"].size(); x++) {
952 Json::Value existing_clip = root[
"clips"][x];
965 if (!root[
"effects"].isNull()) {
970 for (
int x = 0; x < root[
"effects"].size(); x++) {
972 Json::Value existing_effect = root[
"effects"][x];
977 if (!existing_effect[
"type"].isNull()) {
990 if (!root[
"duration"].isNull()) {
1009 Json::Reader reader;
1010 bool success = reader.parse( value, root );
1011 if (!success || !root.isArray())
1013 throw InvalidJSON(
"JSON could not be parsed (or is invalid).",
"");
1018 for (
int x = 0; x < root.size(); x++) {
1020 Json::Value change = root[x];
1021 string root_key = change[
"key"][(uint)0].asString();
1024 if (root_key ==
"clips")
1026 apply_json_to_clips(change);
1028 else if (root_key ==
"effects")
1030 apply_json_to_effects(change);
1034 apply_json_to_timeline(change);
1041 throw InvalidJSON(
"JSON is invalid (missing keys or invalid data types)",
"");
1046 void Timeline::apply_json_to_clips(Json::Value change)
throw(
InvalidJSONKey) {
1049 string change_type = change[
"type"].asString();
1050 string clip_id =
"";
1051 Clip *existing_clip = NULL;
1054 for (
int x = 0; x < change[
"key"].size(); x++) {
1056 Json::Value key_part = change[
"key"][x];
1058 if (key_part.isObject()) {
1060 if (!key_part[
"id"].isNull()) {
1062 clip_id = key_part[
"id"].asString();
1065 list<Clip*>::iterator clip_itr;
1066 for (clip_itr=clips.begin(); clip_itr != clips.end(); ++clip_itr)
1069 Clip *c = (*clip_itr);
1070 if (c->
Id() == clip_id) {
1082 if (existing_clip && change[
"key"].size() == 4 && change[
"key"][2] ==
"effects")
1085 Json::Value key_part = change[
"key"][3];
1087 if (key_part.isObject()) {
1089 if (!key_part[
"id"].isNull())
1092 string effect_id = key_part[
"id"].asString();
1095 list<EffectBase*> effect_list = existing_clip->
Effects();
1096 list<EffectBase*>::iterator effect_itr;
1097 for (effect_itr=effect_list.begin(); effect_itr != effect_list.end(); ++effect_itr)
1101 if (e->
Id() == effect_id) {
1103 apply_json_to_effects(change, e);
1108 final_cache->
Remove(new_starting_frame - 8, new_ending_frame + 8);
1118 if (!change[
"value"].isArray() && !change[
"value"][
"position"].isNull()) {
1119 long int new_starting_frame = (change[
"value"][
"position"].asDouble() *
info.
fps.
ToDouble()) + 1;
1120 long int new_ending_frame = ((change[
"value"][
"position"].asDouble() + change[
"value"][
"end"].asDouble() - change[
"value"][
"start"].asDouble()) *
info.
fps.
ToDouble()) + 1;
1121 final_cache->
Remove(new_starting_frame - 8, new_ending_frame + 8);
1125 if (change_type ==
"insert") {
1132 }
else if (change_type ==
"update") {
1135 if (existing_clip) {
1140 final_cache->
Remove(old_starting_frame - 8, old_ending_frame + 8);
1143 if (existing_clip->
Reader() && existing_clip->
Reader()->GetCache())
1144 existing_clip->
Reader()->GetCache()->Remove(old_starting_frame - 8, old_ending_frame + 8);
1151 if (existing_clip->
Reader()) {
1152 existing_clip->
Reader()->SetMaxSize(0, 0);
1153 if (existing_clip->
Reader()->Name() ==
"FrameMapper") {
1155 if (nested_reader->
Reader())
1161 }
else if (change_type ==
"delete") {
1164 if (existing_clip) {
1169 final_cache->
Remove(old_starting_frame - 8, old_ending_frame + 8);
1180 void Timeline::apply_json_to_effects(Json::Value change)
throw(
InvalidJSONKey) {
1183 string change_type = change[
"type"].asString();
1187 for (
int x = 0; x < change[
"key"].size(); x++) {
1189 Json::Value key_part = change[
"key"][x];
1191 if (key_part.isObject()) {
1193 if (!key_part[
"id"].isNull())
1196 string effect_id = key_part[
"id"].asString();
1199 list<EffectBase*>::iterator effect_itr;
1200 for (effect_itr=effects.begin(); effect_itr != effects.end(); ++effect_itr)
1204 if (e->
Id() == effect_id) {
1205 existing_effect =
e;
1215 if (existing_effect || change_type ==
"insert")
1217 apply_json_to_effects(change, existing_effect);
1224 string change_type = change[
"type"].asString();
1227 if (!change[
"value"].isArray() && !change[
"value"][
"position"].isNull()) {
1228 long int new_starting_frame = (change[
"value"][
"position"].asDouble() *
info.
fps.
ToDouble()) + 1;
1229 long int new_ending_frame = ((change[
"value"][
"position"].asDouble() + change[
"value"][
"end"].asDouble() - change[
"value"][
"start"].asDouble()) *
info.
fps.
ToDouble()) + 1;
1230 final_cache->
Remove(new_starting_frame - 8, new_ending_frame + 8);
1234 if (change_type ==
"insert") {
1237 string effect_type = change[
"value"][
"type"].asString();
1251 }
else if (change_type ==
"update") {
1254 if (existing_effect) {
1259 final_cache->
Remove(old_starting_frame - 8, old_ending_frame + 8);
1265 }
else if (change_type ==
"delete") {
1268 if (existing_effect) {
1273 final_cache->
Remove(old_starting_frame - 8, old_ending_frame + 8);
1283 void Timeline::apply_json_to_timeline(Json::Value change)
throw(
InvalidJSONKey) {
1286 string change_type = change[
"type"].asString();
1287 string root_key = change[
"key"][(uint)0].asString();
1288 string sub_key =
"";
1289 if (change[
"key"].size() >= 2)
1290 sub_key = change[
"key"][(uint)1].asString();
1293 final_cache->
Clear();
1296 if (change_type ==
"insert" || change_type ==
"update") {
1300 if (root_key ==
"color")
1303 else if (root_key ==
"viewport_scale")
1306 else if (root_key ==
"viewport_x")
1309 else if (root_key ==
"viewport_y")
1312 else if (root_key ==
"duration") {
1317 else if (root_key ==
"width")
1320 else if (root_key ==
"height")
1323 else if (root_key ==
"fps" && sub_key ==
"" && change[
"value"].isObject()) {
1325 if (!change[
"value"][
"num"].isNull())
1326 info.
fps.
num = change[
"value"][
"num"].asInt();
1327 if (!change[
"value"][
"den"].isNull())
1328 info.
fps.
den = change[
"value"][
"den"].asInt();
1330 else if (root_key ==
"fps" && sub_key ==
"num")
1333 else if (root_key ==
"fps" && sub_key ==
"den")
1336 else if (root_key ==
"sample_rate")
1339 else if (root_key ==
"channels")
1342 else if (root_key ==
"channel_layout")
1349 throw InvalidJSONKey(
"JSON change key is invalid", change.toStyledString());
1352 }
else if (change[
"type"].asString() ==
"delete") {
1356 if (root_key ==
"color") {
1362 else if (root_key ==
"viewport_scale")
1364 else if (root_key ==
"viewport_x")
1366 else if (root_key ==
"viewport_y")
1370 throw InvalidJSONKey(
"JSON change key is invalid", change.toStyledString());
1383 final_cache->
Clear();
1386 list<Clip*>::iterator clip_itr;
1387 for (clip_itr=clips.begin(); clip_itr != clips.end(); ++clip_itr)
1390 Clip *clip = (*clip_itr);
1393 clip->
Reader()->GetCache()->Clear();
1396 if (clip->
Reader()->Name() ==
"FrameMapper") {
void SetJsonValue(Json::Value root)
Load Json::JsonValue into this object.
int max_height
The maximum image height needed by this clip (used for optimizations)
Display the timeline's frame number.
void Close()
Close the internal reader.
string Json()
Get and Set JSON methods.
Json::Value JsonValue()
Generate Json::JsonValue for this object.
int num
Numerator for the fraction.
Keyframe scale_y
Curve representing the vertical scaling in percent (0 to 1)
ReaderBase * Reader()
Get the current reader.
CriticalSection getFrameCriticalSection
Section lock for multiple threads.
This abstract class is the base class, used by all effects in libopenshot.
EffectBase * CreateEffect(string effect_type)
Align clip to the right of its parent (middle aligned)
Keyframe green
Curve representing the green value (0 - 255)
Keyframe viewport_scale
Curve representing the scale of the viewport (0 to 100)
Align clip to the bottom right of its parent.
void SetCache(CacheBase *new_cache)
Set the cache object to be used by this reader.
virtual std::shared_ptr< Frame > GetFrame(std::shared_ptr< Frame > frame, long int frame_number)=0
This method is required for all derived classes of EffectBase, and returns a modified openshot::Frame...
Json::Value JsonValue()
Generate Json::JsonValue for this object.
ChannelLayout channel_layout
The channel layout (mono, stereo, 5 point surround, etc...)
GravityType gravity
The gravity of a clip determines where it snaps to its parent.
Keyframe alpha
Curve representing the alpha value (0 - 255)
int width
The width of the video (in pixels)
Keyframe volume
Curve representing the volume (0 to 1)
float ToFloat()
Return this fraction as a float (i.e. 1/2 = 0.5)
Keyframe red
Curve representing the red value (0 - 255)
float duration
Length of time (in seconds)
Json::Value JsonValue()
Generate Json::JsonValue for this object.
double GetValue(long int index)
Get the value at a specific index.
Scale the clip until both height and width fill the canvas (cropping the overlap) ...
Keyframe viewport_y
Curve representing the y coordinate for the viewport.
Fraction Reciprocal()
Return the reciprocal as a Fraction.
This abstract class is the base class, used by all readers in libopenshot.
int Layer()
Get layer of clip on timeline (lower number is covered by higher numbers)
#define OPEN_MP_NUM_PROCESSORS
Exception when a reader is closed, and a frame is requested.
bool has_video
Determines if this file has a video stream.
void SetMaxBytesFromInfo(long int number_of_frames, int width, int height, int sample_rate, int channels)
Set maximum bytes to a different amount based on a ReaderInfo struct.
Do not display the frame number.
Color wave_color
Curve representing the color of the audio wave form.
Align clip to the top right of its parent.
virtual Json::Value JsonValue()=0
Generate Json::JsonValue for this object.
Align clip to the bottom left of its parent.
void SetJsonValue(Json::Value root)
Load Json::JsonValue into this object.
virtual std::shared_ptr< Frame > GetFrame(long int frame_number)=0
Get a frame from the cache.
void SetJsonValue(Json::Value root)
Load Json::JsonValue into this object.
Exception for missing JSON Change key.
Keyframe location_x
Curve representing the relative X position in percent based on the gravity (-1 to 1) ...
Keyframe location_y
Curve representing the relative Y position in percent based on the gravity (-1 to 1) ...
void SetMaxSize(int width, int height)
Set Max Image Size (used for performance optimization)
bool has_audio
Determines if this file has an audio stream.
This class represents a clip (used to arrange readers on the timeline)
void ChangeMapping(Fraction target_fps, PulldownType pulldown, int target_sample_rate, int target_channels, ChannelLayout target_channel_layout)
Change frame rate or audio mapping details.
Keyframe blue
Curve representing the blue value (0 - 255)
Keyframe shear_x
Curve representing X shear angle in degrees (-45.0=left, 45.0=right)
bool Waveform()
Waveform property.
void SetMaxSize(int width, int height)
Set Max Image Size (used for performance optimization)
ScaleType scale
The scale determines how a clip should be resized to fit its parent.
int height
The height of the video (in pixels)
Align clip to the bottom center of its parent.
Exception for files that can not be found or opened.
string Id()
Get basic properties.
Keyframe channel_filter
Audio channel filter and mappings.
void ClearAllCache()
Clear all cache for this timeline instance, and all clips, mappers, and readers under it...
float Position()
Get position on timeline (in seconds)
static CrashHandler * Instance()
void ApplyMapperToClips()
Apply the timeline's framerate and samplerate to all clips.
void Reader(ReaderBase *new_reader)
Set the current reader.
list< EffectBase * > Effects()
Return the list of effects on the timeline.
void AppendDebugMethod(string method_name, string arg1_name, float arg1_value, string arg2_name, float arg2_value, string arg3_name, float arg3_value, string arg4_name, float arg4_value, string arg5_name, float arg5_value, string arg6_name, float arg6_value)
Append debug information.
FrameDisplayType display
The format to display the frame number (if any)
This class represents a fraction.
All cache managers in libopenshot are based on this CacheBase class.
Keyframe channel_mapping
A number representing an audio channel to output (only works when filtering a channel) ...
virtual void Add(std::shared_ptr< Frame > frame)=0
Add a Frame to the cache.
ChannelLayout
This enumeration determines the audio channel layout (such as stereo, mono, 5 point surround...
Align clip to the left of its parent (middle aligned)
void AddClip(Clip *clip)
Add an openshot::Clip to the timeline.
virtual Json::Value JsonValue()=0
Generate Json::JsonValue for this object.
virtual void SetJsonValue(Json::Value root)=0
Load Json::JsonValue into this object.
void Close()
Close the timeline reader (and any resources it was consuming)
Keyframe rotation
Curve representing the rotation (0 to 360)
virtual void SetJsonValue(Json::Value root)=0
Load Json::JsonValue into this object.
void SetTimelineFrameOffset(long int offset)
std::shared_ptr< Frame > GetFrame(long int requested_frame)
Get an openshot::Frame object for a specific frame number of this timeline.
Scale the clip until both height and width fill the canvas (distort to fit)
Display the clip's internal frame number.
vector< Point > Points
Vector of all Points.
ReaderInfo info
Information about the current media file.
Keyframe shear_y
Curve representing Y shear angle in degrees (-45.0=down, 45.0=up)
Fraction fps
Frames per second, as a fraction (i.e. 24/1 = 24 fps)
Fraction video_timebase
The video timebase determines how long each frame stays on the screen.
Exception for frames that are out of bounds.
This class creates a mapping between 2 different frame rates, applying a specific pull-down technique...
void Open()
Open the internal reader.
This class represents a color (used on the timeline and clips)
static ZmqLogger * Instance()
Create or get an instance of this logger singleton (invoke the class with this method) ...
int GetInt(long int index)
Get the rounded INT value at a specific index.
Align clip to the center of its parent (middle aligned)
void Open()
Open the reader (and start consuming resources)
void ApplyJsonDiff(string value)
Apply a special formatted JSON object, which represents a change to the timeline (add, update, delete) This is primarily designed to keep the timeline (and its child objects... such as clips and effects) in sync with another application... such as OpenShot Video Editor (http://www.openshot.org).
Display both the clip's and timeline's frame number.
This namespace is the default namespace for all code in the openshot library.
Do not apply pull-down techniques, just repeat or skip entire frames.
virtual void Clear()=0
Clear the cache of all frames.
void RemoveClip(Clip *clip)
Remove an openshot::Clip from the timeline.
void RemoveEffect(EffectBase *effect)
Remove an effect from the timeline.
Exception for invalid JSON.
Keyframe alpha
Curve representing the alpha (1 to 0)
virtual CacheBase * GetCache()=0
Get the cache object used by this reader (note: not all readers use cache)
Keyframe viewport_x
Curve representing the x coordinate for the viewport.
void SetJsonValue(Json::Value root)
Load Json::JsonValue into this object.
Keyframe scale_x
Curve representing the horizontal scaling in percent (0 to 1)
string GetColorHex(long int frame_number)
Get the HEX value of a color at a specific frame.
Color color
Background color of timeline canvas.
virtual void Remove(long int frame_number)=0
Remove a specific frame.
Timeline(int width, int height, Fraction fps, int sample_rate, int channels, ChannelLayout channel_layout)
Default Constructor for the timeline (which sets the canvas width and height and FPS) ...
This class returns a listing of all effects supported by libopenshot.
Align clip to the top center of its parent.
void SetJson(string value)
Load JSON string into this object.
int den
Denominator for the fraction.
int channels
The number of audio channels used in the audio stream.
A Keyframe is a collection of Point instances, which is used to vary a number or property over time...
Scale the clip until either height or width fills the canvas (with no cropping)
long int video_length
The number of frames in the video stream.
void AddEffect(EffectBase *effect)
Add an effect to the timeline.
int max_width
The maximum image width needed by this clip (used for optimizations)
int GetSamplesPerFrame(Fraction fps, int sample_rate, int channels)
Calculate the # of samples per video frame (for the current frame number)
Json::Value JsonValue()
Generate Json::JsonValue for this object.
float Duration()
Get the length of this clip (in seconds)
This class is a memory-based cache manager for Frame objects.
float Start()
Get start position (in seconds) of clip (trim start of video)
std::shared_ptr< Frame > GetFrame(long int requested_frame)
double ToDouble()
Return this fraction as a double (i.e. 1/2 = 0.5)
int sample_rate
The number of audio samples per second (44100 is a common sample rate)
Exception when too many seek attempts happen.