import React from "react";
import {
AbsoluteFill,
interpolate,
Sequence,
spring,
useCurrentFrame,
useVideoConfig,
} from "remotion";
import {
characters,
type SpeechEvent,
type StillEvent,
type TimelineEvent,
} from "./data/pizza-oven-project-01/script";
import {
audioFileForSpeech,
hasAudioForSpeech,
pizzaOvenProject01Scenario,
} from "./data/pizza-oven-project-01/timing";
import {roundedFontFamily} from "./fonts";
import {
activeVQChronologicalScenarioSegmentForFrame,
scheduleVQChronologicalScenario,
VQSpeechOverlay,
VQStageCornerStandee,
VQStillBackground,
VQWarmGradientBackground,
type VQStageCornerStandeeLayouts,
vqDefaultStageCornerStandeeLayouts,
} from "./lib/VQRemotionLib";
import {getMouthForSpeechFrame} from "./lipsync/manifest";
// Shortcut to the Sayo character's avatar assets (declared in the script data).
const sayoAvatar = characters.sayo.avatar;
// Instantiation expression (TS 4.7+): pin the generic speech overlay
// component to this project's SpeechEvent type once, instead of at each use.
const PizzaOvenSpeechOverlay = VQSpeechOverlay<SpeechEvent>;
// Standee placement for this project: start from the library defaults and
// override only the stage layout (larger frame, anchored toward the
// lower-right; negative `bottom` lets the artwork bleed past the canvas edge).
// `as const satisfies` validates the shape against VQStageCornerStandeeLayouts
// while keeping the narrow literal types of each override.
const pizzaOvenStandeeLayouts = {
...vqDefaultStageCornerStandeeLayouts,
stage: {
...vqDefaultStageCornerStandeeLayouts.stage,
frameWidth: 520,
frameHeight: 720,
right: 330,
bottom: -82,
},
} as const satisfies VQStageCornerStandeeLayouts;
// Shared options for interpolate(): clamp on both ends so the output never
// extrapolates beyond the given output range when the input overshoots.
const clampInterpolation = {
extrapolateLeft: "clamp",
extrapolateRight: "clamp",
} as const;
/**
 * Renders the Sayo character standee, either full-size on stage or tucked
 * into a corner, with a spring-driven slide-up entrance and a lip-sync mouth
 * shape while a speech event is active.
 */
const SayoStandee: React.FC<
  Readonly<{
    mode: "stage" | "corner";
    frame: number;
    fps: number;
    activeSpeech?: SpeechEvent;
    speakingLocalFrame: number;
  }>
> = ({mode, frame, fps, activeSpeech, speakingLocalFrame}) => {
  // One-shot entrance spring driven by the global frame counter.
  const entranceProgress = spring({
    frame,
    fps,
    config: {damping: 18, mass: 0.6},
  });
  // Slide up from 32px below the resting position as the spring settles.
  const slideUpOffset = interpolate(
    entranceProgress,
    [0, 1],
    [32, 0],
    clampInterpolation
  );
  // Mouth stays at "rest" unless we are inside a speech event.
  const mouthShape =
    activeSpeech === undefined
      ? "rest"
      : getMouthForSpeechFrame(activeSpeech.id, speakingLocalFrame, fps);
  return React.createElement(VQStageCornerStandee, {
    mode,
    imagePath: sayoAvatar.imagePath,
    mouthImageDir: sayoAvatar.mouthImageDir,
    mouth: mouthShape,
    translateY: slideUpOffset,
    layouts: pizzaOvenStandeeLayouts,
    zIndex: 2,
  });
};
const TimelineOverlay: React.FC<Readonly<{event: TimelineEvent}>> = ({
event,
}) => {
if (event.type === "still") {
return null;
}
const character = characters[event.character];
return React.createElement(PizzaOvenSpeechOverlay, {
speech: event,
speakerName: character.displayName,
accentColor: character.avatar.accentColor,
hasAudio: hasAudioForSpeech,
getAudioPath: audioFileForSpeech,
subtitleOptions: {
fontFamily: roundedFontFamily,
fontSize: 40,
lineHeight: 1.35,
backgroundColor: "rgba(255, 255, 255, 0.9)",
},
containerStyle: {zIndex: 3},
});
};
export const PizzaOvenProject01: React.FC = () => {
const frame = useCurrentFrame();
const {fps} = useVideoConfig();
const scheduledEvents = scheduleVQChronologicalScenario(
pizzaOvenProject01Scenario,
fps
);
const activeSegment = activeVQChronologicalScenarioSegmentForFrame(
scheduledEvents,
frame
);
const isInsideActiveSegment = activeSegment
? frame < activeSegment.from + activeSegment.durationInFrames
: false;
const activeSpeech =
activeSegment && isInsideActiveSegment && activeSegment.event.type === "say"
? activeSegment.event
: undefined;
const activeStill = scheduledEvents.reduce<StillEvent | undefined>(
(currentStill, scheduledEvent) =>
scheduledEvent.from <= frame && scheduledEvent.event.type === "still"
? scheduledEvent.event
: currentStill,
undefined
);
const speakingLocalFrame =
activeSegment && activeSpeech ? frame - activeSegment.from : 0;
const sequences = scheduledEvents.map((scheduledEvent) =>
React.createElement(
Sequence,
{
key: scheduledEvent.event.id,
from: scheduledEvent.from,
durationInFrames: scheduledEvent.durationInFrames,
premountFor: Math.min(fps, scheduledEvent.from),
},
React.createElement(TimelineOverlay, {event: scheduledEvent.event})
)
);
return React.createElement(
AbsoluteFill,
{
style: {
display: "flex",
flexDirection: "column",
alignItems: "center",
},
},
React.createElement(VQWarmGradientBackground, null),
React.createElement(VQStillBackground, {still: activeStill}),
React.createElement(SayoStandee, {
mode: activeStill ? "corner" : "stage",
frame,
fps,
activeSpeech,
speakingLocalFrame,
}),
sequences
);
};