import React from "react";
import {
  AbsoluteFill,
  interpolate,
  Sequence,
  spring,
  useCurrentFrame,
  useVideoConfig,
} from "remotion";
import {
  characters,
  initialVisibleCharacters,
  timeline,
  type CharacterId,
  type TimelineEvent,
} from "../data/yukkuri-composition/script";
import {
  audioFileForSpeech,
  GAP_FRAMES,
  durationForTimelineEvent,
  hasAudioForSpeech,
} from "../data/yukkuri-composition/timing";
import {roundedFontFamily} from "../fonts";
import {
  VQCaptionOverlay,
  VQCharacterStage,
  VQSpeechOverlay,
  VQWarmGradientBackground,
  type VQMouthResolver,
} from "../lib/VQRemotionLib";
import {getMouthForSpeechFrame} from "../lipsync/manifest";

// One script event resolved onto the global frame axis of the composition.
type ScheduledTimelineEvent = Readonly<{
  // The underlying script event ("say" speech or character "show").
  event: TimelineEvent;
  // First global frame of this segment.
  from: number;
  // Length of the segment in frames (gap frames are NOT included here).
  durationInFrames: number;
  // Characters on stage once this event starts; the set only grows over time.
  visibleCharacters: CharacterId[];
  // The character this event belongs to (camera/stage focus target).
  focusedCharacter: CharacterId;
}>;

const scheduleTimeline = (fps: number): ScheduledTimelineEvent[] => {
  let cursor = 0;
  const visibleCharacters = new Set<CharacterId>(initialVisibleCharacters);

  return timeline.map((event, index) => {
    visibleCharacters.add(event.character);

    const durationInFrames = durationForTimelineEvent(event, fps);
    const scheduledEvent = {
      event,
      from: cursor,
      durationInFrames,
      visibleCharacters: Array.from(visibleCharacters),
      focusedCharacter: event.character,
    };

    cursor += durationInFrames;
    if (index < timeline.length - 1) {
      cursor += GAP_FRAMES;
    }

    return scheduledEvent;
  });
};

/**
 * Return the segment the given frame falls in (or has most recently passed):
 * the last scheduled event whose start is at or before `frame`. Falls back
 * to the first segment for frames before the timeline begins.
 */
const activeSegmentForFrame = (
  scheduledEvents: ScheduledTimelineEvent[],
  frame: number
) => {
  let candidate = scheduledEvents[0];

  // Scan from the end: the first segment we meet that has already started
  // is the latest-starting one, i.e. the active segment.
  for (let i = scheduledEvents.length - 1; i >= 0; i--) {
    if (frame >= scheduledEvents[i].from) {
      candidate = scheduledEvents[i];
      break;
    }
  }

  return candidate;
};

// Shared options for interpolate(): clamp on both sides so outputs never
// overshoot their range when the input leaves the [0, 1] domain.
const clampInterpolation = {
  extrapolateLeft: "clamp",
  extrapolateRight: "clamp",
} as const;

// Common subtitle styling passed to both the speech and caption overlays,
// keeping text rendering consistent across all timeline events.
const yukkuriSubtitleOptions = {
  fontFamily: roundedFontFamily,
  fontSize: 36,
  lineHeight: 1.4,
  backgroundColor: "rgba(255, 255, 255, 0.88)",
} as const;

// Adapter from the stage's mouth-resolver contract to the lip-sync manifest
// lookup: picks the mouth shape for the current frame of the active speech.
const resolveMouth: VQMouthResolver<CharacterId> = (params) => {
  const {speechId, speakingLocalFrame, fps} = params;
  return getMouthForSpeechFrame(speechId, speakingLocalFrame, fps);
};

// Headline that fades in and slides down as `progress` goes 0 → 1
// (driven by a spring in the parent composition).
const Title: React.FC<Readonly<{progress: number}>> = ({progress}) => {
  const style: React.CSSProperties = {
    fontFamily: roundedFontFamily,
    fontSize: 54,
    fontWeight: 700,
    color: "#1f2a44",
    letterSpacing: 1,
    textAlign: "center",
    marginTop: 40,
    opacity: interpolate(progress, [0, 1], [0, 1], clampInterpolation),
    transform: `translateY(${interpolate(
      progress,
      [0, 1],
      [-30, 0],
      clampInterpolation
    )}px)`,
    textShadow: "0 6px 18px rgba(31, 42, 68, 0.2)",
  };

  return <div style={style}>ネコミミはなぜかわいい？</div>;
};

// Renders the overlay for one timeline event: a speech bubble (with optional
// audio) for "say" events, otherwise a plain caption card.
const TimelineOverlay: React.FC<Readonly<{event: TimelineEvent}>> = ({event}) => {
  if (event.type !== "say") {
    return (
      <VQCaptionOverlay
        text={event.caption}
        subtitleOptions={yukkuriSubtitleOptions}
      />
    );
  }

  const {displayName, avatar} = characters[event.character];

  return (
    <VQSpeechOverlay
      speech={event}
      speakerName={displayName}
      accentColor={avatar.accentColor}
      hasAudio={hasAudioForSpeech}
      getAudioPath={audioFileForSpeech}
      subtitleOptions={yukkuriSubtitleOptions}
    />
  );
};

// Stable React key per timeline event: speech events carry their own id;
// "show" events fall back to a character+index composite.
const keyForEvent = (event: TimelineEvent, index: number) =>
  event.type === "say" ? event.id : `show-${event.character}-${index}`;

/**
 * Top-level Yukkuri-style dialogue composition: warm gradient backdrop,
 * spring-animated title, a character stage with lip-sync, and one overlay
 * (speech bubble or caption) per scripted timeline event.
 */
export const YukkuriComposition: React.FC = () => {
  const frame = useCurrentFrame();
  const {fps} = useVideoConfig();
  // The schedule is a pure function of fps alone; memoize it so the whole
  // timeline is not recomputed on every rendered frame.
  const scheduledEvents = React.useMemo(() => scheduleTimeline(fps), [fps]);
  const activeSegment = activeSegmentForFrame(scheduledEvents, frame);
  // False while we sit in the GAP_FRAMES pause between two segments.
  const isInsideActiveSegment =
    frame < activeSegment.from + activeSegment.durationInFrames;

  // Title entrance animation, driven from frame 0 of the composition.
  const titleProgress = spring({
    frame,
    fps,
    config: {damping: 18, mass: 0.6},
  });
  // Only "say" events animate the mouth; during gaps nobody is speaking.
  const activeSpeech =
    isInsideActiveSegment && activeSegment.event.type === "say"
      ? activeSegment.event
      : undefined;
  const speakingCharacter = activeSpeech?.character;
  // Frame counter local to the current speech, for lip-sync lookup.
  const speakingLocalFrame = activeSpeech ? frame - activeSegment.from : 0;

  // One <Sequence> per timeline event; premount up to one second's worth of
  // frames (capped at the segment start) — presumably so overlay assets and
  // audio are mounted before the segment becomes visible.
  const sequences = scheduledEvents.map((scheduledEvent, index) => (
    <Sequence
      key={keyForEvent(scheduledEvent.event, index)}
      from={scheduledEvent.from}
      durationInFrames={scheduledEvent.durationInFrames}
      premountFor={Math.min(fps, scheduledEvent.from)}
    >
      <TimelineOverlay event={scheduledEvent.event} />
    </Sequence>
  ));

  return (
    <AbsoluteFill
      style={{
        display: "flex",
        flexDirection: "column",
        alignItems: "center",
      }}
    >
      <VQWarmGradientBackground />
      <Title progress={titleProgress} />
      <VQCharacterStage
        characters={characters}
        visibleCharacters={activeSegment.visibleCharacters}
        focusedCharacter={
          isInsideActiveSegment ? activeSegment.focusedCharacter : undefined
        }
        speakingCharacter={speakingCharacter}
        speakingSpeechId={activeSpeech?.id}
        speakingLocalFrame={speakingLocalFrame}
        frame={frame}
        fps={fps}
        resolveMouth={resolveMouth}
        fontFamily={roundedFontFamily}
      />
      {sequences}
    </AbsoluteFill>
  );
};
