diff --git a/package.json b/package.json
index a6cd20d792..c003065642 100644
--- a/package.json
+++ b/package.json
@@ -13,7 +13,7 @@
"testwebcodecs": "turbo run testwebcodecs --no-update-notifier",
"testlambda": "turbo run testlambda --concurrency=1 --no-update-notifier",
"ci": "turbo run make test --concurrency=1 --no-update-notifier",
- "watch": "turbo watch make --concurrency=2 --experimental-write-cache --ui=tui",
+ "watch": "turbo watch make --concurrency=2 --experimental-write-cache",
"makewhisperweb": "turbo run make --filter='@remotion/whisper-web'",
"watchwhisperweb": "turbo watch make --experimental-write-cache --filter='@remotion/whisper-web'",
"makewebcodecs": "turbo run make --filter='@remotion/media-parser' --filter='@remotion/webcodecs'",
diff --git a/packages/core/src/CompositionManager.tsx b/packages/core/src/CompositionManager.tsx
index 0b1eda8d85..baf53744db 100644
--- a/packages/core/src/CompositionManager.tsx
+++ b/packages/core/src/CompositionManager.tsx
@@ -136,6 +136,17 @@ export type AudioOrVideoAsset = {
audioStreamIndex: number;
};
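+// Audio captured inline during rendering: interleaved 16-bit PCM samples
+// (one array element per sample), tagged with the composition frame they
+// were extracted for.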
+export type InlineAudioAsset = {
+ type: 'inline-audio';
+ id: string;
+ audio: number[];
+ frame: number;
+ sampleRate: number;
+ numberOfChannels: number;
+ timestamp: number;
+ duration: number;
+};
+
type DiscriminatedArtifact =
| {
contentType: 'binary';
@@ -157,7 +168,7 @@ export type ArtifactAsset = {
downloadBehavior: DownloadBehavior | null;
} & DiscriminatedArtifact;
-export type TRenderAsset = AudioOrVideoAsset | ArtifactAsset;
+export type TRenderAsset = AudioOrVideoAsset | ArtifactAsset | InlineAudioAsset;
export const compositionsRef = React.createRef<{
getCompositions: () => AnyComposition[];
diff --git a/packages/core/src/no-react.ts b/packages/core/src/no-react.ts
index 6f19d6776d..d540618a79 100644
--- a/packages/core/src/no-react.ts
+++ b/packages/core/src/no-react.ts
@@ -1,6 +1,7 @@
export type {
ArtifactAsset,
AudioOrVideoAsset,
+ InlineAudioAsset,
TRenderAsset,
} from './CompositionManager';
export {DownloadBehavior} from './download-behavior';
diff --git a/packages/docs/docs/new-video/index.mdx b/packages/docs/docs/new-video/index.mdx
index dfaeb81869..a6b3057a29 100644
--- a/packages/docs/docs/new-video/index.mdx
+++ b/packages/docs/docs/new-video/index.mdx
@@ -5,7 +5,7 @@ crumb: 'API'
---
:::warning
-**Very experimental**: This component is in a very early stage and **slower** than OffthreadVideo.
+**Very experimental**: This component is in a very early stage and does not yet support some basic features such as volume, playback rate, or CSS styling.
The current focus is on correctness, not on performance.
We recommend that you use [`<OffthreadVideo>`](/docs/offthreadvideo) for now.
@@ -49,7 +49,7 @@ export const MyComposition = () => {
The URL of the video to be rendered. Can be a remote URL or a local file referenced with [`staticFile()`](/docs/staticfile).
-### `trimBefore?`
+### `trimBefore?`
Will remove a portion of the video at the beginning (left side).
@@ -76,107 +76,29 @@ export const MyComposition = () => {
};
```
-### `trimAfter?`
+### `trimAfter?`
Removes a portion of the video at the end (right side). See [`trimBefore`](/docs/video#trimbefore) for an explanation.
### `volume?`
-Allows you to control the volume for the whole track or change it on a per-frame basis. Refer to the [using audio](/docs/audio/volume) guide to learn how to use it.
+**Currently not supported!**
-```tsx twoslash title="Example using static volume"
-import {AbsoluteFill, staticFile} from 'remotion';
-import {experimental_NewVideo as NewVideo} from '@remotion/video';
-
-// ---cut---
-export const MyComposition = () => {
- return (
- <AbsoluteFill>
- <NewVideo volume={0.5} src={staticFile('video.webm')} />
- </AbsoluteFill>
- );
-};
-```
+### `loopVolumeCurveBehavior?`
-```tsx twoslash title="Example of a ramp up over 100 frames"
-import {AbsoluteFill, interpolate, staticFile} from 'remotion';
-import {experimental_NewVideo as NewVideo} from '@remotion/video';
+**Currently not supported!**
-// ---cut---
-export const MyComposition = () => {
- return (
- <AbsoluteFill>
- <NewVideo volume={(f) => interpolate(f, [0, 100], [0, 1], {extrapolateLeft: 'clamp'})} src={staticFile('video.webm')} />
- </AbsoluteFill>
- );
-};
-```
-
-By default, volumes between 0 and 1 are supported, except in iOS Safari, where the volume is always 1.
-See [Volume Limitations](/docs/audio/volume#limitations) for more information.
-
-### `loopVolumeCurveBehavior?`
-
-Controls the `frame` which is returned when using the [`volume`](#volume) callback function and wrapping `OffthreadVideo` in a [`<Loop>`](/docs/loop).
-
-Can be either `"repeat"` (default, start from 0 on each iteration) or `"extend"` (keep increasing frames).
-
-### `style?`
-
-You can pass any style you can pass to a native HTML element. Keep in mind that during rendering, `<NewVideo>` renders a `canvas` tag, but a `<video>` tag is used during preview.
-
-```tsx twoslash
-import {AbsoluteFill, Img, staticFile} from 'remotion';
-
-// ---cut---
-export const MyComposition = () => {
- return (
- <AbsoluteFill>
- <Img src={staticFile('image.png')} style={{height: 100}} />
- </AbsoluteFill>
- );
-};
-```
-
-### `name?`
+### `name?`
A name that will be shown as the label of the sequence in the timeline of the Remotion Studio. This property is purely for helping you keep track of items in the timeline.
-### `toneFrequency?`
-
-Adjust the pitch of the audio - will only be applied during rendering.
-
-Accepts a number between `0.01` and `2`, where `1` represents the original pitch. Values less than `1` will decrease the pitch, while values greater than `1` will increase it.
-
-A `toneFrequency` of 0.5 would lower the pitch by half, and a `toneFrequency` of `1.5` would increase the pitch by 50%.
-
### `onError?`
-Handle an error playing the video. From v3.3.89, if you pass an `onError` callback, then no exception will be thrown. Previously, the error could not be caught.
-
-### `playbackRate?`
-
-Controls the speed of the video. `1` is the default and means regular speed, `0.5` slows down the video so it's twice as long and `2` speeds up the video so it's twice as fast.
+**Currently not supported!**
-While Remotion doesn't limit the range of possible playback speeds, in development mode the [`HTMLMediaElement.playbackRate`](https://developer.mozilla.org/en-US/docs/Web/API/HTMLMediaElement/playbackRate) API is used which throws errors on extreme values. At the time of writing, Google Chrome throws an exception if the playback rate is below `0.0625` or above `16`.
-
-```tsx twoslash title="Example of a video playing twice as fast"
-import {AbsoluteFill, staticFile} from 'remotion';
-import {experimental_NewVideo as NewVideo} from '@remotion/video';
-
-// ---cut---
-export const MyComposition = () => {
- return (
- <AbsoluteFill>
- <NewVideo playbackRate={2} src={staticFile('video.webm')} />
- </AbsoluteFill>
- );
-};
-```
+### `playbackRate?`
-:::note
-Playing a video in reverse is not supported.
-:::
+**Currently not supported!**
### `muted?`
@@ -195,87 +117,46 @@ export const MyComposition = () => {
};
```
-### `acceptableTimeShiftInSeconds?`
-
-In the [Studio](/docs/terminology/studio) or in the [Remotion Player](/docs/player), Remotion will seek the video if it gets too much out of sync with Remotion's internal time - be it due to the video loading or the page being too slow to keep up in real-time. By default, a seek is triggered if `0.45` seconds of time shift is encountered. Using this prop, you can customize the threshold.
-
-### `toneFrequency?`
+### `acceptableTimeShiftInSeconds?`
-Adjust the pitch of the audio - will only be applied during rendering.
-
-Accepts a number between `0.01` and `2`, where `1` represents the original pitch. Values less than `1` will decrease the pitch, while values greater than `1` will increase it.
-
-A `toneFrequency` of 0.5 would lower the pitch by half, and a `toneFrequency` of `1.5` would increase the pitch by 50%.
-
-### `audioStreamIndex?`
-
-Select the audio stream to use. The default is `0`.
-
-```tsx twoslash
-import {AbsoluteFill, Audio} from 'remotion';
-
-// ---cut---
-export const MyComposition = () => {
- return (
- <AbsoluteFill>
- <Audio src="video.mp4" audioStreamIndex={1} />
- </AbsoluteFill>
- );
-};
-```
-
-:::note
-This prop only works during rendering.
-Browsers do not support selecting the audio track without enabling experimental flags.
-
-Not to be confused with audio channels. A video can have multiple audio streams, each stream can have multiple channels.
-Multiple audio streams can be used for example for adding multiple languages to a video.
-
-Audio streams are zero-indexed.
-:::
+**Pending removal**: Future iterations will also feature client-side playback and will not support this prop, which was designed for the `<video>` tag.
-### `pauseWhenBuffering?`
+### `pauseWhenBuffering?`
If set to `true` and the video is loading, the Player will enter the [native buffering state](/docs/player/buffer-state). The default is `false`, but will become `true` in Remotion 5.0.
-### `showInTimeline?`
+### `showInTimeline?`
If set to `false`, no layer will be shown in the timeline of the Remotion Studio. The default is `true`.
-### `delayRenderTimeoutInMilliseconds?`
+### `delayRenderTimeoutInMilliseconds?`
Customize the [timeout](/docs/delay-render#modifying-the-timeout) of the [`delayRender()`](/docs/delay-render) call that this component makes.
-### `delayRenderRetries?`
+### `delayRenderRetries?`
Customize the [number of retries](/docs/delay-render#retrying) of the [`delayRender()`](/docs/delay-render) call that this component makes.
-### `onAutoPlayError?`
+### `onAutoPlayError?`
-A callback function that gets called when the video fails to play due to autoplay restrictions.
-If you don't pass a callback, the video will be muted and be retried once.
-This prop is useful if you want to handle the error yourself, e.g. for pausing the Player.
-Read more here about [autoplay restrictions](/docs/player/autoplay).
+**Pending removal**: Future iterations will also feature client-side playback and will not support this prop, which was designed for the `<video>` tag.
-### `onVideoFrame?`
+### `onVideoFrame?`
A callback function that gets called when a frame is extracted from the video.
Useful for [video manipulation](/docs/video-manipulation).
-The callback is called with a [`CanvasImageSource`](https://developer.mozilla.org/en-US/docs/Web/API/CanvasImageSource) object.
-During preview, this is a `HTMLVideoElement` object, during rendering, it is an `HTMLImageElement`.
+The callback is called with a [`CanvasImageSource`](https://developer.mozilla.org/en-US/docs/Web/API/CanvasImageSource) object, more specifically, either an `ImageBitmap` or a `VideoFrame`.
### `crossOrigin?`
-Corresponds to the [`crossOrigin`](https://developer.mozilla.org/en-US/docs/Web/HTML/Element/video#attr-crossorigin) attribute of the `<video>` element.
-One of `"anonymous"`, `"use-credentials"`, or `undefined`.
-Default: `"anonymous"` if `onVideoFrame` is specified, `undefined` otherwise.
+**Pending removal**: Future iterations will also feature client-side playback and will not support this prop, which was designed for the `<video>` tag.
### `useWebAudioApi?`
-Enable the [Web Audio API](/docs/audio/volume#limitations) for the video tag.
+**Pending removal**: Future iterations will also feature client-side playback and will not support this prop, which was designed for the `<video>` tag.
## See also
-- [Source code for this component](https://github.com/remotion-dev/remotion/blob/main/packages/video/src/video.tsx)
+- [Source code for this component](https://github.com/remotion-dev/remotion/blob/main/packages/video/src/new-video.tsx)
- [`<Video>`](/docs/video)
- [`<OffthreadVideo>`](/docs/offthreadvideo)
diff --git a/packages/renderer/src/assets/download-map.ts b/packages/renderer/src/assets/download-map.ts
index abab0133d1..fd218d79af 100644
--- a/packages/renderer/src/assets/download-map.ts
+++ b/packages/renderer/src/assets/download-map.ts
@@ -6,6 +6,10 @@ import {OffthreadVideoServerEmitter} from '../offthread-video-server';
import type {FrameAndAssets} from '../render-frames';
import {tmpDir} from '../tmp-dir';
import type {RenderMediaOnDownload} from './download-and-map-assets-to-file';
+import {
+ makeInlineAudioMixing,
+ type InlineAudioMixing,
+} from './inline-audio-mixing';
export type AudioChannelsAndDurationResultCache = {
channels: number;
@@ -44,6 +48,7 @@ export type DownloadMap = {
preventCleanup: () => void;
allowCleanup: () => void;
isPreventedFromCleanup: () => boolean;
+ inlineAudioMixing: InlineAudioMixing;
};
export type RenderAssetInfo = {
@@ -57,7 +62,7 @@ export type RenderAssetInfo = {
forSeamlessAacConcatenation: boolean;
};
-const makeAndReturn = (dir: string, name: string) => {
+export const makeAndReturn = (dir: string, name: string) => {
const p = path.join(dir, name);
mkdirSync(p);
return p;
@@ -93,6 +98,7 @@ export const makeDownloadMap = (): DownloadMap => {
isPreventedFromCleanup: () => {
return prevented;
},
+ inlineAudioMixing: makeInlineAudioMixing(dir),
};
};
@@ -104,6 +110,9 @@ export const cleanDownloadMap = (downloadMap: DownloadMap) => {
deleteDirectory(downloadMap.downloadDir);
deleteDirectory(downloadMap.complexFilter);
deleteDirectory(downloadMap.compositingDir);
+
+ downloadMap.inlineAudioMixing.cleanup();
+
// Assets dir must be last since the others are contained
deleteDirectory(downloadMap.assetDir);
};
diff --git a/packages/renderer/src/assets/inline-audio-mixing.ts b/packages/renderer/src/assets/inline-audio-mixing.ts
new file mode 100644
index 0000000000..71c07d9e84
--- /dev/null
+++ b/packages/renderer/src/assets/inline-audio-mixing.ts
@@ -0,0 +1,227 @@
+import fs, {writeSync} from 'node:fs';
+import path from 'node:path';
+import type {InlineAudioAsset} from 'remotion/no-react';
+import {deleteDirectory} from '../delete-directory';
+import {DEFAULT_SAMPLE_RATE} from '../sample-rate';
+import {makeAndReturn} from './download-map';
+
+const numberTo32BitIntLittleEndian = (num: number) => {
+ return new Uint8Array([
+ num & 0xff,
+ (num >> 8) & 0xff,
+ (num >> 16) & 0xff,
+ (num >> 24) & 0xff,
+ ]);
+};
+
+const numberTo16BitLittleEndian = (num: number) => {
+ return new Uint8Array([num & 0xff, (num >> 8) & 0xff]);
+};
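+
+// For example: numberTo32BitIntLittleEndian(0x12345678) yields
+// [0x78, 0x56, 0x34, 0x12], and numberTo16BitLittleEndian(0x1234) yields
+// [0x34, 0x12]: the little-endian byte order that WAV headers require.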
+
+const BIT_DEPTH = 16;
+const BYTES_PER_SAMPLE = BIT_DEPTH / 8;
+
+export const makeInlineAudioMixing = (dir: string) => {
+ const folderToAdd = makeAndReturn(dir, 'remotion-inline-audio-mixing');
+ // file path -> file descriptor
+ const openFiles: Record<string, number> = {};
+ const writtenHeaders: Record<string, boolean> = {};
+
+ const cleanup = () => {
+ for (const fd of Object.values(openFiles)) {
+ try {
+ fs.closeSync(fd);
+ } catch {}
+ }
+
+ deleteDirectory(folderToAdd);
+ };
+
+ const getListOfAssets = () => {
+ return Object.keys(openFiles);
+ };
+
+ const getFilePath = (asset: InlineAudioAsset) => {
+ return path.join(folderToAdd, `${asset.id}.wav`);
+ };
+
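+ // ensureAsset writes the canonical 44-byte PCM WAV header, all multi-byte
+ // fields little-endian:
+ // bytes 0-3 "RIFF" | 4-7 RIFF chunk size | 8-11 "WAVE" | 12-15 "fmt "
+ // bytes 16-19 fmt size (16) | 20-21 format (1 = PCM) | 22-23 channels
+ // bytes 24-27 sample rate | 28-31 byte rate | 32-33 block align
+ // bytes 34-35 bits per sample | 36-39 "data" | 40-43 data chunk size
+ // bytes 44+ PCM samples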
+ const ensureAsset = ({
+ asset,
+ fps,
+ totalNumberOfFrames,
+ trimLeftOffset,
+ trimRightOffset,
+ }: {
+ asset: InlineAudioAsset;
+ fps: number;
+ totalNumberOfFrames: number;
+ trimLeftOffset: number;
+ trimRightOffset: number;
+ }) => {
+ const filePath = getFilePath(asset);
+ if (!openFiles[filePath]) {
+ openFiles[filePath] = fs.openSync(filePath, 'w');
+ }
+
+ if (writtenHeaders[filePath]) {
+ return;
+ }
+
+ writtenHeaders[filePath] = true;
+
+ const expectedDataSize = Math.round(
+ (totalNumberOfFrames / fps - trimLeftOffset + trimRightOffset) *
+ asset.numberOfChannels *
+ DEFAULT_SAMPLE_RATE *
+ BYTES_PER_SAMPLE,
+ );
+
+ const expectedSize = 36 + expectedDataSize; // RIFF chunk size: total file size (44-byte header + data) minus 8
+
+ const {numberOfChannels} = asset;
+
+ const fd = openFiles[filePath];
+ writeSync(fd, new Uint8Array([0x52, 0x49, 0x46, 0x46]), 0, 4, 0); // "RIFF"
+ writeSync(
+ fd,
+ new Uint8Array(numberTo32BitIntLittleEndian(expectedSize)),
+ 0,
+ 4,
+ 4,
+ ); // Remaining size
+ writeSync(fd, new Uint8Array([0x57, 0x41, 0x56, 0x45]), 0, 4, 8); // "WAVE"
+ writeSync(fd, new Uint8Array([0x66, 0x6d, 0x74, 0x20]), 0, 4, 12); // "fmt "
+ writeSync(fd, new Uint8Array([0x10, 0x00, 0x00, 0x00]), 0, 4, 16); // fmt chunk size = 16
+ writeSync(fd, new Uint8Array([0x01, 0x00]), 0, 2, 20); // Audio format: 1 = PCM (3 would be float32)
+ writeSync(fd, new Uint8Array([numberOfChannels, 0x00]), 0, 2, 22); // Number of channels
+ writeSync(
+ fd,
+ new Uint8Array(numberTo32BitIntLittleEndian(DEFAULT_SAMPLE_RATE)),
+ 0,
+ 4,
+ 24,
+ ); // Sample rate
+ writeSync(
+ fd,
+ new Uint8Array(
+ numberTo32BitIntLittleEndian(
+ DEFAULT_SAMPLE_RATE * numberOfChannels * BYTES_PER_SAMPLE,
+ ),
+ ),
+ 0,
+ 4,
+ 28,
+ ); // Byte rate
+ writeSync(
+ fd,
+ new Uint8Array(
+ numberTo16BitLittleEndian(numberOfChannels * BYTES_PER_SAMPLE),
+ ),
+ 0,
+ 2,
+ 32,
+ ); // Block align
+ writeSync(fd, numberTo16BitLittleEndian(BIT_DEPTH), 0, 2, 34); // Bits per sample
+ writeSync(fd, new Uint8Array([0x64, 0x61, 0x74, 0x61]), 0, 4, 36); // "data"
+ writeSync(
+ fd,
+ new Uint8Array(numberTo32BitIntLittleEndian(expectedDataSize)),
+ 0,
+ 4,
+ 40,
+ ); // Data chunk size
+ };
+
+ const addAsset = ({
+ asset,
+ fps,
+ totalNumberOfFrames,
+ firstFrame,
+ trimLeftOffset,
+ trimRightOffset,
+ }: {
+ asset: InlineAudioAsset;
+ fps: number;
+ totalNumberOfFrames: number;
+ firstFrame: number;
+ trimLeftOffset: number;
+ trimRightOffset: number;
+ }) => {
+ ensureAsset({
+ asset,
+ fps,
+ totalNumberOfFrames,
+ trimLeftOffset,
+ trimRightOffset,
+ });
+ const filePath = getFilePath(asset);
+ const fileDescriptor = openFiles[filePath];
+
+ let arr = new Int16Array(asset.audio);
+ const isFirst = asset.frame === firstFrame;
+ const isLast = asset.frame === totalNumberOfFrames + firstFrame - 1;
+ const samplesToShaveFromStart = trimLeftOffset * DEFAULT_SAMPLE_RATE;
+ const samplesToShaveFromEnd = trimRightOffset * DEFAULT_SAMPLE_RATE;
+ if (
+ Math.abs(Math.round(samplesToShaveFromEnd) - samplesToShaveFromEnd) >
+ 0.00000001
+ ) {
+ throw new Error(
+ 'samplesToShaveFromEnd should be approximately an integer',
+ );
+ }
+
+ if (
+ Math.abs(Math.round(samplesToShaveFromStart) - samplesToShaveFromStart) >
+ 0.00000001
+ ) {
+ throw new Error(
+ 'samplesToShaveFromStart should be approximately an integer',
+ );
+ }
+
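+ // Only the first and last frames of the render window are shaved; the trim
+ // offsets drop fractional samples so that the mixed track aligns exactly
+ // with the requested render range.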
+ if (isFirst) {
+ arr = arr.slice(
+ Math.round(samplesToShaveFromStart) * asset.numberOfChannels,
+ );
+ }
+
+ if (isLast) {
+ arr = arr.slice(
+ 0,
+ arr.length + Math.round(samplesToShaveFromEnd) * asset.numberOfChannels,
+ );
+ }
+
+ const positionInSeconds =
+ (asset.frame - firstFrame) / fps - (isFirst ? 0 : trimLeftOffset);
+
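+ // e.g. at fps = 30 with 48 kHz stereo 16-bit audio, one frame holds
+ // 48000 / 30 = 1600 sample frames = 6400 bytes, so (absent trimming) the
+ // asset for frame n starts at byte offset n * 6400 of the data chunk.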
+ const position = Math.round(
+ positionInSeconds *
+ asset.numberOfChannels *
+ DEFAULT_SAMPLE_RATE *
+ BYTES_PER_SAMPLE,
+ );
+
+ writeSync(
+ // file descriptor
+ fileDescriptor,
+ // data
+ arr,
+ // offset of data
+ 0,
+ // length
+ arr.byteLength,
+ // position
+ 44 + position,
+ );
+ };
+
+ return {
+ cleanup,
+ addAsset,
+ getListOfAssets,
+ };
+};
+
+export type InlineAudioMixing = ReturnType<typeof makeInlineAudioMixing>;
diff --git a/packages/renderer/src/create-audio.ts b/packages/renderer/src/create-audio.ts
index 53ac71deca..54f006ceb0 100644
--- a/packages/renderer/src/create-audio.ts
+++ b/packages/renderer/src/create-audio.ts
@@ -13,6 +13,7 @@ import type {CancelSignal} from './make-cancel-signal';
import {mergeAudioTrack} from './merge-audio-track';
import type {AudioCodec} from './options/audio-codec';
import {getExtensionFromAudioCodec} from './options/audio-codec';
+import type {PreprocessedAudioTrack} from './preprocess-audio-track';
import {preprocessAudioTrack} from './preprocess-audio-track';
import type {FrameAndAssets} from './render-frames';
import {truthy} from './truthy';
@@ -122,7 +123,19 @@ export const createAudio = async ({
}),
);
- const preprocessed = audioTracks.filter(truthy);
+ const inlinedAudio = downloadMap.inlineAudioMixing.getListOfAssets();
+
+ const preprocessed: PreprocessedAudioTrack[] = [
+ ...audioTracks.filter(truthy),
+ ...inlinedAudio.map((asset) => ({
+ outName: asset,
+ filter: {
+ filter: null,
+ pad_start: null,
+ pad_end: null,
+ },
+ })),
+ ];
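+
+ // Inline-audio assets were already written to disk as PCM WAV files at the
+ // target sample rate, so they are appended with a null filter: nothing
+ // further needs to be applied before merging.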
const merged = path.join(downloadMap.audioPreprocessing, 'merged.wav');
const extension = getExtensionFromAudioCodec(audioCodec);
const outName = path.join(
diff --git a/packages/renderer/src/ffmpeg-filter-file.ts b/packages/renderer/src/ffmpeg-filter-file.ts
index 1d6c0836c4..60640d5380 100644
--- a/packages/renderer/src/ffmpeg-filter-file.ts
+++ b/packages/renderer/src/ffmpeg-filter-file.ts
@@ -10,6 +10,13 @@ export const makeFfmpegFilterFile = (
complexFilter: FilterWithoutPaddingApplied,
downloadMap: DownloadMap,
) => {
+ if (complexFilter.filter === null) {
+ return {
+ file: null,
+ cleanup: () => undefined,
+ };
+ }
+
return makeFfmpegFilterFileStr(complexFilter.filter, downloadMap);
};
diff --git a/packages/renderer/src/filter-asset-types.ts b/packages/renderer/src/filter-asset-types.ts
index 9377ee709f..c6093d90f5 100644
--- a/packages/renderer/src/filter-asset-types.ts
+++ b/packages/renderer/src/filter-asset-types.ts
@@ -1,6 +1,7 @@
import type {
ArtifactAsset,
AudioOrVideoAsset,
+ InlineAudioAsset,
TRenderAsset,
} from 'remotion/no-react';
import type {EmittedArtifact} from './serialize-artifact';
@@ -59,3 +60,9 @@ export const onlyArtifact = ({
})
.filter(truthy);
};
+
+export const onlyInlineAudio = (assets: TRenderAsset[]): InlineAudioAsset[] => {
+ return assets.filter(
+ (asset): asset is InlineAudioAsset => asset.type === 'inline-audio',
+ );
+};
diff --git a/packages/renderer/src/preprocess-audio-track.ts b/packages/renderer/src/preprocess-audio-track.ts
index a5609197cc..d09435d112 100644
--- a/packages/renderer/src/preprocess-audio-track.ts
+++ b/packages/renderer/src/preprocess-audio-track.ts
@@ -13,6 +13,7 @@ import {resolveAssetSrc} from './resolve-asset-src';
import {DEFAULT_SAMPLE_RATE} from './sample-rate';
import type {ProcessedTrack} from './stringify-ffmpeg-filter';
import {stringifyFfmpegFilter} from './stringify-ffmpeg-filter';
+import {truthy} from './truthy';
type Options = {
outName: string;
@@ -88,11 +89,13 @@ const preprocessAudioTrackUnlimited = async ({
['-i', resolveAssetSrc(asset.src)],
audioStreamIndex ? ['-map', `0:a:${audioStreamIndex}`] : [],
['-ac', '2'],
- ['-filter_script:a', file],
+ file ? ['-filter_script:a', file] : null,
['-c:a', 'pcm_s16le'],
['-ar', String(DEFAULT_SAMPLE_RATE)],
['-y', outName],
- ].flat(2);
+ ]
+ .flat(2)
+ .filter(truthy);
Log.verbose(
{indent, logLevel},
diff --git a/packages/renderer/src/render-frame-and-retry-target-close.ts b/packages/renderer/src/render-frame-and-retry-target-close.ts
index fea1549060..e932b5be03 100644
--- a/packages/renderer/src/render-frame-and-retry-target-close.ts
+++ b/packages/renderer/src/render-frame-and-retry-target-close.ts
@@ -51,6 +51,9 @@ export const renderFrameAndRetryTargetClose = async ({
onFrameUpdate,
nextFrameToRender,
imageSequencePattern,
+ trimLeftOffset,
+ trimRightOffset,
+ allFramesAndExtraFrames,
}: {
retriesLeft: number;
attempt: number;
@@ -90,6 +93,9 @@ export const renderFrameAndRetryTargetClose = async ({
) => void);
nextFrameToRender: NextFrameToRender;
imageSequencePattern: string | null;
+ trimLeftOffset: number;
+ trimRightOffset: number;
+ allFramesAndExtraFrames: number[];
}): Promise<void> => {
const currentPool = await poolPromise;
@@ -104,6 +110,9 @@ export const renderFrameAndRetryTargetClose = async ({
try {
await Promise.race([
renderFrame({
+ trimLeftOffset,
+ trimRightOffset,
+ allFramesAndExtraFrames,
attempt,
assets,
binariesDirectory,
@@ -215,6 +224,9 @@ export const renderFrameAndRetryTargetClose = async ({
onFrameUpdate,
nextFrameToRender,
imageSequencePattern,
+ trimLeftOffset,
+ trimRightOffset,
+ allFramesAndExtraFrames,
});
}
@@ -269,6 +281,9 @@ export const renderFrameAndRetryTargetClose = async ({
onFrameUpdate,
nextFrameToRender,
imageSequencePattern,
+ trimLeftOffset,
+ trimRightOffset,
+ allFramesAndExtraFrames,
});
}
};
diff --git a/packages/renderer/src/render-frame-with-option-to-reject.ts b/packages/renderer/src/render-frame-with-option-to-reject.ts
index 1c2633138d..21a9d6f1bb 100644
--- a/packages/renderer/src/render-frame-with-option-to-reject.ts
+++ b/packages/renderer/src/render-frame-with-option-to-reject.ts
@@ -3,9 +3,14 @@ import type {RenderMediaOnDownload} from './assets/download-and-map-assets-to-fi
import {downloadAndMapAssetsToFileUrl} from './assets/download-and-map-assets-to-file';
import type {DownloadMap} from './assets/download-map';
import type {Page} from './browser/BrowserPage';
+import {collectAssets} from './collect-assets';
import {compressAsset} from './compress-assets';
import {handleJavascriptException} from './error-handling/handle-javascript-exception';
-import {onlyArtifact, onlyAudioAndVideoAssets} from './filter-asset-types';
+import {
+ onlyArtifact,
+ onlyAudioAndVideoAssets,
+ onlyInlineAudio,
+} from './filter-asset-types';
import type {CountType} from './get-frame-padded-index';
import {getFrameOutputFileName} from './get-frame-padded-index';
import type {VideoImageFormat} from './image-format';
@@ -48,6 +53,10 @@ export const renderFrameWithOptionToReject = async ({
frame,
page,
imageSequencePattern,
+ fps,
+ trimLeftOffset,
+ trimRightOffset,
+ allFramesAndExtraFrames,
}: {
reject: (err: Error) => void;
width: number;
@@ -85,6 +94,10 @@ export const renderFrameWithOptionToReject = async ({
frame: number;
page: Page;
imageSequencePattern: string | null;
+ fps: number;
+ trimLeftOffset: number;
+ trimRightOffset: number;
+ allFramesAndExtraFrames: number[];
}) => {
const startTime = performance.now();
@@ -138,32 +151,38 @@ export const renderFrameWithOptionToReject = async ({
);
}
- const {buffer, collectedAssets} = await takeFrame({
- frame,
- freePage: page,
- height,
- imageFormat: assetsOnly ? 'none' : imageFormat,
- output:
- index === null
- ? null
- : path.join(
- frameDir,
- getFrameOutputFileName({
- frame,
- imageFormat,
- index,
- countType,
- lastFrame,
- totalFrames: framesToRender.length,
- imageSequencePattern,
- }),
- ),
- jpegQuality,
- width,
- scale,
- wantsBuffer: Boolean(onFrameBuffer),
- timeoutInMilliseconds,
- });
+ const [buffer, collectedAssets] = await Promise.all([
+ takeFrame({
+ freePage: page,
+ height,
+ imageFormat: assetsOnly ? 'none' : imageFormat,
+ output:
+ index === null
+ ? null
+ : path.join(
+ frameDir,
+ getFrameOutputFileName({
+ frame,
+ imageFormat,
+ index,
+ countType,
+ lastFrame,
+ totalFrames: framesToRender.length,
+ imageSequencePattern,
+ }),
+ ),
+ jpegQuality,
+ width,
+ scale,
+ wantsBuffer: Boolean(onFrameBuffer),
+ timeoutInMilliseconds,
+ }),
+ collectAssets({
+ frame,
+ freePage: page,
+ timeoutInMilliseconds,
+ }),
+ ]);
if (onFrameBuffer && !assetsOnly) {
if (!buffer) {
throw new Error('unexpected null buffer');
@@ -206,6 +225,8 @@ export const renderFrameWithOptionToReject = async ({
return compressAsset(previousAudioRenderAssets, asset);
});
+ const inlineAudioAssets = onlyInlineAudio(collectedAssets);
+
assets.push({
audioAndVideoAssets: compressedAssets,
frame,
@@ -215,7 +236,9 @@ export const renderFrameWithOptionToReject = async ({
filename: a.filename,
};
}),
+ inlineAudioAssets,
});
+
for (const renderAsset of compressedAssets) {
downloadAndMapAssetsToFileUrl({
renderAsset,
@@ -238,6 +261,17 @@ export const renderFrameWithOptionToReject = async ({
});
}
+ for (const renderAsset of inlineAudioAssets) {
+ downloadMap.inlineAudioMixing.addAsset({
+ asset: renderAsset,
+ fps,
+ totalNumberOfFrames: allFramesAndExtraFrames.length,
+ firstFrame: allFramesAndExtraFrames[0],
+ trimLeftOffset,
+ trimRightOffset,
+ });
+ }
+
cleanupPageError();
page.off('error', errorCallbackOnFrame);
diff --git a/packages/renderer/src/render-frame.ts b/packages/renderer/src/render-frame.ts
index 2ca7c30153..1427c6022e 100644
--- a/packages/renderer/src/render-frame.ts
+++ b/packages/renderer/src/render-frame.ts
@@ -38,6 +38,9 @@ export const renderFrame = ({
frame,
page,
imageSequencePattern,
+ trimLeftOffset,
+ trimRightOffset,
+ allFramesAndExtraFrames,
}: {
attempt: number;
indent: boolean;
@@ -73,6 +76,9 @@ export const renderFrame = ({
frame: number;
page: Page;
imageSequencePattern: string | null;
+ trimLeftOffset: number;
+ trimRightOffset: number;
+ allFramesAndExtraFrames: number[];
}) => {
return new Promise<void>((resolve, reject) => {
renderFrameWithOptionToReject({
@@ -106,6 +112,10 @@ export const renderFrame = ({
frame,
page,
imageSequencePattern,
+ fps: composition.fps,
+ trimLeftOffset,
+ trimRightOffset,
+ allFramesAndExtraFrames,
})
.then(() => {
resolve();
diff --git a/packages/renderer/src/render-frames.ts b/packages/renderer/src/render-frames.ts
index ecb21e8082..d6ec141d69 100644
--- a/packages/renderer/src/render-frames.ts
+++ b/packages/renderer/src/render-frames.ts
@@ -1,7 +1,11 @@
import fs from 'node:fs';
import path from 'node:path';
-import type {AudioOrVideoAsset, VideoConfig} from 'remotion/no-react';
+import type {
+ AudioOrVideoAsset,
+ InlineAudioAsset,
+ VideoConfig,
+} from 'remotion/no-react';
import {NoReactInternals} from 'remotion/no-react';
import type {RenderMediaOnDownload} from './assets/download-and-map-assets-to-file';
import type {DownloadMap} from './assets/download-map';
@@ -146,6 +150,7 @@ export type FrameAndAssets = {
frame: number;
audioAndVideoAssets: AudioOrVideoAsset[];
artifactAssets: ArtifactWithoutContent[];
+ inlineAudioAssets: InlineAudioAsset[];
};
export type RenderFramesOptions = {
@@ -401,6 +406,9 @@ const innerRenderFrames = async ({
onFrameUpdate,
nextFrameToRender,
imageSequencePattern: pattern,
+ trimLeftOffset,
+ trimRightOffset,
+ allFramesAndExtraFrames,
});
}),
);
diff --git a/packages/renderer/src/render-still.ts b/packages/renderer/src/render-still.ts
index c79c7c802d..7fadf33ae9 100644
--- a/packages/renderer/src/render-still.ts
+++ b/packages/renderer/src/render-still.ts
@@ -11,6 +11,7 @@ import {DEFAULT_TIMEOUT} from './browser/TimeoutSettings';
import {defaultBrowserDownloadProgress} from './browser/browser-download-progress-bar';
import type {SourceMapGetter} from './browser/source-map-getter';
import type {Codec} from './codec';
+import {collectAssets} from './collect-assets';
import {convertToPositiveFrameIndex} from './convert-to-positive-frame-index';
import {ensureOutputDirectory} from './ensure-output-directory';
import {handleJavascriptException} from './error-handling/handle-javascript-exception';
@@ -320,18 +321,24 @@ const innerRenderStill = async ({
attempt: 0,
});
- const {buffer, collectedAssets} = await takeFrame({
- frame: stillFrame,
- freePage: page,
- height: composition.height,
- width: composition.width,
- imageFormat,
- scale,
- output,
- jpegQuality,
- wantsBuffer: !output,
- timeoutInMilliseconds,
- });
+ const [buffer, collectedAssets] = await Promise.all([
+ takeFrame({
+ freePage: page,
+ height: composition.height,
+ width: composition.width,
+ imageFormat,
+ scale,
+ output,
+ jpegQuality,
+ wantsBuffer: !output,
+ timeoutInMilliseconds,
+ }),
+ collectAssets({
+ frame: stillFrame,
+ freePage: page,
+ timeoutInMilliseconds,
+ }),
+ ]);
const artifactAssets = onlyArtifact({
assets: collectedAssets,
diff --git a/packages/renderer/src/seamless-aac-trim.ts b/packages/renderer/src/seamless-aac-trim.ts
new file mode 100644
index 0000000000..260e0ced2f
--- /dev/null
+++ b/packages/renderer/src/seamless-aac-trim.ts
@@ -0,0 +1,44 @@
+export const getActualTrimLeft = ({
+ fps,
+ trimLeftOffset,
+ seamless,
+ assetDuration,
+ audioStartFrame,
+ trimLeft,
+ playbackRate,
+}: {
+ trimLeft: number;
+ audioStartFrame: number;
+ fps: number;
+ trimLeftOffset: number;
+ seamless: boolean;
+ assetDuration: number | null;
+ playbackRate: number;
+}): {
+ trimLeft: number;
+ maxTrim: number | null;
+} => {
+ const sinceStart = trimLeft - audioStartFrame;
+
+ if (!seamless) {
+ return {
+ trimLeft:
+ audioStartFrame / fps +
+ (sinceStart / fps) * playbackRate +
+ trimLeftOffset,
+ maxTrim: assetDuration,
+ };
+ }
+
+ return {
+ trimLeft:
+ audioStartFrame / fps / playbackRate +
+ sinceStart / fps +
+ trimLeftOffset,
+ maxTrim: assetDuration ? assetDuration / playbackRate : null,
+ };
+};
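+
+// Example (non-seamless case): audioStartFrame = 0, trimLeft = 30, fps = 30,
+// playbackRate = 2, trimLeftOffset = 0 gives trimLeft = (30 / 30) * 2 = 2s:
+// 30 timeline frames played at 2x consume two seconds of source audio.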
diff --git a/packages/renderer/src/stringify-ffmpeg-filter.ts b/packages/renderer/src/stringify-ffmpeg-filter.ts
index d307ef2d1e..118b916a37 100644
--- a/packages/renderer/src/stringify-ffmpeg-filter.ts
+++ b/packages/renderer/src/stringify-ffmpeg-filter.ts
@@ -4,10 +4,11 @@ import type {AssetVolume, MediaAsset} from './assets/types';
import type {LogLevel} from './log-level';
import {Log} from './logger';
import {DEFAULT_SAMPLE_RATE} from './sample-rate';
+import {getActualTrimLeft} from './seamless-aac-trim';
import {truthy} from './truthy';
export type FilterWithoutPaddingApplied = ProcessedTrack & {
- filter: string;
+ filter: string | null;
actualTrimLeft: number;
};
@@ -41,47 +42,6 @@ const stringifyTrim = (trim: number) => {
return asString;
};
-export const getActualTrimLeft = ({
- asset,
- fps,
- trimLeftOffset,
- seamless,
- assetDuration,
-}: {
- asset: MediaAsset;
- fps: number;
- trimLeftOffset: number;
- seamless: boolean;
- assetDuration: number | null;
-}): {
- trimLeft: number;
- maxTrim: number | null;
-} => {
- const sinceStart = asset.trimLeft - asset.audioStartFrame;
-
- if (!seamless) {
- return {
- trimLeft:
- asset.audioStartFrame / fps +
- (sinceStart / fps) * asset.playbackRate +
- trimLeftOffset,
- maxTrim: assetDuration,
- };
- }
-
- if (seamless) {
- return {
- trimLeft:
- asset.audioStartFrame / fps / asset.playbackRate +
- sinceStart / fps +
- trimLeftOffset,
- maxTrim: assetDuration ? assetDuration / asset.playbackRate : null,
- };
- }
-
- throw new Error('This should never happen');
-};
-
const trimAndSetTempo = ({
assetDuration,
asset,
@@ -108,11 +68,13 @@ const trimAndSetTempo = ({
// It creates a small offset and the offset needs to be the same for all audio tracks, before processing it further.
// This also affects the trimLeft and trimRight values, as they need to be adjusted.
const {trimLeft, maxTrim} = getActualTrimLeft({
- asset,
+ trimLeft: asset.trimLeft,
fps,
trimLeftOffset,
seamless: true,
assetDuration,
+ audioStartFrame: asset.audioStartFrame,
+ playbackRate: asset.playbackRate,
});
const trimRight =
trimLeft + asset.duration / fps - trimLeftOffset + trimRightOffset;
@@ -181,11 +143,13 @@ export const stringifyFfmpegFilter = ({
const startInVideoSeconds = startInVideo / fps;
const {trimLeft, maxTrim} = getActualTrimLeft({
- asset,
+ trimLeft: asset.trimLeft,
fps,
trimLeftOffset,
seamless: forSeamlessAacConcatenation,
assetDuration,
+ audioStartFrame: asset.audioStartFrame,
+ playbackRate: asset.playbackRate,
});
if (maxTrim && trimLeft >= maxTrim) {
diff --git a/packages/renderer/src/take-frame.ts b/packages/renderer/src/take-frame.ts
index d9a0540e9c..3fb6c60539 100644
--- a/packages/renderer/src/take-frame.ts
+++ b/packages/renderer/src/take-frame.ts
@@ -1,6 +1,4 @@
-import type {TRenderAsset} from 'remotion/no-react';
import type {Page} from './browser/BrowserPage';
-import {collectAssets} from './collect-assets';
import type {StillImageFormat, VideoImageFormat} from './image-format';
import {puppeteerEvaluateWithCatch} from './puppeteer-evaluate';
import {screenshot} from './puppeteer-screenshot';
@@ -9,7 +7,6 @@ export const takeFrame = async ({
freePage,
imageFormat,
jpegQuality,
- frame,
width,
height,
output,
@@ -20,22 +17,15 @@ export const takeFrame = async ({
freePage: Page;
imageFormat: VideoImageFormat | StillImageFormat;
jpegQuality: number | undefined;
- frame: number;
height: number;
width: number;
output: string | null;
scale: number;
wantsBuffer: boolean;
timeoutInMilliseconds: number;
-}): Promise<{buffer: Buffer | null; collectedAssets: TRenderAsset[]}> => {
- const collectedAssets = await collectAssets({
- frame,
- freePage,
- timeoutInMilliseconds,
- });
-
+}): Promise<Buffer | null> => {
if (imageFormat === 'none') {
- return {buffer: null, collectedAssets};
+ return null;
}
if (
@@ -75,5 +65,5 @@ export const takeFrame = async ({
scale,
});
- return {buffer: buf, collectedAssets};
+ return buf;
};
diff --git a/packages/video/src/convert-audiodata/combine-audiodata.ts b/packages/video/src/convert-audiodata/combine-audiodata.ts
new file mode 100644
index 0000000000..f3a9109620
--- /dev/null
+++ b/packages/video/src/convert-audiodata/combine-audiodata.ts
@@ -0,0 +1,50 @@
+import type {PcmS16AudioData} from './convert-audiodata';
+
+export const combineAudioDataAndClosePrevious = (
+ audioDataArray: PcmS16AudioData[],
+): PcmS16AudioData => {
+ let numberOfFrames = 0;
+ let numberOfChannels: number | null = null;
+ let sampleRate: number | null = null;
+ const {timestamp} = audioDataArray[0];
+
+ for (const audioData of audioDataArray) {
+ numberOfFrames += audioData.numberOfFrames;
+
+ if (!numberOfChannels) {
+ numberOfChannels = audioData.numberOfChannels;
+ } else if (numberOfChannels !== audioData.numberOfChannels) {
+ throw new Error('Number of channels do not match');
+ }
+
+ if (!sampleRate) {
+ sampleRate = audioData.sampleRate;
+ } else if (sampleRate !== audioData.sampleRate) {
+ throw new Error('Sample rates do not match');
+ }
+ }
+
+ if (!numberOfChannels) {
+ throw new Error('Number of channels is not set');
+ }
+
+ if (!sampleRate) {
+ throw new Error('Sample rate is not set');
+ }
+
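+ // The inputs carry interleaved PCM ([L0, R0, L1, R1, ...]), so
+ // concatenating their buffers back to back yields one contiguous stream
+ // starting at the first input's timestamp.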
+ const arr = new Int16Array(numberOfFrames * numberOfChannels);
+
+ let offset = 0;
+ for (const audioData of audioDataArray) {
+ arr.set(audioData.data, offset);
+ offset += audioData.data.length;
+ }
+
+ return {
+ data: arr,
+ numberOfChannels,
+ numberOfFrames,
+ sampleRate,
+ timestamp,
+ };
+};
diff --git a/packages/video/src/convert-audiodata/convert-audiodata.ts b/packages/video/src/convert-audiodata/convert-audiodata.ts
new file mode 100644
index 0000000000..673334faca
--- /dev/null
+++ b/packages/video/src/convert-audiodata/convert-audiodata.ts
@@ -0,0 +1,95 @@
+import {resampleAudioData} from './resample-audiodata';
+
+export type ConvertAudioDataOptions = {
+ audioData: AudioData;
+ newSampleRate: number;
+ trimStartInSeconds: number;
+ trimEndInSeconds: number;
+ targetNumberOfChannels: number;
+};
+
+const FORMAT: AudioSampleFormat = 's16';
+
+export type PcmS16AudioData = {
+ data: Int16Array;
+ sampleRate: number;
+ numberOfChannels: number;
+ numberOfFrames: number;
+ timestamp: number;
+};
+
+export const convertAudioData = ({
+ audioData,
+ newSampleRate,
+ trimStartInSeconds,
+ trimEndInSeconds,
+ targetNumberOfChannels,
+}: ConvertAudioDataOptions): PcmS16AudioData => {
+ const {
+ numberOfChannels: srcNumberOfChannels,
+ sampleRate: currentSampleRate,
+ numberOfFrames,
+ } = audioData;
+ const ratio = currentSampleRate / newSampleRate;
+
+ const frameOffset = Math.round(trimStartInSeconds * audioData.sampleRate);
+ const frameCount =
+ numberOfFrames -
+ Math.round((trimEndInSeconds + trimStartInSeconds) * audioData.sampleRate);
+
+ const newNumberOfFrames = Math.floor(frameCount / ratio);
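+ // e.g. resampling 44.1 kHz -> 48 kHz with 441 source frames and no
+ // trimming: ratio = 44100 / 48000 = 0.91875 and
+ // newNumberOfFrames = floor(441 / 0.91875) = 480.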
+
+ if (newNumberOfFrames === 0) {
+ throw new Error(
+ 'Cannot resample - the given sample rate would result in less than 1 sample',
+ );
+ }
+
+ if (newSampleRate < 3000 || newSampleRate > 768000) {
+ throw new Error('newSampleRate must be between 3000 and 768000');
+ }
+
+ const srcChannels = new Int16Array(srcNumberOfChannels * frameCount);
+
+ audioData.copyTo(srcChannels, {
+ planeIndex: 0,
+ format: FORMAT,
+ frameOffset,
+ frameCount,
+ });
+
+ const data = new Int16Array(newNumberOfFrames * targetNumberOfChannels);
+ const chunkSize = frameCount / newNumberOfFrames;
+
+ if (
+ newNumberOfFrames === frameCount &&
+ targetNumberOfChannels === srcNumberOfChannels
+ ) {
+ return {
+ data: srcChannels,
+ numberOfChannels: targetNumberOfChannels,
+ numberOfFrames: newNumberOfFrames,
+ sampleRate: newSampleRate,
+ timestamp: audioData.timestamp + trimStartInSeconds * 1_000_000,
+ };
+ }
+
+ resampleAudioData({
+ srcNumberOfChannels,
+ source: srcChannels,
+ destination: data,
+ newNumberOfFrames,
+ chunkSize,
+ });
+
+ const newAudioData = {
+ // Return the resampled buffer, not the untouched source samples.
+ data,
+ numberOfChannels: targetNumberOfChannels,
+ numberOfFrames: newNumberOfFrames,
+ sampleRate: newSampleRate,
+ timestamp: audioData.timestamp + trimStartInSeconds * 1_000_000,
+ };
+
+ return newAudioData;
+};
diff --git a/packages/video/src/convert-audiodata/resample-audiodata.ts b/packages/video/src/convert-audiodata/resample-audiodata.ts
new file mode 100644
index 0000000000..656253f5a7
--- /dev/null
+++ b/packages/video/src/convert-audiodata/resample-audiodata.ts
@@ -0,0 +1,104 @@
+// Remotion exports all videos with 2 channels.
+export const TARGET_NUMBER_OF_CHANNELS = 2;
+
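+// Each output frame averages the chunk of source frames it covers: e.g.
+// downsampling 48 kHz -> 16 kHz gives chunkSize = 3, so output frame 0
+// averages source frames 0-2, output frame 1 averages frames 3-5, and so on.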
+export const resampleAudioData = ({
+ srcNumberOfChannels,
+ source: srcChannels,
+ destination,
+ newNumberOfFrames,
+ chunkSize,
+}: {
+ srcNumberOfChannels: number;
+ source: Int16Array;
+ destination: Int16Array;
+ newNumberOfFrames: number;
+ chunkSize: number;
+}) => {
+ for (
+ let newFrameIndex = 0;
+ newFrameIndex < newNumberOfFrames;
+ newFrameIndex++
+ ) {
+ const start = Math.floor(newFrameIndex * chunkSize);
+ const end = Math.max(Math.floor(start + chunkSize), start + 1);
+
+ const sourceValues = new Array(srcNumberOfChannels).fill(0);
+
+ for (
+ let channelIndex = 0;
+ channelIndex < srcNumberOfChannels;
+ channelIndex++
+ ) {
+ const sampleCountAvg = end - start;
+
+ let itemSum = 0;
+ let itemCount = 0;
+ for (let k = 0; k < sampleCountAvg; k++) {
+ const num =
+ srcChannels[(start + k) * srcNumberOfChannels + channelIndex];
+ itemSum += num;
+ itemCount++;
+ }
+
+ const average = itemSum / itemCount;
+
+ sourceValues[channelIndex] = average;
+ }
+
+ if (TARGET_NUMBER_OF_CHANNELS === srcNumberOfChannels) {
+ for (let i = 0; i < srcNumberOfChannels; i++) {
+ destination[newFrameIndex * srcNumberOfChannels + i] = sourceValues[i];
+ }
+ }
+
+ // The following formulas were taken from Mediabunny's audio resampler:
+ // https://github.com/Vanilagy/mediabunny/blob/b9f7ab2fa2b9167784cbded044d466185308999f/src/conversion.ts
+
+ // Mono to Stereo: M -> L, M -> R
+ if (srcNumberOfChannels === 1) {
+ const m = sourceValues[0];
+ const l = m;
+ const r = m;
+
+ destination[newFrameIndex * 2 + 0] = l;
+ destination[newFrameIndex * 2 + 1] = r;
+ }
+
+ // Quad to Stereo: 0.5 * (L + SL), 0.5 * (R + SR)
+ else if (srcNumberOfChannels === 4) {
+ const l = sourceValues[0];
+ const r = sourceValues[1];
+ const sl = sourceValues[2];
+ const sr = sourceValues[3];
+
+ const l2 = 0.5 * (l + sl);
+ const r2 = 0.5 * (r + sr);
+
+ destination[newFrameIndex * 2 + 0] = l2;
+ destination[newFrameIndex * 2 + 1] = r2;
+ }
+
+ // 5.1 to Stereo: L + sqrt(1/2) * (C + SL), R + sqrt(1/2) * (C + SR)
+ else if (srcNumberOfChannels === 6) {
+ const l = sourceValues[0];
+ const r = sourceValues[1];
+ const c = sourceValues[2];
+ const sl = sourceValues[3];
+ const sr = sourceValues[4];
+
+ const l2 = l + Math.sqrt(1 / 2) * (c + sl);
+ const r2 = r + Math.sqrt(1 / 2) * (c + sr);
+
+ destination[newFrameIndex * 2 + 0] = l2;
+ destination[newFrameIndex * 2 + 1] = r2;
+ }
+
+ // Discrete fallback: direct mapping with zero-fill or drop
+ else {
+ // Map at most TARGET_NUMBER_OF_CHANNELS channels so we never write into
+ // the next frame's slot; extra source channels are dropped.
+ for (
+ let i = 0;
+ i < Math.min(srcNumberOfChannels, TARGET_NUMBER_OF_CHANNELS);
+ i++
+ ) {
+ destination[newFrameIndex * TARGET_NUMBER_OF_CHANNELS + i] =
+ sourceValues[i];
+ }
+ }
+ }
+};
diff --git a/packages/video/src/extract-frame-via-broadcast-channel.ts b/packages/video/src/extract-frame-via-broadcast-channel.ts
index ef46363c97..d355d4c01a 100644
--- a/packages/video/src/extract-frame-via-broadcast-channel.ts
+++ b/packages/video/src/extract-frame-via-broadcast-channel.ts
@@ -1,12 +1,15 @@
-import {extractFrame} from './extract-frame';
+import type {PcmS16AudioData} from './convert-audiodata/convert-audiodata';
+import {extractFrameAndAudio} from './extract-frame';
import type {LogLevel} from './log';
type ExtractFrameRequest = {
type: 'request';
src: string;
timeInSeconds: number;
+ durationInSeconds: number;
id: string;
logLevel: LogLevel;
+ shouldRenderAudio: boolean;
};
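+
+// The main tab owns the media decoders; other tabs post an
+// ExtractFrameRequest on the BroadcastChannel and resolve with the
+// ExtractFrameResponse whose id matches their request.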
type ExtractFrameResponse =
@@ -14,6 +17,7 @@ type ExtractFrameResponse =
type: 'response-success';
id: string;
frame: ImageBitmap | null;
+ audio: PcmS16AudioData | null;
}
| {
type: 'response-error';
@@ -22,38 +26,38 @@ type ExtractFrameResponse =
};
// Doesn't exist in studio
-if (window.remotion_broadcastChannel) {
+if (window.remotion_broadcastChannel && window.remotion_isMainTab) {
window.remotion_broadcastChannel.addEventListener(
'message',
async (event) => {
- if (!window.remotion_isMainTab) {
- // Other tabs will also get this message, but only the main tab should process it
- return;
- }
-
const data = event.data as ExtractFrameRequest;
if (data.type === 'request') {
try {
- const sample = await extractFrame({
+ const {frame, audio} = await extractFrameAndAudio({
src: data.src,
- timestamp: data.timeInSeconds,
+ timeInSeconds: data.timeInSeconds,
logLevel: data.logLevel,
+ durationInSeconds: data.durationInSeconds,
+ shouldRenderAudio: data.shouldRenderAudio,
});
- const frame = sample?.toVideoFrame() ?? null;
- const imageBitmap = frame ? await createImageBitmap(frame) : null;
- if (frame) {
- frame.close();
+ const videoFrame = frame;
+ const imageBitmap = videoFrame
+ ? await createImageBitmap(videoFrame)
+ : null;
+ if (videoFrame) {
+ videoFrame.close();
}
const response: ExtractFrameResponse = {
type: 'response-success',
id: data.id,
frame: imageBitmap,
+ audio,
};
window.remotion_broadcastChannel!.postMessage(response);
- frame?.close();
+ videoFrame?.close();
} catch (error) {
const response: ExtractFrameResponse = {
type: 'response-error',
@@ -72,27 +76,35 @@ if (window.remotion_broadcastChannel) {
export const extractFrameViaBroadcastChannel = async ({
src,
- timestamp,
+ timeInSeconds,
logLevel,
+ durationInSeconds,
+ shouldRenderAudio,
isClientSideRendering,
}: {
src: string;
- timestamp: number;
+ timeInSeconds: number;
+ durationInSeconds: number;
logLevel: LogLevel;
+ shouldRenderAudio: boolean;
isClientSideRendering: boolean;
-}): Promise<VideoFrame | null> => {
+}): Promise<{
+ frame: ImageBitmap | VideoFrame | null;
+ audio: PcmS16AudioData | null;
+}> => {
if (isClientSideRendering || window.remotion_isMainTab) {
- const sample = await extractFrame({
+ const {frame, audio} = await extractFrameAndAudio({
logLevel,
src,
- timestamp,
+ timeInSeconds,
+ durationInSeconds,
+ shouldRenderAudio,
});
- if (!sample) {
- return null;
- }
-
- return sample.toVideoFrame();
+ return {
+ frame,
+ audio,
+ };
}
if (typeof window.remotion_isMainTab === 'undefined') {
@@ -101,7 +113,10 @@ export const extractFrameViaBroadcastChannel = async ({
const requestId = crypto.randomUUID();
- const resolvePromise = new Promise((resolve, reject) => {
+ const resolvePromise = new Promise<{
+ frame: ImageBitmap | null;
+ audio: PcmS16AudioData | null;
+ }>((resolve, reject) => {
const onMessage = (event: MessageEvent) => {
const data = event.data as ExtractFrameResponse;
@@ -110,7 +125,10 @@ export const extractFrameViaBroadcastChannel = async ({
}
if (data.type === 'response-success' && data.id === requestId) {
- resolve(data.frame ? data.frame : null);
+ resolve({
+ frame: data.frame ? data.frame : null,
+ audio: data.audio ? data.audio : null,
+ });
window.remotion_broadcastChannel!.removeEventListener(
'message',
onMessage,
@@ -130,9 +148,11 @@ export const extractFrameViaBroadcastChannel = async ({
const request: ExtractFrameRequest = {
type: 'request',
src,
- timeInSeconds: timestamp,
+ timeInSeconds,
id: requestId,
logLevel,
+ durationInSeconds,
+ shouldRenderAudio,
};
window.remotion_broadcastChannel!.postMessage(request);
@@ -149,7 +169,7 @@ export const extractFrameViaBroadcastChannel = async ({
() => {
reject(
new Error(
- `Timeout while extracting frame ${timestamp} from ${src}`,
+ `Timeout while extracting frame at time ${timeInSeconds}sec from ${src}`,
),
);
},
diff --git a/packages/video/src/extract-frame.ts b/packages/video/src/extract-frame.ts
index 9ca580651f..885e8ba537 100644
--- a/packages/video/src/extract-frame.ts
+++ b/packages/video/src/extract-frame.ts
@@ -1,4 +1,9 @@
-import {getVideoSink, type GetSink} from './get-frames-since-keyframe';
+import type {AudioSample} from 'mediabunny';
+import {combineAudioDataAndClosePrevious} from './convert-audiodata/combine-audiodata';
+import type {PcmS16AudioData} from './convert-audiodata/convert-audiodata';
+import {convertAudioData} from './convert-audiodata/convert-audiodata';
+import {TARGET_NUMBER_OF_CHANNELS} from './convert-audiodata/resample-audiodata';
+import {getSinks, type GetSink} from './get-frames-since-keyframe';
import {makeKeyframeManager} from './keyframe-manager';
import type {LogLevel} from './log';
@@ -7,28 +12,205 @@ const sinkPromise: Record<string, Promise<GetSink>> = {};
export const extractFrame = async ({
src,
- timestamp,
+ timeInSeconds,
logLevel,
}: {
src: string;
- timestamp: number;
+ timeInSeconds: number;
logLevel: LogLevel;
}) => {
if (!sinkPromise[src]) {
- sinkPromise[src] = getVideoSink(src);
+ sinkPromise[src] = getSinks(src);
}
- const {packetSink, videoSampleSink} = await sinkPromise[src];
+ const {video} = await sinkPromise[src];
const keyframeBank = await keyframeManager.requestKeyframeBank({
- packetSink,
- videoSampleSink,
- timestamp,
+ packetSink: video.packetSink,
+ videoSampleSink: video.sampleSink,
+ timestamp: timeInSeconds,
src,
logLevel,
});
- const frame = await keyframeBank.getFrameFromTimestamp(timestamp);
+ const frame = await keyframeBank.getFrameFromTimestamp(timeInSeconds);
return frame;
};
+
+export const extractAudio = async ({
+ src,
+ timeInSeconds,
+ durationInSeconds,
+}: {
+ src: string;
+ timeInSeconds: number;
+ logLevel: LogLevel;
+ durationInSeconds: number;
+}): Promise<PcmS16AudioData | null> => {
+ if (!sinkPromise[src]) {
+ sinkPromise[src] = getSinks(src);
+ }
+
+ const {audio, actualMatroskaTimestamps, isMatroska} = await sinkPromise[src];
+
+ if (audio === null) {
+ return null;
+ }
+
+ // https://discord.com/channels/@me/1409810025844838481/1415028953093111870
+ // Audio frames might have dependencies on previous and next frames so we need to decode a bit more
+ // and then discard it.
+ // The worst case seems to be FLAC files with a 65'535-sample window, which would be 1486.0ms at 44.1 kHz.
+ // So let's set a threshold of 1.5 seconds.
+
+ const extraThreshold = 1.5;
+
+ // Matroska timestamps are not accurate unless we start from the beginning
+ // So for matroska, we need to decode all samples :(
+
+ // https://github.com/Vanilagy/mediabunny/issues/105
+ const sampleIterator = audio.sampleSink.samples(
+ isMatroska ? 0 : Math.max(0, timeInSeconds - extraThreshold),
+ timeInSeconds + durationInSeconds,
+ );
+ const samples: AudioSample[] = [];
+
+ for await (const sample of sampleIterator) {
+ const realTimestamp = actualMatroskaTimestamps.getRealTimestamp(
+ sample.timestamp,
+ );
+
+ if (realTimestamp !== null && realTimestamp !== sample.timestamp) {
+ sample.setTimestamp(realTimestamp);
+ }
+
+ actualMatroskaTimestamps.observeTimestamp(sample.timestamp);
+ actualMatroskaTimestamps.observeTimestamp(
+ sample.timestamp + sample.duration,
+ );
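+
+ // Skip samples that lie entirely outside [timeInSeconds, timeInSeconds +
+ // durationInSeconds); the 1e-10 epsilon absorbs floating-point error in
+ // the container's timestamps.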
+ if (sample.timestamp + sample.duration - 0.0000000001 <= timeInSeconds) {
+ continue;
+ }
+
+ if (sample.timestamp >= timeInSeconds + durationInSeconds - 0.0000000001) {
+ continue;
+ }
+
+ samples.push(sample);
+ }
+
+ const audioDataArray: PcmS16AudioData[] = [];
+ for (let i = 0; i < samples.length; i++) {
+ const sample = samples[i];
+
+ // Less than 1 sample would be included - we did not need it after all!
+ if (
+ Math.abs(sample.timestamp - (timeInSeconds + durationInSeconds)) *
+ sample.sampleRate <
+ 1
+ ) {
+ sample.close();
+ continue;
+ }
+
+ // Less than 1 sample would be included - we did not need it after all!
+ if (sample.timestamp + sample.duration <= timeInSeconds) {
+ sample.close();
+ continue;
+ }
+
+ const isFirstSample = i === 0;
+ const isLastSample = i === samples.length - 1;
+
+ const audioDataRaw = sample.toAudioData();
+
+ // amount of samples to shave from start and end
+ let trimStartInSeconds = 0;
+ let trimEndInSeconds = 0;
+
+ // TODO: Apply volume
+ // TODO: Apply playback rate
+ // TODO: Apply tone frequency
+
+ if (isFirstSample) {
+ trimStartInSeconds = timeInSeconds - sample.timestamp;
+ }
+
+ if (isLastSample) {
+ trimEndInSeconds =
+ // clamp to 0 in case the audio ends early
+ Math.max(
+ 0,
+ sample.timestamp +
+ sample.duration -
+ (timeInSeconds + durationInSeconds),
+ );
+ }
+
+ const audioData = convertAudioData({
+ audioData: audioDataRaw,
+ newSampleRate: 48000,
+ trimStartInSeconds,
+ trimEndInSeconds,
+ targetNumberOfChannels: TARGET_NUMBER_OF_CHANNELS,
+ });
+ audioDataRaw.close();
+
+ if (audioData.numberOfFrames === 0) {
+ sample.close();
+
+ continue;
+ }
+
+ audioDataArray.push(audioData);
+
+ sample.close();
+ }
+
+ if (audioDataArray.length === 0) {
+ return null;
+ }
+
+ const combined = combineAudioDataAndClosePrevious(audioDataArray);
+
+ return combined;
+};
+
+export const extractFrameAndAudio = async ({
+ src,
+ timeInSeconds,
+ logLevel,
+ durationInSeconds,
+ shouldRenderAudio,
+}: {
+ src: string;
+ timeInSeconds: number;
+ logLevel: LogLevel;
+ durationInSeconds: number;
+ shouldRenderAudio: boolean;
+}): Promise<{
+ frame: VideoFrame | null;
+ audio: PcmS16AudioData | null;
+}> => {
+ const [frame, audio] = await Promise.all([
+ extractFrame({
+ src,
+ timeInSeconds,
+ logLevel,
+ }),
+ shouldRenderAudio
+ ? extractAudio({
+ src,
+ timeInSeconds,
+ logLevel,
+ durationInSeconds,
+ })
+ : null,
+ ]);
+
+ return {
+ frame: frame?.toVideoFrame() ?? null,
+ audio,
+ };
+};
diff --git a/packages/video/src/get-frames-since-keyframe.ts b/packages/video/src/get-frames-since-keyframe.ts
index 6bca38382c..a3d4a0e797 100644
--- a/packages/video/src/get-frames-since-keyframe.ts
+++ b/packages/video/src/get-frames-since-keyframe.ts
@@ -1,31 +1,48 @@
import type {EncodedPacket} from 'mediabunny';
import {
ALL_FORMATS,
+ AudioSampleSink,
EncodedPacketSink,
Input,
+ MATROSKA,
UrlSource,
VideoSampleSink,
} from 'mediabunny';
import {makeKeyframeBank} from './keyframe-bank';
+import {rememberActualMatroskaTimestamps} from './remember-actual-matroska-timestamps';
-export const getVideoSink = async (src: string) => {
+export const getSinks = async (src: string) => {
const input = new Input({
formats: ALL_FORMATS,
source: new UrlSource(src),
});
- const track = await input.getPrimaryVideoTrack();
- if (!track) {
+ const format = await input.getFormat();
+
+ const videoTrack = await input.getPrimaryVideoTrack();
+ if (!videoTrack) {
throw new Error(`No video track found for ${src}`);
}
- const videoSampleSink = new VideoSampleSink(track);
- const packetSink = new EncodedPacketSink(track);
+ const audioTrack = await input.getPrimaryAudioTrack();
+ const isMatroska = format === MATROSKA;
- return {videoSampleSink, packetSink};
+ return {
+ video: {
+ sampleSink: new VideoSampleSink(videoTrack),
+ packetSink: new EncodedPacketSink(videoTrack),
+ },
+ audio: audioTrack
+ ? {
+ sampleSink: new AudioSampleSink(audioTrack),
+ }
+ : null,
+ actualMatroskaTimestamps: rememberActualMatroskaTimestamps(isMatroska),
+ isMatroska,
+ };
};
-export type GetSink = Awaited<ReturnType<typeof getVideoSink>>;
+export type GetSink = Awaited<ReturnType<typeof getSinks>>;
export const getFramesSinceKeyframe = async ({
packetSink,
diff --git a/packages/video/src/keyframe-bank.ts b/packages/video/src/keyframe-bank.ts
index 90842f670e..aad1f5a0ee 100644
--- a/packages/video/src/keyframe-bank.ts
+++ b/packages/video/src/keyframe-bank.ts
@@ -128,16 +128,11 @@ export const makeKeyframeBank = ({
const prepareForDeletion = async () => {
// Cleanup frames that have been extracted that might not have been retrieved yet
- const {value, done} = await sampleIterator.return();
+ const {value} = await sampleIterator.return();
if (value) {
value.close();
}
- Log.verbose(
- 'verbose',
- `Closed sample iterator ${Boolean(value)}, was done?${done}`,
- );
-
for (const frameTimestamp of frameTimestamps) {
if (!frames[frameTimestamp]) {
continue;
@@ -161,6 +156,13 @@ export const makeKeyframeBank = ({
src: string;
}) => {
for (const frameTimestamp of frameTimestamps) {
+ const isLast =
+ frameTimestamp === frameTimestamps[frameTimestamps.length - 1];
+ // Don't delete the last frame, since it may be the last one in the video!
+ if (isLast) {
+ continue;
+ }
+
if (frameTimestamp < timestampInSeconds) {
if (!frames[frameTimestamp]) {
continue;
@@ -169,7 +171,10 @@ export const makeKeyframeBank = ({
frames[frameTimestamp].close();
delete frames[frameTimestamp];
framesOpen--;
- Log.verbose(logLevel, `Deleted frame ${frameTimestamp} for src ${src}`);
+ Log.verbose(
+ logLevel,
+ `[NewVideo] Deleted frame ${frameTimestamp} for src ${src}`,
+ );
}
}
};
@@ -189,7 +194,7 @@ export const makeKeyframeBank = ({
if (allocationSize === 0) {
Log.verbose(
'verbose',
- `Frame ${frame.timestamp} has allocation size! ${allocationSize}`,
+ `[NewVideo] Frame ${frame.timestamp} has allocation size! ${allocationSize}`,
);
}
diff --git a/packages/video/src/keyframe-manager.ts b/packages/video/src/keyframe-manager.ts
index 228f26925d..2446ae7968 100644
--- a/packages/video/src/keyframe-manager.ts
+++ b/packages/video/src/keyframe-manager.ts
@@ -37,14 +37,14 @@ export const makeKeyframeManager = () => {
Log.verbose(
logLevel,
- `Open frames for src ${src}: ${timestamps.join(', ')}, ${allocationSizes.join(', ')}`,
+ `[NewVideo] Open frames for src ${src}: ${timestamps.join(', ')}, ${allocationSizes.join(', ')}`,
);
}
}
Log.verbose(
logLevel,
- `Cache stats: ${count} open frames, ${totalSize} bytes, actually open: ${framesOpen}`,
+ `[NewVideo] Cache stats: ${count} open frames, ${totalSize} bytes, actually open: ${framesOpen}`,
);
};
@@ -77,7 +77,7 @@ export const makeKeyframeManager = () => {
await bank.prepareForDeletion();
Log.verbose(
logLevel,
- `Cleared frames for src ${src} from ${bank.startTimestampInSeconds}sec to ${bank.endTimestampInSeconds}sec`,
+ `[NewVideo] Cleared frames for src ${src} from ${bank.startTimestampInSeconds}sec to ${bank.endTimestampInSeconds}sec`,
);
delete sources[src][startTimeInSeconds as unknown as number];
} else {
@@ -134,7 +134,10 @@ export const makeKeyframeManager = () => {
return existingBank;
}
- Log.verbose(logLevel, `Bank exists but frames have already been evicted!`);
+ Log.verbose(
+ logLevel,
+ `[NewVideo] Bank exists but frames have already been evicted!`,
+ );
// Bank exists but frames have already been evicted!
// First delete it entirely
diff --git a/packages/video/src/new-video-for-rendering.tsx b/packages/video/src/new-video-for-rendering.tsx
index 603860025d..5d36a801a0 100644
--- a/packages/video/src/new-video-for-rendering.tsx
+++ b/packages/video/src/new-video-for-rendering.tsx
@@ -1,14 +1,13 @@
import React, {
useContext,
- useEffect,
useLayoutEffect,
useMemo,
useRef,
+ useState,
} from 'react';
import {
cancelRender,
Internals,
- random,
useCurrentFrame,
useDelayRender,
useRemotionEnvironment,
@@ -16,54 +15,31 @@ import {
import {extractFrameViaBroadcastChannel} from './extract-frame-via-broadcast-channel';
import type {NewVideoProps} from './props';
-const {
- useUnsafeVideoConfig,
- SequenceContext,
- useFrameForVolumeProp,
- useTimelinePosition,
- getAbsoluteSrc,
- RenderAssetManager,
- evaluateVolume,
-} = Internals;
-
export const NewVideoForRendering: React.FC<NewVideoProps> = ({
volume: volumeProp,
playbackRate,
src,
muted,
- toneFrequency,
loopVolumeCurveBehavior,
delayRenderRetries,
delayRenderTimeoutInMilliseconds,
// called when a frame of the video has been drawn on the canvas
onVideoFrame,
- audioStreamIndex,
- logLevel,
+ logLevel = window.remotion_logLevel,
}) => {
- const absoluteFrame = useTimelinePosition();
- const videoConfig = useUnsafeVideoConfig();
- const sequenceContext = useContext(SequenceContext);
+ const absoluteFrame = Internals.useTimelinePosition();
+ const videoConfig = Internals.useUnsafeVideoConfig();
const canvasRef = useRef<HTMLCanvasElement>(null);
- const {registerRenderAsset, unregisterRenderAsset} =
- useContext(RenderAssetManager);
+ const {registerRenderAsset, unregisterRenderAsset} = useContext(
+ Internals.RenderAssetManager,
+ );
const frame = useCurrentFrame();
- const volumePropsFrame = useFrameForVolumeProp(
+ const volumePropsFrame = Internals.useFrameForVolumeProp(
loopVolumeCurveBehavior ?? 'repeat',
);
const environment = useRemotionEnvironment();
- const id = useMemo(
- () =>
- `newvideo-${random(
- src ?? '',
- )}-${sequenceContext?.cumulatedFrom}-${sequenceContext?.relativeFrom}-${sequenceContext?.durationInFrames}`,
- [
- src,
- sequenceContext?.cumulatedFrom,
- sequenceContext?.relativeFrom,
- sequenceContext?.durationInFrames,
- ],
- );
+ const [id] = useState(() => `${Math.random()}`.replace('0.', ''));
if (!videoConfig) {
throw new Error('No video config found');
@@ -73,7 +49,7 @@ export const NewVideoForRendering: React.FC = ({
throw new TypeError('No `src` was passed to <NewVideo>.');
}
- const volume = evaluateVolume({
+ const volume = Internals.evaluateVolume({
volume: volumeProp,
frame: volumePropsFrame,
mediaVolume: 1,
@@ -81,51 +57,21 @@ export const NewVideoForRendering: React.FC = ({
Internals.warnAboutTooHighVolume(volume);
- useEffect(() => {
- if (!src) {
- throw new Error('No src passed');
- }
-
+ const shouldRenderAudio = useMemo(() => {
if (!window.remotion_audioEnabled) {
- return;
+ return false;
}
if (muted) {
- return;
+ return false;
}
if (volume <= 0) {
- return;
+ return false;
}
- registerRenderAsset({
- type: 'video',
- src: getAbsoluteSrc(src),
- id,
- frame: absoluteFrame,
- volume,
- mediaFrame: frame,
- playbackRate: playbackRate ?? 1,
- toneFrequency: toneFrequency ?? null,
- audioStartFrame: Math.max(0, -(sequenceContext?.relativeFrom ?? 0)),
- audioStreamIndex: audioStreamIndex ?? 0,
- });
-
- return () => unregisterRenderAsset(id);
- }, [
- muted,
- src,
- registerRenderAsset,
- id,
- unregisterRenderAsset,
- volume,
- frame,
- absoluteFrame,
- playbackRate,
- toneFrequency,
- sequenceContext?.relativeFrom,
- audioStreamIndex,
- ]);
+ return true;
+ }, [muted, volume]);
const {fps} = videoConfig;
@@ -138,19 +84,22 @@ export const NewVideoForRendering: React.FC = ({
const actualFps = playbackRate ? fps / playbackRate : fps;
const timestamp = frame / actualFps;
+ const durationInSeconds = 1 / actualFps;
- const newHandle = delayRender(`extracting frame number ${frame}`, {
+ const newHandle = delayRender(`Extracting frame number ${frame}`, {
retries: delayRenderRetries ?? undefined,
timeoutInMilliseconds: delayRenderTimeoutInMilliseconds ?? undefined,
});
extractFrameViaBroadcastChannel({
src,
- timestamp,
+ timeInSeconds: timestamp,
+ durationInSeconds,
logLevel: logLevel ?? 'info',
+ shouldRenderAudio,
isClientSideRendering: environment.isClientSideRendering,
})
- .then((imageBitmap) => {
+ .then(({frame: imageBitmap, audio}) => {
if (!imageBitmap) {
cancelRender(new Error('No video frame found'));
}
@@ -159,6 +108,19 @@ export const NewVideoForRendering: React.FC = ({
canvasRef.current?.getContext('2d')?.drawImage(imageBitmap, 0, 0);
imageBitmap.close();
+ if (audio) {
+ registerRenderAsset({
+ type: 'inline-audio',
+ id,
+ audio: Array.from(audio.data),
+ sampleRate: audio.sampleRate,
+ numberOfChannels: audio.numberOfChannels,
+ frame: absoluteFrame,
+ timestamp: audio.timestamp,
+ duration: (audio.numberOfFrames / audio.sampleRate) * 1_000_000,
+ });
+ }
+
continueRender(newHandle);
})
.catch((error) => {
@@ -167,19 +129,25 @@ export const NewVideoForRendering: React.FC = ({
return () => {
continueRender(newHandle);
+ unregisterRenderAsset(id);
};
}, [
+ absoluteFrame,
+ continueRender,
+ delayRender,
delayRenderRetries,
delayRenderTimeoutInMilliseconds,
+ environment.isClientSideRendering,
fps,
frame,
+ id,
+ logLevel,
onVideoFrame,
playbackRate,
+ registerRenderAsset,
+ shouldRenderAudio,
src,
- logLevel,
- environment.isClientSideRendering,
- delayRender,
- continueRender,
+ unregisterRenderAsset,
]);
return (
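The render asset registered above conforms to the `InlineAudioAsset` type introduced in `CompositionManager.tsx`; here is a standalone sketch with illustrative values, assuming the `remotion/no-react` entry point re-exports the type as in this change:

```ts
import type {InlineAudioAsset} from 'remotion/no-react';

// Illustrative values only: one stereo chunk of 1024 audio frames at 48 kHz.
const asset: InlineAudioAsset = {
	type: 'inline-audio',
	id: 'example-id',
	audio: Array.from(new Float32Array(1024 * 2)), // interleaved PCM as number[]
	frame: 0,
	sampleRate: 48000,
	numberOfChannels: 2,
	timestamp: 0,
	duration: (1024 / 48000) * 1_000_000, // microseconds, as computed above
};
```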
diff --git a/packages/video/src/props.ts b/packages/video/src/props.ts
index 38b3eb3e2a..621cd906d3 100644
--- a/packages/video/src/props.ts
+++ b/packages/video/src/props.ts
@@ -8,7 +8,6 @@ export type NewVideoProps = {
volume?: VolumeProp;
loopVolumeCurveBehavior?: LoopVolumeCurveBehavior;
name?: string;
- toneFrequency?: number;
pauseWhenBuffering?: boolean;
showInTimeline?: boolean;
onAutoPlayError?: null | (() => void);
@@ -26,6 +25,5 @@ export type NewVideoProps = {
* @deprecated For internal use only
*/
stack?: string;
- audioStreamIndex?: number;
logLevel?: LogLevel;
};
diff --git a/packages/video/src/remember-actual-matroska-timestamps.ts b/packages/video/src/remember-actual-matroska-timestamps.ts
new file mode 100644
index 0000000000..020402fab7
--- /dev/null
+++ b/packages/video/src/remember-actual-matroska-timestamps.ts
@@ -0,0 +1,28 @@
+export const rememberActualMatroskaTimestamps = (isMatroska: boolean) => {
+ const observations: number[] = [];
+
+ const observeTimestamp = (startTime: number) => {
+ if (!isMatroska) {
+ return;
+ }
+
+ observations.push(startTime);
+ };
+
+ const getRealTimestamp = (observedTimestamp: number) => {
+ if (!isMatroska) {
+ return observedTimestamp;
+ }
+
+ return (
+ observations.find(
+ (observation) => Math.abs(observedTimestamp - observation) < 0.001,
+ ) ?? null
+ );
+ };
+
+ return {
+ observeTimestamp,
+ getRealTimestamp,
+ };
+};
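A short usage sketch of the new helper (values illustrative): timestamps observed while decoding Matroska samples can later be matched back within the 1 ms tolerance, while non-Matroska inputs pass through unchanged.

```ts
const tracker = rememberActualMatroskaTimestamps(true);

// Record the real timestamp of each decoded sample...
tracker.observeTimestamp(0.4166);

// ...then resolve a requested timestamp to the nearest observation (±1ms).
tracker.getRealTimestamp(0.41663); // -> 0.4166
tracker.getRealTimestamp(0.5); // -> null, no observation within tolerance

// For non-Matroska inputs the helper is a pass-through:
rememberActualMatroskaTimestamps(false).getRealTimestamp(0.5); // -> 0.5
```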