diff --git a/.github/workflows/smoketest.yml b/.github/workflows/smoketest.yml
index 083b48584..69b20d492 100644
--- a/.github/workflows/smoketest.yml
+++ b/.github/workflows/smoketest.yml
@@ -131,19 +131,6 @@ jobs:
           apiToken: ${{ secrets.CLOUDFLARE_API_TOKEN }}
           command: d1 migrations apply rail-announcements
 
-      - name: Setup Rclone
-        uses: AnimMouse/setup-rclone@v1
-        with:
-          rclone_config: ${{ secrets.RCLONE_CONFIG }}
-
-      - name: Upload audio files to Cloudflare R2
-        # If it's a PR, only run if the PR head is not a fork
-        if: ${{ github.event_name != 'pull_request' || !github.event.pull_request.head.repo.fork }}
-        run: |
-          rclone --fast-list -v --transfers=128 --checkers=256 --progress copy "./audio" "Cloudflare R2:/rail-announcements-audio"
-        env:
-          RCLONE_CONFIG_PASS: ${{ secrets.RCLONE_CONFIG_PASSWORD }}
-
       - name: Publish to Cloudflare Pages
         uses: cloudflare/pages-action@1
         if: ${{ github.event_name != 'pull_request_target' || contains(github.event.label.name, '🚀 request deploy') }}
diff --git a/.github/workflows/upload_audio_to_r2.yml b/.github/workflows/upload_audio_to_r2.yml
new file mode 100644
index 000000000..a4ac7425e
--- /dev/null
+++ b/.github/workflows/upload_audio_to_r2.yml
@@ -0,0 +1,56 @@
+name: Upload Audio to R2
+
+on:
+  push:
+    branches:
+      - main
+      - deploy
+    paths:
+      - 'audio/**'
+  # Triggers the workflow on pull request events, but only for pull requests from non-forked repos
+  pull_request:
+    types:
+      - opened
+      - synchronize
+    paths:
+      - 'audio/**'
+
+permissions:
+  # default contents: read & write (in forked repos, only read)
+  contents: write
+  # default deployments: read & write (in forked repos, only read)
+  deployments: write
+  # default pull-requests: read & write (in forked repos, only read)
+  pull-requests: write
+
+env:
+  NODE_VERSION: 20.x
+  CF_PAGES_PROJECT_NAME: rail-announcements
+
+jobs:
+  upload-audio:
+    runs-on: ubuntu-latest
+    name: Upload Audio to Cloudflare R2
+
+    concurrency:
+      group: ${{ github.workflow }}-${{ github.event.pull_request.head.ref || github.ref }}
+      cancel-in-progress: true
+
+    steps:
+      - name: Checkout code
+        uses: actions/checkout@v4
+        with:
+          ref: ${{ github.event.pull_request.head.sha || github.ref }}
+
+      - name: Setup Rclone
+        uses: AnimMouse/setup-rclone@v1
+        with:
+          rclone_config: ${{ secrets.RCLONE_CONFIG }}
+
+      - name: Upload audio files to Cloudflare R2
+        # If it's a PR, only run if the PR head is not a fork
+        if: ${{ github.event_name != 'pull_request' || !github.event.pull_request.head.repo.fork }}
+        run: |
+          rclone --fast-list -v --transfers=128 --checkers=256 --progress copy "./audio" "Cloudflare R2:/rail-announcements-audio"
+        env:
+          RCLONE_CONFIG_PASS: ${{ secrets.RCLONE_CONFIG_PASSWORD }}
diff --git a/gatsby-node.ts b/gatsby-node.ts
index 3f1bed6e9..a5555b3ac 100644
--- a/gatsby-node.ts
+++ b/gatsby-node.ts
@@ -7,7 +7,7 @@ export const onCreateWebpackConfig: GatsbyNode['onCreateWebpackConfig'] = ({ sta
     module: {
       rules: [
         {
-          test: /crunker/,
+          test: /crunker/g,
           use: loaders.null(),
         },
       ],
diff --git a/src/announcement-data/AnnouncementSystem.ts b/src/announcement-data/AnnouncementSystem.ts
index 370df819a..e5f92f5e5 100644
--- a/src/announcement-data/AnnouncementSystem.ts
+++ b/src/announcement-data/AnnouncementSystem.ts
@@ -1,4 +1,4 @@
-import Crunker from 'crunker'
+import Crunker from '../helpers/crunker'
 
 import type { ICustomAnnouncementPaneProps } from '@components/PanelPanes/CustomAnnouncementPane'
 import type { ICustomButtonPaneProps } from '@components/PanelPanes/CustomButtonPane'
@@ -170,6 +170,12 @@ export default abstract class AnnouncementSystem {
       return
     }
 
+    window.Crunker = Crunker
+
+    if ('audioSession' in window.navigator) {
+      window.navigator.audioSession.type = 'playback'
+    }
+
     window.__audio = fileIds
 
     console.info('Playing audio files:', fileIds)
@@ -186,7 +192,6 @@ export default abstract class AnnouncementSystem {
 
     if (audio.numberOfChannels > 1) {
       // This is stereo. We need to mux it to mono.
-      audio.copyToChannel(audio.getChannelData(0), 1, 0)
     }
 
@@ -194,12 +199,48 @@ export default abstract class AnnouncementSystem {
       crunker.download(crunker.export(audio, 'audio/wav').blob, 'announcement')
       window.__audio = undefined
     } else {
-      const source = crunker.play(audio)
-
-      return new Promise(resolve => {
-        source.addEventListener('ended', () => {
-          window.__audio = undefined
-          resolve()
+      return new Promise<void>(resolve => {
+        crunker.play(audio, source => {
+          console.log('[Crunker] About to play audio...')
+          crunker._context.onstatechange = () => console.log('state changed to: ', crunker._context.state)
+          console.log('Context state: ', crunker._context.state)
+
+          if (crunker._context.state === 'suspended') {
+            console.log('[Crunker] Resuming audio context')
+            crunker._context.resume()
+            console.log('Context state: ', crunker._context.state)
+
+            if (crunker._context.state === 'suspended') {
+              console.error('[Crunker] Failed to resume audio context')
+
+              document.getElementById('resume-audio-button')?.remove()
+
+              const button = document.createElement('button')
+              button.textContent = 'Resume audio'
+              button.id = 'resume-audio-button'
+              button.style.margin = '16px'
+              button.onclick = () => {
+                crunker._context.resume()
+                button.remove()
+              }
+
+              const container = document.getElementById('resume-audio-container')
+              if (container) container.appendChild(button)
+              else document.body.appendChild(button)
+
+              alert(
+                "Your device or web browser is refusing to let the website play audio.\n\nThis is especially common on iPhones and iPads. We'd recommend you try using a desktop computer or an alternative device.\n\nTry scrolling to and pressing the 'Resume audio' button. If this doesn't help, there's nothing else that we can do. Sorry!",
+              )
+
+              button.scrollIntoView()
+            }
+          }
+
+          source.addEventListener('ended', () => {
+            console.log('[Crunker] Finished playing audio')
+            window.__audio = undefined
+            resolve()
+          })
         })
       })
     }
diff --git a/src/components/AmeyLiveTrainAnnouncements.tsx b/src/components/AmeyLiveTrainAnnouncements.tsx
index e3e4850e5..24e789bd0 100644
--- a/src/components/AmeyLiveTrainAnnouncements.tsx
+++ b/src/components/AmeyLiveTrainAnnouncements.tsx
@@ -388,7 +388,26 @@ export function LiveTrainAnnouncements({
     false,
     x => x === true || x === false,
   )
-  const [isPlaying, setIsPlaying] = useState(false)
+  const [isPlaying, _setIsPlaying] = useState(false)
+  const setIsPlaying = useCallback(
+    function setIsPlaying(val: boolean) {
+      console.log(`Setting isPlaying to ${val}`)
+
+      _setIsPlaying(val)
+    },
+    [_setIsPlaying],
+  )
+  const setIsPlayingAfter = useCallback(
+    function setIsPlayingAfter(val: boolean, timeout: number) {
+      console.log(`Setting isPlaying to ${val} after ${timeout}ms`)
+
+      setTimeout(() => {
+        setIsPlaying(val)
+      }, timeout)
+    },
+    [setIsPlaying],
+  )
+
   const [enabledAnnouncements, setEnabledAnnouncements] = useStateWithLocalStorage<AnnouncementType[]>('amey.live-trains.announcement-types', [
     AnnouncementType.Next,
     AnnouncementType.Approaching,
@@ -590,15 +609,26 @@ export function LiveTrainAnnouncements({
           )})`,
         )
         await standingTrainHandler[systemKey](options)
+        console.log(`[Live Trains] Announcement for ${train.rid} complete: waiting 5s until next`)
+        setIsPlayingAfter(false, 5000)
       } catch (e) {
         console.warn(`[Live Trains] Error playing announcement for ${train.rid}; see below`)
         console.error(e)
-      } finally {
-        console.log(`[Live Trains] Announcement for ${train.rid} complete: waiting 5s until next`)
-        setTimeout(() => setIsPlaying(false), 5000)
+        setIsPlaying(false)
       }
     },
-    [markNextTrainAnnounced, systems, setIsPlaying, standingTrainHandler, selectedCrs, getStation, addLog, useLegacyTocNames, announceViaPoints],
+    [
+      markNextTrainAnnounced,
+      systems,
+      setIsPlaying,
+      standingTrainHandler,
+      selectedCrs,
+      getStation,
+      addLog,
+      useLegacyTocNames,
+      announceViaPoints,
+      setIsPlayingAfter,
+    ],
   )
 
   const announceApproachingTrain = useCallback(
@@ -655,12 +685,12 @@ export function LiveTrainAnnouncements({
           )})`,
         )
         await approachingTrainHandler[systemKey](options)
+        console.log(`[Live Trains] Announcement for ${train.rid} complete: waiting 5s until next`)
+        setIsPlayingAfter(false, 5000)
       } catch (e) {
         console.warn(`[Live Trains] Error playing announcement for ${train.rid}; see below`)
         console.error(e)
-      } finally {
-        console.log(`[Live Trains] Announcement for ${train.rid} complete: waiting 5s until next`)
-        setTimeout(() => setIsPlaying(false), 5000)
+        setIsPlaying(false)
       }
     },
     [
@@ -673,6 +703,7 @@ export function LiveTrainAnnouncements({
       useLegacyTocNames,
       chimeType,
       announceViaPoints,
+      setIsPlayingAfter,
     ],
   )
@@ -732,15 +763,26 @@ export function LiveTrainAnnouncements({
           )})`,
         )
         await nextTrainHandler[systemKey](options)
+        console.log(`[Live Trains] Announcement for ${train.rid} complete: waiting 5s until next`)
+        setIsPlayingAfter(false, 5000)
       } catch (e) {
         console.warn(`[Live Trains] Error playing announcement for ${train.rid}; see below`)
        console.error(e)
-      } finally {
-        console.log(`[Live Trains] Announcement for ${train.rid} complete: waiting 5s until next`)
-        setTimeout(() => setIsPlaying(false), 5000)
+        setIsPlaying(false)
       }
     },
-    [markNextTrainAnnounced, systems, setIsPlaying, nextTrainHandler, getStation, addLog, useLegacyTocNames, chimeType, announceViaPoints],
+    [
+      markNextTrainAnnounced,
+      systems,
+      setIsPlaying,
+      nextTrainHandler,
+      getStation,
+      addLog,
+      useLegacyTocNames,
+      chimeType,
+      announceViaPoints,
+      setIsPlayingAfter,
+    ],
   )
 
   const announceDisruptedTrain = useCallback(
@@ -806,6 +848,8 @@ export function LiveTrainAnnouncements({
           )})`,
         )
         await disruptedTrainHandler[systemKey](options)
+        console.log(`[Live Trains] Announcement for ${train.rid} complete: waiting 5s until next`)
+        setIsPlayingAfter(false, 5000)
       } catch (e) {
         console.warn(`[Live Trains] Error playing announcement for ${train.rid}; see below`)
@@ -820,19 +864,30 @@ export function LiveTrainAnnouncements({
             )})`,
           )
           await disruptedTrainHandler[systemKey](options2)
+          console.log(`[Live Trains] Announcement for ${train.rid} complete: waiting 5s until next`)
+          setIsPlayingAfter(false, 5000)
         } catch (e) {
           console.warn(`[Live Trains] Error playing announcement for ${train.rid}; see below`)
           console.error(e)
+          setIsPlaying(false)
         }
+      } else {
+        console.error(e)
+        setIsPlaying(false)
       }
-
-      console.error(e)
-    } finally {
-      console.log(`[Live Trains] Announcement for ${train.rid} complete: waiting 5s until next`)
-      setTimeout(() => setIsPlaying(false), 5000)
     }
   },
-    [markDisruptedTrainAnnounced, systems, setIsPlaying, disruptedTrainHandler, addLog, useLegacyTocNames, chimeType, announceViaPoints],
+    [
+      markDisruptedTrainAnnounced,
+      systems,
+      setIsPlaying,
+      disruptedTrainHandler,
+      addLog,
+      useLegacyTocNames,
+      chimeType,
+      announceViaPoints,
+      setIsPlayingAfter,
+    ],
   )
 
   useEffect(() => {
@@ -866,7 +921,7 @@ export function LiveTrainAnnouncements({
 
       try {
         const resp = await fetch(
-          process.env.NODE_ENV === 'development' ? `http://localhost:8787/api/get-services?${params}` : `/api/get-services?${params}`,
+          process.env.NODE_ENV === 'development' ? `http://local.davw.network:8787/api/get-services?${params}` : `/api/get-services?${params}`,
        )
 
         if (!resp.ok) {
@@ -1611,6 +1666,8 @@ export function LiveTrainAnnouncements({
         />
+
+        <div id="resume-audio-container" />
diff --git a/src/helpers/crunker.ts b/src/helpers/crunker.ts
new file mode 100644
--- /dev/null
+++ b/src/helpers/crunker.ts
+  constructor({ sampleRate, concurrentNetworkRequests = 200 }: Partial<CrunkerConstructorOptions> = {}) {
+    this._context = this._createContext(sampleRate)
+
+    sampleRate ||= this._context.sampleRate
+
+    this._sampleRate = sampleRate
+    this._concurrentNetworkRequests = concurrentNetworkRequests
+  }
+
+  /**
+   * Creates Crunker's internal AudioContext.
+   *
+   * @internal
+   */
+  private _createContext(sampleRate: number = 44_100): AudioContext {
+    window.AudioContext = window.AudioContext || (window as any).webkitAudioContext || (window as any).mozAudioContext
+    return new AudioContext({ sampleRate })
+  }
+
+  /**
+   * The internal AudioContext used by Crunker.
+   */
+  get context(): AudioContext {
+    return this._context
+  }
+
+  /**
+   * Asynchronously fetches multiple audio files and returns an array of AudioBuffers.
+   *
+   * Network requests are batched, and the size of these batches can be configured with the `concurrentNetworkRequests` option in the Crunker constructor.
+   */
+  async fetchAudio(...filepaths: CrunkerInputTypes[]): Promise<AudioBuffer[]> {
+    const buffers: AudioBuffer[] = []
+    const groups = Math.ceil(filepaths.length / this._concurrentNetworkRequests)
+
+    for (let i = 0; i < groups; i++) {
+      const group = filepaths.slice(i * this._concurrentNetworkRequests, (i + 1) * this._concurrentNetworkRequests)
+      buffers.push(...(await this._fetchAudio(...group)))
+    }
+
+    return buffers
+  }
+
+  /**
+   * Asynchronously fetches multiple audio files and returns an array of AudioBuffers.
+   */
+  private async _fetchAudio(...filepaths: CrunkerInputTypes[]): Promise<AudioBuffer[]> {
+    return await Promise.all(
+      filepaths.map(async filepath => {
+        let buffer: ArrayBuffer
+
+        if (filepath instanceof File || filepath instanceof Blob) {
+          buffer = await filepath.arrayBuffer()
+        } else {
+          buffer = await fetch(filepath).then(response => {
+            if (response.headers.has('Content-Type') && !response.headers.get('Content-Type')!.includes('audio/')) {
+              console.warn(
+                `Crunker: Attempted to fetch an audio file, but its MIME type is \`${
+                  response.headers.get('Content-Type')!.split(';')[0]
+                }\`. We'll try and continue anyway. (file: "${filepath}")`,
+              )
+            }
+
+            return response.arrayBuffer()
+          })
+        }
+
+        return await this._context.decodeAudioData(buffer)
+      }),
+    )
+  }
+
+  /**
+   * Merges (layers) multiple AudioBuffers into a single AudioBuffer.
+   *
+   * **Visual representation:**
+   *
+   * ![](https://user-images.githubusercontent.com/12958674/88806278-968f0680-d186-11ea-9cb5-8ef2606ffcc7.png)
+   */
+  mergeAudio(buffers: AudioBuffer[]): AudioBuffer {
+    const output = this._context.createBuffer(
+      this._maxNumberOfChannels(buffers),
+      this._sampleRate * this._maxDuration(buffers),
+      this._sampleRate,
+    )
+
+    buffers.forEach(buffer => {
+      for (let channelNumber = 0; channelNumber < buffer.numberOfChannels; channelNumber++) {
+        const outputData = output.getChannelData(channelNumber)
+        const bufferData = buffer.getChannelData(channelNumber)
+
+        for (let i = buffer.getChannelData(channelNumber).length - 1; i >= 0; i--) {
+          outputData[i] += bufferData[i]
+        }
+
+        output.getChannelData(channelNumber).set(outputData)
+      }
+    })
+
+    return output
+  }
+
+  /**
+   * Concatenates multiple AudioBuffers into a single AudioBuffer.
+   *
+   * **Visual representation:**
+   *
+   * ![](https://user-images.githubusercontent.com/12958674/88806297-9d1d7e00-d186-11ea-8cd2-c64cb0324845.png)
+   */
+  concatAudio(buffers: AudioBuffer[]): AudioBuffer {
+    const output = this._context.createBuffer(this._maxNumberOfChannels(buffers), this._totalLength(buffers), this._sampleRate)
+    let offset = 0
+
+    buffers.forEach(buffer => {
+      for (let channelNumber = 0; channelNumber < buffer.numberOfChannels; channelNumber++) {
+        output.getChannelData(channelNumber).set(buffer.getChannelData(channelNumber), offset)
+      }
+
+      offset += buffer.length
+    })
+
+    return output
+  }
+
+  /**
+   * Pads a specified AudioBuffer with silence from a specified start time,
+   * for a specified length of time.
+   *
+   * Accepts float values as well as whole integers.
+   *
+   * @param buffer AudioBuffer to pad
+   * @param padStart Time to start padding (in seconds)
+   * @param seconds Duration to pad for (in seconds)
+   */
+  padAudio(buffer: AudioBuffer, padStart: number = 0, seconds: number = 0): AudioBuffer {
+    if (seconds === 0) return buffer
+
+    if (padStart < 0) throw new Error('Crunker: Parameter "padStart" in padAudio must be positive')
+    if (seconds < 0) throw new Error('Crunker: Parameter "seconds" in padAudio must be positive')
+
+    const updatedBuffer = this._context.createBuffer(
+      buffer.numberOfChannels,
+      Math.ceil(buffer.length + seconds * buffer.sampleRate),
+      buffer.sampleRate,
+    )
+
+    for (let channelNumber = 0; channelNumber < buffer.numberOfChannels; channelNumber++) {
+      const channelData = buffer.getChannelData(channelNumber)
+      updatedBuffer.getChannelData(channelNumber).set(channelData.subarray(0, Math.ceil(padStart * buffer.sampleRate) + 1), 0)
+
+      updatedBuffer
+        .getChannelData(channelNumber)
+        .set(
+          channelData.subarray(Math.ceil(padStart * buffer.sampleRate) + 2, updatedBuffer.length + 1),
+          Math.ceil((padStart + seconds) * buffer.sampleRate),
+        )
+    }
+
+    return updatedBuffer
+  }
+
+  /**
+   * Slices an AudioBuffer from the specified start time to the end time, with optional fade in and out.
+   *
+   * @param buffer AudioBuffer to slice
+   * @param start Start time (in seconds)
+   * @param end End time (in seconds)
+   * @param fadeIn Fade in duration (in seconds, default is 0)
+   * @param fadeOut Fade out duration (in seconds, default is 0)
+   */
+  sliceAudio(buffer: AudioBuffer, start: number, end: number, fadeIn: number = 0, fadeOut: number = 0): AudioBuffer {
+    if (start >= end) throw new Error('Crunker: "start" time should be less than "end" time in sliceAudio method')
+
+    const length = Math.round((end - start) * this._sampleRate)
+    const offset = Math.round(start * this._sampleRate)
+    const newBuffer = this._context.createBuffer(buffer.numberOfChannels, length, this._sampleRate)
+
+    for (let channel = 0; channel < buffer.numberOfChannels; channel++) {
+      const inputData = buffer.getChannelData(channel)
+      const outputData = newBuffer.getChannelData(channel)
+
+      for (let i = 0; i < length; i++) {
+        outputData[i] = inputData[offset + i]
+
+        // Apply fade in
+        if (i < fadeIn * this._sampleRate) {
+          outputData[i] *= i / (fadeIn * this._sampleRate)
+        }
+
+        // Apply fade out
+        if (i > length - fadeOut * this._sampleRate) {
+          outputData[i] *= (length - i) / (fadeOut * this._sampleRate)
+        }
+      }
+    }
+
+    return newBuffer
+  }
+
+  /**
+   * Plays the provided AudioBuffer in an AudioBufferSourceNode.
+   */
+  play(buffer: AudioBuffer, beforePlay: (source: AudioBufferSourceNode) => void): AudioBufferSourceNode {
+    const source = this._context.createBufferSource()
+
+    source.buffer = buffer
+    source.connect(this._context.destination)
+
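+    // The beforePlay hook runs after the source is connected but before
+    // playback starts, so callers can attach 'ended' listeners or resume a
+    // suspended AudioContext without missing the start of the audio.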
+    beforePlay(source)
+
+    source.start()
+
+    return source
+  }
+
+  /**
+   * Exports the specified AudioBuffer to a Blob, Object URI and HTMLAudioElement.
+   *
+   * Note that changing the MIME type does not change the actual file format. The
+   * file format will **always** be a WAVE file due to how audio is stored in the
+   * browser.
+   *
+   * @param buffer Buffer to export
+   * @param type MIME type (default: `audio/wav`)
+   */
+  export(buffer: AudioBuffer, type: string = 'audio/wav'): ExportedCrunkerAudio {
+    const recorded = this._interleave(buffer)
+    const dataview = this._writeHeaders(recorded, buffer.numberOfChannels, buffer.sampleRate)
+    const audioBlob = new Blob([dataview], { type })
+
+    return {
+      blob: audioBlob,
+      url: this._renderURL(audioBlob),
+      element: this._renderAudioElement(audioBlob),
+    }
+  }
+
+  /**
+   * Downloads the provided Blob.
+   *
+   * @param blob Blob to download
+   * @param filename An optional file name to use for the download (default: `crunker`)
+   */
+  download(blob: Blob, filename: string = 'crunker'): HTMLAnchorElement {
+    const a = document.createElement('a')
+
+    a.style.display = 'none'
+    a.href = this._renderURL(blob)
+    a.download = `${filename}.${blob.type.split('/')[1]}`
+    a.click()
+
+    return a
+  }
+
+  /**
+   * Executes a callback if the browser does not support the Web Audio API.
+   *
+   * Returns the result of the callback, or `undefined` if the Web Audio API is supported.
+   *
+   * @param callback callback to run if the browser does not support the Web Audio API
+   */
+  notSupported<T>(callback: () => T): T | undefined {
+    return this._isSupported() ? undefined : callback()
+  }
+
+  /**
+   * Closes Crunker's internal AudioContext.
+   */
+  close(): this {
+    this._context.close()
+    return this
+  }
+
+  /**
+   * Returns the duration of the longest AudioBuffer.
+   *
+   * @internal
+   */
+  private _maxDuration(buffers: AudioBuffer[]): number {
+    return Math.max(...buffers.map(buffer => buffer.duration))
+  }
+
+  /**
+   * Returns the largest number of channels in an array of AudioBuffers.
+   *
+   * @internal
+   */
+  private _maxNumberOfChannels(buffers: AudioBuffer[]): number {
+    return Math.max(...buffers.map(buffer => buffer.numberOfChannels))
+  }
+
+  /**
+   * Returns the sum of the lengths of an array of AudioBuffers.
+   *
+   * @internal
+   */
+  private _totalLength(buffers: AudioBuffer[]): number {
+    return buffers.map(buffer => buffer.length).reduce((a, b) => a + b, 0)
+  }
+
+  /**
+   * Returns whether the browser supports the Web Audio API.
+   *
+   * @internal
+   */
+  private _isSupported(): boolean {
+    return 'AudioContext' in window || 'webkitAudioContext' in window || 'mozAudioContext' in window
+  }
+
+  /**
+   * Writes the WAV headers for the specified Float32Array.
+   *
+   * Returns a DataView containing the WAV headers and file content.
+   *
+   * @internal
+   */
+  private _writeHeaders(buffer: Float32Array, numOfChannels: number, sampleRate: number): DataView {
+    const bitDepth = 16
+    const bytesPerSample = bitDepth / 8
+    const sampleSize = numOfChannels * bytesPerSample
+
+    const fileHeaderSize = 8
+    const chunkHeaderSize = 36
+    const chunkDataSize = buffer.length * bytesPerSample
+    const chunkTotalSize = chunkHeaderSize + chunkDataSize
+
+    const arrayBuffer = new ArrayBuffer(fileHeaderSize + chunkTotalSize)
+    const view = new DataView(arrayBuffer)
+
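+    // Standard 44-byte WAV header: the RIFF chunk descriptor, the "fmt "
+    // subchunk (PCM format tag, channel count, sample/byte rates, block
+    // align, bit depth) and the "data" subchunk header, all little-endian.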
+    this._writeString(view, 0, 'RIFF')
+    view.setUint32(4, chunkTotalSize, true)
+    this._writeString(view, 8, 'WAVE')
+    this._writeString(view, 12, 'fmt ')
+    view.setUint32(16, 16, true)
+    view.setUint16(20, 1, true)
+    view.setUint16(22, numOfChannels, true)
+    view.setUint32(24, sampleRate, true)
+    view.setUint32(28, sampleRate * sampleSize, true)
+    view.setUint16(32, sampleSize, true)
+    view.setUint16(34, bitDepth, true)
+    this._writeString(view, 36, 'data')
+    view.setUint32(40, chunkDataSize, true)
+
+    return this._floatTo16BitPCM(view, buffer, fileHeaderSize + chunkHeaderSize)
+  }
+
+  /**
+   * Converts a Float32Array to 16-bit PCM.
+   *
+   * @internal
+   */
+  private _floatTo16BitPCM(dataview: DataView, buffer: Float32Array, offset: number): DataView {
+    for (let i = 0; i < buffer.length; i++, offset += 2) {
+      const tmp = Math.max(-1, Math.min(1, buffer[i]))
+      dataview.setInt16(offset, tmp < 0 ? tmp * 0x8000 : tmp * 0x7fff, true)
+    }
+
+    return dataview
+  }
+
+  /**
+   * Writes a string to a DataView at the specified offset.
+   *
+   * @internal
+   */
+  private _writeString(dataview: DataView, offset: number, header: string): void {
+    for (let i = 0; i < header.length; i++) {
+      dataview.setUint8(offset + i, header.charCodeAt(i))
+    }
+  }
+
+  /**
+   * Converts an AudioBuffer to a Float32Array.
+   *
+   * @internal
+   */
+  private _interleave(input: AudioBuffer): Float32Array {
+    if (input.numberOfChannels === 1) {
+      // No need to interleave channels; return the single channel's data to save performance and memory
+      return input.getChannelData(0)
+    }
+
+    const channels = []
+    for (let i = 0; i < input.numberOfChannels; i++) {
+      channels.push(input.getChannelData(i))
+    }
+
+    const length = channels.reduce((prev, channelData) => prev + channelData.length, 0)
+    const result = new Float32Array(length)
+
+    let index = 0
+    let inputIndex = 0
+
+    // For 2 channels, the layout is: [L[0], R[0], L[1], R[1], ..., L[n], R[n]]
+    while (index < length) {
+      channels.forEach(channelData => {
+        result[index++] = channelData[inputIndex]
+      })
+
+      inputIndex++
+    }
+
+    return result
+  }
+
+  /**
+   * Creates an HTMLAudioElement whose source is the specified Blob.
+   *
+   * @internal
+   */
+  private _renderAudioElement(blob: Blob): HTMLAudioElement {
+    const audio = document.createElement('audio')
+
+    audio.controls = true
+    audio.src = this._renderURL(blob)
+
+    return audio
+  }
+
+  /**
+   * Creates an Object URL for the specified Blob.
+   *
+   * @internal
+   */
+  private _renderURL(blob: Blob): string {
+    return (window.URL || window.webkitURL).createObjectURL(blob)
+  }
+}
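For context, here's a minimal sketch of how the vendored helper can be driven after this change, mirroring what `AnnouncementSystem.playAudioFiles` does above. The import path, function name, and clip URLs are illustrative assumptions, not code from this PR:

```ts
import Crunker from '../helpers/crunker' // path is hypothetical; adjust to your module layout

// Hypothetical driver; the audio URLs below are placeholders.
async function playAnnouncement(): Promise<void> {
  const crunker = new Crunker()

  // Fetch the clips (requests are batched internally) and stitch them end to end.
  const buffers = await crunker.fetchAudio('/audio/chime.mp3', '/audio/next-train.mp3')
  const audio = crunker.concatAudio(buffers)

  await new Promise<void>(resolve => {
    crunker.play(audio, source => {
      // The callback fires before source.start(), so a suspended context
      // (common on iOS before a user gesture) can be resumed in time and
      // the 'ended' listener is attached before playback begins.
      if (crunker.context.state === 'suspended') void crunker.context.resume()

      source.addEventListener('ended', () => resolve())
    })
  })
}
```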