author     Joas Schilling <213943+nickvergessen@users.noreply.github.com>  2024-06-13 14:32:37 +0200
committer  GitHub <noreply@github.com>  2024-06-13 14:32:37 +0200
commit     bafe71b2ec4fe97855f4438b54430a2e177b5e7b (patch)
tree       99380f173f0443408e5e8d35dee8d2b851388790 /src
parent     a4c8ffcab83c84333733b9dffeb2339fd0d3ec68 (diff)
parent     87853adab8688f49e9316f092c6744ade5b0e191 (diff)
Merge pull request #12497 from nextcloud/fix-audio-in-inactive-tab-in-safari
Fix audio in inactive tab in Safari
Diffstat (limited to 'src')
-rw-r--r--  src/utils/webrtc/CallParticipantsAudioPlayer.js       | 178
-rw-r--r--  src/utils/webrtc/CallParticipantsAudioPlayer.spec.js  | 716
-rw-r--r--  src/utils/webrtc/index.js                             |  11
-rw-r--r--  src/utils/webrtc/models/CallParticipantModel.js       |  20
4 files changed, 905 insertions(+), 20 deletions(-)
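
The merged code below adds a player that can route every participant's audio through one shared element. The motivation, as the PR title suggests, is that Safari stops playing audio from individual media elements once the tab becomes inactive, while a single continuously playing element keeps working. What follows is a minimal sketch of the underlying Web Audio mixing pattern, assuming a plain browser environment; the helper names are illustrative and not part of the diff.

// Mix several remote MediaStreams into one <audio> element via the Web Audio
// API. This mirrors the "mixAudio" path of CallParticipantsAudioPlayer.
const audioContext = new (window.AudioContext || window.webkitAudioContext)()
const destination = audioContext.createMediaStreamDestination()

// A single element plays the mixed output; play() may need a prior user
// gesture because of autoplay policies.
const audioElement = new Audio()
audioElement.srcObject = destination.stream
audioElement.play()

// One source node per remote stream, so it can later be muted (disconnected)
// or removed entirely.
const sources = new Map()

function addRemoteStream(id, stream, muted = false) {
	const source = audioContext.createMediaStreamSource(stream)
	if (!muted) {
		source.connect(destination)
	}
	sources.set(id, { source, connected: !muted })
}

function removeRemoteStream(id) {
	const entry = sources.get(id)
	if (entry?.connected) {
		entry.source.disconnect(destination)
	}
	sources.delete(id)
}
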
diff --git a/src/utils/webrtc/CallParticipantsAudioPlayer.js b/src/utils/webrtc/CallParticipantsAudioPlayer.js
new file mode 100644
index 000000000..4c366e900
--- /dev/null
+++ b/src/utils/webrtc/CallParticipantsAudioPlayer.js
@@ -0,0 +1,178 @@
+/**
+ * SPDX-FileCopyrightText: 2024 Nextcloud GmbH and Nextcloud contributors
+ * SPDX-License-Identifier: AGPL-3.0-or-later
+ */
+
+import attachMediaStream from '../attachmediastream.js'
+
+/**
+ * Player for audio of call participants.
+ *
+ * The player keeps track of the participants added to and removed from the
+ * CallParticipantCollection and plays their audio as needed. Note that for
+ * regular audio, whether the audio is muted or not depends on
+ * "audioAvailable"; screen share audio, on the other hand, is always treated
+ * as unmuted.
+ *
+ * By default, the audio or screen audio of each participant is played on its
+ * own audio element. Alternatively, by setting "mixAudio = true" in the
+ * constructor, the audio of all participants is mixed and played by a single
+ * element created along with the player.
+ *
+ * Once the player is no longer needed "destroy()" must be called to stop
+ * tracking the participants and playing audio.
+ *
+ * @param {object} callParticipantCollection the CallParticipantCollection.
+ * @param {boolean} mixAudio true to mix and play all audio in a single audio
+ * element, false to play each audio on its own audio element.
+ */
+export default function CallParticipantsAudioPlayer(callParticipantCollection, mixAudio = false) {
+ this._callParticipantCollection = callParticipantCollection
+
+ this._mixAudio = mixAudio
+
+ if (this._mixAudio) {
+ this._audioContext = new (window.AudioContext || window.webkitAudioContext)()
+ this._audioDestination = this._audioContext.createMediaStreamDestination()
+ this._audioElement = attachMediaStream(this._audioDestination.stream, null, { audio: true })
+ this._audioNodes = new Map()
+ } else {
+ this._audioElements = new Map()
+ }
+
+ this._handleCallParticipantAddedBound = this._handleCallParticipantAdded.bind(this)
+ this._handleCallParticipantRemovedBound = this._handleCallParticipantRemoved.bind(this)
+ this._handleStreamChangedBound = this._handleStreamChanged.bind(this)
+ this._handleScreenChangedBound = this._handleScreenChanged.bind(this)
+ this._handleAudioAvailableChangedBound = this._handleAudioAvailableChanged.bind(this)
+
+ this._callParticipantCollection.on('add', this._handleCallParticipantAddedBound)
+ this._callParticipantCollection.on('remove', this._handleCallParticipantRemovedBound)
+
+ this._callParticipantCollection.callParticipantModels.value.forEach(callParticipantModel => {
+ this._handleCallParticipantAddedBound(this._callParticipantCollection, callParticipantModel)
+ })
+}
+
+CallParticipantsAudioPlayer.prototype = {
+
+ destroy() {
+ this._callParticipantCollection.off('add', this._handleCallParticipantAddedBound)
+ this._callParticipantCollection.off('remove', this._handleCallParticipantRemovedBound)
+
+ this._callParticipantCollection.callParticipantModels.value.forEach(callParticipantModel => {
+ this._handleCallParticipantRemovedBound(this._callParticipantCollection, callParticipantModel)
+ })
+
+ if (this._mixAudio) {
+ this._audioElement.srcObject = null
+ this._audioContext.close()
+ }
+ },
+
+ _handleCallParticipantAdded(callParticipantCollection, callParticipantModel) {
+ callParticipantModel.on('change:stream', this._handleStreamChangedBound)
+ callParticipantModel.on('change:screen', this._handleScreenChangedBound)
+ callParticipantModel.on('change:audioAvailable', this._handleAudioAvailableChangedBound)
+
+ this._handleStreamChangedBound(callParticipantModel, callParticipantModel.get('stream'))
+ this._handleScreenChangedBound(callParticipantModel, callParticipantModel.get('screen'))
+ },
+
+ _handleCallParticipantRemoved(callParticipantCollection, callParticipantModel) {
+ callParticipantModel.off('change:stream', this._handleStreamChangedBound)
+ callParticipantModel.off('change:screen', this._handleScreenChangedBound)
+ callParticipantModel.off('change:audioAvailable', this._handleAudioAvailableChangedBound)
+
+ this._handleStreamChangedBound(callParticipantModel, null)
+ this._handleScreenChangedBound(callParticipantModel, null)
+ },
+
+ _handleStreamChanged(callParticipantModel, stream) {
+ const id = callParticipantModel.get('peerId') + '-stream'
+ const mute = !callParticipantModel.get('audioAvailable')
+ if (this._mixAudio) {
+ this._setAudioNode(id, stream, mute)
+ } else {
+ this._setAudioElement(id, stream, mute)
+ }
+ },
+
+ _handleScreenChanged(callParticipantModel, screen) {
+ const id = callParticipantModel.get('peerId') + '-screen'
+ if (this._mixAudio) {
+ this._setAudioNode(id, screen)
+ } else {
+ this._setAudioElement(id, screen)
+ }
+ },
+
+ _setAudioNode(id, stream, mute = false) {
+ const audioNode = this._audioNodes.get(id)
+ if (audioNode) {
+ if (audioNode.connected) {
+ audioNode.audioSource.disconnect(this._audioDestination)
+ }
+
+ this._audioNodes.delete(id)
+ }
+
+ if (!stream) {
+ return
+ }
+
+ const audioSource = this._audioContext.createMediaStreamSource(stream)
+ if (!mute) {
+ audioSource.connect(this._audioDestination)
+ }
+
+ this._audioNodes.set(id, { audioSource, connected: !mute })
+ },
+
+ _setAudioElement(id, stream, mute = false) {
+ let audioElement = this._audioElements.get(id)
+ if (audioElement) {
+ audioElement.srcObject = null
+
+ this._audioElements.delete(id)
+ }
+
+ if (!stream) {
+ return
+ }
+
+ audioElement = attachMediaStream(stream, null, { audio: true })
+ if (mute) {
+ audioElement.muted = true
+ }
+
+ this._audioElements.set(id, audioElement)
+ },
+
+ _handleAudioAvailableChanged(callParticipantModel, audioAvailable) {
+ if (this._mixAudio) {
+ const audioNode = this._audioNodes.get(callParticipantModel.get('peerId') + '-stream')
+ if (!audioNode) {
+ return
+ }
+
+ if (audioAvailable && !audioNode.connected) {
+ audioNode.audioSource.connect(this._audioDestination)
+ audioNode.connected = true
+ } else if (!audioAvailable && audioNode.connected) {
+ audioNode.audioSource.disconnect(this._audioDestination)
+ audioNode.connected = false
+ }
+
+ return
+ }
+
+ const audioElement = this._audioElements.get(callParticipantModel.get('peerId') + '-stream')
+ if (!audioElement) {
+ return
+ }
+
+ audioElement.muted = !audioAvailable
+ },
+
+}
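
The wiring that actually creates the player lives in src/utils/webrtc/index.js (part of this diff, but not shown in this excerpt). Below is a hedged sketch of how the player might be hooked up, using only the constructor and destroy() defined above; the function names and how mixAudio is decided are assumptions, not taken from the diff.

// Illustrative wiring only; the real integration is in src/utils/webrtc/index.js.
import CallParticipantsAudioPlayer from './CallParticipantsAudioPlayer.js'

let callParticipantsAudioPlayer = null

function startParticipantsAudioPlayer(callParticipantCollection, mixAudio) {
	// mixAudio = true plays everything through a single element, the
	// behaviour needed to keep audio alive in inactive Safari tabs.
	callParticipantsAudioPlayer = new CallParticipantsAudioPlayer(callParticipantCollection, mixAudio)
}

function stopParticipantsAudioPlayer() {
	// destroy() detaches the collection and model listeners and stops playback.
	callParticipantsAudioPlayer?.destroy()
	callParticipantsAudioPlayer = null
}
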
diff --git a/src/utils/webrtc/CallParticipantsAudioPlayer.spec.js b/src/utils/webrtc/CallParticipantsAudioPlayer.spec.js
new file mode 100644
index 000000000..1e25172ab
--- /dev/null
+++ b/src/utils/webrtc/CallParticipantsAudioPlayer.spec.js
@@ -0,0 +1,716 @@
+/**
+ * SPDX-FileCopyrightText: 2024 Nextcloud GmbH and Nextcloud contributors
+ * SPDX-License-Identifier: AGPL-3.0-or-later
+ */
+
+import { ref } from 'vue'
+
+import CallParticipantsAudioPlayer from './CallParticipantsAudioPlayer.js'
+import EmitterMixin from '../EmitterMixin.js'
+
+/**
+ * Stub of CallParticipantModel with just the attributes and methods used by
+ * CallParticipantsAudioPlayer.
+ *
+ * @param {string} peerId the ID of the peer
+ */
+function CallParticipantModelStub(peerId) {
+ this._superEmitterMixin()
+
+ this.attributes = {
+ peerId,
+ }
+
+ this.get = (key) => {
+ return this.attributes[key]
+ }
+
+ this.set = (key, value) => {
+ this.attributes[key] = value
+
+ this._trigger('change:' + key, [value])
+ }
+}
+EmitterMixin.apply(CallParticipantModelStub.prototype)
+
+/**
+ * Stub of CallParticipantCollection with just the attributes and methods used
+ * by CallParticipantsAudioPlayer.
+ */
+function CallParticipantCollectionStub() {
+ this._superEmitterMixin()
+
+ this.callParticipantModels = ref([])
+}
+EmitterMixin.apply(CallParticipantCollectionStub.prototype)
+
+/**
+ * Mock of MediaStream with an id for easier tracking in tests.
+ *
+ * HTMLAudioElement requires srcObject to conform to the MediaStream interface,
+ * but in jsdom anything can be assigned, so the mock is kept to a minimum.
+ *
+ * @param {string} id the id for the stream.
+ */
+function MediaStreamMock(id) {
+ this.id = id
+}
+
+describe('CallParticipantsAudioPlayer', () => {
+
+ let callParticipantCollection
+ let callParticipantsAudioPlayer
+
+ /**
+ * Adds a CallParticipantModel to the collection.
+ *
+ * The participant must not yet be in the collection.
+ *
+ * @param {object} callParticipantModel the CallParticipantModel to add.
+ */
+ function addCallParticipantModel(callParticipantModel) {
+ callParticipantCollection.callParticipantModels.value.push(callParticipantModel)
+
+ callParticipantCollection._trigger('add', [callParticipantModel])
+ }
+
+ /**
+ * Removes a CallParticipantModel from the collection.
+ *
+ * The participant must be in the collection.
+ *
+ * @param {object} callParticipantModel the CallParticipantModel to remove.
+ */
+ function removeCallParticipantModel(callParticipantModel) {
+ const index = callParticipantCollection.callParticipantModels.value.indexOf(callParticipantModel)
+ callParticipantCollection.callParticipantModels.value.splice(index, 1)
+
+ callParticipantCollection._trigger('remove', [callParticipantModel])
+ }
+
+ /**
+ * Asserts that the audio element with the given id has the expected
+ * srcObject and muted value.
+ *
+ * @param {string} audioElementId the id of the audio element in the player.
+ * @param {object|null} expectedSrcObject the expected srcObject in the
+ * element.
+ * @param {boolean} expectedMuted the expected muted value in the element.
+ */
+ function assertAudioElement(audioElementId, expectedSrcObject, expectedMuted) {
+ expect(callParticipantsAudioPlayer._audioElements.get(audioElementId)).not.toBe(null)
+ expect(callParticipantsAudioPlayer._audioElements.get(audioElementId).tagName.toLowerCase()).toBe('audio')
+ expect(callParticipantsAudioPlayer._audioElements.get(audioElementId).srcObject).toBe(expectedSrcObject)
+ expect(callParticipantsAudioPlayer._audioElements.get(audioElementId).muted).toBe(expectedMuted)
+ }
+
+ beforeEach(() => {
+ callParticipantCollection = new CallParticipantCollectionStub()
+
+ callParticipantsAudioPlayer = new CallParticipantsAudioPlayer(callParticipantCollection)
+ })
+
+ afterEach(() => {
+ jest.clearAllMocks()
+ })
+
+ describe('constructor', () => {
+ test('without participants', () => {
+ expect(callParticipantsAudioPlayer._audioElements.size).toBe(0)
+ })
+
+ test('with several participants', () => {
+ const callParticipantModel1 = new CallParticipantModelStub('peerId1')
+ addCallParticipantModel(callParticipantModel1)
+
+ const callParticipantModel2 = new CallParticipantModelStub('peerId2')
+ addCallParticipantModel(callParticipantModel2)
+
+ callParticipantModel2.attributes.audioAvailable = false
+
+ const stream2 = new MediaStreamMock('stream2')
+ callParticipantModel2.set('stream', stream2)
+
+ const callParticipantModel3 = new CallParticipantModelStub('peerId3')
+ addCallParticipantModel(callParticipantModel3)
+
+ callParticipantModel3.attributes.audioAvailable = true
+
+ const stream3 = new MediaStreamMock('stream3')
+ callParticipantModel3.set('stream', stream3)
+ const screen3 = new MediaStreamMock('screen3')
+ callParticipantModel3.set('screen', screen3)
+
+ callParticipantsAudioPlayer = new CallParticipantsAudioPlayer(callParticipantCollection)
+
+ expect(callParticipantsAudioPlayer._audioElements.size).toBe(3)
+ assertAudioElement('peerId2-stream', stream2, true)
+ assertAudioElement('peerId3-stream', stream3, false)
+ assertAudioElement('peerId3-screen', screen3, false)
+ })
+
+ test('add stream and screen', () => {
+ const callParticipantModel1 = new CallParticipantModelStub('peerId1')
+ addCallParticipantModel(callParticipantModel1)
+
+ callParticipantsAudioPlayer = new CallParticipantsAudioPlayer(callParticipantCollection)
+
+ const stream1 = new MediaStreamMock('stream1')
+ callParticipantModel1.set('stream', stream1)
+ const screen1 = new MediaStreamMock('screen1')
+ callParticipantModel1.set('screen', screen1)
+
+ expect(callParticipantsAudioPlayer._audioElements.size).toBe(2)
+ assertAudioElement('peerId1-stream', stream1, true)
+ assertAudioElement('peerId1-screen', screen1, false)
+ })
+
+ test('change audio available', () => {
+ const callParticipantModel1 = new CallParticipantModelStub('peerId1')
+ addCallParticipantModel(callParticipantModel1)
+
+ callParticipantModel1.attributes.audioAvailable = false
+
+ const stream1 = new MediaStreamMock('stream1')
+ callParticipantModel1.set('stream', stream1)
+ const screen1 = new MediaStreamMock('screen1')
+ callParticipantModel1.set('screen', screen1)
+
+ callParticipantsAudioPlayer = new CallParticipantsAudioPlayer(callParticipantCollection)
+
+ callParticipantModel1.set('audioAvailable', true)
+
+ expect(callParticipantsAudioPlayer._audioElements.size).toBe(2)
+ assertAudioElement('peerId1-stream', stream1, false)
+ assertAudioElement('peerId1-screen', screen1, false)
+ })
+
+ test('remove participant, stream and screen', () => {
+ const callParticipantModel1 = new CallParticipantModelStub('peerId1')
+ addCallParticipantModel(callParticipantModel1)
+
+ const stream1 = new MediaStreamMock('stream1')
+ callParticipantModel1.set('stream', stream1)
+ const screen1 = new MediaStreamMock('screen1')
+ callParticipantModel1.set('screen', screen1)
+
+ const callParticipantModel2 = new CallParticipantModelStub('peerId2')
+ addCallParticipantModel(callParticipantModel2)
+
+ const stream2 = new MediaStreamMock('stream2')
+ callParticipantModel2.set('stream', stream2)
+ const screen2 = new MediaStreamMock('screen2')
+ callParticipantModel2.set('screen', screen2)
+
+ callParticipantsAudioPlayer = new CallParticipantsAudioPlayer(callParticipantCollection)
+
+ callParticipantModel1.set('stream', null)
+ callParticipantModel1.set('screen', null)
+
+ removeCallParticipantModel(callParticipantModel2)
+
+ expect(callParticipantsAudioPlayer._audioElements.size).toBe(0)
+ })
+ })
+
+ describe('add stream and screen', () => {
+ test('stream with available audio', () => {
+ const callParticipantModel = new CallParticipantModelStub('peerId1')
+ addCallParticipantModel(callParticipantModel)
+
+ callParticipantModel.attributes.audioAvailable = true
+
+ const stream = new MediaStreamMock('stream1')
+ callParticipantModel.set('stream', stream)
+
+ expect(callParticipantsAudioPlayer._audioElements.size).toBe(1)
+ assertAudioElement('peerId1-stream', stream, false)
+ })
+
+ test('stream without available audio', () => {
+ const callParticipantModel = new CallParticipantModelStub('peerId1')
+ addCallParticipantModel(callParticipantModel)
+
+ callParticipantModel.attributes.audioAvailable = false
+
+ const stream = new MediaStreamMock('stream1')
+ callParticipantModel.set('stream', stream)
+
+ expect(callParticipantsAudioPlayer._audioElements.size).toBe(1)
+ assertAudioElement('peerId1-stream', stream, true)
+ })
+
+ test('screen', () => {
+ const callParticipantModel = new CallParticipantModelStub('peerId1')
+ addCallParticipantModel(callParticipantModel)
+
+ const screen = new MediaStreamMock('screen1')
+ callParticipantModel.set('screen', screen)
+
+ expect(callParticipantsAudioPlayer._audioElements.size).toBe(1)
+ assertAudioElement('peerId1-screen', screen, false)
+ })
+
+ test('stream and screen', () => {
+ const callParticipantModel = new CallParticipantModelStub('peerId1')
+ addCallParticipantModel(callParticipantModel)
+
+ callParticipantModel.attributes.audioAvailable = false
+
+ const stream = new MediaStreamMock('stream1')
+ callParticipantModel.set('stream', stream)
+ const screen = new MediaStreamMock('screen1')
+ callParticipantModel.set('screen', screen)
+
+ expect(callParticipantsAudioPlayer._audioElements.size).toBe(2)
+ assertAudioElement('peerId1-stream', stream, true)
+ assertAudioElement('peerId1-screen', screen, false)
+ })
+
+ // This should not happen (the stream and screen are expected to be set
+ // once the participant is already in the collection), but test it just
+ // in case.
+ test('participant', () => {
+ const callParticipantModel = new CallParticipantModelStub('peerId1')
+
+ callParticipantModel.attributes.audioAvailable = false
+
+ const stream = new MediaStreamMock('stream1')
+ callParticipantModel.set('stream', stream)
+ const screen = new MediaStreamMock('screen1')
+ callParticipantModel.set('screen', screen)
+
+ addCallParticipantModel(callParticipantModel)
+
+ expect(callParticipantsAudioPlayer._audioElements.size).toBe(2)
+ assertAudioElement('peerId1-stream', stream, true)
+ assertAudioElement('peerId1-screen', screen, false)
+ })
+
+ test('several participants, streams and screens', () => {
+ const callParticipantModel1 = new CallParticipantModelStub('peerId1')
+ addCallParticipantModel(callParticipantModel1)
+ const callParticipantModel2 = new CallParticipantModelStub('peerId2')
+ addCallParticipantModel(callParticipantModel2)
+
+ callParticipantModel1.attributes.audioAvailable = false
+
+ const screen1 = new MediaStreamMock('screen1')
+ callParticipantModel1.set('screen', screen1)
+ const stream1 = new MediaStreamMock('stream1')
+ callParticipantModel1.set('stream', stream1)
+
+ const screen2 = new MediaStreamMock('screen2')
+ callParticipantModel2.set('screen', screen2)
+
+ const callParticipantModel3 = new CallParticipantModelStub('peerId3')
+ addCallParticipantModel(callParticipantModel3)
+
+ callParticipantModel3.attributes.audioAvailable = true
+
+ const stream3 = new MediaStreamMock('stream3')
+ callParticipantModel3.set('stream', stream3)
+
+ const callParticipantModel4 = new CallParticipantModelStub('peerId4')
+
+ callParticipantModel4.attributes.audioAvailable = true
+
+ const stream4 = new MediaStreamMock('stream4')
+ callParticipantModel4.set('stream', stream4)
+
+ addCallParticipantModel(callParticipantModel4)
+
+ expect(callParticipantsAudioPlayer._audioElements.size).toBe(5)
+ assertAudioElement('peerId1-stream', stream1, true)
+ assertAudioElement('peerId1-screen', screen1, false)
+ assertAudioElement('peerId2-screen', screen2, false)
+ assertAudioElement('peerId3-stream', stream3, false)
+ assertAudioElement('peerId4-stream', stream4, false)
+ })
+
+ // This should not happen (the previous stream or screen is expected to
+ // be removed first), but test it just in case.
+ test('replace stream and screen', () => {
+ const callParticipantModel = new CallParticipantModelStub('peerId1')
+ addCallParticipantModel(callParticipantModel)
+
+ callParticipantModel.attributes.audioAvailable = false
+
+ const stream1 = new MediaStreamMock('stream1')
+ callParticipantModel.set('stream', stream1)
+ const screen1 = new MediaStreamMock('screen1')
+ callParticipantModel.set('screen', screen1)
+
+ const audioElementStream1 = callParticipantsAudioPlayer._audioElements.get('peerId1-stream')
+ const audioElementScreen1 = callParticipantsAudioPlayer._audioElements.get('peerId1-screen')
+
+ callParticipantModel.attributes.audioAvailable = true
+
+ const stream2 = new MediaStreamMock('stream2')
+ callParticipantModel.set('stream', stream2)
+ const screen2 = new MediaStreamMock('screen2')
+ callParticipantModel.set('screen', screen2)
+
+ expect(callParticipantsAudioPlayer._audioElements.size).toBe(2)
+ assertAudioElement('peerId1-stream', stream2, false)
+ assertAudioElement('peerId1-screen', screen2, false)
+ expect(audioElementStream1.srcObject).toBe(null)
+ expect(audioElementScreen1.srcObject).toBe(null)
+ })
+ })
+
+ describe('change audio available', () => {
+ test('not available with stream', () => {
+ const callParticipantModel = new CallParticipantModelStub('peerId1')
+ addCallParticipantModel(callParticipantModel)
+
+ callParticipantModel.attributes.audioAvailable = true
+
+ const stream = new MediaStreamMock('stream1')
+ callParticipantModel.set('stream', stream)
+
+ callParticipantModel.set('audioAvailable', false)
+
+ expect(callParticipantsAudioPlayer._audioElements.size).toBe(1)
+ assertAudioElement('peerId1-stream', stream, true)
+ })
+
+ test('not available without stream', () => {
+ const callParticipantModel = new CallParticipantModelStub('peerId1')
+ addCallParticipantModel(callParticipantModel)
+
+ callParticipantModel.set('audioAvailable', false)
+
+ expect(callParticipantsAudioPlayer._audioElements.size).toBe(0)
+ })
+
+ test('not available with screen', () => {
+ const callParticipantModel = new CallParticipantModelStub('peerId1')
+ addCallParticipantModel(callParticipantModel)
+
+ callParticipantModel.attributes.audioAvailable = true
+
+ const screen = new MediaStreamMock('screen1')
+ callParticipantModel.set('screen', screen)
+
+ callParticipantModel.set('audioAvailable', false)
+
+ expe