feat(android)!: configurable SoundPool and AudioManager.mode (#1388)
* feat(android): configurable SoundPool in LOW_LATENCY mode
* fix(android): apply global AudioContext to `MediaPlayer`
* feat(android): add `AudioMode` to AudioContextAndroid
* feat(android): set `AudioManager.mode` and `AudioManager.isSpeakerphoneOn` on `setGlobalAudioContext`
* docs: add missing docs for audio_context_config library
* docs: add docs on how to set platform-specific audio context
Gustl22 authored Jan 21, 2023 · 1 parent f06cab9 · commit 5697f18
Showing 10 changed files with 448 additions and 134 deletions.
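Taken together, these changes allow configuring the audio context alongside the low-latency player mode. The snippet below is a minimal sketch assembled from the integration tests added in this commit (not an excerpt from the package docs); the mapping comments follow the commit message, and the URL is a placeholder.

```dart
final player = AudioPlayer()..setReleaseMode(ReleaseMode.stop);
// LOW_LATENCY mode is backed by the (now configurable) SoundPool on Android.
await player.setPlayerMode(PlayerMode.lowLatency);

// Build a generic config and apply it globally and/or to a single player.
final audioContext = AudioContextConfig(
  forceSpeaker: true, // reflected in AudioManager.isSpeakerphoneOn on Android
  respectSilence: false, // whether to stay silent in the device's silent mode
).build();
await AudioPlayer.global.setGlobalAudioContext(audioContext);
await player.setAudioContext(audioContext); // per-player override (Android only)

await player.play(UrlSource('https://example.com/sound.mp3')); // placeholder URL
```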
18 changes: 13 additions & 5 deletions getting_started.md
@@ -181,22 +181,30 @@ An Audio Context is a (mostly mobile-specific) set of secondary, platform-specif
The Audio Context configuration can be set globally via:

```dart
-AudioPlayer.global.setGlobalAudioContext(config);
+AudioPlayer.global.setGlobalAudioContext(AudioContextConfig(/*...*/).build());
```

This will naturally apply to all players. On iOS, that is the only option.
On Android only, each player can have a different Audio Context configuration.
To configure a player-specific Audio Context (if desired), use:

```dart
-player.setAudioContext(config);
+player.setAudioContext(AudioContextConfig(/*...*/).build());
```

-While each platform has its own set of configurations, they are somewhat related, and you can create them using a unified interface call `AudioContextConfig` -- it provides generic abstractions that convey intent, that are then converted to platform specific configurations.
+While each platform has its own set of configurations, they are somewhat related, and you can create them using a unified interface called [`AudioContextConfig`](https://pub.dev/documentation/audioplayers_platform_interface/latest/api_audio_context_config/api_audio_context_config-library.html).
+It provides generic abstractions that convey intent, which are then converted to platform-specific configurations.

-Note that if this process is not perfect, you can create your configuration from scratch by providing exact details for each platform.
+Note that if this process is not perfect, you can create your configuration from scratch by providing exact details for each platform via
+[AudioContextAndroid](https://pub.dev/documentation/audioplayers_platform_interface/latest/api_audio_context_config/AudioContextAndroid-class.html) and
+[AudioContextIOS](https://pub.dev/documentation/audioplayers_platform_interface/latest/api_audio_context_config/AudioContextIOS-class.html).

-The [`AudioContextConfig` class has documentation about each parameter](https://github.com/bluefireteam/audioplayers/blob/main/packages/audioplayers_platform_interface/lib/api/audio_context_config.dart), what they are for, and what configurations they reflect on native code.
+```dart
+player.setAudioContext(AudioContext(
+  android: AudioContextAndroid(/*...*/),
+  iOS: AudioContextIOS(/*...*/),
+));
+```
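These platform-specific classes mirror the native APIs they configure. On Android (per the Kotlin changes in this commit), `AudioContextAndroid` feeds `AudioManager.mode`, `AudioManager.isSpeakerphoneOn`, and the player's audio attributes (content type, usage type, audio focus); `AudioContextIOS` presumably wraps the equivalent iOS session settings.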

## Streams

178 changes: 133 additions & 45 deletions packages/audioplayers/example/integration_test/lib_test.dart
@@ -14,52 +14,52 @@ void main() {

IntegrationTestWidgetsFlutterBinding.ensureInitialized();

-  group('play multiple sources', () {
-    final audioTestDataList = [
-      if (features.hasUrlSource)
-        LibSourceTestData(
-          source: UrlSource(wavUrl1),
-          duration: const Duration(milliseconds: 451),
-        ),
-      if (features.hasUrlSource)
-        LibSourceTestData(
-          source: UrlSource(wavUrl2),
-          duration: const Duration(seconds: 1, milliseconds: 068),
-        ),
-      if (features.hasUrlSource)
-        LibSourceTestData(
-          source: UrlSource(mp3Url1),
-          duration: const Duration(minutes: 3, seconds: 30, milliseconds: 77),
-        ),
-      if (features.hasUrlSource)
-        LibSourceTestData(
-          source: UrlSource(mp3Url2),
-          duration: const Duration(minutes: 1, seconds: 34, milliseconds: 119),
-        ),
-      if (features.hasUrlSource && features.hasPlaylistSourceType)
-        LibSourceTestData(
-          source: UrlSource(m3u8StreamUrl),
-          duration: Duration.zero,
-          isLiveStream: true,
-        ),
-      if (features.hasUrlSource)
-        LibSourceTestData(
-          source: UrlSource(mpgaStreamUrl),
-          duration: Duration.zero,
-          isLiveStream: true,
-        ),
-      if (features.hasAssetSource)
-        LibSourceTestData(
-          source: AssetSource(asset1),
-          duration: const Duration(seconds: 1, milliseconds: 068),
-        ),
-      if (features.hasAssetSource)
-        LibSourceTestData(
-          source: AssetSource(asset2),
-          duration: const Duration(minutes: 1, seconds: 34, milliseconds: 119),
-        ),
-    ];
+  final audioTestDataList = [
+    if (features.hasUrlSource)
+      LibSourceTestData(
+        source: UrlSource(wavUrl1),
+        duration: const Duration(milliseconds: 451),
+      ),
+    if (features.hasUrlSource)
+      LibSourceTestData(
+        source: UrlSource(wavUrl2),
+        duration: const Duration(seconds: 1, milliseconds: 068),
+      ),
+    if (features.hasUrlSource)
+      LibSourceTestData(
+        source: UrlSource(mp3Url1),
+        duration: const Duration(minutes: 3, seconds: 30, milliseconds: 77),
+      ),
+    if (features.hasUrlSource)
+      LibSourceTestData(
+        source: UrlSource(mp3Url2),
+        duration: const Duration(minutes: 1, seconds: 34, milliseconds: 119),
+      ),
+    if (features.hasUrlSource && features.hasPlaylistSourceType)
+      LibSourceTestData(
+        source: UrlSource(m3u8StreamUrl),
+        duration: Duration.zero,
+        isLiveStream: true,
+      ),
+    if (features.hasUrlSource)
+      LibSourceTestData(
+        source: UrlSource(mpgaStreamUrl),
+        duration: Duration.zero,
+        isLiveStream: true,
+      ),
+    if (features.hasAssetSource)
+      LibSourceTestData(
+        source: AssetSource(asset1),
+        duration: const Duration(seconds: 1, milliseconds: 068),
+      ),
+    if (features.hasAssetSource)
+      LibSourceTestData(
+        source: AssetSource(asset2),
+        duration: const Duration(minutes: 1, seconds: 34, milliseconds: 119),
+      ),
+  ];
+
+  group('play multiple sources', () {
testWidgets(
'play multiple sources simultaneously',
(WidgetTester tester) async {
@@ -111,4 +111,92 @@
}
});
});

+  group('Audio Context', () {
+    /// Android and iOS only: Play the same sound twice, each time with a
+    /// different audio context. This test can be executed on a device in
+    /// "Silent", "Vibrate" or "Ring" mode. In "Silent" or "Vibrate" mode,
+    /// the second sound should not be audible.
+    testWidgets(
+      'test changing AudioContextConfigs',
+      (WidgetTester tester) async {
+        final player = AudioPlayer()..setReleaseMode(ReleaseMode.stop);
+
+        final td = audioTestDataList[0];
+
+        var audioContext = AudioContextConfig(
+          //ignore: avoid_redundant_argument_values
+          forceSpeaker: true,
+          //ignore: avoid_redundant_argument_values
+          respectSilence: false,
+        ).build();
+        await AudioPlayer.global.setGlobalAudioContext(audioContext);
+        await player.setAudioContext(audioContext);
+
+        await player.play(td.source);
+        await tester.pumpAndSettle();
+        await tester.pump(td.duration + const Duration(seconds: 8));
+        expect(player.state, PlayerState.completed);
+
+        audioContext = AudioContextConfig(
+          forceSpeaker: false,
+          respectSilence: true,
+        ).build();
+        await AudioPlayer.global.setGlobalAudioContext(audioContext);
+        await player.setAudioContext(audioContext);
+
+        await player.resume();
+        await tester.pumpAndSettle();
+        await tester.pump(td.duration + const Duration(seconds: 8));
+        expect(player.state, PlayerState.completed);
+      },
+      skip: !features.hasForceSpeaker,
+    );
+
+    /// Android and iOS only: Play the same sound twice, each time with a
+    /// different audio context. This test can be executed on a device in
+    /// "Silent", "Vibrate" or "Ring" mode. In "Silent" or "Vibrate" mode,
+    /// the second sound should not be audible.
+    testWidgets(
+      'test changing AudioContextConfigs in LOW_LATENCY mode',
+      (WidgetTester tester) async {
+        final player = AudioPlayer()..setReleaseMode(ReleaseMode.stop);
+        player.setPlayerMode(PlayerMode.lowLatency);
+
+        final td = audioTestDataList[0];
+
+        var audioContext = AudioContextConfig(
+          //ignore: avoid_redundant_argument_values
+          forceSpeaker: true,
+          //ignore: avoid_redundant_argument_values
+          respectSilence: false,
+        ).build();
+        await AudioPlayer.global.setGlobalAudioContext(audioContext);
+        await player.setAudioContext(audioContext);
+
+        await player.setSource(td.source);
+        await player.resume();
+        await tester.pumpAndSettle();
+        await tester.pump(td.duration + const Duration(seconds: 8));
+        expect(player.state, PlayerState.playing);
+        await player.stop();
+        expect(player.state, PlayerState.stopped);
+
+        audioContext = AudioContextConfig(
+          forceSpeaker: false,
+          respectSilence: true,
+        ).build();
+        await AudioPlayer.global.setGlobalAudioContext(audioContext);
+        await player.setAudioContext(audioContext);
+
+        await player.resume();
+        await tester.pumpAndSettle();
+        await tester.pump(td.duration + const Duration(seconds: 8));
+        expect(player.state, PlayerState.playing);
+        await player.stop();
+        expect(player.state, PlayerState.stopped);
+      },
+      skip: !features.hasForceSpeaker || !features.hasLowLatency,
+    );
+  });
}
@@ -9,6 +9,7 @@ class PlatformFeatures {
hasPlaylistSourceType: false,
hasLowLatency: false,
hasReleaseModeRelease: false,
+    hasForceSpeaker: false,
hasDuckAudio: false,
hasRespectSilence: false,
hasStayAwake: false,
@@ -27,7 +28,6 @@
hasPlaylistSourceType: false,
hasReleaseModeRelease: false,
hasLowLatency: false,
-    hasDuckAudio: false,
hasBalance: false,
);

@@ -36,6 +36,7 @@
hasPlaylistSourceType: false,
hasLowLatency: false,
hasReleaseModeRelease: false,
+    hasForceSpeaker: false,
hasDuckAudio: false,
hasRespectSilence: false,
hasStayAwake: false,
@@ -51,6 +52,7 @@
// MP3 duration is estimated: https://bugzilla.gnome.org/show_bug.cgi?id=726144
// Use GstDiscoverer to get duration before playing: https://gstreamer.freedesktop.org/documentation/pbutils/gstdiscoverer.html?gi-language=c
hasMp3Duration: false,
+    hasForceSpeaker: false,
hasDuckAudio: false,
hasRespectSilence: false,
hasStayAwake: false,
@@ -63,6 +65,7 @@
hasPlaylistSourceType: false,
hasLowLatency: false,
hasReleaseModeRelease: false,
+    hasForceSpeaker: false,
hasDuckAudio: false,
hasRespectSilence: false,
hasStayAwake: false,
@@ -85,6 +88,7 @@
final bool hasMp3Duration;

final bool hasPlaybackRate;
+  final bool hasForceSpeaker;
final bool hasDuckAudio; // Not yet tested
final bool hasRespectSilence; // Not yet tested
final bool hasStayAwake; // Not yet tested
@@ -109,6 +113,7 @@
this.hasBalance = true,
this.hasSeek = true,
this.hasPlaybackRate = true,
+    this.hasForceSpeaker = true,
this.hasDuckAudio = true,
this.hasRespectSilence = true,
this.hasStayAwake = true,
9 changes: 9 additions & 0 deletions packages/audioplayers/example/lib/tabs/audio_context.dart
@@ -167,6 +167,15 @@ class _AudioContextTabState extends State<AudioContextTab>
audioContext.android.copy(audioFocus: v),
),
),
+            LabeledDropDown<AndroidAudioMode>(
+              key: const Key('audioMode'),
+              label: 'audioMode',
+              options: {for (var e in AndroidAudioMode.values) e: e.name},
+              selected: audioContext.android.audioMode,
+              onChange: (v) => updateAudioContextAndroid(
+                audioContext.android.copy(audioMode: v),
+              ),
+            ),
],
);
}
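The new `audioMode` can be driven from app code in the same way. A hedged sketch (the `AndroidAudioMode` enum and the `copy` helper appear in this diff; the `inCommunication` value, the `audioContext` variable, and the `iOS` accessor are assumptions for illustration):

```dart
// Hypothetical: switch the Android audio mode for a voice-chat scenario
// (AudioManager.MODE_IN_COMMUNICATION on the native side).
final androidContext = audioContext.android.copy(
  audioMode: AndroidAudioMode.inCommunication, // assumed enum value
);
await player.setAudioContext(
  AudioContext(android: androidContext, iOS: audioContext.iOS),
);
```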
@@ -7,21 +7,24 @@ import android.media.AudioManager
import android.media.MediaPlayer
import android.os.Build
import androidx.annotation.RequiresApi
+import java.util.Objects

data class AudioContextAndroid(
val isSpeakerphoneOn: Boolean,
val stayAwake: Boolean,
val contentType: Int,
val usageType: Int,
val audioFocus: Int?,
+    val audioMode: Int,
) {
@SuppressLint("InlinedApi") // we are just using numerical constants
constructor() : this(
-        isSpeakerphoneOn = false,
+        isSpeakerphoneOn = true,
stayAwake = false,
contentType = CONTENT_TYPE_MUSIC,
usageType = USAGE_MEDIA,
audioFocus = null,
+        audioMode = AudioManager.MODE_NORMAL,
)

fun setAttributesOnPlayer(player: MediaPlayer) {
@@ -49,4 +52,14 @@ data class AudioContextAndroid(
else -> AudioManager.STREAM_MUSIC
}
}
-}
+
+    override fun hashCode() = Objects.hash(isSpeakerphoneOn, stayAwake, contentType, usageType, audioFocus, audioMode)
+
+    override fun equals(other: Any?) = (other is AudioContextAndroid)
+        && isSpeakerphoneOn == other.isSpeakerphoneOn
+        && stayAwake == other.stayAwake
+        && contentType == other.contentType
+        && usageType == other.usageType
+        && audioFocus == other.audioFocus
+        && audioMode == other.audioMode
+}
