diff --git a/.travis.yml b/.travis.yml index 777c697..309712b 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,7 +1,7 @@ language: go go: - - 1.10.x + - 1.11.x install: - go get -t ./... diff --git a/context.go b/context.go index 8bccb8d..98e8cf1 100644 --- a/context.go +++ b/context.go @@ -71,9 +71,9 @@ func (ctx Context) Devices(kind DeviceType) ([]DeviceInfo, error) { defer contextMutex.Unlock() var playbackDevices *C.mal_device_info - var playbackDeviceCount C.uint + var playbackDeviceCount C.mal_uint32 var captureDevices *C.mal_device_info - var captureDeviceCount C.uint + var captureDeviceCount C.mal_uint32 result := C.mal_context_get_devices(ctx.cptr(), &playbackDevices, &playbackDeviceCount, diff --git a/mini_al.h b/mini_al.h index 9a1fc29..15a3310 100644 --- a/mini_al.h +++ b/mini_al.h @@ -1,5 +1,5 @@ // Audio playback and capture library. Public domain. See "unlicense" statement at the end of this file. -// mini_al - v0.8.10 - 2018-10-21 +// mini_al - v0.8.14 - 2018-12-16 // // David Reid - davidreidsoftware@gmail.com @@ -25,7 +25,7 @@ // - OSS (FreeBSD) // - OpenSL|ES (Android only) // - OpenAL -// - SDL +// - SDL2 // - Null (Silence) // // Supported Formats: @@ -77,8 +77,7 @@ // // Building for Emscripten // ----------------------- -// The Emscripten build currently uses SDL 1.2 for it's backend which means specifying "-s USE_SDL=2" is unecessary -// as of this version. +// The Emscripten build uses SDL2 and requires "-s USE_SDL=2" on the command line. // // // Playback Example @@ -131,6 +130,12 @@ // BACKEND NUANCES // =============== // +// PulseAudio +// ---------- +// - If you experience bad glitching/noise on Arch Linux, consider this fix from the Arch wiki: +// https://wiki.archlinux.org/index.php/PulseAudio/Troubleshooting#Glitches,_skips_or_crackling +// Alternatively, consider using a different backend such as ALSA. +// // Android // ------- // - To capture audio on Android, remember to add the RECORD_AUDIO permission to your manifest: @@ -148,6 +153,10 @@ // // // +// OpenAL +// ------ +// - Capture is not supported on iOS with OpenAL. Use the Core Audio backend instead. +// // // OPTIONS // ======= @@ -566,8 +575,10 @@ typedef enum typedef enum { - mal_channel_mix_mode_planar_blend = 0, // Simple averaging based on the plane(s) the channel is sitting on. + mal_channel_mix_mode_rectangular = 0, // Simple averaging based on the plane(s) the channel is sitting on. mal_channel_mix_mode_simple, // Drop excess channels; zeroed out extra channels. + mal_channel_mix_mode_custom_weights, // Use custom weights specified in mal_channel_router_config. + mal_channel_mix_mode_planar_blend = mal_channel_mix_mode_rectangular, mal_channel_mix_mode_default = mal_channel_mix_mode_planar_blend } mal_channel_mix_mode; @@ -635,6 +646,7 @@ typedef struct mal_channel channelMapIn[MAL_MAX_CHANNELS]; mal_channel channelMapOut[MAL_MAX_CHANNELS]; mal_channel_mix_mode mixingMode; + float weights[MAL_MAX_CHANNELS][MAL_MAX_CHANNELS]; // [in][out]. Only used when mixingMode is set to mal_channel_mix_mode_custom_weights. mal_bool32 noSSE2 : 1; mal_bool32 noAVX2 : 1; mal_bool32 noAVX512 : 1; @@ -653,7 +665,6 @@ struct mal_channel_router mal_bool32 useAVX512 : 1; mal_bool32 useNEON : 1; mal_uint8 shuffleTable[MAL_MAX_CHANNELS]; - float weights[MAL_MAX_CHANNELS][MAL_MAX_CHANNELS]; }; @@ -908,8 +919,8 @@ mal_format_converter_config mal_format_converter_config_init_deinterleaved(mal_f // 2) Planar Blending // Channels are blended based on a set of planes that each speaker emits audio from. 
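[editor's note: illustrative sketch, not part of the patch] The new mal_channel_mix_mode_custom_weights mode is driven through the weights[in][out] member added to mal_channel_router_config above. A minimal sketch of filling out a config for a mono-to-stereo route; the channel-count field names (channelsIn/channelsOut) are assumptions, and the onReadDeinterleaved callback described later in this documentation is omitted:

    mal_channel_router_config config;
    mal_zero_object(&config);
    config.channelsIn       = 1;    /* assumed field name */
    config.channelsOut      = 2;    /* assumed field name */
    config.channelMapIn[0]  = MAL_CHANNEL_MONO;
    config.channelMapOut[0] = MAL_CHANNEL_LEFT;
    config.channelMapOut[1] = MAL_CHANNEL_RIGHT;
    config.mixingMode       = mal_channel_mix_mode_custom_weights;
    config.weights[0][0]    = 0.7f;  /* mono in -> left out  */
    config.weights[0][1]    = 0.3f;  /* mono in -> right out */
    /* The data callback (onReadDeinterleaved) and its user data must also be set. */

    mal_channel_router router;
    if (mal_channel_router_init(&config, &router) != MAL_SUCCESS) {
        /* Handle the error. */
    }
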
// -// Planar Blending -// --------------- +// Rectangular / Planar Blending +// ----------------------------- // In this mode, channel positions are associated with a set of planes where the channel conceptually emits audio from. An example is the front/left speaker. // This speaker is positioned to the front of the listener, so you can think of it as emitting audio from the front plane. It is also positioned to the left // of the listener so you can think of it as also emitting audio from the left plane. Now consider the (unrealistic) situation where the input channel map @@ -939,7 +950,8 @@ mal_format_converter_config mal_format_converter_config_init_deinterleaved(mal_f // Note that input and output data is always deinterleaved 32-bit floating point. // // Initialize the channel router with mal_channel_router_init(). You will need to pass in a config object which specifies the input and output configuration, -// mixing mode and a callback for sending data to the router. This callback will be called when input data needs to be sent to the router for processing. +// mixing mode and a callback for sending data to the router. This callback will be called when input data needs to be sent to the router for processing. Note +// that the mixing mode is only used when a 1:1 mapping is unavailable. This includes the custom weights mode. // // Read data from the channel router with mal_channel_router_read_deinterleaved(). Output data is always 32-bit floating point. // @@ -1114,6 +1126,11 @@ void mal_pcm_f32_to_s24(void* pOut, const void* pIn, mal_uint64 count, mal_dithe void mal_pcm_f32_to_s32(void* pOut, const void* pIn, mal_uint64 count, mal_dither_mode ditherMode); void mal_pcm_convert(void* pOut, mal_format formatOut, const void* pIn, mal_format formatIn, mal_uint64 sampleCount, mal_dither_mode ditherMode); +// Deinterleaves an interleaved buffer. +void mal_deinterleave_pcm_frames(mal_format format, mal_uint32 channels, mal_uint64 frameCount, const void* pInterleavedPCMFrames, void** ppDeinterleavedPCMFrames); + +// Interleaves a group of deinterleaved buffers. +void mal_interleave_pcm_frames(mal_format format, mal_uint32 channels, mal_uint64 frameCount, const void** ppDeinterleavedPCMFrames, void* pInterleavedPCMFrames); ////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// @@ -1629,6 +1646,7 @@ struct mal_context mal_proc pa_stream_get_sample_spec; mal_proc pa_stream_get_channel_map; mal_proc pa_stream_get_buffer_attr; + mal_proc pa_stream_set_buffer_attr; mal_proc pa_stream_get_device_name; mal_proc pa_stream_set_write_callback; mal_proc pa_stream_set_read_callback; @@ -1683,6 +1701,7 @@ struct mal_context mal_proc AudioOutputUnitStart; mal_proc AudioOutputUnitStop; mal_proc AudioUnitAddPropertyListener; + mal_proc AudioUnitGetPropertyInfo; mal_proc AudioUnitGetProperty; mal_proc AudioUnitSetProperty; mal_proc AudioUnitInitialize; @@ -1824,16 +1843,11 @@ struct mal_context mal_handle hSDL; // SDL mal_proc SDL_InitSubSystem; mal_proc SDL_QuitSubSystem; - mal_proc SDL_CloseAudio; - mal_proc SDL_OpenAudio; - mal_proc SDL_PauseAudio; mal_proc SDL_GetNumAudioDevices; mal_proc SDL_GetAudioDeviceName; mal_proc SDL_CloseAudioDevice; mal_proc SDL_OpenAudioDevice; mal_proc SDL_PauseAudioDevice; - - mal_bool32 usingSDL1; } sdl; #endif #ifdef MAL_SUPPORT_NULL @@ -2014,6 +2028,7 @@ MAL_ALIGNED_STRUCT(MAL_SIMD_ALIGNMENT) mal_device /*AudioComponent*/ mal_ptr component; // <-- Can this be per-context? 
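[editor's note: illustrative sketch, not part of the patch] A minimal usage sketch for the two new helpers declared above, splitting an interleaved stereo f32 buffer into per-channel buffers and recombining it. Buffer sizes and contents are illustrative only:

    #define FRAME_COUNT 512

    float interleaved[FRAME_COUNT*2];   /* L R L R ... */
    float left [FRAME_COUNT];
    float right[FRAME_COUNT];

    void* ppDeinterleaved[2] = { left, right };
    mal_deinterleave_pcm_frames(mal_format_f32, 2, FRAME_COUNT, interleaved, ppDeinterleaved);

    /* ... process left[] and right[] independently ... */

    mal_interleave_pcm_frames(mal_format_f32, 2, FRAME_COUNT, (const void**)ppDeinterleaved, interleaved);
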
/*AudioUnit*/ mal_ptr audioUnit; /*AudioBufferList**/ mal_ptr pAudioBufferList; // Only used for input devices. + mal_event stopEvent; mal_bool32 isSwitchingDevice; /* <-- Set to true when the default device has changed and mini_al is in the process of switching. */ } coreaudio; #endif @@ -2805,6 +2820,10 @@ mal_uint64 mal_sine_wave_read_ex(mal_sine_wave* pSineWave, mal_uint64 frameCount #endif #endif +#if defined(_MSC_VER) + #pragma warning(push) + #pragma warning(disable:4752) // found Intel(R) Advanced Vector Extensions; consider using /arch:AVX +#endif #if defined(MAL_X64) || defined(MAL_X86) #if defined(_MSC_VER) && !defined(__clang__) @@ -4864,7 +4883,7 @@ mal_bool32 mal_device__get_current_frame__null(mal_device* pDevice, mal_uint32* mal_assert(pCurrentPos != NULL); *pCurrentPos = 0; - mal_uint64 currentFrameAbs = (mal_uint64)(mal_timer_get_time_in_seconds(&pDevice->null_device.timer) * pDevice->sampleRate) / pDevice->channels; + mal_uint64 currentFrameAbs = (mal_uint64)(mal_timer_get_time_in_seconds(&pDevice->null_device.timer) * pDevice->sampleRate); *pCurrentPos = (mal_uint32)(currentFrameAbs % pDevice->bufferSizeInFrames); return MAL_TRUE; @@ -4917,11 +4936,11 @@ mal_uint32 mal_device__wait_for_frames__null(mal_device* pDevice) while (!pDevice->null_device.breakFromMainLoop) { mal_uint32 framesAvailable = mal_device__get_available_frames__null(pDevice); - if (framesAvailable > 0) { + if (framesAvailable >= (pDevice->bufferSizeInFrames/pDevice->periods)) { return framesAvailable; } - mal_sleep(16); + mal_sleep(pDevice->bufferSizeInMilliseconds/pDevice->periods); } // We'll get here if the loop was terminated. Just return whatever's available. @@ -11078,8 +11097,22 @@ mal_result mal_context_init__alsa(mal_context* pContext) mal_assert(pContext != NULL); #ifndef MAL_NO_RUNTIME_LINKING - pContext->alsa.asoundSO = mal_dlopen("libasound.so"); + const char* libasoundNames[] = { + "libasound.so.2", + "libasound.so" + }; + + for (size_t i = 0; i < mal_countof(libasoundNames); ++i) { + pContext->alsa.asoundSO = mal_dlopen(libasoundNames[i]); + if (pContext->alsa.asoundSO != NULL) { + break; + } + } + if (pContext->alsa.asoundSO == NULL) { +#ifdef MAL_DEBUG_OUTPUT + printf("[ALSA] Failed to open shared object.\n"); +#endif return MAL_NO_BACKEND; } @@ -11784,6 +11817,7 @@ typedef mal_pa_stream_state_t (* mal_pa_stream_get_state_proc) typedef const mal_pa_sample_spec* (* mal_pa_stream_get_sample_spec_proc) (mal_pa_stream* s); typedef const mal_pa_channel_map* (* mal_pa_stream_get_channel_map_proc) (mal_pa_stream* s); typedef const mal_pa_buffer_attr* (* mal_pa_stream_get_buffer_attr_proc) (mal_pa_stream* s); +typedef mal_pa_operation* (* mal_pa_stream_set_buffer_attr_proc) (mal_pa_stream* s, const mal_pa_buffer_attr* attr, mal_pa_stream_success_cb_t cb, void* userdata); typedef const char* (* mal_pa_stream_get_device_name_proc) (mal_pa_stream* s); typedef void (* mal_pa_stream_set_write_callback_proc) (mal_pa_stream* s, mal_pa_stream_request_cb_t cb, void* userdata); typedef void (* mal_pa_stream_set_read_callback_proc) (mal_pa_stream* s, mal_pa_stream_request_cb_t cb, void* userdata); @@ -12305,6 +12339,10 @@ void mal_pulse_device_write_callback(mal_pa_stream* pStream, size_t sizeInBytes, mal_context* pContext = pDevice->pContext; mal_assert(pContext != NULL); +#ifdef MAL_DEBUG_OUTPUT + printf("[PulseAudio] write_callback: sizeInBytes=%d\n", (int)sizeInBytes); +#endif + size_t bytesRemaining = sizeInBytes; while (bytesRemaining > 0) { size_t bytesToReadFromClient = bytesRemaining; @@ 
-12319,19 +12357,35 @@ void mal_pulse_device_write_callback(mal_pa_stream* pStream, size_t sizeInBytes, return; } +#ifdef MAL_DEBUG_OUTPUT + printf(" bytesToReadFromClient=%d", (int)bytesToReadFromClient); +#endif + if (pBuffer != NULL && bytesToReadFromClient > 0) { mal_uint32 framesToReadFromClient = (mal_uint32)bytesToReadFromClient / (pDevice->internalChannels*mal_get_bytes_per_sample(pDevice->internalFormat)); if (framesToReadFromClient > 0) { mal_device__read_frames_from_client(pDevice, framesToReadFromClient, pBuffer); +#ifdef MAL_DEBUG_OUTPUT + printf(", framesToReadFromClient=%d\n", (int)framesToReadFromClient); +#endif + error = ((mal_pa_stream_write_proc)pContext->pulse.pa_stream_write)((mal_pa_stream*)pDevice->pulse.pStream, pBuffer, bytesToReadFromClient, NULL, 0, MAL_PA_SEEK_RELATIVE); if (error < 0) { mal_post_error(pDevice, MAL_LOG_LEVEL_ERROR, "[PulseAudio] Failed to write data to the PulseAudio stream.", mal_result_from_pulse(error)); return; } + } else { +#ifdef MAL_DEBUG_OUTPUT + printf(", framesToReadFromClient=0\n"); +#endif } bytesRemaining -= bytesToReadFromClient; + } else { +#ifdef MAL_DEBUG_OUTPUT + printf(", framesToReadFromClient=0\n"); +#endif } } } @@ -12460,6 +12514,7 @@ mal_result mal_device_init__pulse(mal_context* pContext, mal_device_type type, c mal_pa_sample_spec ss; mal_pa_channel_map cmap; mal_pa_buffer_attr attr; + mal_pa_stream_flags_t streamFlags; const mal_pa_sample_spec* pActualSS = NULL; const mal_pa_channel_map* pActualCMap = NULL; @@ -12533,53 +12588,6 @@ mal_result mal_device_init__pulse(mal_context* pContext, mal_device_type type, c ((mal_pa_operation_unref_proc)pContext->pulse.pa_operation_unref)(pOP); } - -#if 0 - mal_pa_sample_spec deviceSS; - mal_pa_channel_map deviceCMap; - if (type == mal_device_type_playback) { - deviceSS = sinkInfo.sample_spec; - deviceCMap = sinkInfo.channel_map; - } else { - deviceSS = sourceInfo.sample_spec; - deviceCMap = sourceInfo.channel_map; - } - - if (pDevice->usingDefaultFormat) { - ss.format = deviceSS.format; - } else { - ss.format = mal_format_to_pulse(pConfig->format); - } - if (ss.format == MAL_PA_SAMPLE_INVALID) { - ss.format = MAL_PA_SAMPLE_S16LE; - } - - if (pDevice->usingDefaultChannels) { - ss.channels = deviceSS.channels; - } else { - ss.channels = pConfig->channels; - } - - if (pDevice->usingDefaultSampleRate) { - ss.rate = deviceSS.rate; - } else { - ss.rate = pConfig->sampleRate; - } - - - if (pDevice->usingDefaultChannelMap) { - cmap = deviceCMap; - } else { - cmap.channels = pConfig->channels; - for (mal_uint32 iChannel = 0; iChannel < pConfig->channels; ++iChannel) { - cmap.map[iChannel] = mal_channel_position_to_pulse(pConfig->channelMap[iChannel]); - } - - if (((mal_pa_channel_map_valid_proc)pContext->pulse.pa_channel_map_valid)(&cmap) == 0 || ((mal_pa_channel_map_compatible_proc)pContext->pulse.pa_channel_map_compatible)(&cmap, &ss) == 0) { - ((mal_pa_channel_map_init_extend_proc)pContext->pulse.pa_channel_map_init_extend)(&cmap, ss.channels, MAL_PA_CHANNEL_MAP_DEFAULT); // The channel map is invalid, so just fall back to the default. - } - } -#else if (type == mal_device_type_playback) { ss = sinkInfo.sample_spec; cmap = sinkInfo.channel_map; @@ -12587,7 +12595,7 @@ mal_result mal_device_init__pulse(mal_context* pContext, mal_device_type type, c ss = sourceInfo.sample_spec; cmap = sourceInfo.channel_map; } -#endif + // Buffer size. 
bufferSizeInFrames = pDevice->bufferSizeInFrames; @@ -12606,10 +12614,14 @@ mal_result mal_device_init__pulse(mal_context* pContext, mal_device_type type, c } attr.maxlength = bufferSizeInFrames * mal_get_bytes_per_sample(mal_format_from_pulse(ss.format))*ss.channels; - attr.tlength = attr.maxlength / pConfig->periods; + attr.tlength = attr.maxlength; attr.prebuf = (mal_uint32)-1; - attr.minreq = attr.tlength; - attr.fragsize = attr.tlength; + attr.minreq = attr.maxlength / pConfig->periods; + attr.fragsize = attr.maxlength / pConfig->periods; + +#ifdef MAL_DEBUG_OUTPUT + printf("[PulseAudio] attr: maxlength=%d, tlength=%d, prebuf=%d, minreq=%d, fragsize=%d; bufferSizeInFrames=%d\n", attr.maxlength, attr.tlength, attr.prebuf, attr.minreq, attr.fragsize, bufferSizeInFrames); +#endif char streamName[256]; if (pConfig->pulse.pStreamName != NULL) { @@ -12628,11 +12640,15 @@ mal_result mal_device_init__pulse(mal_context* pContext, mal_device_type type, c } - + streamFlags = MAL_PA_STREAM_START_CORKED; + if (dev != NULL) { + streamFlags |= MAL_PA_STREAM_DONT_MOVE | MAL_PA_STREAM_FIX_FORMAT | MAL_PA_STREAM_FIX_RATE | MAL_PA_STREAM_FIX_CHANNELS; + } + if (type == mal_device_type_playback) { - error = ((mal_pa_stream_connect_playback_proc)pContext->pulse.pa_stream_connect_playback)((mal_pa_stream*)pDevice->pulse.pStream, dev, &attr, MAL_PA_STREAM_START_CORKED, NULL, NULL); + error = ((mal_pa_stream_connect_playback_proc)pContext->pulse.pa_stream_connect_playback)((mal_pa_stream*)pDevice->pulse.pStream, dev, &attr, streamFlags, NULL, NULL); } else { - error = ((mal_pa_stream_connect_record_proc)pContext->pulse.pa_stream_connect_record)((mal_pa_stream*)pDevice->pulse.pStream, dev, &attr, MAL_PA_STREAM_START_CORKED); + error = ((mal_pa_stream_connect_record_proc)pContext->pulse.pa_stream_connect_record)((mal_pa_stream*)pDevice->pulse.pStream, dev, &attr, streamFlags); } if (error != MAL_PA_OK) { @@ -12652,6 +12668,21 @@ mal_result mal_device_init__pulse(mal_context* pContext, mal_device_type type, c // Internal format. pActualSS = ((mal_pa_stream_get_sample_spec_proc)pContext->pulse.pa_stream_get_sample_spec)((mal_pa_stream*)pDevice->pulse.pStream); if (pActualSS != NULL) { + // If anything has changed between the requested and the actual sample spec, we need to update the buffer. 
+ if (ss.format != pActualSS->format || ss.channels != pActualSS->channels || ss.rate != pActualSS->rate) { + attr.maxlength = bufferSizeInFrames * mal_get_bytes_per_sample(mal_format_from_pulse(pActualSS->format))*pActualSS->channels; + attr.tlength = attr.maxlength; + attr.prebuf = (mal_uint32)-1; + attr.minreq = attr.maxlength / pConfig->periods; + attr.fragsize = attr.maxlength / pConfig->periods; + + pOP = ((mal_pa_stream_set_buffer_attr_proc)pContext->pulse.pa_stream_set_buffer_attr)((mal_pa_stream*)pDevice->pulse.pStream, &attr, NULL, NULL); + if (pOP != NULL) { + mal_device__wait_for_operation__pulse(pDevice, pOP); + ((mal_pa_operation_unref_proc)pContext->pulse.pa_operation_unref)(pOP); + } + } + ss = *pActualSS; } @@ -12680,6 +12711,10 @@ mal_result mal_device_init__pulse(mal_context* pContext, mal_device_type type, c pDevice->bufferSizeInFrames = attr.maxlength / (mal_get_bytes_per_sample(pDevice->internalFormat)*pDevice->internalChannels); pDevice->periods = attr.maxlength / attr.tlength; +#ifdef MAL_DEBUG_OUTPUT + printf("[PulseAudio] actual attr: maxlength=%d, tlength=%d, prebuf=%d, minreq=%d, fragsize=%d; pDevice->bufferSizeInFrames=%d\n", attr.maxlength, attr.tlength, attr.prebuf, attr.minreq, attr.fragsize, pDevice->bufferSizeInFrames); +#endif + // Grab the name of the device if we can. dev = ((mal_pa_stream_get_device_name_proc)pContext->pulse.pa_stream_get_device_name)((mal_pa_stream*)pDevice->pulse.pStream); @@ -12930,6 +12965,7 @@ mal_result mal_context_init__pulse(mal_context* pContext) pContext->pulse.pa_stream_get_sample_spec = (mal_proc)mal_dlsym(pContext->pulse.pulseSO, "pa_stream_get_sample_spec"); pContext->pulse.pa_stream_get_channel_map = (mal_proc)mal_dlsym(pContext->pulse.pulseSO, "pa_stream_get_channel_map"); pContext->pulse.pa_stream_get_buffer_attr = (mal_proc)mal_dlsym(pContext->pulse.pulseSO, "pa_stream_get_buffer_attr"); + pContext->pulse.pa_stream_set_buffer_attr = (mal_proc)mal_dlsym(pContext->pulse.pulseSO, "pa_stream_set_buffer_attr"); pContext->pulse.pa_stream_get_device_name = (mal_proc)mal_dlsym(pContext->pulse.pulseSO, "pa_stream_get_device_name"); pContext->pulse.pa_stream_set_write_callback = (mal_proc)mal_dlsym(pContext->pulse.pulseSO, "pa_stream_set_write_callback"); pContext->pulse.pa_stream_set_read_callback = (mal_proc)mal_dlsym(pContext->pulse.pulseSO, "pa_stream_set_read_callback"); @@ -12972,6 +13008,7 @@ mal_result mal_context_init__pulse(mal_context* pContext) mal_pa_stream_get_sample_spec_proc _pa_stream_get_sample_spec = pa_stream_get_sample_spec; mal_pa_stream_get_channel_map_proc _pa_stream_get_channel_map = pa_stream_get_channel_map; mal_pa_stream_get_buffer_attr_proc _pa_stream_get_buffer_attr = pa_stream_get_buffer_attr; + mal_pa_stream_set_buffer_attr_proc _pa_stream_set_buffer_attr = pa_stream_set_buffer_attr; mal_pa_stream_get_device_name_proc _pa_stream_get_device_name = pa_stream_get_device_name; mal_pa_stream_set_write_callback_proc _pa_stream_set_write_callback = pa_stream_set_write_callback; mal_pa_stream_set_read_callback_proc _pa_stream_set_read_callback = pa_stream_set_read_callback; @@ -13013,6 +13050,7 @@ mal_result mal_context_init__pulse(mal_context* pContext) pContext->pulse.pa_stream_get_sample_spec = (mal_proc)_pa_stream_get_sample_spec; pContext->pulse.pa_stream_get_channel_map = (mal_proc)_pa_stream_get_channel_map; pContext->pulse.pa_stream_get_buffer_attr = (mal_proc)_pa_stream_get_buffer_attr; + pContext->pulse.pa_stream_set_buffer_attr = (mal_proc)_pa_stream_set_buffer_attr; 
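[editor's note: worked example with illustrative values, not part of the patch] The buffer attributes now expose the full buffer through maxlength/tlength while minreq and fragsize are sized to a single period (previously tlength itself was divided by the period count):

    mal_uint32 bufferSizeInFrames = 4800;                   /* e.g. 100ms at 48kHz */
    mal_uint32 periods            = 2;
    mal_uint32 bytesPerFrame      = 4 /*f32*/ * 2 /*stereo*/;

    mal_pa_buffer_attr attr;
    attr.maxlength = bufferSizeInFrames * bytesPerFrame;    /* 38400 bytes    */
    attr.tlength   = attr.maxlength;                        /* 38400 bytes    */
    attr.prebuf    = (mal_uint32)-1;                        /* server default */
    attr.minreq    = attr.maxlength / periods;              /* 19200 bytes    */
    attr.fragsize  = attr.maxlength / periods;              /* 19200 bytes    */
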
pContext->pulse.pa_stream_get_device_name = (mal_proc)_pa_stream_get_device_name; pContext->pulse.pa_stream_set_write_callback = (mal_proc)_pa_stream_set_write_callback; pContext->pulse.pa_stream_set_read_callback = (mal_proc)_pa_stream_set_read_callback; @@ -13652,6 +13690,7 @@ typedef OSStatus (* mal_AudioComponentInstanceNew_proc)(AudioComponent inCompone typedef OSStatus (* mal_AudioOutputUnitStart_proc)(AudioUnit inUnit); typedef OSStatus (* mal_AudioOutputUnitStop_proc)(AudioUnit inUnit); typedef OSStatus (* mal_AudioUnitAddPropertyListener_proc)(AudioUnit inUnit, AudioUnitPropertyID inID, AudioUnitPropertyListenerProc inProc, void* inProcUserData); +typedef OSStatus (* mal_AudioUnitGetPropertyInfo_proc)(AudioUnit inUnit, AudioUnitPropertyID inID, AudioUnitScope inScope, AudioUnitElement inElement, UInt32* outDataSize, Boolean* outWriteable); typedef OSStatus (* mal_AudioUnitGetProperty_proc)(AudioUnit inUnit, AudioUnitPropertyID inID, AudioUnitScope inScope, AudioUnitElement inElement, void* outData, UInt32* ioDataSize); typedef OSStatus (* mal_AudioUnitSetProperty_proc)(AudioUnit inUnit, AudioUnitPropertyID inID, AudioUnitScope inScope, AudioUnitElement inElement, const void* inData, UInt32 inDataSize); typedef OSStatus (* mal_AudioUnitInitialize_proc)(AudioUnit inUnit); @@ -13904,6 +13943,80 @@ mal_result mal_format_from_AudioStreamBasicDescription(const AudioStreamBasicDes return MAL_FORMAT_NOT_SUPPORTED; } +mal_result mal_get_channel_map_from_AudioChannelLayout(AudioChannelLayout* pChannelLayout, mal_channel channelMap[MAL_MAX_CHANNELS]) +{ + mal_assert(pChannelLayout != NULL); + + if (pChannelLayout->mChannelLayoutTag == kAudioChannelLayoutTag_UseChannelDescriptions) { + for (UInt32 iChannel = 0; iChannel < pChannelLayout->mNumberChannelDescriptions; ++iChannel) { + channelMap[iChannel] = mal_channel_from_AudioChannelLabel(pChannelLayout->mChannelDescriptions[iChannel].mChannelLabel); + } + } else +#if 0 + if (pChannelLayout->mChannelLayoutTag == kAudioChannelLayoutTag_UseChannelBitmap) { + // This is the same kind of system that's used by Windows audio APIs. + UInt32 iChannel = 0; + AudioChannelBitmap bitmap = pChannelLayout->mChannelBitmap; + for (UInt32 iBit = 0; iBit < 32; ++iBit) { + AudioChannelBitmap bit = bitmap & (1 << iBit); + if (bit != 0) { + channelMap[iChannel++] = mal_channel_from_AudioChannelBit(bit); + } + } + } else +#endif + { + // Need to use the tag to determine the channel map. For now I'm just assuming a default channel map, but later on this should + // be updated to determine the mapping based on the tag. + UInt32 channelCount = AudioChannelLayoutTag_GetNumberOfChannels(pChannelLayout->mChannelLayoutTag); + switch (pChannelLayout->mChannelLayoutTag) + { + case kAudioChannelLayoutTag_Mono: + case kAudioChannelLayoutTag_Stereo: + case kAudioChannelLayoutTag_StereoHeadphones: + case kAudioChannelLayoutTag_MatrixStereo: + case kAudioChannelLayoutTag_MidSide: + case kAudioChannelLayoutTag_XY: + case kAudioChannelLayoutTag_Binaural: + case kAudioChannelLayoutTag_Ambisonic_B_Format: + { + mal_get_standard_channel_map(mal_standard_channel_map_default, channelCount, channelMap); + } break; + + case kAudioChannelLayoutTag_Octagonal: + { + channelMap[7] = MAL_CHANNEL_SIDE_RIGHT; + channelMap[6] = MAL_CHANNEL_SIDE_LEFT; + } // Intentional fallthrough. + case kAudioChannelLayoutTag_Hexagonal: + { + channelMap[5] = MAL_CHANNEL_BACK_CENTER; + } // Intentional fallthrough. 
+ case kAudioChannelLayoutTag_Pentagonal: + { + channelMap[4] = MAL_CHANNEL_FRONT_CENTER; + } // Intentional fallghrough. + case kAudioChannelLayoutTag_Quadraphonic: + { + channelMap[3] = MAL_CHANNEL_BACK_RIGHT; + channelMap[2] = MAL_CHANNEL_BACK_LEFT; + channelMap[1] = MAL_CHANNEL_RIGHT; + channelMap[0] = MAL_CHANNEL_LEFT; + } break; + + // TODO: Add support for more tags here. + + default: + { + mal_get_standard_channel_map(mal_standard_channel_map_default, channelCount, channelMap); + } break; + } + } + + return MAL_SUCCESS; +} + + #if defined(MAL_APPLE_DESKTOP) mal_result mal_get_device_object_ids__coreaudio(mal_context* pContext, UInt32* pDeviceCount, AudioObjectID** ppDeviceObjectIDs) // NOTE: Free the returned buffer with mal_free(). { @@ -14085,7 +14198,6 @@ mal_result mal_get_AudioObject_stream_descriptions(mal_context* pContext, AudioO } - mal_result mal_get_AudioObject_channel_layout(mal_context* pContext, AudioObjectID deviceObjectID, mal_device_type deviceType, AudioChannelLayout** ppChannelLayout) // NOTE: Free the returned pointer with mal_free(). { mal_assert(pContext != NULL); @@ -14144,79 +14256,6 @@ mal_result mal_get_AudioObject_channel_count(mal_context* pContext, AudioObjectI return MAL_SUCCESS; } -mal_result mal_get_channel_map_from_AudioChannelLayout(AudioChannelLayout* pChannelLayout, mal_channel channelMap[MAL_MAX_CHANNELS]) -{ - mal_assert(pChannelLayout != NULL); - - if (pChannelLayout->mChannelLayoutTag == kAudioChannelLayoutTag_UseChannelDescriptions) { - for (UInt32 iChannel = 0; iChannel < pChannelLayout->mNumberChannelDescriptions; ++iChannel) { - channelMap[iChannel] = mal_channel_from_AudioChannelLabel(pChannelLayout->mChannelDescriptions[iChannel].mChannelLabel); - } - } else -#if 0 - if (pChannelLayout->mChannelLayoutTag == kAudioChannelLayoutTag_UseChannelBitmap) { - // This is the same kind of system that's used by Windows audio APIs. - UInt32 iChannel = 0; - AudioChannelBitmap bitmap = pChannelLayout->mChannelBitmap; - for (UInt32 iBit = 0; iBit < 32; ++iBit) { - AudioChannelBitmap bit = bitmap & (1 << iBit); - if (bit != 0) { - channelMap[iChannel++] = mal_channel_from_AudioChannelBit(bit); - } - } - } else -#endif - { - // Need to use the tag to determine the channel map. For now I'm just assuming a default channel map, but later on this should - // be updated to determine the mapping based on the tag. - UInt32 channelCount = AudioChannelLayoutTag_GetNumberOfChannels(pChannelLayout->mChannelLayoutTag); - switch (pChannelLayout->mChannelLayoutTag) - { - case kAudioChannelLayoutTag_Mono: - case kAudioChannelLayoutTag_Stereo: - case kAudioChannelLayoutTag_StereoHeadphones: - case kAudioChannelLayoutTag_MatrixStereo: - case kAudioChannelLayoutTag_MidSide: - case kAudioChannelLayoutTag_XY: - case kAudioChannelLayoutTag_Binaural: - case kAudioChannelLayoutTag_Ambisonic_B_Format: - { - mal_get_standard_channel_map(mal_standard_channel_map_default, channelCount, channelMap); - } break; - - case kAudioChannelLayoutTag_Octagonal: - { - channelMap[7] = MAL_CHANNEL_SIDE_RIGHT; - channelMap[6] = MAL_CHANNEL_SIDE_LEFT; - } // Intentional fallthrough. - case kAudioChannelLayoutTag_Hexagonal: - { - channelMap[5] = MAL_CHANNEL_BACK_CENTER; - } // Intentional fallthrough. - case kAudioChannelLayoutTag_Pentagonal: - { - channelMap[4] = MAL_CHANNEL_FRONT_CENTER; - } // Intentional fallghrough. 
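[editor's note: worked example derived from the patch, not new code in it] The fallthrough chain in the tag switch above (the same switch is being deleted from its old location further down) accumulates a map one case at a time. For kAudioChannelLayoutTag_Octagonal the result is:

    mal_channel octagonalMap[8] = {
        MAL_CHANNEL_LEFT,          /* [0] from the Quadraphonic case */
        MAL_CHANNEL_RIGHT,         /* [1] from the Quadraphonic case */
        MAL_CHANNEL_BACK_LEFT,     /* [2] from the Quadraphonic case */
        MAL_CHANNEL_BACK_RIGHT,    /* [3] from the Quadraphonic case */
        MAL_CHANNEL_FRONT_CENTER,  /* [4] from the Pentagonal case   */
        MAL_CHANNEL_BACK_CENTER,   /* [5] from the Hexagonal case    */
        MAL_CHANNEL_SIDE_LEFT,     /* [6] from the Octagonal case    */
        MAL_CHANNEL_SIDE_RIGHT     /* [7] from the Octagonal case    */
    };
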
- case kAudioChannelLayoutTag_Quadraphonic: - { - channelMap[3] = MAL_CHANNEL_BACK_RIGHT; - channelMap[2] = MAL_CHANNEL_BACK_LEFT; - channelMap[1] = MAL_CHANNEL_RIGHT; - channelMap[0] = MAL_CHANNEL_LEFT; - } break; - - // TODO: Add support for more tags here. - - default: - { - mal_get_standard_channel_map(mal_standard_channel_map_default, channelCount, channelMap); - } break; - } - } - - return MAL_SUCCESS; -} - mal_result mal_get_AudioObject_channel_map(mal_context* pContext, AudioObjectID deviceObjectID, mal_device_type deviceType, mal_channel channelMap[MAL_MAX_CHANNELS]) { mal_assert(pContext != NULL); @@ -14229,9 +14268,11 @@ mal_result mal_get_AudioObject_channel_map(mal_context* pContext, AudioObjectID result = mal_get_channel_map_from_AudioChannelLayout(pChannelLayout, channelMap); if (result != MAL_SUCCESS) { + mal_free(pChannelLayout); return result; } + mal_free(pChannelLayout); return result; } @@ -14649,7 +14690,46 @@ mal_result mal_find_best_format__coreaudio(mal_context* pContext, AudioObjectID } #endif +mal_result mal_get_AudioUnit_channel_map(mal_context* pContext, AudioUnit audioUnit, mal_device_type deviceType, mal_channel channelMap[MAL_MAX_CHANNELS]) +{ + mal_assert(pContext != NULL); + + AudioUnitScope deviceScope; + AudioUnitElement deviceBus; + if (deviceType == mal_device_type_playback) { + deviceScope = kAudioUnitScope_Output; + deviceBus = MAL_COREAUDIO_OUTPUT_BUS; + } else { + deviceScope = kAudioUnitScope_Input; + deviceBus = MAL_COREAUDIO_INPUT_BUS; + } + + UInt32 channelLayoutSize; + OSStatus status = ((mal_AudioUnitGetPropertyInfo_proc)pContext->coreaudio.AudioUnitGetPropertyInfo)(audioUnit, kAudioUnitProperty_AudioChannelLayout, deviceScope, deviceBus, &channelLayoutSize, NULL); + if (status != noErr) { + return mal_result_from_OSStatus(status); + } + + AudioChannelLayout* pChannelLayout = (AudioChannelLayout*)mal_malloc(channelLayoutSize); + if (pChannelLayout == NULL) { + return MAL_OUT_OF_MEMORY; + } + + status = ((mal_AudioUnitGetProperty_proc)pContext->coreaudio.AudioUnitGetProperty)(audioUnit, kAudioUnitProperty_AudioChannelLayout, deviceScope, deviceBus, pChannelLayout, &channelLayoutSize); + if (status != noErr) { + mal_free(pChannelLayout); + return mal_result_from_OSStatus(status); + } + + mal_result result = mal_get_channel_map_from_AudioChannelLayout(pChannelLayout, channelMap); + if (result != MAL_SUCCESS) { + mal_free(pChannelLayout); + return result; + } + mal_free(pChannelLayout); + return MAL_SUCCESS; +} mal_bool32 mal_context_is_device_id_equal__coreaudio(mal_context* pContext, const mal_device_id* pID0, const mal_device_id* pID1) { @@ -14900,27 +14980,62 @@ OSStatus mal_on_output__coreaudio(void* pUserData, AudioUnitRenderActionFlags* p #if defined(MAL_DEBUG_OUTPUT) printf("INFO: Output Callback: busNumber=%d, frameCount=%d, mNumberBuffers=%d\n", busNumber, frameCount, pBufferList->mNumberBuffers); #endif + + // We need to check whether or not we are outputting interleaved or non-interleaved samples. The + // way we do this is slightly different for each type. + mal_stream_layout layout = mal_stream_layout_interleaved; + if (pBufferList->mBuffers[0].mNumberChannels != pDevice->internalChannels) { + layout = mal_stream_layout_deinterleaved; + } - // For now we can assume everything is interleaved. 
- for (UInt32 iBuffer = 0; iBuffer < pBufferList->mNumberBuffers; ++iBuffer) { - if (pBufferList->mBuffers[iBuffer].mNumberChannels == pDevice->internalChannels) { - mal_uint32 frameCountForThisBuffer = pBufferList->mBuffers[iBuffer].mDataByteSize / mal_get_bytes_per_frame(pDevice->internalFormat, pDevice->internalChannels); - if (frameCountForThisBuffer > 0) { - mal_device__read_frames_from_client(pDevice, frameCountForThisBuffer, pBufferList->mBuffers[iBuffer].mData); - } - - #if defined(MAL_DEBUG_OUTPUT) - printf(" frameCount=%d, mNumberChannels=%d, mDataByteSize=%d\n", frameCount, pBufferList->mBuffers[iBuffer].mNumberChannels, pBufferList->mBuffers[iBuffer].mDataByteSize); - #endif - } else { - // This case is where the number of channels in the output buffer do not match our internal channels. It could mean that it's - // not interleaved, in which case we can't handle right now since mini_al does not yet support non-interleaved streams. We just - // output silence here. - mal_zero_memory(pBufferList->mBuffers[iBuffer].mData, pBufferList->mBuffers[iBuffer].mDataByteSize); + if (layout == mal_stream_layout_interleaved) { + // For now we can assume everything is interleaved. + for (UInt32 iBuffer = 0; iBuffer < pBufferList->mNumberBuffers; ++iBuffer) { + if (pBufferList->mBuffers[iBuffer].mNumberChannels == pDevice->internalChannels) { + mal_uint32 frameCountForThisBuffer = pBufferList->mBuffers[iBuffer].mDataByteSize / mal_get_bytes_per_frame(pDevice->internalFormat, pDevice->internalChannels); + if (frameCountForThisBuffer > 0) { + mal_device__read_frames_from_client(pDevice, frameCountForThisBuffer, pBufferList->mBuffers[iBuffer].mData); + } + + #if defined(MAL_DEBUG_OUTPUT) + printf(" frameCount=%d, mNumberChannels=%d, mDataByteSize=%d\n", frameCount, pBufferList->mBuffers[iBuffer].mNumberChannels, pBufferList->mBuffers[iBuffer].mDataByteSize); + #endif + } else { + // This case is where the number of channels in the output buffer do not match our internal channels. It could mean that it's + // not interleaved, in which case we can't handle right now since mini_al does not yet support non-interleaved streams. We just + // output silence here. + mal_zero_memory(pBufferList->mBuffers[iBuffer].mData, pBufferList->mBuffers[iBuffer].mDataByteSize); - #if defined(MAL_DEBUG_OUTPUT) - printf(" WARNING: Outputting silence. frameCount=%d, mNumberChannels=%d, mDataByteSize=%d\n", frameCount, pBufferList->mBuffers[iBuffer].mNumberChannels, pBufferList->mBuffers[iBuffer].mDataByteSize); - #endif + #if defined(MAL_DEBUG_OUTPUT) + printf(" WARNING: Outputting silence. frameCount=%d, mNumberChannels=%d, mDataByteSize=%d\n", frameCount, pBufferList->mBuffers[iBuffer].mNumberChannels, pBufferList->mBuffers[iBuffer].mDataByteSize); + #endif + } + } + } else { + // This is the deinterleaved case. We need to update each buffer in groups of internalChannels. This + // assumes each buffer is the same size. 
+ mal_uint8 tempBuffer[4096]; + for (UInt32 iBuffer = 0; iBuffer < pBufferList->mNumberBuffers; iBuffer += pDevice->internalChannels) { + mal_uint32 frameCountPerBuffer = pBufferList->mBuffers[iBuffer].mDataByteSize / mal_get_bytes_per_sample(pDevice->internalFormat); + + mal_uint32 framesRemaining = frameCountPerBuffer; + while (framesRemaining > 0) { + mal_uint32 framesToRead = sizeof(tempBuffer) / mal_get_bytes_per_frame(pDevice->internalFormat, pDevice->internalChannels); + if (framesToRead > framesRemaining) { + framesToRead = framesRemaining; + } + + mal_device__read_frames_from_client(pDevice, framesToRead, tempBuffer); + + void* ppDeinterleavedBuffers[MAL_MAX_CHANNELS]; + for (mal_uint32 iChannel = 0; iChannel < pDevice->internalChannels; ++iChannel) { + ppDeinterleavedBuffers[iChannel] = (void*)mal_offset_ptr(pBufferList->mBuffers[iBuffer].mData, (frameCountPerBuffer - framesRemaining) * mal_get_bytes_per_sample(pDevice->internalFormat)); + } + + mal_deinterleave_pcm_frames(pDevice->internalFormat, pDevice->internalChannels, framesToRead, tempBuffer, ppDeinterleavedBuffers); + + framesRemaining -= framesToRead; + } } } @@ -14941,6 +15056,13 @@ OSStatus mal_on_input__coreaudio(void* pUserData, AudioUnitRenderActionFlags* pA AudioBufferList* pRenderedBufferList = (AudioBufferList*)pDevice->coreaudio.pAudioBufferList; mal_assert(pRenderedBufferList); + // We need to check whether or not we are outputting interleaved or non-interleaved samples. The + // way we do this is slightly different for each type. + mal_stream_layout layout = mal_stream_layout_interleaved; + if (pRenderedBufferList->mBuffers[0].mNumberChannels != pDevice->internalChannels) { + layout = mal_stream_layout_deinterleaved; + } + #if defined(MAL_DEBUG_OUTPUT) printf("INFO: Input Callback: busNumber=%d, frameCount=%d, mNumberBuffers=%d\n", busNumber, frameCount, pRenderedBufferList->mNumberBuffers); #endif @@ -14953,16 +15075,58 @@ OSStatus mal_on_input__coreaudio(void* pUserData, AudioUnitRenderActionFlags* pA return status; } - // For now we can assume everything is interleaved. - for (UInt32 iBuffer = 0; iBuffer < pRenderedBufferList->mNumberBuffers; ++iBuffer) { - if (pRenderedBufferList->mBuffers[iBuffer].mNumberChannels == pDevice->internalChannels) { - mal_device__send_frames_to_client(pDevice, frameCount, pRenderedBufferList->mBuffers[iBuffer].mData); - #if defined(MAL_DEBUG_OUTPUT) - printf(" mDataByteSize=%d\n", pRenderedBufferList->mBuffers[iBuffer].mDataByteSize); - #endif - } else { - // This case is where the number of channels in the output buffer do not match our internal channels. It could mean that it's - // not interleaved, in which case we can't handle right now since mini_al does not yet support non-interleaved streams. + if (layout == mal_stream_layout_interleaved) { + for (UInt32 iBuffer = 0; iBuffer < pRenderedBufferList->mNumberBuffers; ++iBuffer) { + if (pRenderedBufferList->mBuffers[iBuffer].mNumberChannels == pDevice->internalChannels) { + mal_device__send_frames_to_client(pDevice, frameCount, pRenderedBufferList->mBuffers[iBuffer].mData); + #if defined(MAL_DEBUG_OUTPUT) + printf(" mDataByteSize=%d\n", pRenderedBufferList->mBuffers[iBuffer].mDataByteSize); + #endif + } else { + // This case is where the number of channels in the output buffer do not match our internal channels. It could mean that it's + // not interleaved, in which case we can't handle right now since mini_al does not yet support non-interleaved streams. 
+ + mal_uint8 silentBuffer[4096]; + mal_zero_memory(silentBuffer, sizeof(silentBuffer)); + + mal_uint32 framesRemaining = frameCount; + while (framesRemaining > 0) { + mal_uint32 framesToSend = sizeof(silentBuffer) / mal_get_bytes_per_frame(pDevice->internalFormat, pDevice->internalChannels); + if (framesToSend > framesRemaining) { + framesToSend = framesRemaining; + } + + mal_device__send_frames_to_client(pDevice, framesToSend, silentBuffer); + framesRemaining -= framesToSend; + } + + #if defined(MAL_DEBUG_OUTPUT) + printf(" WARNING: Outputting silence. frameCount=%d, mNumberChannels=%d, mDataByteSize=%d\n", frameCount, pRenderedBufferList->mBuffers[iBuffer].mNumberChannels, pRenderedBufferList->mBuffers[iBuffer].mDataByteSize); + #endif + } + } + } else { + // This is the deinterleaved case. We need to interleave the audio data before sending it to the client. This + // assumes each buffer is the same size. + mal_uint8 tempBuffer[4096]; + for (UInt32 iBuffer = 0; iBuffer < pRenderedBufferList->mNumberBuffers; iBuffer += pDevice->internalChannels) { + mal_uint32 framesRemaining = frameCount; + while (framesRemaining > 0) { + mal_uint32 framesToSend = sizeof(tempBuffer) / mal_get_bytes_per_sample(pDevice->internalFormat); + if (framesToSend > framesRemaining) { + framesToSend = framesRemaining; + } + + void* ppDeinterleavedBuffers[MAL_MAX_CHANNELS]; + for (mal_uint32 iChannel = 0; iChannel < pDevice->internalChannels; ++iChannel) { + ppDeinterleavedBuffers[iChannel] = (void*)mal_offset_ptr(pRenderedBufferList->mBuffers[iBuffer].mData, (frameCount - framesRemaining) * mal_get_bytes_per_sample(pDevice->internalFormat)); + } + + mal_interleave_pcm_frames(pDevice->internalFormat, pDevice->internalChannels, framesToSend, (const void**)ppDeinterleavedBuffers, tempBuffer); + mal_device__send_frames_to_client(pDevice, framesToSend, tempBuffer); + + framesRemaining -= framesToSend; + } } } @@ -14985,6 +15149,8 @@ void on_start_stop__coreaudio(void* pUserData, AudioUnit audioUnit, AudioUnitPro if (onStop) { onStop(pDevice); } + + mal_event_signal(&pDevice->coreaudio.stopEvent); } else { UInt32 isRunning; UInt32 isRunningSize = sizeof(isRunning); @@ -15274,11 +15440,24 @@ mal_result mal_device_init_internal__coreaudio(mal_context* pContext, mal_device } - // Internal channel map. + // Internal channel map. This is weird in my testing. If I use the AudioObject to get the + // channel map, the channel descriptions are set to "Unknown" for some reason. To work around + // this it looks like retrieving it from the AudioUnit will work. However, and this is where + // it gets weird, it doesn't seem to work with capture devices, nor at all on iOS... Therefore + // I'm going to fall back to a default assumption in these cases. #if defined(MAL_APPLE_DESKTOP) - result = mal_get_AudioObject_channel_map(pContext, deviceObjectID, deviceType, pData->channelMapOut); + result = mal_get_AudioUnit_channel_map(pContext, pData->audioUnit, deviceType, pData->channelMapOut); if (result != MAL_SUCCESS) { - return result; + #if 0 + // Try falling back to the channel map from the AudioObject. + result = mal_get_AudioObject_channel_map(pContext, deviceObjectID, deviceType, pData->channelMapOut); + if (result != MAL_SUCCESS) { + return result; + } + #else + // Fall back to default assumptions. + mal_get_standard_channel_map(mal_standard_channel_map_default, pData->channelsOut, pData->channelMapOut); + #endif } #else // TODO: Figure out how to get the channel map using AVAudioSession. 
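[editor's note: explanatory sketch of Core Audio buffer layouts, an assumption about the platform rather than code from the patch] Both callbacks above key the interleaved/deinterleaved decision off the shape of the AudioBufferList. For a stereo f32 device the two layouts look like this, which is why comparing mBuffers[0].mNumberChannels against internalChannels works:

    /* Interleaved (what mini_al previously assumed):
         pBufferList->mNumberBuffers              == 1
         pBufferList->mBuffers[0].mNumberChannels == 2     (== internalChannels)
         pBufferList->mBuffers[0].mData           == L R L R ...

       Non-interleaved (now handled via mal_deinterleave_pcm_frames / mal_interleave_pcm_frames):
         pBufferList->mNumberBuffers              == 2     (one buffer per channel)
         pBufferList->mBuffers[i].mNumberChannels == 1     (!= internalChannels)
         pBufferList->mBuffers[0].mData           == L L L ...
         pBufferList->mBuffers[1].mData           == R R R ...                      */
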
@@ -15299,11 +15478,14 @@ mal_result mal_device_init_internal__coreaudio(mal_context* pContext, mal_device if (result != MAL_SUCCESS) { return result; } + + pData->bufferSizeInFramesOut = actualBufferSizeInFrames * pData->periodsOut; #else actualBufferSizeInFrames = 4096; + pData->bufferSizeInFramesOut = actualBufferSizeInFrames; #endif - pData->bufferSizeInFramesOut = actualBufferSizeInFrames * pData->periodsOut; + // During testing I discovered that the buffer size can be too big. You'll get an error like this: // @@ -15404,6 +15586,12 @@ mal_result mal_device_init_internal__coreaudio(mal_context* pContext, mal_device // Grab the name. #if defined(MAL_APPLE_DESKTOP) mal_get_AudioObject_name(pContext, deviceObjectID, sizeof(pData->deviceName), pData->deviceName); +#else + if (deviceType == mal_device_type_playback) { + mal_strcpy_s(pData->deviceName, sizeof(pData->deviceName), MAL_DEFAULT_PLAYBACK_DEVICE_NAME); + } else { + mal_strcpy_s(pData->deviceName, sizeof(pData->deviceName), MAL_DEFAULT_CAPTURE_DEVICE_NAME); + } #endif return result; @@ -15482,7 +15670,7 @@ mal_result mal_device_init__coreaudio(mal_context* pContext, mal_device_type dev data.usingDefaultChannelMap = pDevice->usingDefaultChannelMap; data.shareMode = pDevice->initConfig.shareMode; - mal_result result = mal_device_init_internal__coreaudio(pDevice->pContext, pDevice->type, NULL, &data, (void*)pDevice); + mal_result result = mal_device_init_internal__coreaudio(pDevice->pContext, pDevice->type, pDeviceID, &data, (void*)pDevice); if (result != MAL_SUCCESS) { return result; } @@ -15520,6 +15708,12 @@ mal_result mal_device_init__coreaudio(mal_context* pContext, mal_device_type dev ((mal_AudioObjectAddPropertyListener_proc)pDevice->pContext->coreaudio.AudioObjectAddPropertyListener)(kAudioObjectSystemObject, &propAddress, &mal_default_output_device_changed__coreaudio, pDevice); #endif + /* + When stopping the device, a callback is called on another thread. We need to wait for this callback + before returning from mal_device_stop(). This event is used for this. + */ + mal_event_init(pContext, &pDevice->coreaudio.stopEvent); + return MAL_SUCCESS; } @@ -15545,6 +15739,8 @@ mal_result mal_device__stop_backend__coreaudio(mal_device* pDevice) return mal_result_from_OSStatus(status); } + /* We need to wait for the callback to finish before returning. */ + mal_event_wait(&pDevice->coreaudio.stopEvent); return MAL_SUCCESS; } @@ -15567,6 +15763,22 @@ mal_result mal_context_uninit__coreaudio(mal_context* pContext) mal_result mal_context_init__coreaudio(mal_context* pContext) { mal_assert(pContext != NULL); + +#if defined(MAL_APPLE_MOBILE) + @autoreleasepool { + AVAudioSession* pAudioSession = [AVAudioSession sharedInstance]; + mal_assert(pAudioSession != NULL); + + [pAudioSession setCategory: AVAudioSessionCategoryPlayAndRecord error:nil]; + + // By default we want mini_al to use the speakers instead of the receiver. In the future this may + // be customizable. 
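[editor's note: condensed restatement of calls that appear in this patch] The new stopEvent is what lets mal_device_stop() block until Core Audio's stop notification has actually fired on its own thread. The three pieces live in mal_device_init__coreaudio, on_start_stop__coreaudio and mal_device__stop_backend__coreaudio respectively:

    /* At device init: create the event. */
    mal_event_init(pContext, &pDevice->coreaudio.stopEvent);

    /* In on_start_stop__coreaudio, once the audio unit reports it is no longer running: */
    mal_event_signal(&pDevice->coreaudio.stopEvent);

    /* In mal_device__stop_backend__coreaudio, after AudioOutputUnitStop() succeeds: */
    mal_event_wait(&pDevice->coreaudio.stopEvent);
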
+ mal_bool32 useSpeakers = MAL_TRUE; + if (useSpeakers) { + [pAudioSession overrideOutputAudioPort:AVAudioSessionPortOverrideSpeaker error:nil]; + } + } +#endif #if !defined(MAL_NO_RUNTIME_LINKING) && !defined(MAL_APPLE_MOBILE) pContext->coreaudio.hCoreFoundation = mal_dlopen("CoreFoundation.framework/CoreFoundation"); @@ -15617,6 +15829,7 @@ mal_result mal_context_init__coreaudio(mal_context* pContext) pContext->coreaudio.AudioOutputUnitStart = mal_dlsym(pContext->coreaudio.hAudioUnit, "AudioOutputUnitStart"); pContext->coreaudio.AudioOutputUnitStop = mal_dlsym(pContext->coreaudio.hAudioUnit, "AudioOutputUnitStop"); pContext->coreaudio.AudioUnitAddPropertyListener = mal_dlsym(pContext->coreaudio.hAudioUnit, "AudioUnitAddPropertyListener"); + pContext->coreaudio.AudioUnitGetPropertyInfo = mal_dlsym(pContext->coreaudio.hAudioUnit, "AudioUnitGetPropertyInfo"); pContext->coreaudio.AudioUnitGetProperty = mal_dlsym(pContext->coreaudio.hAudioUnit, "AudioUnitGetProperty"); pContext->coreaudio.AudioUnitSetProperty = mal_dlsym(pContext->coreaudio.hAudioUnit, "AudioUnitSetProperty"); pContext->coreaudio.AudioUnitInitialize = mal_dlsym(pContext->coreaudio.hAudioUnit, "AudioUnitInitialize"); @@ -15637,6 +15850,7 @@ mal_result mal_context_init__coreaudio(mal_context* pContext) pContext->coreaudio.AudioOutputUnitStart = (mal_proc)AudioOutputUnitStart; pContext->coreaudio.AudioOutputUnitStop = (mal_proc)AudioOutputUnitStop; pContext->coreaudio.AudioUnitAddPropertyListener = (mal_proc)AudioUnitAddPropertyListener; + pContext->coreaudio.AudioUnitGetPropertyInfo = (mal_proc)AudioUnitGetPropertyInfo; pContext->coreaudio.AudioUnitGetProperty = (mal_proc)AudioUnitGetProperty; pContext->coreaudio.AudioUnitSetProperty = (mal_proc)AudioUnitSetProperty; pContext->coreaudio.AudioUnitInitialize = (mal_proc)AudioUnitInitialize; @@ -18036,10 +18250,16 @@ void mal_device_uninit__opensl(mal_device* pDevice) // Uninit device. if (pDevice->type == mal_device_type_playback) { - if (pDevice->opensl.pAudioPlayerObj) MAL_OPENSL_OBJ(pDevice->opensl.pAudioPlayerObj)->Destroy((SLObjectItf)pDevice->opensl.pAudioPlayerObj); - if (pDevice->opensl.pOutputMixObj) MAL_OPENSL_OBJ(pDevice->opensl.pOutputMixObj)->Destroy((SLObjectItf)pDevice->opensl.pOutputMixObj); + if (pDevice->opensl.pAudioPlayerObj) { + MAL_OPENSL_OBJ(pDevice->opensl.pAudioPlayerObj)->Destroy((SLObjectItf)pDevice->opensl.pAudioPlayerObj); + } + if (pDevice->opensl.pOutputMixObj) { + MAL_OPENSL_OBJ(pDevice->opensl.pOutputMixObj)->Destroy((SLObjectItf)pDevice->opensl.pOutputMixObj); + } } else { - if (pDevice->opensl.pAudioRecorderObj) MAL_OPENSL_OBJ(pDevice->opensl.pAudioRecorderObj)->Destroy((SLObjectItf)pDevice->opensl.pAudioRecorderObj); + if (pDevice->opensl.pAudioRecorderObj) { + MAL_OPENSL_OBJ(pDevice->opensl.pAudioRecorderObj)->Destroy((SLObjectItf)pDevice->opensl.pAudioRecorderObj); + } } mal_free(pDevice->opensl.pBuffer); @@ -18369,7 +18589,6 @@ mal_result mal_device__stop_backend__opensl(mal_device* pDevice) MAL_OPENSL_BUFFERQUEUE(pDevice->opensl.pBufferQueue)->Clear((SLAndroidSimpleBufferQueueItf)pDevice->opensl.pBufferQueue); // Make sure the client is aware that the device has stopped. There may be an OpenSL|ES callback for this, but I haven't found it. 
- mal_device__set_state(pDevice, MAL_STATE_STOPPED); mal_stop_proc onStop = pDevice->onStop; if (onStop) { onStop(pDevice); @@ -19416,8 +19635,6 @@ mal_result mal_context_init__openal(mal_context* pContext) /////////////////////////////////////////////////////////////////////////////// #ifdef MAL_HAS_SDL -//#define MAL_USE_SDL_1 - #define MAL_SDL_INIT_AUDIO 0x00000010 #define MAL_AUDIO_U8 0x0008 #define MAL_AUDIO_S16 0x8010 @@ -19434,11 +19651,6 @@ mal_result mal_context_init__openal(mal_context* pContext) #define SDL_MAIN_HANDLED #ifdef MAL_EMSCRIPTEN #include - - // For now just use SDL 1.2 with Emscripten. This avoids the need for "-s USE_SDL=2" at compile time. - #ifndef MAL_USE_SDL_1 - #define MAL_USE_SDL_1 - #endif #else #include #endif @@ -19470,11 +19682,8 @@ typedef int (* MAL_PFN_SDL_InitSubSystem)(mal_uint32 flags); typedef void (* MAL_PFN_SDL_QuitSubSystem)(mal_uint32 flags); typedef int (* MAL_PFN_SDL_GetNumAudioDevices)(int iscapture); typedef const char* (* MAL_PFN_SDL_GetAudioDeviceName)(int index, int iscapture); -typedef void (* MAL_PFN_SDL_CloseAudio)(void); typedef void (* MAL_PFN_SDL_CloseAudioDevice)(MAL_SDL_AudioDeviceID dev); -typedef int (* MAL_PFN_SDL_OpenAudio)(MAL_SDL_AudioSpec* desired, MAL_SDL_AudioSpec* obtained); typedef MAL_SDL_AudioDeviceID (* MAL_PFN_SDL_OpenAudioDevice)(const char* device, int iscapture, const MAL_SDL_AudioSpec* desired, MAL_SDL_AudioSpec* obtained, int allowed_changes); -typedef void (* MAL_PFN_SDL_PauseAudio)(int pause_on); typedef void (* MAL_PFN_SDL_PauseAudioDevice)(MAL_SDL_AudioDeviceID dev, int pause_on); MAL_SDL_AudioFormat mal_format_to_sdl(mal_format format) @@ -19517,68 +19726,42 @@ mal_result mal_context_enumerate_devices__sdl(mal_context* pContext, mal_enum_de mal_assert(pContext != NULL); mal_assert(callback != NULL); -#ifndef MAL_USE_SDL_1 - if (!pContext->sdl.usingSDL1) { - mal_bool32 isTerminated = MAL_FALSE; - - // Playback - if (!isTerminated) { - int deviceCount = ((MAL_PFN_SDL_GetNumAudioDevices)pContext->sdl.SDL_GetNumAudioDevices)(0); - for (int i = 0; i < deviceCount; ++i) { - mal_device_info deviceInfo; - mal_zero_object(&deviceInfo); - - deviceInfo.id.sdl = i; - mal_strncpy_s(deviceInfo.name, sizeof(deviceInfo.name), ((MAL_PFN_SDL_GetAudioDeviceName)pContext->sdl.SDL_GetAudioDeviceName)(i, 0), (size_t)-1); - - mal_bool32 cbResult = callback(pContext, mal_device_type_playback, &deviceInfo, pUserData); - if (cbResult == MAL_FALSE) { - isTerminated = MAL_TRUE; - break; - } - } - } + mal_bool32 isTerminated = MAL_FALSE; - // Capture - if (!isTerminated) { - int deviceCount = ((MAL_PFN_SDL_GetNumAudioDevices)pContext->sdl.SDL_GetNumAudioDevices)(1); - for (int i = 0; i < deviceCount; ++i) { - mal_device_info deviceInfo; - mal_zero_object(&deviceInfo); + // Playback + if (!isTerminated) { + int deviceCount = ((MAL_PFN_SDL_GetNumAudioDevices)pContext->sdl.SDL_GetNumAudioDevices)(0); + for (int i = 0; i < deviceCount; ++i) { + mal_device_info deviceInfo; + mal_zero_object(&deviceInfo); - deviceInfo.id.sdl = i; - mal_strncpy_s(deviceInfo.name, sizeof(deviceInfo.name), ((MAL_PFN_SDL_GetAudioDeviceName)pContext->sdl.SDL_GetAudioDeviceName)(i, 1), (size_t)-1); + deviceInfo.id.sdl = i; + mal_strncpy_s(deviceInfo.name, sizeof(deviceInfo.name), ((MAL_PFN_SDL_GetAudioDeviceName)pContext->sdl.SDL_GetAudioDeviceName)(i, 0), (size_t)-1); - mal_bool32 cbResult = callback(pContext, mal_device_type_capture, &deviceInfo, pUserData); - if (cbResult == MAL_FALSE) { - isTerminated = MAL_TRUE; - break; - } + mal_bool32 cbResult = 
callback(pContext, mal_device_type_playback, &deviceInfo, pUserData); + if (cbResult == MAL_FALSE) { + isTerminated = MAL_TRUE; + break; } } - } else -#endif - { - // SDL1 only uses default devices, and does not support capture. - mal_bool32 cbResult = MAL_TRUE; + } - // Playback. - if (cbResult) { + // Capture + if (!isTerminated) { + int deviceCount = ((MAL_PFN_SDL_GetNumAudioDevices)pContext->sdl.SDL_GetNumAudioDevices)(1); + for (int i = 0; i < deviceCount; ++i) { mal_device_info deviceInfo; mal_zero_object(&deviceInfo); - mal_strncpy_s(deviceInfo.name, sizeof(deviceInfo.name), MAL_DEFAULT_PLAYBACK_DEVICE_NAME, (size_t)-1); - cbResult = callback(pContext, mal_device_type_playback, &deviceInfo, pUserData); - } -#if 0 // No capture with SDL1. - // Capture. - if (cbResult) { - mal_device_info deviceInfo; - mal_zero_object(&deviceInfo); - mal_strncpy_s(deviceInfo.name, sizeof(deviceInfo.name), MAL_DEFAULT_CAPTURE_DEVICE_NAME, (size_t)-1); - cbResult = callback(pContext, mal_device_type_capture, &deviceInfo, pUserData); + deviceInfo.id.sdl = i; + mal_strncpy_s(deviceInfo.name, sizeof(deviceInfo.name), ((MAL_PFN_SDL_GetAudioDeviceName)pContext->sdl.SDL_GetAudioDeviceName)(i, 1), (size_t)-1); + + mal_bool32 cbResult = callback(pContext, mal_device_type_capture, &deviceInfo, pUserData); + if (cbResult == MAL_FALSE) { + isTerminated = MAL_TRUE; + break; + } } -#endif } return MAL_SUCCESS; @@ -19589,24 +19772,7 @@ mal_result mal_context_get_device_info__sdl(mal_context* pContext, mal_device_ty mal_assert(pContext != NULL); (void)shareMode; -#ifndef MAL_USE_SDL_1 - if (!pContext->sdl.usingSDL1) { - if (pDeviceID == NULL) { - if (deviceType == mal_device_type_playback) { - pDeviceInfo->id.sdl = 0; - mal_strncpy_s(pDeviceInfo->name, sizeof(pDeviceInfo->name), MAL_DEFAULT_PLAYBACK_DEVICE_NAME, (size_t)-1); - } else { - pDeviceInfo->id.sdl = 0; - mal_strncpy_s(pDeviceInfo->name, sizeof(pDeviceInfo->name), MAL_DEFAULT_CAPTURE_DEVICE_NAME, (size_t)-1); - } - } else { - pDeviceInfo->id.sdl = pDeviceID->sdl; - mal_strncpy_s(pDeviceInfo->name, sizeof(pDeviceInfo->name), ((MAL_PFN_SDL_GetAudioDeviceName)pContext->sdl.SDL_GetAudioDeviceName)(pDeviceID->sdl, (deviceType == mal_device_type_playback) ? 0 : 1), (size_t)-1); - } - } else -#endif - { - // SDL1 uses default devices. + if (pDeviceID == NULL) { if (deviceType == mal_device_type_playback) { pDeviceInfo->id.sdl = 0; mal_strncpy_s(pDeviceInfo->name, sizeof(pDeviceInfo->name), MAL_DEFAULT_PLAYBACK_DEVICE_NAME, (size_t)-1); @@ -19614,6 +19780,9 @@ mal_result mal_context_get_device_info__sdl(mal_context* pContext, mal_device_ty pDeviceInfo->id.sdl = 0; mal_strncpy_s(pDeviceInfo->name, sizeof(pDeviceInfo->name), MAL_DEFAULT_CAPTURE_DEVICE_NAME, (size_t)-1); } + } else { + pDeviceInfo->id.sdl = pDeviceID->sdl; + mal_strncpy_s(pDeviceInfo->name, sizeof(pDeviceInfo->name), ((MAL_PFN_SDL_GetAudioDeviceName)pContext->sdl.SDL_GetAudioDeviceName)(pDeviceID->sdl, (deviceType == mal_device_type_playback) ? 0 : 1), (size_t)-1); } // To get an accurate idea on the backend's native format we need to open the device. Not ideal, but it's the only way. An @@ -19637,40 +19806,20 @@ mal_result mal_context_get_device_info__sdl(mal_context* pContext, mal_device_ty MAL_SDL_AudioSpec desiredSpec, obtainedSpec; mal_zero_memory(&desiredSpec, sizeof(desiredSpec)); -#ifndef MAL_USE_SDL_1 - if (!pContext->sdl.usingSDL1) { - int isCapture = (deviceType == mal_device_type_playback) ? 0 : 1; + int isCapture = (deviceType == mal_device_type_playback) ? 
0 : 1; - const char* pDeviceName = NULL; - if (pDeviceID != NULL) { - pDeviceName = ((MAL_PFN_SDL_GetAudioDeviceName)pContext->sdl.SDL_GetAudioDeviceName)(pDeviceID->sdl, isCapture); - } - - MAL_SDL_AudioDeviceID tempDeviceID = ((MAL_PFN_SDL_OpenAudioDevice)pContext->sdl.SDL_OpenAudioDevice)(pDeviceName, isCapture, &desiredSpec, &obtainedSpec, MAL_SDL_AUDIO_ALLOW_ANY_CHANGE); - if (tempDeviceID == 0) { - return mal_context_post_error(pContext, NULL, MAL_LOG_LEVEL_ERROR, "Failed to open SDL device.", MAL_FAILED_TO_OPEN_BACKEND_DEVICE); - } - - ((MAL_PFN_SDL_CloseAudioDevice)pContext->sdl.SDL_CloseAudioDevice)(tempDeviceID); - } else -#endif - { - // SDL1 uses default devices. - (void)pDeviceID; - - // SDL1 only supports playback as far as I can tell. - if (deviceType != mal_device_type_playback) { - return MAL_NO_DEVICE; - } - - MAL_SDL_AudioDeviceID tempDeviceID = ((MAL_PFN_SDL_OpenAudio)pContext->sdl.SDL_OpenAudio)(&desiredSpec, &obtainedSpec); - if (tempDeviceID != 0) { - return mal_context_post_error(pContext, NULL, MAL_LOG_LEVEL_ERROR, "Failed to open SDL device.", MAL_FAILED_TO_OPEN_BACKEND_DEVICE); - } + const char* pDeviceName = NULL; + if (pDeviceID != NULL) { + pDeviceName = ((MAL_PFN_SDL_GetAudioDeviceName)pContext->sdl.SDL_GetAudioDeviceName)(pDeviceID->sdl, isCapture); + } - ((MAL_PFN_SDL_CloseAudio)pContext->sdl.SDL_CloseAudio)(); + MAL_SDL_AudioDeviceID tempDeviceID = ((MAL_PFN_SDL_OpenAudioDevice)pContext->sdl.SDL_OpenAudioDevice)(pDeviceName, isCapture, &desiredSpec, &obtainedSpec, MAL_SDL_AUDIO_ALLOW_ANY_CHANGE); + if (tempDeviceID == 0) { + return mal_context_post_error(pContext, NULL, MAL_LOG_LEVEL_ERROR, "Failed to open SDL device.", MAL_FAILED_TO_OPEN_BACKEND_DEVICE); } + ((MAL_PFN_SDL_CloseAudioDevice)pContext->sdl.SDL_CloseAudioDevice)(tempDeviceID); + pDeviceInfo->minChannels = obtainedSpec.channels; pDeviceInfo->maxChannels = obtainedSpec.channels; pDeviceInfo->minSampleRate = obtainedSpec.freq; @@ -19697,14 +19846,7 @@ void mal_device_uninit__sdl(mal_device* pDevice) { mal_assert(pDevice != NULL); -#ifndef MAL_USE_SDL_1 - if (!pDevice->pContext->sdl.usingSDL1) { - ((MAL_PFN_SDL_CloseAudioDevice)pDevice->pContext->sdl.SDL_CloseAudioDevice)(pDevice->sdl.deviceID); - } else -#endif - { - ((MAL_PFN_SDL_CloseAudio)pDevice->pContext->sdl.SDL_CloseAudio)(); - } + ((MAL_PFN_SDL_CloseAudioDevice)pDevice->pContext->sdl.SDL_CloseAudioDevice)(pDevice->sdl.deviceID); } @@ -19713,7 +19855,11 @@ void mal_audio_callback__sdl(void* pUserData, mal_uint8* pBuffer, int bufferSize mal_device* pDevice = (mal_device*)pUserData; mal_assert(pDevice != NULL); - mal_uint32 bufferSizeInFrames = (mal_uint32)bufferSizeInBytes / mal_get_bytes_per_sample(pDevice->internalFormat) / pDevice->internalChannels; + mal_uint32 bufferSizeInFrames = (mal_uint32)bufferSizeInBytes / mal_get_bytes_per_frame(pDevice->internalFormat, pDevice->internalChannels); + +#ifdef MAL_DEBUG_OUTPUT + printf("[SDL] Callback: bufferSizeInBytes=%d, bufferSizeInFrames=%d\n", bufferSizeInBytes, bufferSizeInFrames); +#endif if (pDevice->type == mal_device_type_playback) { mal_device__read_frames_from_client(pDevice, bufferSizeInFrames, pBuffer); @@ -19760,41 +19906,16 @@ mal_result mal_device_init__sdl(mal_context* pContext, mal_device_type type, con desiredSpec.format = MAL_AUDIO_F32; } -#ifndef MAL_USE_SDL_1 - if (!pDevice->pContext->sdl.usingSDL1) { - int isCapture = (type == mal_device_type_playback) ? 
0 : 1; - - const char* pDeviceName = NULL; - if (pDeviceID != NULL) { - pDeviceName = ((MAL_PFN_SDL_GetAudioDeviceName)pDevice->pContext->sdl.SDL_GetAudioDeviceName)(pDeviceID->sdl, isCapture); - } - - pDevice->sdl.deviceID = ((MAL_PFN_SDL_OpenAudioDevice)pDevice->pContext->sdl.SDL_OpenAudioDevice)(pDeviceName, isCapture, &desiredSpec, &obtainedSpec, MAL_SDL_AUDIO_ALLOW_ANY_CHANGE); - if (pDevice->sdl.deviceID == 0) { - return mal_post_error(pDevice, MAL_LOG_LEVEL_ERROR, "Failed to open SDL2 device.", MAL_FAILED_TO_OPEN_BACKEND_DEVICE); - } - } else -#endif - { - // SDL1 uses default devices. - (void)pDeviceID; - - // SDL1 only supports playback as far as I can tell. - if (type != mal_device_type_playback) { - return MAL_NO_DEVICE; - } - - // SDL1 does not support floating point formats. - if (desiredSpec.format == MAL_AUDIO_F32) { - desiredSpec.format = MAL_AUDIO_S16; - } + int isCapture = (type == mal_device_type_playback) ? 0 : 1; - int deviceID = ((MAL_PFN_SDL_OpenAudio)pDevice->pContext->sdl.SDL_OpenAudio)(&desiredSpec, &obtainedSpec); - if (deviceID < 0) { - return mal_post_error(pDevice, MAL_LOG_LEVEL_ERROR, "Failed to open SDL1 device.", MAL_FAILED_TO_OPEN_BACKEND_DEVICE); - } + const char* pDeviceName = NULL; + if (pDeviceID != NULL) { + pDeviceName = ((MAL_PFN_SDL_GetAudioDeviceName)pDevice->pContext->sdl.SDL_GetAudioDeviceName)(pDeviceID->sdl, isCapture); + } - pDevice->sdl.deviceID = (mal_uint32)deviceID; + pDevice->sdl.deviceID = ((MAL_PFN_SDL_OpenAudioDevice)pDevice->pContext->sdl.SDL_OpenAudioDevice)(pDeviceName, isCapture, &desiredSpec, &obtainedSpec, MAL_SDL_AUDIO_ALLOW_ANY_CHANGE); + if (pDevice->sdl.deviceID == 0) { + return mal_post_error(pDevice, MAL_LOG_LEVEL_ERROR, "Failed to open SDL2 device.", MAL_FAILED_TO_OPEN_BACKEND_DEVICE); } pDevice->internalFormat = mal_format_from_sdl(obtainedSpec.format); @@ -19806,7 +19927,6 @@ mal_result mal_device_init__sdl(mal_context* pContext, mal_device_type type, con #ifdef MAL_DEBUG_OUTPUT printf("=== SDL CONFIG ===\n"); - printf("REQUESTED -> RECEIVED\n"); printf(" FORMAT: %s -> %s\n", mal_get_format_name(pConfig->format), mal_get_format_name(pDevice->internalFormat)); printf(" CHANNELS: %d -> %d\n", desiredSpec.channels, obtainedSpec.channels); printf(" SAMPLE RATE: %d -> %d\n", desiredSpec.freq, obtainedSpec.freq); @@ -19820,15 +19940,7 @@ mal_result mal_device__start_backend__sdl(mal_device* pDevice) { mal_assert(pDevice != NULL); -#ifndef MAL_USE_SDL_1 - if (!pDevice->pContext->sdl.usingSDL1) { - ((MAL_PFN_SDL_PauseAudioDevice)pDevice->pContext->sdl.SDL_PauseAudioDevice)(pDevice->sdl.deviceID, 0); - } else -#endif - { - ((MAL_PFN_SDL_PauseAudio)pDevice->pContext->sdl.SDL_PauseAudio)(0); - } - + ((MAL_PFN_SDL_PauseAudioDevice)pDevice->pContext->sdl.SDL_PauseAudioDevice)(pDevice->sdl.deviceID, 0); return MAL_SUCCESS; } @@ -19836,14 +19948,7 @@ mal_result mal_device__stop_backend__sdl(mal_device* pDevice) { mal_assert(pDevice != NULL); -#ifndef MAL_USE_SDL_1 - if (!pDevice->pContext->sdl.usingSDL1) { - ((MAL_PFN_SDL_PauseAudioDevice)pDevice->pContext->sdl.SDL_PauseAudioDevice)(pDevice->sdl.deviceID, 1); - } else -#endif - { - ((MAL_PFN_SDL_PauseAudio)pDevice->pContext->sdl.SDL_PauseAudio)(1); - } + ((MAL_PFN_SDL_PauseAudioDevice)pDevice->pContext->sdl.SDL_PauseAudioDevice)(pDevice->sdl.deviceID, 1); mal_device__set_state(pDevice, MAL_STATE_STOPPED); mal_stop_proc onStop = pDevice->onStop; @@ -19872,14 +19977,11 @@ mal_result mal_context_init__sdl(mal_context* pContext) // Run-time linking. 
const char* libNames[] = { #if defined(MAL_WIN32) - "SDL2.dll", - "SDL.dll" + "SDL2.dll" #elif defined(MAL_APPLE) - "SDL2.framework/SDL2", - "SDL.framework/SDL" + "SDL2.framework/SDL2" #else - "libSDL2-2.0.so.0", - "libSDL-1.2.so.0" + "libSDL2-2.0.so.0" #endif }; @@ -19896,41 +19998,21 @@ mal_result mal_context_init__sdl(mal_context* pContext) pContext->sdl.SDL_InitSubSystem = mal_dlsym(pContext->sdl.hSDL, "SDL_InitSubSystem"); pContext->sdl.SDL_QuitSubSystem = mal_dlsym(pContext->sdl.hSDL, "SDL_QuitSubSystem"); - pContext->sdl.SDL_CloseAudio = mal_dlsym(pContext->sdl.hSDL, "SDL_CloseAudio"); - pContext->sdl.SDL_OpenAudio = mal_dlsym(pContext->sdl.hSDL, "SDL_OpenAudio"); - pContext->sdl.SDL_PauseAudio = mal_dlsym(pContext->sdl.hSDL, "SDL_PauseAudio"); -#ifndef MAL_USE_SDL_1 pContext->sdl.SDL_GetNumAudioDevices = mal_dlsym(pContext->sdl.hSDL, "SDL_GetNumAudioDevices"); pContext->sdl.SDL_GetAudioDeviceName = mal_dlsym(pContext->sdl.hSDL, "SDL_GetAudioDeviceName"); pContext->sdl.SDL_CloseAudioDevice = mal_dlsym(pContext->sdl.hSDL, "SDL_CloseAudioDevice"); pContext->sdl.SDL_OpenAudioDevice = mal_dlsym(pContext->sdl.hSDL, "SDL_OpenAudioDevice"); pContext->sdl.SDL_PauseAudioDevice = mal_dlsym(pContext->sdl.hSDL, "SDL_PauseAudioDevice"); -#endif #else // Compile-time linking. pContext->sdl.SDL_InitSubSystem = (mal_proc)SDL_InitSubSystem; pContext->sdl.SDL_QuitSubSystem = (mal_proc)SDL_QuitSubSystem; - pContext->sdl.SDL_CloseAudio = (mal_proc)SDL_CloseAudio; - pContext->sdl.SDL_OpenAudio = (mal_proc)SDL_OpenAudio; - pContext->sdl.SDL_PauseAudio = (mal_proc)SDL_PauseAudio; -#ifndef MAL_USE_SDL_1 pContext->sdl.SDL_GetNumAudioDevices = (mal_proc)SDL_GetNumAudioDevices; pContext->sdl.SDL_GetAudioDeviceName = (mal_proc)SDL_GetAudioDeviceName; pContext->sdl.SDL_CloseAudioDevice = (mal_proc)SDL_CloseAudioDevice; pContext->sdl.SDL_OpenAudioDevice = (mal_proc)SDL_OpenAudioDevice; pContext->sdl.SDL_PauseAudioDevice = (mal_proc)SDL_PauseAudioDevice; #endif -#endif - - // We need to determine whether or not we are using SDL2 or SDL1. We can know this by looking at whether or not certain - // function pointers are NULL. - if (pContext->sdl.SDL_GetNumAudioDevices == NULL || - pContext->sdl.SDL_GetAudioDeviceName == NULL || - pContext->sdl.SDL_CloseAudioDevice == NULL || - pContext->sdl.SDL_OpenAudioDevice == NULL || - pContext->sdl.SDL_PauseAudioDevice == NULL) { - pContext->sdl.usingSDL1 = MAL_TRUE; - } int resultSDL = ((MAL_PFN_SDL_InitSubSystem)pContext->sdl.SDL_InitSubSystem)(MAL_SDL_INIT_AUDIO); if (resultSDL != 0) { @@ -20442,6 +20524,14 @@ mal_result mal_context_init(const mal_backend backends[], mal_uint32 backendCoun mal_context_post_error(pContext, NULL, MAL_LOG_LEVEL_WARNING, "Failed to initialize mutex for device info retrieval. mal_context_get_device_info() is not thread safe.", MAL_FAILED_TO_CREATE_MUTEX); } +#ifdef MAL_DEBUG_OUTPUT + printf("[mini_al] Endian: %s\n", mal_is_little_endian() ? "LE" : "BE"); + printf("[mini_al] SSE2: %s\n", mal_has_sse2() ? "YES" : "NO"); + printf("[mini_al] AVX2: %s\n", mal_has_avx2() ? "YES" : "NO"); + printf("[mini_al] AVX512F: %s\n", mal_has_avx512f() ? "YES" : "NO"); + printf("[mini_al] NEON: %s\n", mal_has_neon() ? "YES" : "NO"); +#endif + pContext->backend = backend; return result; } @@ -20780,10 +20870,17 @@ mal_result mal_device_init(mal_context* pContext, mal_device_type type, mal_devi #ifdef MAL_DEBUG_OUTPUT - printf("[WASAPI] %s (%s)\n", pDevice->name, (pDevice->type == mal_device_type_playback) ? 
"Playback" : "Capture"); + printf("[%s] %s (%s)\n", mal_get_backend_name(pDevice->pContext->backend), pDevice->name, (pDevice->type == mal_device_type_playback) ? "Playback" : "Capture"); printf(" Format: %s -> %s\n", mal_get_format_name(pDevice->format), mal_get_format_name(pDevice->internalFormat)); printf(" Channels: %d -> %d\n", pDevice->channels, pDevice->internalChannels); printf(" Sample Rate: %d -> %d\n", pDevice->sampleRate, pDevice->internalSampleRate); + printf(" Conversion:\n"); + printf(" Pre Format Conversion: %s\n", pDevice->dsp.isPreFormatConversionRequired ? "YES" : "NO"); + printf(" Post Format Conversion: %s\n", pDevice->dsp.isPostFormatConversionRequired ? "YES" : "NO"); + printf(" Channel Routing: %s\n", pDevice->dsp.isChannelRoutingRequired ? "YES" : "NO"); + printf(" SRC: %s\n", pDevice->dsp.isSRCRequired ? "YES" : "NO"); + printf(" Channel Routing at Start: %s\n", pDevice->dsp.isChannelRoutingAtStart ? "YES" : "NO"); + printf(" Passthrough: %s\n", pDevice->dsp.isPassthrough ? "YES" : "NO"); #endif @@ -20963,6 +21060,7 @@ mal_result mal_device_stop(mal_device* pDevice) // Asynchronous backends need to be handled differently. if (mal_context_is_backend_asynchronous(pDevice->pContext)) { result = pDevice->pContext->onDeviceStop(pDevice); + mal_device__set_state(pDevice, MAL_STATE_STOPPED); } else { // Synchronous backends. @@ -24598,8 +24696,8 @@ float g_malChannelPlaneRatios[MAL_CHANNEL_POSITION_COUNT][6] = { { 0.0f, 0.5f, 0.5f, 0.0f, 0.0f, 0.0f}, // MAL_CHANNEL_FRONT_RIGHT { 0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f}, // MAL_CHANNEL_FRONT_CENTER { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, // MAL_CHANNEL_LFE - { 0.5f, 0.0f, 0.0f, 0.0f, 0.5f, 0.0f}, // MAL_CHANNEL_BACK_LEFT - { 0.0f, 0.5f, 0.0f, 0.0f, 0.5f, 0.0f}, // MAL_CHANNEL_BACK_RIGHT + { 0.5f, 0.0f, 0.0f, 0.5f, 0.0f, 0.0f}, // MAL_CHANNEL_BACK_LEFT + { 0.0f, 0.5f, 0.0f, 0.5f, 0.0f, 0.0f}, // MAL_CHANNEL_BACK_RIGHT { 0.25f, 0.0f, 0.75f, 0.0f, 0.0f, 0.0f}, // MAL_CHANNEL_FRONT_LEFT_CENTER { 0.0f, 0.25f, 0.75f, 0.0f, 0.0f, 0.0f}, // MAL_CHANNEL_FRONT_RIGHT_CENTER { 0.0f, 0.0f, 0.0f, 1.0f, 0.0f, 0.0f}, // MAL_CHANNEL_BACK_CENTER @@ -24810,7 +24908,7 @@ mal_result mal_channel_router_init(const mal_channel_router_config* pConfig, mal mal_channel channelPosOut = pRouter->config.channelMapOut[iChannelOut]; if (channelPosIn == channelPosOut) { - pRouter->weights[iChannelIn][iChannelOut] = 1; + pRouter->config.weights[iChannelIn][iChannelOut] = 1; } } } @@ -24825,7 +24923,7 @@ mal_result mal_channel_router_init(const mal_channel_router_config* pConfig, mal mal_channel channelPosOut = pRouter->config.channelMapOut[iChannelOut]; if (channelPosOut != MAL_CHANNEL_NONE && channelPosOut != MAL_CHANNEL_MONO && channelPosOut != MAL_CHANNEL_LFE) { - pRouter->weights[iChannelIn][iChannelOut] = 1; + pRouter->config.weights[iChannelIn][iChannelOut] = 1; } } } @@ -24853,7 +24951,7 @@ mal_result mal_channel_router_init(const mal_channel_router_config* pConfig, mal mal_channel channelPosIn = pRouter->config.channelMapIn[iChannelIn]; if (channelPosIn != MAL_CHANNEL_NONE && channelPosIn != MAL_CHANNEL_MONO && channelPosIn != MAL_CHANNEL_LFE) { - pRouter->weights[iChannelIn][iChannelOut] += monoWeight; + pRouter->config.weights[iChannelIn][iChannelOut] += monoWeight; } } } @@ -24863,56 +24961,67 @@ mal_result mal_channel_router_init(const mal_channel_router_config* pConfig, mal // Input and output channels that are not present on the other side need to be blended in based on spatial locality. 
- if (pRouter->config.mixingMode != mal_channel_mix_mode_simple) { - // Unmapped input channels. - for (mal_uint32 iChannelIn = 0; iChannelIn < pRouter->config.channelsIn; ++iChannelIn) { - mal_channel channelPosIn = pRouter->config.channelMapIn[iChannelIn]; - - if (mal_channel_router__is_spatial_channel_position(pRouter, channelPosIn)) { - if (!mal_channel_map_contains_channel_position(pRouter->config.channelsOut, pRouter->config.channelMapOut, channelPosIn)) { - for (mal_uint32 iChannelOut = 0; iChannelOut < pRouter->config.channelsOut; ++iChannelOut) { - mal_channel channelPosOut = pRouter->config.channelMapOut[iChannelOut]; - - if (mal_channel_router__is_spatial_channel_position(pRouter, channelPosOut)) { - float weight = 0; - if (pRouter->config.mixingMode == mal_channel_mix_mode_planar_blend) { - weight = mal_channel_router__calculate_input_channel_planar_weight(pRouter, channelPosIn, channelPosOut); - } - - // Only apply the weight if we haven't already got some contribution from the respective channels. - if (pRouter->weights[iChannelIn][iChannelOut] == 0) { - pRouter->weights[iChannelIn][iChannelOut] = weight; + switch (pRouter->config.mixingMode) + { + case mal_channel_mix_mode_rectangular: + { + // Unmapped input channels. + for (mal_uint32 iChannelIn = 0; iChannelIn < pRouter->config.channelsIn; ++iChannelIn) { + mal_channel channelPosIn = pRouter->config.channelMapIn[iChannelIn]; + + if (mal_channel_router__is_spatial_channel_position(pRouter, channelPosIn)) { + if (!mal_channel_map_contains_channel_position(pRouter->config.channelsOut, pRouter->config.channelMapOut, channelPosIn)) { + for (mal_uint32 iChannelOut = 0; iChannelOut < pRouter->config.channelsOut; ++iChannelOut) { + mal_channel channelPosOut = pRouter->config.channelMapOut[iChannelOut]; + + if (mal_channel_router__is_spatial_channel_position(pRouter, channelPosOut)) { + float weight = 0; + if (pRouter->config.mixingMode == mal_channel_mix_mode_planar_blend) { + weight = mal_channel_router__calculate_input_channel_planar_weight(pRouter, channelPosIn, channelPosOut); + } + + // Only apply the weight if we haven't already got some contribution from the respective channels. + if (pRouter->config.weights[iChannelIn][iChannelOut] == 0) { + pRouter->config.weights[iChannelIn][iChannelOut] = weight; + } } } } } } - } - - // Unmapped output channels. - for (mal_uint32 iChannelOut = 0; iChannelOut < pRouter->config.channelsOut; ++iChannelOut) { - mal_channel channelPosOut = pRouter->config.channelMapOut[iChannelOut]; - if (mal_channel_router__is_spatial_channel_position(pRouter, channelPosOut)) { - if (!mal_channel_map_contains_channel_position(pRouter->config.channelsIn, pRouter->config.channelMapIn, channelPosOut)) { - for (mal_uint32 iChannelIn = 0; iChannelIn < pRouter->config.channelsIn; ++iChannelIn) { - mal_channel channelPosIn = pRouter->config.channelMapIn[iChannelIn]; - - if (mal_channel_router__is_spatial_channel_position(pRouter, channelPosIn)) { - float weight = 0; - if (pRouter->config.mixingMode == mal_channel_mix_mode_planar_blend) { - weight = mal_channel_router__calculate_input_channel_planar_weight(pRouter, channelPosIn, channelPosOut); - } + // Unmapped output channels. + for (mal_uint32 iChannelOut = 0; iChannelOut < pRouter->config.channelsOut; ++iChannelOut) { + mal_channel channelPosOut = pRouter->config.channelMapOut[iChannelOut]; - // Only apply the weight if we haven't already got some contribution from the respective channels. 
- if (pRouter->weights[iChannelIn][iChannelOut] == 0) { - pRouter->weights[iChannelIn][iChannelOut] = weight; + if (mal_channel_router__is_spatial_channel_position(pRouter, channelPosOut)) { + if (!mal_channel_map_contains_channel_position(pRouter->config.channelsIn, pRouter->config.channelMapIn, channelPosOut)) { + for (mal_uint32 iChannelIn = 0; iChannelIn < pRouter->config.channelsIn; ++iChannelIn) { + mal_channel channelPosIn = pRouter->config.channelMapIn[iChannelIn]; + + if (mal_channel_router__is_spatial_channel_position(pRouter, channelPosIn)) { + float weight = 0; + if (pRouter->config.mixingMode == mal_channel_mix_mode_planar_blend) { + weight = mal_channel_router__calculate_input_channel_planar_weight(pRouter, channelPosIn, channelPosOut); + } + + // Only apply the weight if we haven't already got some contribution from the respective channels. + if (pRouter->config.weights[iChannelIn][iChannelOut] == 0) { + pRouter->config.weights[iChannelIn][iChannelOut] = weight; + } } } } } } - } + } break; + + case mal_channel_mix_mode_custom_weights: + case mal_channel_mix_mode_simple: + default: + { + /* Fallthrough. */ + } break; } return MAL_SUCCESS; @@ -24964,7 +25073,7 @@ void mal_channel_router__do_routing(mal_channel_router* pRouter, mal_uint64 fram mal_uint64 iFrame = 0; #if defined(MAL_SUPPORT_NEON) if (mal_channel_router__can_use_neon(pRouter, ppSamplesOut[iChannelOut], ppSamplesIn[iChannelIn])) { - float32x4_t weight = vmovq_n_f32(pRouter->weights[iChannelIn][iChannelOut]); + float32x4_t weight = vmovq_n_f32(pRouter->config.weights[iChannelIn][iChannelOut]); mal_uint64 frameCount4 = frameCount/4; for (mal_uint64 iFrame4 = 0; iFrame4 < frameCount4; iFrame4 += 1) { @@ -24979,7 +25088,7 @@ void mal_channel_router__do_routing(mal_channel_router* pRouter, mal_uint64 fram #endif #if defined(MAL_SUPPORT_AVX512) if (mal_channel_router__can_use_avx512(pRouter, ppSamplesOut[iChannelOut], ppSamplesIn[iChannelIn])) { - __m512 weight = _mm512_set1_ps(pRouter->weights[iChannelIn][iChannelOut]); + __m512 weight = _mm512_set1_ps(pRouter->config.weights[iChannelIn][iChannelOut]); mal_uint64 frameCount16 = frameCount/16; for (mal_uint64 iFrame16 = 0; iFrame16 < frameCount16; iFrame16 += 1) { @@ -24994,7 +25103,7 @@ void mal_channel_router__do_routing(mal_channel_router* pRouter, mal_uint64 fram #endif #if defined(MAL_SUPPORT_AVX2) if (mal_channel_router__can_use_avx2(pRouter, ppSamplesOut[iChannelOut], ppSamplesIn[iChannelIn])) { - __m256 weight = _mm256_set1_ps(pRouter->weights[iChannelIn][iChannelOut]); + __m256 weight = _mm256_set1_ps(pRouter->config.weights[iChannelIn][iChannelOut]); mal_uint64 frameCount8 = frameCount/8; for (mal_uint64 iFrame8 = 0; iFrame8 < frameCount8; iFrame8 += 1) { @@ -25009,7 +25118,7 @@ void mal_channel_router__do_routing(mal_channel_router* pRouter, mal_uint64 fram #endif #if defined(MAL_SUPPORT_SSE2) if (mal_channel_router__can_use_sse2(pRouter, ppSamplesOut[iChannelOut], ppSamplesIn[iChannelIn])) { - __m128 weight = _mm_set1_ps(pRouter->weights[iChannelIn][iChannelOut]); + __m128 weight = _mm_set1_ps(pRouter->config.weights[iChannelIn][iChannelOut]); mal_uint64 frameCount4 = frameCount/4; for (mal_uint64 iFrame4 = 0; iFrame4 < frameCount4; iFrame4 += 1) { @@ -25022,10 +25131,10 @@ void mal_channel_router__do_routing(mal_channel_router* pRouter, mal_uint64 fram } else #endif { // Reference. 
- float weight0 = pRouter->weights[iChannelIn][iChannelOut]; - float weight1 = pRouter->weights[iChannelIn][iChannelOut]; - float weight2 = pRouter->weights[iChannelIn][iChannelOut]; - float weight3 = pRouter->weights[iChannelIn][iChannelOut]; + float weight0 = pRouter->config.weights[iChannelIn][iChannelOut]; + float weight1 = pRouter->config.weights[iChannelIn][iChannelOut]; + float weight2 = pRouter->config.weights[iChannelIn][iChannelOut]; + float weight3 = pRouter->config.weights[iChannelIn][iChannelOut]; mal_uint64 frameCount4 = frameCount/4; for (mal_uint64 iFrame4 = 0; iFrame4 < frameCount4; iFrame4 += 1) { @@ -25039,7 +25148,7 @@ void mal_channel_router__do_routing(mal_channel_router* pRouter, mal_uint64 fram // Leftover. for (; iFrame < frameCount; ++iFrame) { - ppSamplesOut[iChannelOut][iFrame] += ppSamplesIn[iChannelIn][iFrame] * pRouter->weights[iChannelIn][iChannelOut]; + ppSamplesOut[iChannelOut][iFrame] += ppSamplesIn[iChannelIn][iFrame] * pRouter->config.weights[iChannelIn][iChannelOut]; } } } @@ -25324,9 +25433,6 @@ mal_uint64 mal_src_read_deinterleaved(mal_src* pSRC, mal_uint64 frameCount, void } mal_src_algorithm algorithm = pSRC->config.algorithm; - if (pSRC->config.sampleRateIn == pSRC->config.sampleRateOut) { - //algorithm = mal_src_algorithm_none; - } // Can use a function pointer for this. switch (algorithm) { @@ -26151,6 +26257,92 @@ void mal_pcm_convert(void* pOut, mal_format formatOut, const void* pIn, mal_form } } +void mal_deinterleave_pcm_frames(mal_format format, mal_uint32 channels, mal_uint64 frameCount, const void* pInterleavedPCMFrames, void** ppDeinterleavedPCMFrames) +{ + if (pInterleavedPCMFrames == NULL || ppDeinterleavedPCMFrames == NULL) { + return; // Invalid args. + } + + // For efficiency we do this per format. 
+ switch (format) { + case mal_format_s16: + { + const mal_int16* pSrcS16 = (const mal_int16*)pInterleavedPCMFrames; + for (mal_uint64 iPCMFrame = 0; iPCMFrame < frameCount; ++iPCMFrame) { + for (mal_uint32 iChannel = 0; iChannel < channels; ++iChannel) { + mal_int16* pDstS16 = (mal_int16*)ppDeinterleavedPCMFrames[iChannel]; + pDstS16[iPCMFrame] = pSrcS16[iPCMFrame*channels+iChannel]; + } + } + } break; + + case mal_format_f32: + { + const float* pSrcF32 = (const float*)pInterleavedPCMFrames; + for (mal_uint64 iPCMFrame = 0; iPCMFrame < frameCount; ++iPCMFrame) { + for (mal_uint32 iChannel = 0; iChannel < channels; ++iChannel) { + float* pDstF32 = (float*)ppDeinterleavedPCMFrames[iChannel]; + pDstF32[iPCMFrame] = pSrcF32[iPCMFrame*channels+iChannel]; + } + } + } break; + + default: + { + mal_uint32 sampleSizeInBytes = mal_get_bytes_per_sample(format); + + for (mal_uint64 iPCMFrame = 0; iPCMFrame < frameCount; ++iPCMFrame) { + for (mal_uint32 iChannel = 0; iChannel < channels; ++iChannel) { + void* pDst = mal_offset_ptr(ppDeinterleavedPCMFrames[iChannel], iPCMFrame*sampleSizeInBytes); + const void* pSrc = mal_offset_ptr(pInterleavedPCMFrames, (iPCMFrame*channels+iChannel)*sampleSizeInBytes); + memcpy(pDst, pSrc, sampleSizeInBytes); + } + } + } break; + } +} + +void mal_interleave_pcm_frames(mal_format format, mal_uint32 channels, mal_uint64 frameCount, const void** ppDeinterleavedPCMFrames, void* pInterleavedPCMFrames) +{ + switch (format) + { + case mal_format_s16: + { + mal_int16* pDstS16 = (mal_int16*)pInterleavedPCMFrames; + for (mal_uint64 iPCMFrame = 0; iPCMFrame < frameCount; ++iPCMFrame) { + for (mal_uint32 iChannel = 0; iChannel < channels; ++iChannel) { + const mal_int16* pSrcS16 = (const mal_int16*)ppDeinterleavedPCMFrames[iChannel]; + pDstS16[iPCMFrame*channels+iChannel] = pSrcS16[iPCMFrame]; + } + } + } break; + + case mal_format_f32: + { + float* pDstF32 = (float*)pInterleavedPCMFrames; + for (mal_uint64 iPCMFrame = 0; iPCMFrame < frameCount; ++iPCMFrame) { + for (mal_uint32 iChannel = 0; iChannel < channels; ++iChannel) { + const float* pSrcF32 = (const float*)ppDeinterleavedPCMFrames[iChannel]; + pDstF32[iPCMFrame*channels+iChannel] = pSrcF32[iPCMFrame]; + } + } + } break; + + default: + { + mal_uint32 sampleSizeInBytes = mal_get_bytes_per_sample(format); + + for (mal_uint64 iPCMFrame = 0; iPCMFrame < frameCount; ++iPCMFrame) { + for (mal_uint32 iChannel = 0; iChannel < channels; ++iChannel) { + void* pDst = mal_offset_ptr(pInterleavedPCMFrames, (iPCMFrame*channels+iChannel)*sampleSizeInBytes); + const void* pSrc = mal_offset_ptr(ppDeinterleavedPCMFrames[iChannel], iPCMFrame*sampleSizeInBytes); + memcpy(pDst, pSrc, sampleSizeInBytes); + } + } + } break; + } +} + typedef struct @@ -26911,7 +27103,7 @@ mal_result mal_decoder_internal_on_seek_to_frame__wav(mal_decoder* pDecoder, mal drwav* pWav = (drwav*)pDecoder->pInternalDecoder; mal_assert(pWav != NULL); - drwav_bool32 result = drwav_seek_to_sample(pWav, frameIndex*pWav->channels); + drwav_bool32 result = drwav_seek_to_pcm_frame(pWav, frameIndex); if (result) { return MAL_SUCCESS; } else { @@ -26936,9 +27128,9 @@ mal_uint32 mal_decoder_internal_on_read_frames__wav(mal_dsp* pDSP, mal_uint32 fr mal_assert(pWav != NULL); switch (pDecoder->internalFormat) { - case mal_format_s16: return (mal_uint32)drwav_read_s16(pWav, frameCount*pDecoder->internalChannels, (drwav_int16*)pSamplesOut) / pDecoder->internalChannels; - case mal_format_s32: return (mal_uint32)drwav_read_s32(pWav, frameCount*pDecoder->internalChannels, 
(drwav_int32*)pSamplesOut) / pDecoder->internalChannels; - case mal_format_f32: return (mal_uint32)drwav_read_f32(pWav, frameCount*pDecoder->internalChannels, (float*)pSamplesOut) / pDecoder->internalChannels; + case mal_format_s16: return (mal_uint32)drwav_read_pcm_frames_s16(pWav, frameCount, (drwav_int16*)pSamplesOut); + case mal_format_s32: return (mal_uint32)drwav_read_pcm_frames_s32(pWav, frameCount, (drwav_int32*)pSamplesOut); + case mal_format_f32: return (mal_uint32)drwav_read_pcm_frames_f32(pWav, frameCount, (float*)pSamplesOut); default: break; } @@ -27038,7 +27230,7 @@ mal_result mal_decoder_internal_on_seek_to_frame__flac(mal_decoder* pDecoder, ma drflac* pFlac = (drflac*)pDecoder->pInternalDecoder; mal_assert(pFlac != NULL); - drflac_bool32 result = drflac_seek_to_sample(pFlac, frameIndex*pFlac->channels); + drflac_bool32 result = drflac_seek_to_pcm_frame(pFlac, frameIndex); if (result) { return MAL_SUCCESS; } else { @@ -27063,7 +27255,7 @@ mal_uint32 mal_decoder_internal_on_read_frames__flac(mal_dsp* pDSP, mal_uint32 f drflac* pFlac = (drflac*)pDecoder->pInternalDecoder; mal_assert(pFlac != NULL); - return (mal_uint32)drflac_read_s32(pFlac, frameCount*pDecoder->internalChannels, (drflac_int32*)pSamplesOut) / pDecoder->internalChannels; + return (mal_uint32)drflac_read_pcm_frames_s32(pFlac, frameCount, (drflac_int32*)pSamplesOut); } mal_result mal_decoder_init_flac__internal(const mal_decoder_config* pConfig, mal_decoder* pDecoder) @@ -27402,7 +27594,7 @@ mal_result mal_decoder_internal_on_seek_to_frame__mp3(mal_decoder* pDecoder, mal drmp3* pMP3 = (drmp3*)pDecoder->pInternalDecoder; mal_assert(pMP3 != NULL); - drmp3_bool32 result = drmp3_seek_to_frame(pMP3, frameIndex); + drmp3_bool32 result = drmp3_seek_to_pcm_frame(pMP3, frameIndex); if (result) { return MAL_SUCCESS; } else { @@ -27428,7 +27620,7 @@ mal_uint32 mal_decoder_internal_on_read_frames__mp3(mal_dsp* pDSP, mal_uint32 fr drmp3* pMP3 = (drmp3*)pDecoder->pInternalDecoder; mal_assert(pMP3 != NULL); - return (mal_uint32)drmp3_read_f32(pMP3, frameCount, (float*)pSamplesOut); + return (mal_uint32)drmp3_read_pcm_frames_f32(pMP3, frameCount, (float*)pSamplesOut); } mal_result mal_decoder_init_mp3__internal(const mal_decoder_config* pConfig, mal_decoder* pDecoder) @@ -28302,6 +28494,9 @@ mal_uint64 mal_sine_wave_read_ex(mal_sine_wave* pSineWave, mal_uint64 frameCount return frameCount; } +#if defined(_MSC_VER) + #pragma warning(pop) +#endif #endif // MINI_AL_IMPLEMENTATION @@ -28309,6 +28504,24 @@ mal_uint64 mal_sine_wave_read_ex(mal_sine_wave* pSineWave, mal_uint64 frameCount // REVISION HISTORY // ================ // +// v0.8.14 - 2018-12-16 +// - Core Audio: Fix a bug where the device state is not set correctly after stopping. +// - Add support for custom weights to the channel router. +// - Update decoders to use updated APIs in dr_flac, dr_mp3 and dr_wav. +// +// v0.8.13 - 2018-12-04 +// - Core Audio: Fix a bug with channel mapping. +// - Fix a bug with channel routing where the back/left and back/right channels have the wrong weight. +// +// v0.8.12 - 2018-11-27 +// - Drop support for SDL 1.2. The Emscripten build now requires "-s USE_SDL=2". +// - Fix a linking error with ALSA. +// - Fix a bug on iOS where the device name is not set correctly. +// +// v0.8.11 - 2018-11-21 +// - iOS bug fixes. +// - Minor tweaks to PulseAudio. +// // v0.8.10 - 2018-10-21 // - Core Audio: Fix a hang when uninitializing a device. // - Fix a bug where an incorrect value is returned from mal_device_stop().
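
This update introduces mal_deinterleave_pcm_frames() and mal_interleave_pcm_frames(), whose implementations appear above. The following sketch is not part of the patch; it only illustrates how the two helpers might be used for a stereo f32 round trip. The function name, buffer names and FRAME_COUNT are made up for illustration.

#include "mini_al.h"

#define FRAME_COUNT 1024

// Splits an interleaved stereo stream into per-channel buffers, then packs it back.
void example_deinterleave_roundtrip(float* pInterleaved /* FRAME_COUNT*2 samples, L R L R ... */)
{
    float left[FRAME_COUNT];
    float right[FRAME_COUNT];

    // Interleaved -> planar.
    void* ppDeinterleaved[2] = { left, right };
    mal_deinterleave_pcm_frames(mal_format_f32, 2, FRAME_COUNT, pInterleaved, ppDeinterleaved);

    // ... per-channel processing would go here ...

    // Planar -> interleaved.
    const void* ppPlanar[2] = { left, right };
    mal_interleave_pcm_frames(mal_format_f32, 2, FRAME_COUNT, ppPlanar, pInterleaved);
}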
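
The update also adds a custom-weights mixing mode to the channel router; the weights table now lives in mal_channel_router_config and is indexed [input channel][output channel]. Below is a minimal configuration sketch (not part of the patch), assuming a stereo-to-mono downmix. The weight values are illustrative, and the router's read callback, the mal_channel_router_init() call and error handling are omitted.

// Sketch only: per-channel-pair weights for the channel router.
void example_custom_weights(void)
{
    mal_channel_router_config routerConfig = {0};  // Zero-init so unspecified weights stay at 0.

    routerConfig.channelsIn       = 2;
    routerConfig.channelMapIn[0]  = MAL_CHANNEL_FRONT_LEFT;
    routerConfig.channelMapIn[1]  = MAL_CHANNEL_FRONT_RIGHT;

    routerConfig.channelsOut      = 1;
    routerConfig.channelMapOut[0] = MAL_CHANNEL_MONO;

    routerConfig.mixingMode       = mal_channel_mix_mode_custom_weights;
    routerConfig.weights[0][0]    = 0.5f;  // Intended contribution of front/left to the mono output.
    routerConfig.weights[1][0]    = 0.5f;  // Intended contribution of front/right to the mono output.
}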