/*
  Simple DirectMedia Layer
  Copyright (C) 1997-2025 Sam Lantinga <slouken@libsdl.org>

  This software is provided 'as-is', without any express or implied
  warranty.  In no event will the authors be held liable for any damages
  arising from the use of this software.

  Permission is granted to anyone to use this software for any purpose,
  including commercial applications, and to alter it and redistribute it
  freely, subject to the following restrictions:

  1. The origin of this software must not be misrepresented; you must not
     claim that you wrote the original software. If you use this software
     in a product, an acknowledgment in the product documentation would be
     appreciated but is not required.
  2. Altered source versions must be plainly marked as such, and must not be
     misrepresented as being the original software.
  3. This notice may not be removed or altered from any source distribution.
*/
#include "SDL_internal.h"

#include "SDL_sysaudio.h"

#include "SDL_audioqueue.h"
#include "SDL_audioresample.h"

#ifndef SDL_INT_MAX
#define SDL_INT_MAX ((int)(~0u>>1))
#endif

#ifdef SDL_SSE3_INTRINSICS
// Convert from stereo to mono. Average left and right.
static void SDL_TARGETING("sse3") SDL_ConvertStereoToMono_SSE3(float *dst, const float *src, int num_frames)
{
    LOG_DEBUG_AUDIO_CONVERT("stereo", "mono (using SSE3)");

    const __m128 divby2 = _mm_set1_ps(0.5f);
    int i = num_frames;

    /* Do SSE blocks as long as we have 16 bytes available.
       Just use unaligned load/stores, if the memory at runtime is
       aligned it'll be just as fast on modern processors */
    while (i >= 4) {  // 4 * float32
        _mm_storeu_ps(dst, _mm_mul_ps(_mm_hadd_ps(_mm_loadu_ps(src), _mm_loadu_ps(src + 4)), divby2));
        i -= 4;
        src += 8;
        dst += 4;
    }

    // Finish off any leftovers with scalar operations.
    while (i) {
        *dst = (src[0] + src[1]) * 0.5f;
        dst++;
        i--;
        src += 2;
    }
}
#endif
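
/* Worked example of the SSE3 block above (illustration only, not part of the
   build): with interleaved input L0 R0 L1 R1 L2 R2 L3 R3,
   _mm_hadd_ps(lo, hi) produces (L0+R0, L1+R1, L2+R2, L3+R3), and the
   multiply by 0.5f turns those pairwise sums into four mono samples,
   matching the scalar fallback `(src[0] + src[1]) * 0.5f`. */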

#ifdef SDL_SSE_INTRINSICS
// Convert from mono to stereo. Duplicate to stereo left and right.
static void SDL_TARGETING("sse") SDL_ConvertMonoToStereo_SSE(float *dst, const float *src, int num_frames)
{
    LOG_DEBUG_AUDIO_CONVERT("mono", "stereo (using SSE)");

    // convert backwards, since output is growing in-place.
    src += (num_frames-4) * 1;
    dst += (num_frames-4) * 2;

    /* Do SSE blocks as long as we have 16 bytes available.
       Just use unaligned load/stores, if the memory at runtime is
       aligned it'll be just as fast on modern processors */
    // convert backwards, since output is growing in-place.
    int i = num_frames;
    while (i >= 4) {                                           // 4 * float32
        const __m128 input = _mm_loadu_ps(src);                // A B C D
        _mm_storeu_ps(dst, _mm_unpacklo_ps(input, input));     // A A B B
        _mm_storeu_ps(dst + 4, _mm_unpackhi_ps(input, input)); // C C D D
        i -= 4;
        src -= 4;
        dst -= 8;
    }

    // Finish off any leftovers with scalar operations.
    src += 3;
    dst += 6;   // adjust for smaller buffers.
    while (i) {  // convert backwards, since output is growing in-place.
        const float srcFC = src[0];
        dst[1] /* FR */ = srcFC;
        dst[0] /* FL */ = srcFC;
        i--;
        src--;
        dst -= 2;
    }
}
#endif

// Include the autogenerated channel converters...
#include "SDL_audio_channel_converters.h"

static bool SDL_IsSupportedAudioFormat(const SDL_AudioFormat fmt)
{
    switch (fmt) {
    case SDL_AUDIO_U8:
    case SDL_AUDIO_S8:
    case SDL_AUDIO_S16LE:
    case SDL_AUDIO_S16BE:
    case SDL_AUDIO_S32LE:
    case SDL_AUDIO_S32BE:
    case SDL_AUDIO_F32LE:
    case SDL_AUDIO_F32BE:
        return true;  // supported.

    default:
        break;
    }

    return false;  // unsupported.
}

static bool SDL_IsSupportedChannelCount(const int channels)
{
    return ((channels >= 1) && (channels <= 8));
}

bool SDL_ChannelMapIsBogus(const int *chmap, int channels)
{
    if (chmap) {
        for (int i = 0; i < channels; i++) {
            const int mapping = chmap[i];
            if ((mapping < -1) || (mapping >= channels)) {
                return true;
            }
        }
    }
    return false;
}

bool SDL_ChannelMapIsDefault(const int *chmap, int channels)
{
    if (chmap) {
        for (int i = 0; i < channels; i++) {
            if (chmap[i] != i) {
                return false;
            }
        }
    }
    return true;
}
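
/* Channel map examples (illustrative): for a stereo stream, the default map
   is {0, 1}, which SDL_ChannelMapIsDefault() treats the same as no map at
   all; {1, 0} swaps left and right; {0, -1} keeps the left channel and
   writes silence to the right, since -1 maps a destination channel to the
   silence value. {0, 2} would be rejected by SDL_ChannelMapIsBogus()
   because 2 is not a valid source index for a 2-channel map. */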

// Swizzle audio channels. src and dst can be the same pointer. It does not change the buffer size.
static void SwizzleAudio(const int num_frames, void *dst, const void *src, int channels, const int *map, SDL_AudioFormat fmt)
{
    const int bitsize = (int) SDL_AUDIO_BITSIZE(fmt);

    bool has_null_mappings = false;  // !!! FIXME: calculate this when setting the channel map instead.
    for (int i = 0; i < channels; i++) {
        if (map[i] == -1) {
            has_null_mappings = true;
            break;
        }
    }

    #define CHANNEL_SWIZZLE(bits) { \
        Uint##bits *tdst = (Uint##bits *) dst; /* treat as UintX; we only care about moving bits and not the type here. */ \
        const Uint##bits *tsrc = (const Uint##bits *) src; \
        if (src != dst) {  /* don't need to copy to a temporary frame first. */ \
            if (has_null_mappings) { \
                const Uint##bits silence = (Uint##bits) SDL_GetSilenceValueForFormat(fmt); \
                for (int i = 0; i < num_frames; i++, tsrc += channels, tdst += channels) { \
                    for (int ch = 0; ch < channels; ch++) { \
                        const int m = map[ch]; \
                        tdst[ch] = (m == -1) ? silence : tsrc[m]; \
                    } \
                } \
            } else { \
                for (int i = 0; i < num_frames; i++, tsrc += channels, tdst += channels) { \
                    for (int ch = 0; ch < channels; ch++) { \
                        tdst[ch] = tsrc[map[ch]]; \
                    } \
                } \
            } \
        } else { \
            bool isstack; \
            Uint##bits *tmp = (Uint##bits *) SDL_small_alloc(int, channels, &isstack); /* !!! FIXME: allocate this when setting the channel map instead. */ \
            if (tmp) { \
                if (has_null_mappings) { \
                    const Uint##bits silence = (Uint##bits) SDL_GetSilenceValueForFormat(fmt); \
                    for (int i = 0; i < num_frames; i++, tsrc += channels, tdst += channels) { \
                        for (int ch = 0; ch < channels; ch++) { \
                            const int m = map[ch]; \
                            tmp[ch] = (m == -1) ? silence : tsrc[m]; \
                        } \
                        for (int ch = 0; ch < channels; ch++) { \
                            tdst[ch] = tmp[ch]; \
                        } \
                    } \
                } else { \
                    for (int i = 0; i < num_frames; i++, tsrc += channels, tdst += channels) { \
                        for (int ch = 0; ch < channels; ch++) { \
                            tmp[ch] = tsrc[map[ch]]; \
                        } \
                        for (int ch = 0; ch < channels; ch++) { \
                            tdst[ch] = tmp[ch]; \
                        } \
                    } \
                } \
                SDL_small_free(tmp, isstack); \
            } \
        } \
    }

    switch (bitsize) {
        case 8: CHANNEL_SWIZZLE(8); break;
        case 16: CHANNEL_SWIZZLE(16); break;
        case 32: CHANNEL_SWIZZLE(32); break;
        // we don't currently have int64 or double audio datatypes, so no `case 64` for now.
        default: SDL_assert(!"Unsupported audio datatype size"); break;
    }

    #undef CHANNEL_SWIZZLE
}
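
/* Swizzle example (illustrative): with fmt = SDL_AUDIO_F32, channels = 2 and
   map = {1, 0}, each destination frame becomes { src[1], src[0] }, i.e. left
   and right are exchanged; with map = {0, -1} each frame becomes
   { src[0], silence }. Because src and dst may alias, the in-place branch
   stages one frame in a small temporary buffer before writing it back. */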


// This does type and channel conversions _but not resampling_ (resampling happens in SDL_AudioStream).
// This does not check parameter validity, (beyond asserts), it expects you did that already!
// All of this has to function as if src==dst==scratch (conversion in-place), but as a convenience
// if you're just going to copy the final output elsewhere, you can specify a different output pointer.
//
// The scratch buffer must be able to store `num_frames * CalculateMaxSampleFrameSize(src_format, src_channels, dst_format, dst_channels)` bytes.
// If the scratch buffer is NULL, this restriction applies to the output buffer instead.
//
// Since this is a convenient point that audio goes through even if it doesn't need format conversion,
// we also handle gain adjustment here, so we don't have to make another pass over the data later.
// Strictly speaking, this is also a "conversion".  :)
void ConvertAudio(int num_frames,
                  const void *src, SDL_AudioFormat src_format, int src_channels, const int *src_map,
                  void *dst, SDL_AudioFormat dst_format, int dst_channels, const int *dst_map,
                  void *scratch, float gain)
{
    SDL_assert(src != NULL);
    SDL_assert(dst != NULL);
    SDL_assert(SDL_IsSupportedAudioFormat(src_format));
    SDL_assert(SDL_IsSupportedAudioFormat(dst_format));
    SDL_assert(SDL_IsSupportedChannelCount(src_channels));
    SDL_assert(SDL_IsSupportedChannelCount(dst_channels));

    if (!num_frames) {
        return;  // no data to convert, quit.
    }

#if DEBUG_AUDIO_CONVERT
    SDL_Log("SDL_AUDIO_CONVERT: Convert format %04x->%04x, channels %u->%u", src_format, dst_format, src_channels, dst_channels);
#endif

    const int dst_bitsize = (int) SDL_AUDIO_BITSIZE(dst_format);
    const int dst_sample_frame_size = (dst_bitsize / 8) * dst_channels;

    const bool chmaps_match = (src_channels == dst_channels) && SDL_AudioChannelMapsEqual(src_channels, src_map, dst_map);
    if (chmaps_match) {
        src_map = dst_map = NULL;  // NULL both these out so we don't do any unnecessary swizzling.
    }

    /* Type conversion goes like this now:
        - swizzle through source channel map to "standard" layout.
        - byteswap to CPU native format first if necessary.
        - convert to native Float32 if necessary.
        - change channel count if necessary.
        - convert to final data format.
        - byteswap back to foreign format if necessary.
        - swizzle through dest channel map from "standard" layout.

       The expectation is we can process data faster in float32
       (possibly with SIMD), and making several passes over the same
       buffer is likely to be CPU cache-friendly, avoiding the
       biggest performance hit in modern times. Previously we had
       (script-generated) custom converters for every data type and
       it was a bloat on SDL compile times and final library size. */

    // swizzle input to "standard" format if necessary.
    if (src_map) {
        void* buf = scratch ? scratch : dst;  // use scratch if available, since it has to be big enough to hold src, unless it's NULL, then dst has to be.
        SwizzleAudio(num_frames, buf, src, src_channels, src_map, src_format);
        src = buf;
    }

    // see if we can skip float conversion entirely.
    if ((src_channels == dst_channels) && (gain == 1.0f)) {
        if (src_format == dst_format) {
            // nothing to do, we're already in the right format, just copy it over if necessary.
            if (dst_map) {
                SwizzleAudio(num_frames, dst, src, dst_channels, dst_map, dst_format);
            } else if (src != dst) {
                SDL_memcpy(dst, src, num_frames * dst_sample_frame_size);
            }
            return;
        }

        // just a byteswap needed?
        if ((src_format ^ dst_format) == SDL_AUDIO_MASK_BIG_ENDIAN) {
            if (dst_map) {  // do this first, in case we duplicate channels, we can avoid an extra copy if src != dst.
                SwizzleAudio(num_frames, dst, src, dst_channels, dst_map, dst_format);
                src = dst;
            }
            ConvertAudioSwapEndian(dst, src, num_frames * dst_channels, dst_bitsize);
            return;  // all done.
        }
    }

    if (!scratch) {
        scratch = dst;
    }

    const bool srcconvert = src_format != SDL_AUDIO_F32;
    const bool channelconvert = src_channels != dst_channels;
    const bool dstconvert = dst_format != SDL_AUDIO_F32;

    // get us to float format.
    if (srcconvert) {
        void* buf = (channelconvert || dstconvert) ? scratch : dst;
        ConvertAudioToFloat((float *) buf, src, num_frames * src_channels, src_format);
        src = buf;
    }

    // Gain adjustment
    if (gain != 1.0f) {
        float *buf = (float *)((channelconvert || dstconvert) ? scratch : dst);
        const int total_samples = num_frames * src_channels;
        if (src == buf) {
            for (int i = 0; i < total_samples; i++) {
                buf[i] *= gain;
            }
        } else {
            float *fsrc = (float *)src;
            for (int i = 0; i < total_samples; i++) {
                buf[i] = fsrc[i] * gain;
            }
        }
        src = buf;
    }

    // Channel conversion

    if (channelconvert) {
        SDL_AudioChannelConverter channel_converter;
        SDL_AudioChannelConverter override = NULL;

        // SDL_IsSupportedChannelCount should have caught these asserts, or we added a new format and forgot to update the table.
        SDL_assert(src_channels <= SDL_arraysize(channel_converters));
        SDL_assert(dst_channels <= SDL_arraysize(channel_converters[0]));

        channel_converter = channel_converters[src_channels - 1][dst_channels - 1];
        SDL_assert(channel_converter != NULL);

        // swap in some SIMD versions for a few of these.
        if (channel_converter == SDL_ConvertStereoToMono) {
            #ifdef SDL_SSE3_INTRINSICS
            if (!override && SDL_HasSSE3()) { override = SDL_ConvertStereoToMono_SSE3; }
            #endif
        } else if (channel_converter == SDL_ConvertMonoToStereo) {
            #ifdef SDL_SSE_INTRINSICS
            if (!override && SDL_HasSSE()) { override = SDL_ConvertMonoToStereo_SSE; }
            #endif
        }

        if (override) {
            channel_converter = override;
        }

        void* buf = dstconvert ? scratch : dst;
        channel_converter((float *) buf, (const float *) src, num_frames);
        src = buf;
    }

    // Resampling is not done in here. SDL_AudioStream handles that.

    // Move to final data type.
    if (dstconvert) {
        ConvertAudioFromFloat(dst, (const float *) src, num_frames * dst_channels, dst_format);
        src = dst;
    }

    SDL_assert(src == dst);  // if we got here, we _had_ to have done _something_. Otherwise, we should have memcpy'd!

    if (dst_map) {
        SwizzleAudio(num_frames, dst, src, dst_channels, dst_map, dst_format);
    }
}
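
/* Example conversion path (illustrative): converting SDL_AUDIO_S16BE 5.1 to
   SDL_AUDIO_U8 stereo walks the passes above as: S16BE to native float32,
   optional gain, the 6->2 channel converter, then float32 to U8. A pure
   endian flip (e.g. SDL_AUDIO_S16LE to SDL_AUDIO_S16BE with matching channel
   counts and gain 1.0) takes the early byteswap-only path instead and never
   touches float32. */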

// Calculate the largest frame size needed to convert between the two formats.
static int CalculateMaxFrameSize(SDL_AudioFormat src_format, int src_channels, SDL_AudioFormat dst_format, int dst_channels)
{
    const int src_format_size = SDL_AUDIO_BYTESIZE(src_format);
    const int dst_format_size = SDL_AUDIO_BYTESIZE(dst_format);
    const int max_app_format_size = SDL_max(src_format_size, dst_format_size);
    const int max_format_size = SDL_max(max_app_format_size, sizeof (float));  // ConvertAudio and ResampleAudio use floats.
    const int max_channels = SDL_max(src_channels, dst_channels);
    return max_format_size * max_channels;
}
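
/* Worked example (illustrative): for SDL_AUDIO_S16 stereo in and
   SDL_AUDIO_F32 5.1 out, max_format_size = max(2, 4, sizeof(float)) = 4 and
   max_channels = max(2, 6) = 6, so the largest intermediate frame is
   24 bytes, and scratch space is sized as num_frames * 24. */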

static Sint64 GetAudioStreamResampleRate(SDL_AudioStream* stream, int src_freq, Sint64 resample_offset)
{
    src_freq = (int)((float)src_freq * stream->freq_ratio);

    Sint64 resample_rate = SDL_GetResampleRate(src_freq, stream->dst_spec.freq);

    // If src_freq == dst_freq, and we aren't between frames, don't resample
    if ((resample_rate == 0x100000000) && (resample_offset == 0)) {
        resample_rate = 0;
    }

    return resample_rate;
}
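
/* Note (an interpretation of the check above, not authoritative): the value
   returned by SDL_GetResampleRate() appears to be a fixed-point ratio of the
   effective source frequency to the destination frequency, with 0x100000000
   standing for exactly 1.0. A 1:1 ratio with no fractional resample_offset
   is collapsed to 0, which callers treat as "no resampling needed". */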

static bool UpdateAudioStreamInputSpec(SDL_AudioStream *stream, const SDL_AudioSpec *spec, const int *chmap)
{
    if (SDL_AudioSpecsEqual(&stream->input_spec, spec, stream->input_chmap, chmap)) {
        return true;
    }

    if (!SDL_ResetAudioQueueHistory(stream->queue, SDL_GetResamplerHistoryFrames())) {
        return false;
    }

    if (!chmap) {
        stream->input_chmap = NULL;
    } else {
        const size_t chmaplen = sizeof (*chmap) * spec->channels;
        stream->input_chmap = stream->input_chmap_storage;
        SDL_memcpy(stream->input_chmap, chmap, chmaplen);
    }

    SDL_copyp(&stream->input_spec, spec);

    return true;
}

SDL_AudioStream *SDL_CreateAudioStream(const SDL_AudioSpec *src_spec, const SDL_AudioSpec *dst_spec)
{
    SDL_ChooseAudioConverters();
    SDL_SetupAudioResampler();

    SDL_AudioStream *result = (SDL_AudioStream *)SDL_calloc(1, sizeof(SDL_AudioStream));
    if (!result) {
        return NULL;
    }

    result->freq_ratio = 1.0f;
    result->gain = 1.0f;
    result->queue = SDL_CreateAudioQueue(8192);

    if (!result->queue) {
        SDL_free(result);
        return NULL;
    }

    result->lock = SDL_CreateMutex();
    if (!result->lock) {
        SDL_free(result->queue);
        SDL_free(result);
        return NULL;
    }

    OnAudioStreamCreated(result);

    if (!SDL_SetAudioStreamFormat(result, src_spec, dst_spec)) {
        SDL_DestroyAudioStream(result);
        return NULL;
    }

    return result;
}
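
/* Minimal usage sketch (illustrative only; the spec values are arbitrary):

       SDL_AudioSpec src = { SDL_AUDIO_S16, 2, 44100 };
       SDL_AudioSpec dst = { SDL_AUDIO_F32, 2, 48000 };
       SDL_AudioStream *stream = SDL_CreateAudioStream(&src, &dst);
       if (!stream) {
           SDL_Log("SDL_CreateAudioStream failed: %s", SDL_GetError());
       }
       // ... SDL_PutAudioStreamData() / SDL_GetAudioStreamData() ...
       SDL_DestroyAudioStream(stream);
*/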

SDL_PropertiesID SDL_GetAudioStreamProperties(SDL_AudioStream *stream)
{
    if (!stream) {
        SDL_InvalidParamError("stream");
        return 0;
    }
    SDL_LockMutex(stream->lock);
    if (stream->props == 0) {
        stream->props = SDL_CreateProperties();
    }
    SDL_UnlockMutex(stream->lock);
    return stream->props;
}

bool SDL_SetAudioStreamGetCallback(SDL_AudioStream *stream, SDL_AudioStreamCallback callback, void *userdata)
{
    if (!stream) {
        return SDL_InvalidParamError("stream");
    }
    SDL_LockMutex(stream->lock);
    stream->get_callback = callback;
    stream->get_callback_userdata = userdata;
    SDL_UnlockMutex(stream->lock);
    return true;
}

bool SDL_SetAudioStreamPutCallback(SDL_AudioStream *stream, SDL_AudioStreamCallback callback, void *userdata)
{
    if (!stream) {
        return SDL_InvalidParamError("stream");
    }
    SDL_LockMutex(stream->lock);
    stream->put_callback = callback;
    stream->put_callback_userdata = userdata;
    SDL_UnlockMutex(stream->lock);
    return true;
}

bool SDL_LockAudioStream(SDL_AudioStream *stream)
{
    if (!stream) {
        return SDL_InvalidParamError("stream");
    }
    SDL_LockMutex(stream->lock);
    return true;
}

bool SDL_UnlockAudioStream(SDL_AudioStream *stream)
{
    if (!stream) {
        return SDL_InvalidParamError("stream");
    }
    SDL_UnlockMutex(stream->lock);
    return true;
}

bool SDL_GetAudioStreamFormat(SDL_AudioStream *stream, SDL_AudioSpec *src_spec, SDL_AudioSpec *dst_spec)
{
    if (!stream) {
        return SDL_InvalidParamError("stream");
    }

    SDL_LockMutex(stream->lock);
    if (src_spec) {
        SDL_copyp(src_spec, &stream->src_spec);
    }
    if (dst_spec) {
        SDL_copyp(dst_spec, &stream->dst_spec);
    }
    SDL_UnlockMutex(stream->lock);

    if (src_spec && src_spec->format == 0) {
        return SDL_SetError("Stream has no source format");
    } else if (dst_spec && dst_spec->format == 0) {
        return SDL_SetError("Stream has no destination format");
    }

    return true;
}

bool SDL_SetAudioStreamFormat(SDL_AudioStream *stream, const SDL_AudioSpec *src_spec, const SDL_AudioSpec *dst_spec)
{
    if (!stream) {
        return SDL_InvalidParamError("stream");
    }

    // note that while we've removed the maximum frequency checks, SDL _will_
    // fail to resample to extremely high sample rates correctly. Really high,
    // like 196608000Hz. File a bug.  :P

    if (src_spec) {
        if (!SDL_IsSupportedAudioFormat(src_spec->format)) {
            return SDL_InvalidParamError("src_spec->format");
        } else if (!SDL_IsSupportedChannelCount(src_spec->channels)) {
            return SDL_InvalidParamError("src_spec->channels");
        } else if (src_spec->freq <= 0) {
            return SDL_InvalidParamError("src_spec->freq");
        }
    }

    if (dst_spec) {
        if (!SDL_IsSupportedAudioFormat(dst_spec->format)) {
            return SDL_InvalidParamError("dst_spec->format");
        } else if (!SDL_IsSupportedChannelCount(dst_spec->channels)) {
            return SDL_InvalidParamError("dst_spec->channels");
        } else if (dst_spec->freq <= 0) {
            return SDL_InvalidParamError("dst_spec->freq");
        }
    }

    SDL_LockMutex(stream->lock);

    // quietly refuse to change the format of the end currently bound to a device.
    if (stream->bound_device) {
        if (stream->bound_device->physical_device->recording) {
            src_spec = NULL;
        } else {
            dst_spec = NULL;
        }
    }

    if (src_spec) {
        if (src_spec->channels != stream->src_spec.channels) {
            SDL_free(stream->src_chmap);
            stream->src_chmap = NULL;
        }
        SDL_copyp(&stream->src_spec, src_spec);
    }

    if (dst_spec) {
        if (dst_spec->channels != stream->dst_spec.channels) {
            SDL_free(stream->dst_chmap);
            stream->dst_chmap = NULL;
        }
        SDL_copyp(&stream->dst_spec, dst_spec);
    }

    SDL_UnlockMutex(stream->lock);

    return true;
}

bool SetAudioStreamChannelMap(SDL_AudioStream *stream, const SDL_AudioSpec *spec, int **stream_chmap, const int *chmap, int channels, int isinput)
{
    if (!stream) {
        return SDL_InvalidParamError("stream");
    }

    bool result = true;

    SDL_LockMutex(stream->lock);

    if (channels != spec->channels) {
        result = SDL_SetError("Wrong number of channels");
    } else if (!*stream_chmap && !chmap) {
        // already at default, we're good.
    } else if (*stream_chmap && chmap && (SDL_memcmp(*stream_chmap, chmap, sizeof (*chmap) * channels) == 0)) {
        // already have this map, don't allocate/copy it again.
    } else if (SDL_ChannelMapIsBogus(chmap, channels)) {
        result = SDL_SetError("Invalid channel mapping");
    } else {
        if (SDL_ChannelMapIsDefault(chmap, channels)) {
            chmap = NULL;  // just apply a default mapping.
        }
        if (chmap) {
            int *dupmap = SDL_ChannelMapDup(chmap, channels);
            if (!dupmap) {
                result = SDL_SetError("Invalid channel mapping");
            } else {
                SDL_free(*stream_chmap);
                *stream_chmap = dupmap;
            }
        } else {
            SDL_free(*stream_chmap);
            *stream_chmap = NULL;
        }
    }

    SDL_UnlockMutex(stream->lock);
    return result;
}

bool SDL_SetAudioStreamInputChannelMap(SDL_AudioStream *stream, const int *chmap, int channels)
{
    return SetAudioStreamChannelMap(stream, &stream->src_spec, &stream->src_chmap, chmap, channels, 1);
}

bool SDL_SetAudioStreamOutputChannelMap(SDL_AudioStream *stream, const int *chmap, int channels)
{
    return SetAudioStreamChannelMap(stream, &stream->dst_spec, &stream->dst_chmap, chmap, channels, 0);
}
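
/* Usage sketch (illustrative): to swap left and right on a stereo input,
   pass a two-entry map whose length matches the stream's current source
   channel count:

       const int swap_lr[2] = { 1, 0 };
       SDL_SetAudioStreamInputChannelMap(stream, swap_lr, 2);

   Passing NULL with the matching channel count restores the default
   identity mapping. */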

int *SDL_GetAudioStreamInputChannelMap(SDL_AudioStream *stream, int *count)
{
    int *result = NULL;
    int channels = 0;
    if (stream) {
        SDL_LockMutex(stream->lock);
        channels = stream->src_spec.channels;
        result = SDL_ChannelMapDup(stream->src_chmap, channels);
        SDL_UnlockMutex(stream->lock);
    }

    if (count) {
        *count = channels;
    }

    return result;
}

int *SDL_GetAudioStreamOutputChannelMap(SDL_AudioStream *stream, int *count)
{
    int *result = NULL;
    int channels = 0;
    if (stream) {
        SDL_LockMutex(stream->lock);
        channels = stream->dst_spec.channels;
        result = SDL_ChannelMapDup(stream->dst_chmap, channels);
        SDL_UnlockMutex(stream->lock);
    }

    if (count) {
        *count = channels;
    }

    return result;
}

float SDL_GetAudioStreamFrequencyRatio(SDL_AudioStream *stream)
{
    if (!stream) {
        SDL_InvalidParamError("stream");
        return 0.0f;
    }

    SDL_LockMutex(stream->lock);
    const float freq_ratio = stream->freq_ratio;
    SDL_UnlockMutex(stream->lock);

    return freq_ratio;
}

bool SDL_SetAudioStreamFrequencyRatio(SDL_AudioStream *stream, float freq_ratio)
{
    if (!stream) {
        return SDL_InvalidParamError("stream");
    }

    // Picked mostly arbitrarily.
    const float min_freq_ratio = 0.01f;
    const float max_freq_ratio = 100.0f;

    if (freq_ratio < min_freq_ratio) {
        return SDL_SetError("Frequency ratio is too low");
    } else if (freq_ratio > max_freq_ratio) {
        return SDL_SetError("Frequency ratio is too high");
    }

    SDL_LockMutex(stream->lock);
    stream->freq_ratio = freq_ratio;
    SDL_UnlockMutex(stream->lock);

    return true;
}
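
/* Usage sketch (illustrative): the ratio scales the source frequency before
   resampling (see GetAudioStreamResampleRate above), so values above 1.0
   consume input faster and raise the pitch, and values below 1.0 do the
   opposite:

       SDL_SetAudioStreamFrequencyRatio(stream, 2.0f);   // roughly double speed
       SDL_SetAudioStreamFrequencyRatio(stream, 0.5f);   // roughly half speed
*/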

float SDL_GetAudioStreamGain(SDL_AudioStream *stream)
{
    if (!stream) {
        SDL_InvalidParamError("stream");
        return -1.0f;
    }

    SDL_LockMutex(stream->lock);
    const float gain = stream->gain;
    SDL_UnlockMutex(stream->lock);

    return gain;
}

bool SDL_SetAudioStreamGain(SDL_AudioStream *stream, float gain)
{
    if (!stream) {
        return SDL_InvalidParamError("stream");
    } else if (gain < 0.0f) {
        return SDL_InvalidParamError("gain");
    }

    SDL_LockMutex(stream->lock);
    stream->gain = gain;
    SDL_UnlockMutex(stream->lock);

    return true;
}

static bool CheckAudioStreamIsFullySetup(SDL_AudioStream *stream)
{
    if (stream->src_spec.format == 0) {
        return SDL_SetError("Stream has no source format");
    } else if (stream->dst_spec.format == 0) {
        return SDL_SetError("Stream has no destination format");
    }

    return true;
}

static bool PutAudioStreamBuffer(SDL_AudioStream *stream, const void *buf, int len, SDL_ReleaseAudioBufferCallback callback, void* userdata)
{
#if DEBUG_AUDIOSTREAM
    SDL_Log("AUDIOSTREAM: wants to put %d bytes", len);
#endif

    SDL_LockMutex(stream->lock);

    if (!CheckAudioStreamIsFullySetup(stream)) {
        SDL_UnlockMutex(stream->lock);
        return false;
    }

    if ((len % SDL_AUDIO_FRAMESIZE(stream->src_spec)) != 0) {
        SDL_UnlockMutex(stream->lock);
        return SDL_SetError("Can't add partial sample frames");
    }

    SDL_AudioTrack* track = NULL;

    if (callback) {
        track = SDL_CreateAudioTrack(stream->queue, &stream->src_spec, stream->src_chmap, (Uint8 *)buf, len, len, callback, userdata);

        if (!track) {
            SDL_UnlockMutex(stream->lock);
            return false;
        }
    }

    const int prev_available = stream->put_callback ? SDL_GetAudioStreamAvailable(stream) : 0;

    bool result = true;

    if (track) {
        SDL_AddTrackToAudioQueue(stream->queue, track);
    } else {
        result = SDL_WriteToAudioQueue(stream->queue, &stream->src_spec, stream->src_chmap, (const Uint8 *)buf, len);
    }

    if (result) {
        if (stream->put_callback) {
            const int newavail = SDL_GetAudioStreamAvailable(stream) - prev_available;
            stream->put_callback(stream->put_callback_userdata, stream, newavail, newavail);
        }
    }

    SDL_UnlockMutex(stream->lock);

    return result;
}

static void SDLCALL FreeAllocatedAudioBuffer(void *userdata, const void *buf, int len)
{
    SDL_free((void*) buf);
}

bool SDL_PutAudioStreamData(SDL_AudioStream *stream, const void *buf, int len)
{
    if (!stream) {
        return SDL_InvalidParamError("stream");
    } else if (!buf) {
        return SDL_InvalidParamError("buf");
    } else if (len < 0) {
        return SDL_InvalidParamError("len");
    } else if (len == 0) {
        return true; // nothing to do.
    }

    // When copying in large amounts of data, try and do as much work as possible
    // outside of the stream lock, otherwise the output device is likely to be starved.
    const int large_input_thresh = 64 * 1024;

    if (len >= large_input_thresh) {
        void *data = SDL_malloc(len);

        if (!data) {
            return false;
        }

        SDL_memcpy(data, buf, len);
        buf = data;

        bool ret = PutAudioStreamBuffer(stream, buf, len, FreeAllocatedAudioBuffer, NULL);
        if (!ret) {
            SDL_free(data);
        }
        return ret;
    }

    return PutAudioStreamBuffer(stream, buf, len, NULL, NULL);
}
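
/* Usage sketch (illustrative; `samples` is a hypothetical S16 stereo buffer
   matching the stream's source spec): data is queued in whole sample frames
   and converted lazily when it is read back out:

       Sint16 samples[2 * 512];          // 512 stereo frames
       // ... fill samples ...
       if (!SDL_PutAudioStreamData(stream, samples, (int) sizeof(samples))) {
           SDL_Log("put failed: %s", SDL_GetError());
       }
*/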

bool SDL_FlushAudioStream(SDL_AudioStream *stream)
{
    if (!stream) {
        return SDL_InvalidParamError("stream");
    }

    SDL_LockMutex(stream->lock);
    SDL_FlushAudioQueue(stream->queue);
    SDL_UnlockMutex(stream->lock);

    return true;
}

/* this does not save the previous contents of stream->work_buffer. It's a work buffer!!
   The returned buffer is aligned/padded for use with SIMD instructions. */
static Uint8 *EnsureAudioStreamWorkBufferSize(SDL_AudioStream *stream, size_t newlen)
{
    if (stream->work_buffer_allocation >= newlen) {
        return stream->work_buffer;
    }

    Uint8 *ptr = (Uint8 *) SDL_aligned_alloc(SDL_GetSIMDAlignment(), newlen);
    if (!ptr) {
        return NULL;  // previous work buffer is still valid!
    }

    SDL_aligned_free(stream->work_buffer);
    stream->work_buffer = ptr;
    stream->work_buffer_allocation = newlen;
    return ptr;
}

static Sint64 NextAudioStreamIter(SDL_AudioStream* stream, void** inout_iter,
    Sint64* inout_resample_offset, SDL_AudioSpec* out_spec, int **out_chmap, bool* out_flushed)
{
    SDL_AudioSpec spec;
    bool flushed;
    int *chmap;
    size_t queued_bytes = SDL_NextAudioQueueIter(stream->queue, inout_iter, &spec, &chmap, &flushed);

    if (out_spec) {
        SDL_copyp(out_spec, &spec);
    }

    if (out_chmap) {
        *out_chmap = chmap;
    }

    // There is infinite audio available, whether or not we are resampling
    if (queued_bytes == SDL_SIZE_MAX) {
        *inout_resample_offset = 0;

        if (out_flushed) {
            *out_flushed = false;
        }

        return SDL_MAX_SINT32;
    }

    Sint64 resample_offset = *inout_resample_offset;
    Sint64 resample_rate = GetAudioStreamResampleRate(stream, spec.freq, resample_offset);
    Sint64 output_frames = (Sint64)(queued_bytes / SDL_AUDIO_FRAMESIZE(spec));

    if (resample_rate) {
        // Resampling requires padding frames to the left and right of the current position.
        // Past the end of the track, the right padding is filled with silence.
        // But we only want to do that if the track is actually finished (flushed).
        if (!flushed) {
            output_frames -= SDL_GetResamplerPaddingFrames(resample_rate);
        }

        output_frames = SDL_GetResamplerOutputFrames(output_frames, resample_rate, &resample_offset);
    }

    if (flushed) {
        resample_offset = 0;
    }

    *inout_resample_offset = resample_offset;

    if (out_flushed) {
        *out_flushed = flushed;
    }

    return output_frames;
}

static Sint64 GetAudioStreamAvailableFrames(SDL_AudioStream* stream, Sint64* out_resample_offset)
{
    void* iter = SDL_BeginAudioQueueIter(stream->queue);

    Sint64 resample_offset = stream->resample_offset;
    Sint64 output_frames = 0;

    while (iter) {
        output_frames += NextAudioStreamIter(stream, &iter, &resample_offset, NULL, NULL, NULL);

        // Already got loads of frames. Just clamp it to something reasonable
        if (output_frames >= SDL_MAX_SINT32) {
            output_frames = SDL_MAX_SINT32;
            break;
        }
    }

    if (out_resample_offset) {
        *out_resample_offset = resample_offset;
    }

    return output_frames;
}

static Sint64 GetAudioStreamHead(SDL_AudioStream* stream, SDL_AudioSpec* out_spec, int **out_chmap, bool* out_flushed)
{
    void* iter = SDL_BeginAudioQueueIter(stream->queue);

    if (!iter) {
        SDL_zerop(out_spec);
        *out_flushed = false;
        return 0;
    }

    Sint64 resample_offset = stream->resample_offset;
    return NextAudioStreamIter(stream, &iter, &resample_offset, out_spec, out_chmap, out_flushed);
}

// You must hold stream->lock and validate your parameters before calling this!
// Enough input data MUST be available!
static bool GetAudioStreamDataInternal(SDL_AudioStream *stream, void *buf, int output_frames, float gain)
{
    const SDL_AudioSpec* src_spec = &stream->input_spec;
    const SDL_AudioSpec* dst_spec = &stream->dst_spec;

    const SDL_AudioFormat src_format = src_spec->format;
    const int src_channels = src_spec->channels;

    const SDL_AudioFormat dst_format = dst_spec->format;
    const int dst_channels = dst_spec->channels;
    const int *dst_map = stream->dst_chmap;

    const int max_frame_size = CalculateMaxFrameSize(src_format, src_channels, dst_format, dst_channels);
    const Sint64 resample_rate = GetAudioStreamResampleRate(stream, src_spec->freq, stream->resample_offset);

#if DEBUG_AUDIOSTREAM
    SDL_Log("AUDIOSTREAM: asking for %d frames.", output_frames);
#endif

    SDL_assert(output_frames > 0);

    // Not resampling? It's an easy conversion (and maybe not even that!)
    if (resample_rate == 0) {
        Uint8* work_buffer = NULL;

        // Ensure we have enough scratch space for any conversions
        if ((src_format != dst_format) || (src_channels != dst_channels) || (gain != 1.0f)) {
            work_buffer = EnsureAudioStreamWorkBufferSize(stream, output_frames * max_frame_size);

            if (!work_buffer) {
                return false;
            }
        }

        if (SDL_ReadFromAudioQueue(stream->queue, (Uint8 *)buf, dst_format, dst_channels, dst_map, 0, output_frames, 0, work_buffer, gain) != buf) {
            return SDL_SetError("Not enough data in queue");
        }

        return true;
    }

    // Time to do some resampling!
    // Calculate the number of input frames necessary for this request.
    // Because resampling happens "between" frames, The same number of output_frames
    // can require a different number of input_frames, depending on the resample_offset.
    // In fact, input_frames can sometimes even be zero when upsampling.
    const int input_frames = (int) SDL_GetResamplerInputFrames(output_frames, resample_rate, stream->resample_offset);

    const int padding_frames = SDL_GetResamplerPaddingFrames(resample_rate);

    const SDL_AudioFormat resample_format = SDL_AUDIO_F32;

    // If increasing channels, do it after resampling, since we'd just
    // do more work to resample duplicate channels. If we're decreasing, do
    // it first so we resample the interpolated data instead of interpolating
    // the resampled data.
    const int resample_channels = SDL_min(src_channels, dst_channels);

    // The size of the frame used when resampling
    const int resample_frame_size = SDL_AUDIO_BYTESIZE(resample_format) * resample_channels;

    // The main portion of the work_buffer can be used to store 3 things:
    // src_sample_frame_size * (left_padding+input_buffer+right_padding)
    //   resample_frame_size * (left_padding+input_buffer+right_padding)
    // dst_sample_frame_size * output_frames
    //
    // ResampleAudio also requires an additional buffer if it can't write straight to the output:
    //   resample_frame_size * output_frames
    //
    // Note, ConvertAudio requires (num_frames * max_sample_frame_size) of scratch space
    const int work_buffer_frames = input_frames + (padding_frames * 2);
    int work_buffer_capacity = work_buffer_frames * max_frame_size;
    int resample_buffer_offset = -1;

    // Check if we can resample directly into the output buffer.
    // Note, this is just to avoid extra copies.
    // Some other formats may fit directly into the output buffer, but i'd rather process data in a SIMD-aligned buffer.
    if ((dst_format != resample_format) || (dst_channels != resample_channels)) {
        // Allocate space for converting the resampled output to the destination format
        int resample_convert_bytes = output_frames * max_frame_size;
        work_buffer_capacity = SDL_max(work_buffer_capacity, resample_convert_bytes);

        // SIMD-align the buffer
        int simd_alignment = (int) SDL_GetSIMDAlignment();
        work_buffer_capacity += simd_alignment - 1;
        work_buffer_capacity -= work_buffer_capacity % simd_alignment;

        // Allocate space for the resampled output
        int resample_bytes = output_frames * resample_frame_size;
        resample_buffer_offset = work_buffer_capacity;
        work_buffer_capacity += resample_bytes;
    }

    Uint8* work_buffer = EnsureAudioStreamWorkBufferSize(stream, work_buffer_capacity);

    if (!work_buffer) {
        return false;
    }

    // adjust gain either before resampling or after, depending on which point has less
    // samples to process.
    const float preresample_gain = (input_frames > output_frames) ? 1.0f : gain;
    const float postresample_gain = (input_frames > output_frames) ? gain : 1.0f;

    // (dst channel map is NULL because we'll do the final swizzle on ConvertAudio after resample.)
    const Uint8* input_buffer = SDL_ReadFromAudioQueue(stream->queue,
        NULL, resample_format, resample_channels, NULL,
        padding_frames, input_frames, padding_frames, work_buffer, preresample_gain);

    if (!input_buffer) {
        return SDL_SetError("Not enough data in queue (resample)");
    }

    input_buffer += padding_frames * resample_frame_size;

    // Decide where the resampled output goes
    void* resample_buffer = (resample_buffer_offset != -1) ? (work_buffer + resample_buffer_offset) : buf;

    SDL_ResampleAudio(resample_channels,
                  (const float *) input_buffer, input_frames,
                  (float*) resample_buffer, output_frames,
                  resample_rate, &stream->resample_offset);

    // Convert to the final format, if necessary (src channel map is NULL because SDL_ReadFromAudioQueue already handled this).
    ConvertAudio(output_frames, resample_buffer, resample_format, resample_channels, NULL, buf, dst_format, dst_channels, dst_map, work_buffer, postresample_gain);

    return true;
}

// get converted/resampled data from the stream
int SDL_GetAudioStreamDataAdjustGain(SDL_AudioStream *stream, void *voidbuf, int len, float extra_gain)
{
    Uint8 *buf = (Uint8 *) voidbuf;

#if DEBUG_AUDIOSTREAM
    SDL_Log("AUDIOSTREAM: want to get %d converted bytes", len);
#endif

    if (!stream) {
        SDL_InvalidParamError("stream");
        return -1;
    } else if (!buf) {
        SDL_InvalidParamError("buf");
        return -1;
    } else if (len < 0) {
        SDL_InvalidParamError("len");
        return -1;
    } else if (len == 0) {
        return 0; // nothing to do.
    }

    SDL_LockMutex(stream->lock);

    if (!CheckAudioStreamIsFullySetup(stream)) {
        SDL_UnlockMutex(stream->lock);
        return -1;
    }

    const float gain = stream->gain * extra_gain;
    const int dst_frame_size = SDL_AUDIO_FRAMESIZE(stream->dst_spec);

    len -= len % dst_frame_size;  // chop off any fractional sample frame.

    // give the callback a chance to fill in more stream data if it wants.
    if (stream->get_callback) {
        Sint64 total_request = len / dst_frame_size;  // start with sample frames desired
        Sint64 additional_request = total_request;

        Sint64 resample_offset = 0;
        Sint64 available_frames = GetAudioStreamAvailableFrames(stream, &resample_offset);

        additional_request -= SDL_min(additional_request, available_frames);

        Sint64 resample_rate = GetAudioStreamResampleRate(stream, stream->src_spec.freq, resample_offset);

        if (resample_rate) {
            total_request = SDL_GetResamplerInputFrames(total_request, resample_rate, resample_offset);
            additional_request = SDL_GetResamplerInputFrames(additional_request, resample_rate, resample_offset);
        }

        total_request *= SDL_AUDIO_FRAMESIZE(stream->src_spec);  // convert sample frames to bytes.
        additional_request *= SDL_AUDIO_FRAMESIZE(stream->src_spec);  // convert sample frames to bytes.
        stream->get_callback(stream->get_callback_userdata, stream, (int) SDL_min(additional_request, SDL_INT_MAX), (int) SDL_min(total_request, SDL_INT_MAX));
    }

    // Process the data in chunks to avoid allocating too much memory (and potential integer overflows)
    const int chunk_size = 4096;

    int total = 0;

    while (total < len) {
        // Audio is processed a track at a time.
        SDL_AudioSpec input_spec;
        int *input_chmap;
        bool flushed;
        const Sint64 available_frames = GetAudioStreamHead(stream, &input_spec, &input_chmap, &flushed);

        if (available_frames == 0) {
            if (flushed) {
                SDL_PopAudioQueueHead(stream->queue);
                SDL_zero(stream->input_spec);
                stream->resample_offset = 0;
                stream->input_chmap = NULL;
                continue;
            }
            // There are no frames available, but the track hasn't been flushed, so more might be added later.
            break;
        }

        if (!UpdateAudioStreamInputSpec(stream, &input_spec, input_chmap)) {
            total = total ? total : -1;
            break;
        }

        // Clamp the output length to the maximum currently available.
        // GetAudioStreamDataInternal requires enough input data is available.
        int output_frames = (len - total) / dst_frame_size;
        output_frames = SDL_min(output_frames, chunk_size);
        output_frames = (int) SDL_min(output_frames, available_frames);

        if (!GetAudioStreamDataInternal(stream, &buf[total], output_frames, gain)) {
            total = total ? total : -1;
            break;
        }

        total += output_frames * dst_frame_size;
    }

    SDL_UnlockMutex(stream->lock);

#if DEBUG_AUDIOSTREAM
    SDL_Log("AUDIOSTREAM: Final result was %d", total);
#endif

    return total;
}

int SDL_GetAudioStreamData(SDL_AudioStream *stream, void *voidbuf, int len)
{
    return SDL_GetAudioStreamDataAdjustGain(stream, voidbuf, len, 1.0f);
}
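
/* Usage sketch (illustrative): a typical pull loop drains whatever has been
   converted so far; the return value is the number of bytes written, 0 when
   nothing is available yet, or -1 on error:

       Uint8 out[4096];
       int got;
       while ((got = SDL_GetAudioStreamData(stream, out, (int) sizeof(out))) > 0) {
           // ... consume `got` bytes of dst_spec-formatted audio ...
       }
*/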

// number of converted/resampled bytes available for output
int SDL_GetAudioStreamAvailable(SDL_AudioStream *stream)
{
    if (!stream) {
        SDL_InvalidParamError("stream");
        return -1;
    }

    SDL_LockMutex(stream->lock);

    if (!CheckAudioStreamIsFullySetup(stream)) {
        SDL_UnlockMutex(stream->lock);
        return 0;
    }

    Sint64 count = GetAudioStreamAvailableFrames(stream, NULL);

    // convert from sample frames to bytes in destination format.
    count *= SDL_AUDIO_FRAMESIZE(stream->dst_spec);

    SDL_UnlockMutex(stream->lock);

    // if this overflows an int, just clamp it to a maximum.
    return (int) SDL_min(count, SDL_INT_MAX);
}

// number of sample frames that are currently queued as input.
int SDL_GetAudioStreamQueued(SDL_AudioStream *stream)
{
    if (!stream) {
        SDL_InvalidParamError("stream");
        return -1;
    }

    SDL_LockMutex(stream->lock);

    size_t total = SDL_GetAudioQueueQueued(stream->queue);

    SDL_UnlockMutex(stream->lock);

    // if this overflows an int, just clamp it to a maximum.
    return (int) SDL_min(total, SDL_INT_MAX);
}

bool SDL_ClearAudioStream(SDL_AudioStream *stream)
{
    if (!stream) {
        return SDL_InvalidParamError("stream");
    }

    SDL_LockMutex(stream->lock);

    SDL_ClearAudioQueue(stream->queue);
    SDL_zero(stream->input_spec);
    stream->input_chmap = NULL;
    stream->resample_offset = 0;

    SDL_UnlockMutex(stream->lock);
    return true;
}

void SDL_DestroyAudioStream(SDL_AudioStream *stream)
{
    if (!stream) {
        return;
    }

    SDL_DestroyProperties(stream->props);

    OnAudioStreamDestroy(stream);

    const bool simplified = stream->simplified;
    if (simplified) {
        if (stream->bound_device) {
            SDL_assert(stream->bound_device->simplified);
            SDL_CloseAudioDevice(stream->bound_device->instance_id);  // this will unbind the stream.
        }
    } else {
        SDL_UnbindAudioStream(stream);
    }

    SDL_aligned_free(stream->work_buffer);
    SDL_DestroyAudioQueue(stream->queue);
    SDL_DestroyMutex(stream->lock);

    SDL_free(stream);
}

static void SDLCALL DontFreeThisAudioBuffer(void *userdata, const void *buf, int len)
{
    // We don't own the buffer, but know it will outlive the stream
}

bool SDL_ConvertAudioSamples(const SDL_AudioSpec *src_spec, const Uint8 *src_data, int src_len, const SDL_AudioSpec *dst_spec, Uint8 **dst_data, int *dst_len)
{
    if (dst_data) {
        *dst_data = NULL;
    }

    if (dst_len) {
        *dst_len = 0;
    }

    if (!src_data) {
        return SDL_InvalidParamError("src_data");
    } else if (src_len < 0) {
        return SDL_InvalidParamError("src_len");
    } else if (!dst_data) {
        return SDL_InvalidParamError("dst_data");
    } else if (!dst_len) {
        return SDL_InvalidParamError("dst_len");
    }

    bool result = false;
    Uint8 *dst = NULL;
    int dstlen = 0;

    SDL_AudioStream *stream = SDL_CreateAudioStream(src_spec, dst_spec);
    if (stream) {
        if (PutAudioStreamBuffer(stream, src_data, src_len, DontFreeThisAudioBuffer, NULL) &&
            SDL_FlushAudioStream(stream)) {
            dstlen = SDL_GetAudioStreamAvailable(stream);
            if (dstlen >= 0) {
                dst = (Uint8 *)SDL_malloc(dstlen);
                if (dst) {
                    result = (SDL_GetAudioStreamData(stream, dst, dstlen) == dstlen);
                }
            }
        }
    }

    if (result) {
        *dst_data = dst;
        *dst_len = dstlen;
    } else {
        SDL_free(dst);
    }

    SDL_DestroyAudioStream(stream);
    return result;
}
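
/* Usage sketch (illustrative; the wav_* variables are hypothetical, e.g. as
   filled in by SDL_LoadWAV): convert an entire buffer in one call, then free
   the result with SDL_free() when done:

       SDL_AudioSpec dst = { SDL_AUDIO_F32, 2, 48000 };
       Uint8 *converted = NULL;
       int converted_len = 0;
       if (SDL_ConvertAudioSamples(&wav_spec, wav_data, wav_len,
                                   &dst, &converted, &converted_len)) {
           // ... use converted / converted_len ...
           SDL_free(converted);
       }
*/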