Set up entry point + integrated MoonWorks stuff

2024-07-05 14:32:58 +02:00
parent e7a4a862be
commit 8334a24fd1
116 changed files with 16988 additions and 3 deletions

View File

@ -0,0 +1,78 @@
using System;
using System.Runtime.InteropServices;
namespace Nerfed.Runtime.Audio;
/// <summary>
/// Contains raw audio data in a specified Format. <br/>
/// Submit this to a SourceVoice to play audio.
/// </summary>
public class AudioBuffer : AudioResource
{
private IntPtr BufferDataPtr;
private uint BufferDataLength;
private bool OwnsBufferData;
public Format Format { get; }
/// <summary>
/// Create a new AudioBuffer.
/// </summary>
/// <param name="ownsBufferData">If true, the buffer data will be destroyed when this AudioBuffer is destroyed.</param>
public AudioBuffer(
AudioDevice device,
Format format,
IntPtr bufferPtr,
uint bufferLengthInBytes,
bool ownsBufferData) : base(device)
{
Format = format;
BufferDataPtr = bufferPtr;
BufferDataLength = bufferLengthInBytes;
OwnsBufferData = ownsBufferData;
}
/// <summary>
/// Create another AudioBuffer from this audio buffer.
/// It will not own the buffer data.
/// </summary>
/// <param name="offset">Offset in bytes from the top of the original buffer.</param>
/// <param name="length">Length in bytes of the new buffer.</param>
/// <returns>A new AudioBuffer that shares, but does not own, the underlying buffer data.</returns>
public AudioBuffer Slice(int offset, uint length)
{
return new AudioBuffer(Device, Format, BufferDataPtr + offset, length, false);
}
/// <summary>
/// Create an FAudioBuffer struct from this AudioBuffer.
/// </summary>
/// <param name="loop">Whether we should set the FAudioBuffer to loop.</param>
public FAudio.FAudioBuffer ToFAudioBuffer(bool loop = false)
{
return new FAudio.FAudioBuffer
{
Flags = FAudio.FAUDIO_END_OF_STREAM,
pContext = IntPtr.Zero,
pAudioData = BufferDataPtr,
AudioBytes = BufferDataLength,
PlayBegin = 0,
PlayLength = 0,
LoopBegin = 0,
LoopLength = 0,
LoopCount = loop ? FAudio.FAUDIO_LOOP_INFINITE : 0
};
}
protected override unsafe void Dispose(bool disposing)
{
if (!IsDisposed)
{
if (OwnsBufferData)
{
NativeMemory.Free((void*) BufferDataPtr);
}
}
base.Dispose(disposing);
}
}
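
Review note: a minimal usage sketch for the class above (not part of this commit). It assumes an existing AudioDevice named device and a WAV file "shot.wav"; PersistentVoice and AudioDataWav appear later in this diff.

AudioBuffer buffer = AudioDataWav.CreateBuffer(device, "shot.wav");
// Slice shares memory with the original buffer and must not outlive it.
AudioBuffer firstChunk = buffer.Slice(0, 4096);
PersistentVoice voice = device.Obtain<PersistentVoice>(buffer.Format);
voice.Submit(buffer, loop: true);
voice.Play();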

View File

@ -0,0 +1,146 @@
using System;
using System.IO;
using System.Runtime.InteropServices;
namespace Nerfed.Runtime.Audio;
/// <summary>
/// Streamable audio in Ogg format.
/// </summary>
public class AudioDataOgg : AudioDataStreamable
{
private IntPtr FileDataPtr = IntPtr.Zero;
private IntPtr VorbisHandle = IntPtr.Zero;
private string FilePath;
public override bool Loaded => VorbisHandle != IntPtr.Zero;
public override uint DecodeBufferSize => 32768;
public AudioDataOgg(AudioDevice device, string filePath) : base(device)
{
FilePath = filePath;
IntPtr handle = FAudio.stb_vorbis_open_filename(filePath, out int error, IntPtr.Zero);
if (error != 0)
{
throw new InvalidOperationException("Error loading file!");
}
FAudio.stb_vorbis_info info = FAudio.stb_vorbis_get_info(handle);
Format = new Format
{
Tag = FormatTag.IEEE_FLOAT,
BitsPerSample = 32,
Channels = (ushort) info.channels,
SampleRate = info.sample_rate
};
FAudio.stb_vorbis_close(handle);
}
public override unsafe void Decode(void* buffer, int bufferLengthInBytes, out int filledLengthInBytes, out bool reachedEnd)
{
int lengthInFloats = bufferLengthInBytes / sizeof(float);
/* NOTE: this function returns samples per channel, not total samples */
int samples = FAudio.stb_vorbis_get_samples_float_interleaved(
VorbisHandle,
Format.Channels,
(IntPtr) buffer,
lengthInFloats
);
int sampleCount = samples * Format.Channels;
reachedEnd = sampleCount < lengthInFloats;
filledLengthInBytes = sampleCount * sizeof(float);
}
/// <summary>
/// Prepares the Ogg data for streaming.
/// </summary>
public override unsafe void Load()
{
if (!Loaded)
{
FileStream fileStream = new FileStream(FilePath, FileMode.Open, FileAccess.Read);
FileDataPtr = (nint) NativeMemory.Alloc((nuint) fileStream.Length);
Span<byte> fileDataSpan = new Span<byte>((void*) FileDataPtr, (int) fileStream.Length);
fileStream.ReadExactly(fileDataSpan);
fileStream.Close();
VorbisHandle = FAudio.stb_vorbis_open_memory(FileDataPtr, fileDataSpan.Length, out int error, IntPtr.Zero);
if (error != 0)
{
NativeMemory.Free((void*) FileDataPtr);
FileDataPtr = IntPtr.Zero;
Log.Error($"Error opening OGG file (stb_vorbis error {error})!");
throw new InvalidOperationException($"Error opening OGG file (stb_vorbis error {error}).");
}
}
}
public override void Seek(uint sampleFrame)
{
FAudio.stb_vorbis_seek(VorbisHandle, sampleFrame);
}
/// <summary>
/// Unloads the Ogg data, freeing resources.
/// </summary>
public override unsafe void Unload()
{
if (Loaded)
{
FAudio.stb_vorbis_close(VorbisHandle);
NativeMemory.Free((void*) FileDataPtr);
VorbisHandle = IntPtr.Zero;
FileDataPtr = IntPtr.Zero;
}
}
/// <summary>
/// Loads an entire ogg file into an AudioBuffer. Useful for static audio.
/// </summary>
public static unsafe AudioBuffer CreateBuffer(AudioDevice device, string filePath)
{
IntPtr filePointer = FAudio.stb_vorbis_open_filename(filePath, out int error, IntPtr.Zero);
if (error != 0)
{
throw new InvalidOperationException("Error loading file!");
}
FAudio.stb_vorbis_info info = FAudio.stb_vorbis_get_info(filePointer);
long lengthInFloats =
FAudio.stb_vorbis_stream_length_in_samples(filePointer) * info.channels;
long lengthInBytes = lengthInFloats * Marshal.SizeOf<float>();
void* buffer = NativeMemory.Alloc((nuint) lengthInBytes);
FAudio.stb_vorbis_get_samples_float_interleaved(
filePointer,
info.channels,
(nint) buffer,
(int) lengthInFloats
);
FAudio.stb_vorbis_close(filePointer);
Format format = new Format
{
Tag = FormatTag.IEEE_FLOAT,
BitsPerSample = 32,
Channels = (ushort) info.channels,
SampleRate = info.sample_rate
};
return new AudioBuffer(
device,
format,
(nint) buffer,
(uint) lengthInBytes,
true);
}
}
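
Review note: the two ways to consume the class above, sketched (assumes device; StreamingVoice appears later in this diff).

// Static: decode the whole file up front into a single AudioBuffer.
AudioBuffer music = AudioDataOgg.CreateBuffer(device, "music.ogg");
// Streaming: keep only the compressed file in memory and decode on the fly.
AudioDataOgg ogg = new AudioDataOgg(device, "music.ogg");
StreamingVoice voice = device.Obtain<StreamingVoice>(ogg.Format);
voice.Load(ogg); // calls ogg.Load() and queues the first decoded buffers
voice.Loop = true;
voice.Play();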

View File

@ -0,0 +1,163 @@
using System;
using System.IO;
using System.Runtime.InteropServices;
namespace Nerfed.Runtime.Audio;
/// <summary>
/// Streamable audio in QOA format.
/// </summary>
public class AudioDataQoa : AudioDataStreamable
{
private IntPtr QoaHandle = IntPtr.Zero;
private IntPtr FileDataPtr = IntPtr.Zero;
private string FilePath;
private const uint QOA_MAGIC = 0x716f6166; /* 'qoaf' */
public override bool Loaded => QoaHandle != IntPtr.Zero;
private uint decodeBufferSize;
public override uint DecodeBufferSize => decodeBufferSize;
public AudioDataQoa(AudioDevice device, string filePath) : base(device)
{
FilePath = filePath;
using FileStream stream = new FileStream(FilePath, FileMode.Open, FileAccess.Read);
using BinaryReader reader = new BinaryReader(stream);
UInt64 fileHeader = ReverseEndianness(reader.ReadUInt64());
if ((fileHeader >> 32) != QOA_MAGIC)
{
throw new InvalidOperationException("Specified file is not a QOA file.");
}
uint totalSamplesPerChannel = (uint) (fileHeader & (0xFFFFFFFF));
if (totalSamplesPerChannel == 0)
{
throw new InvalidOperationException("Specified file is not a valid QOA file.");
}
UInt64 frameHeader = ReverseEndianness(reader.ReadUInt64());
uint channels = (uint) ((frameHeader >> 56) & 0x0000FF);
uint samplerate = (uint) ((frameHeader >> 32) & 0xFFFFFF);
uint samplesPerChannelPerFrame = (uint) ((frameHeader >> 16) & 0x00FFFF);
Format = new Format
{
Tag = FormatTag.PCM,
BitsPerSample = 16,
Channels = (ushort) channels,
SampleRate = samplerate
};
decodeBufferSize = channels * samplesPerChannelPerFrame * sizeof(short);
}
public override unsafe void Decode(void* buffer, int bufferLengthInBytes, out int filledLengthInBytes, out bool reachedEnd)
{
int lengthInShorts = bufferLengthInBytes / sizeof(short);
// NOTE: this function returns samples per channel!
uint samples = FAudio.qoa_decode_next_frame(QoaHandle, (short*) buffer);
uint sampleCount = samples * Format.Channels;
reachedEnd = sampleCount < lengthInShorts;
filledLengthInBytes = (int) (sampleCount * sizeof(short));
}
/// <summary>
/// Prepares qoa data for streaming.
/// </summary>
public override unsafe void Load()
{
if (!Loaded)
{
FileStream fileStream = new FileStream(FilePath, FileMode.Open, FileAccess.Read);
FileDataPtr = (nint) NativeMemory.Alloc((nuint) fileStream.Length);
Span<byte> fileDataSpan = new Span<byte>((void*) FileDataPtr, (int) fileStream.Length);
fileStream.ReadExactly(fileDataSpan);
fileStream.Close();
QoaHandle = FAudio.qoa_open_from_memory((char*) FileDataPtr, (uint) fileDataSpan.Length, 0);
if (QoaHandle == IntPtr.Zero)
{
NativeMemory.Free((void*) FileDataPtr);
FileDataPtr = IntPtr.Zero;
Log.Error("Error opening QOA file!");
throw new InvalidOperationException("Error opening QOA file!");
}
}
}
public override void Seek(uint sampleFrame)
{
FAudio.qoa_seek_frame(QoaHandle, (int) sampleFrame);
}
/// <summary>
/// Unloads the qoa data, freeing resources.
/// </summary>
public override unsafe void Unload()
{
if (Loaded)
{
FAudio.qoa_close(QoaHandle);
NativeMemory.Free((void*) FileDataPtr);
QoaHandle = IntPtr.Zero;
FileDataPtr = IntPtr.Zero;
}
}
/// <summary>
/// Loads the entire qoa file into an AudioBuffer. Useful for static audio.
/// </summary>
public unsafe static AudioBuffer CreateBuffer(AudioDevice device, string filePath)
{
using FileStream fileStream = new FileStream(filePath, FileMode.Open, FileAccess.Read);
void* fileDataPtr = NativeMemory.Alloc((nuint) fileStream.Length);
Span<byte> fileDataSpan = new Span<byte>(fileDataPtr, (int) fileStream.Length);
fileStream.ReadExactly(fileDataSpan);
fileStream.Close();
IntPtr qoaHandle = FAudio.qoa_open_from_memory((char*) fileDataPtr, (uint) fileDataSpan.Length, 0);
if (qoaHandle == IntPtr.Zero)
{
NativeMemory.Free(fileDataPtr);
Log.Error("Error opening QOA file!");
throw new InvalidOperationException("Error opening QOA file!");
}
FAudio.qoa_attributes(qoaHandle, out uint channels, out uint samplerate, out uint samples_per_channel_per_frame, out uint total_samples_per_channel);
uint bufferLengthInBytes = total_samples_per_channel * channels * sizeof(short);
void* buffer = NativeMemory.Alloc(bufferLengthInBytes);
FAudio.qoa_decode_entire(qoaHandle, (short*) buffer);
FAudio.qoa_close(qoaHandle);
NativeMemory.Free(fileDataPtr);
Format format = new Format
{
Tag = FormatTag.PCM,
BitsPerSample = 16,
Channels = (ushort) channels,
SampleRate = samplerate
};
return new AudioBuffer(device, format, (nint) buffer, bufferLengthInBytes, true);
}
private static unsafe UInt64 ReverseEndianness(UInt64 value)
{
byte* bytes = (byte*) &value;
return
((UInt64)(bytes[0]) << 56) | ((UInt64)(bytes[1]) << 48) |
((UInt64)(bytes[2]) << 40) | ((UInt64)(bytes[3]) << 32) |
((UInt64)(bytes[4]) << 24) | ((UInt64)(bytes[5]) << 16) |
((UInt64)(bytes[6]) << 8) | ((UInt64)(bytes[7]) << 0);
}
}
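
Review note: for reference, the bit layout the constructor above unpacks, matching the QOA spec.

// 64-bit file header:  'qoaf' magic (bits 63..32) | total samples per channel (bits 31..0)
// 64-bit frame header: channels (bits 63..56) | sample rate in Hz (bits 55..32) |
//                      samples per channel in this frame (bits 31..16) | frame size in bytes (bits 15..0)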

View File

@ -0,0 +1,48 @@
namespace Nerfed.Runtime.Audio;
/// <summary>
/// Use this in conjunction with a StreamingVoice to play back streaming audio data.
/// </summary>
public abstract class AudioDataStreamable : AudioResource
{
public Format Format { get; protected set; }
public abstract bool Loaded { get; }
public abstract uint DecodeBufferSize { get; }
protected AudioDataStreamable(AudioDevice device) : base(device)
{
}
/// <summary>
/// Loads the raw audio data into memory to prepare it for stream decoding.
/// </summary>
public abstract void Load();
/// <summary>
/// Unloads the raw audio data from memory.
/// </summary>
public abstract void Unload();
/// <summary>
/// Seeks to the given sample frame.
/// </summary>
public abstract void Seek(uint sampleFrame);
/// <summary>
/// Attempts to decode up to bufferLengthInBytes of audio data into the provided buffer.
/// </summary>
/// <param name="buffer">The buffer that decoded bytes will be placed into.</param>
/// <param name="bufferLengthInBytes">Requested length of decoded audio data.</param>
/// <param name="filledLengthInBytes">How much data was actually filled in by the decode.</param>
/// <param name="reachedEnd">Whether the end of the data was reached on this decode.</param>
public abstract unsafe void Decode(void* buffer, int bufferLengthInBytes, out int filledLengthInBytes, out bool reachedEnd);
protected override void Dispose(bool disposing)
{
if (!IsDisposed)
{
Unload();
}
base.Dispose(disposing);
}
}
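
Review note: a sketch of how a caller drains the Decode contract above (inside an unsafe method; StreamingVoice later in this diff does the same with a ring of three buffers). data is any loaded AudioDataStreamable.

void* scratch = NativeMemory.Alloc(data.DecodeBufferSize);
bool reachedEnd = false;
while (!reachedEnd)
{
data.Decode(scratch, (int) data.DecodeBufferSize, out int filledLengthInBytes, out reachedEnd);
// hand the first filledLengthInBytes bytes of scratch to a voice here
}
NativeMemory.Free(scratch);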

View File

@ -0,0 +1,99 @@
using System;
using System.IO;
using System.Runtime.InteropServices;
namespace Nerfed.Runtime.Audio;
public static class AudioDataWav
{
/// <summary>
/// Create an AudioBuffer containing all the WAV audio data in a file.
/// </summary>
/// <returns></returns>
public unsafe static AudioBuffer CreateBuffer(AudioDevice device, string filePath)
{
// mostly borrowed from https://github.com/FNA-XNA/FNA/blob/b71b4a35ae59970ff0070dea6f8620856d8d4fec/src/Audio/SoundEffect.cs#L385
// WaveFormatEx data
ushort wFormatTag;
ushort nChannels;
uint nSamplesPerSec;
uint nAvgBytesPerSec;
ushort nBlockAlign;
ushort wBitsPerSample;
using FileStream stream = new FileStream(filePath, FileMode.Open, FileAccess.Read);
using BinaryReader reader = new BinaryReader(stream);
// RIFF Signature
string signature = new string(reader.ReadChars(4));
if (signature != "RIFF")
{
throw new NotSupportedException("Specified stream is not a wave file.");
}
reader.ReadUInt32(); // Riff Chunk Size
string wformat = new string(reader.ReadChars(4));
if (wformat != "WAVE")
{
throw new NotSupportedException("Specified stream is not a wave file.");
}
// WAVE Header
string format_signature = new string(reader.ReadChars(4));
while (format_signature != "fmt ")
{
reader.ReadBytes(reader.ReadInt32());
format_signature = new string(reader.ReadChars(4));
}
int format_chunk_size = reader.ReadInt32();
wFormatTag = reader.ReadUInt16();
nChannels = reader.ReadUInt16();
nSamplesPerSec = reader.ReadUInt32();
nAvgBytesPerSec = reader.ReadUInt32();
nBlockAlign = reader.ReadUInt16();
wBitsPerSample = reader.ReadUInt16();
// Reads residual bytes
if (format_chunk_size > 16)
{
reader.ReadBytes(format_chunk_size - 16);
}
// data Signature (case-insensitive; some encoders write "DATA")
string data_signature = new string(reader.ReadChars(4));
while (data_signature.ToLowerInvariant() != "data")
{
reader.ReadBytes(reader.ReadInt32());
data_signature = new string(reader.ReadChars(4));
}
int waveDataLength = reader.ReadInt32();
void* waveDataBuffer = NativeMemory.Alloc((nuint) waveDataLength);
Span<byte> waveDataSpan = new Span<byte>(waveDataBuffer, waveDataLength);
stream.ReadExactly(waveDataSpan);
Format format = new Format
{
Tag = (FormatTag) wFormatTag,
BitsPerSample = wBitsPerSample,
Channels = nChannels,
SampleRate = nSamplesPerSec
};
return new AudioBuffer(
device,
format,
(nint) waveDataBuffer,
(uint) waveDataLength,
true
);
}
}
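
Review note: the canonical chunk layout the parser above walks (reference only; unrecognized chunks are skipped by the two while loops).

// "RIFF" <riff size> "WAVE"
// "fmt " <chunk size> <WaveFormatEx fields>   (bytes beyond 16 are skipped)
// ...any other chunks, skipped...
// "data" <data size> <raw sample bytes>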

View File

@ -0,0 +1,309 @@
using System;
using System.Collections.Generic;
using System.Runtime.InteropServices;
using System.Threading;
namespace Nerfed.Runtime.Audio;
/// <summary>
/// AudioDevice manages all audio-related concerns.
/// </summary>
public class AudioDevice : IDisposable
{
public IntPtr Handle { get; }
public byte[] Handle3D { get; }
public FAudio.FAudioDeviceDetails DeviceDetails { get; }
private IntPtr trueMasteringVoice;
// this is a fun little trick where we use a submix voice as a "faux" mastering voice
// this lets us maintain API consistency for effects like panning and reverb
private SubmixVoice fauxMasteringVoice;
public SubmixVoice MasteringVoice => fauxMasteringVoice;
public float CurveDistanceScalar = 1f;
public float DopplerScale = 1f;
public float SpeedOfSound = 343.5f;
private readonly HashSet<GCHandle> resourceHandles = new HashSet<GCHandle>();
private readonly HashSet<UpdatingSourceVoice> updatingSourceVoices = new HashSet<UpdatingSourceVoice>();
private SourceVoicePool VoicePool;
private List<SourceVoice> VoicesToReturn = new List<SourceVoice>();
private const int Step = 200; // audio thread updates per second
private TimeSpan UpdateInterval;
private System.Diagnostics.Stopwatch TickStopwatch = new System.Diagnostics.Stopwatch();
private long previousTickTime;
private Thread Thread;
private AutoResetEvent WakeSignal;
internal readonly object StateLock = new object();
private bool Running;
public bool IsDisposed { get; private set; }
internal unsafe AudioDevice()
{
UpdateInterval = TimeSpan.FromTicks(TimeSpan.TicksPerSecond / Step);
FAudio.FAudioCreate(out IntPtr handle, 0, FAudio.FAUDIO_DEFAULT_PROCESSOR);
Handle = handle;
/* Find a suitable device */
FAudio.FAudio_GetDeviceCount(Handle, out uint devices);
if (devices == 0)
{
Log.Error("No audio devices found!");
FAudio.FAudio_Release(Handle);
Handle = IntPtr.Zero;
return;
}
FAudio.FAudioDeviceDetails deviceDetails;
uint i = 0;
for (i = 0; i < devices; i++)
{
FAudio.FAudio_GetDeviceDetails(
Handle,
i,
out deviceDetails
);
if ((deviceDetails.Role & FAudio.FAudioDeviceRole.FAudioDefaultGameDevice) == FAudio.FAudioDeviceRole.FAudioDefaultGameDevice)
{
DeviceDetails = deviceDetails;
break;
}
}
if (i == devices)
{
i = 0; /* whatever we'll just use the first one I guess */
FAudio.FAudio_GetDeviceDetails(
Handle,
i,
out deviceDetails
);
DeviceDetails = deviceDetails;
}
/* Init Mastering Voice */
uint result = FAudio.FAudio_CreateMasteringVoice(
Handle,
out trueMasteringVoice,
FAudio.FAUDIO_DEFAULT_CHANNELS,
FAudio.FAUDIO_DEFAULT_SAMPLERATE,
0,
i,
IntPtr.Zero
);
if (result != 0)
{
Log.Error("Failed to create a mastering voice!");
Log.Error("Audio device creation failed!");
return;
}
fauxMasteringVoice = SubmixVoice.CreateFauxMasteringVoice(this);
/* Init 3D Audio */
Handle3D = new byte[FAudio.F3DAUDIO_HANDLE_BYTESIZE];
FAudio.F3DAudioInitialize(
DeviceDetails.OutputFormat.dwChannelMask,
SpeedOfSound,
Handle3D
);
VoicePool = new SourceVoicePool(this);
WakeSignal = new AutoResetEvent(true);
Thread = new Thread(ThreadMain);
Thread.IsBackground = true;
Running = true; // must be set before Start, or ThreadMain can observe false and exit immediately
Thread.Start();
TickStopwatch.Start();
previousTickTime = 0;
}
private void ThreadMain()
{
while (Running)
{
lock (StateLock)
{
try
{
ThreadMainTick();
}
catch (Exception e)
{
Log.Error(e.ToString());
}
}
WakeSignal.WaitOne(UpdateInterval);
}
}
private void ThreadMainTick()
{
previousTickTime = TickStopwatch.Elapsed.Ticks;
foreach (UpdatingSourceVoice voice in updatingSourceVoices)
{
voice.Update();
}
foreach (SourceVoice voice in VoicesToReturn)
{
if (voice is UpdatingSourceVoice updatingSourceVoice)
{
updatingSourceVoices.Remove(updatingSourceVoice);
}
voice.Reset();
VoicePool.Return(voice);
}
VoicesToReturn.Clear();
}
/// <summary>
/// Triggers all pending operations with the given syncGroup value.
/// </summary>
public void TriggerSyncGroup(uint syncGroup)
{
FAudio.FAudio_CommitChanges(Handle, syncGroup);
}
/// <summary>
/// Obtains an appropriate source voice from the voice pool.
/// </summary>
/// <param name="format">The format that the voice must match.</param>
/// <returns>A source voice with the given format.</returns>
public T Obtain<T>(Format format) where T : SourceVoice, IPoolable<T>
{
lock (StateLock)
{
T voice = VoicePool.Obtain<T>(format);
if (voice is UpdatingSourceVoice updatingSourceVoice)
{
updatingSourceVoices.Add(updatingSourceVoice);
}
return voice;
}
}
/// <summary>
/// Returns the source voice to the voice pool.
/// </summary>
/// <param name="voice"></param>
internal void Return(SourceVoice voice)
{
lock (StateLock)
{
VoicesToReturn.Add(voice);
}
}
internal void WakeThread()
{
WakeSignal.Set();
}
internal void AddResourceReference(GCHandle resourceReference)
{
lock (StateLock)
{
resourceHandles.Add(resourceReference);
if (resourceReference.Target is UpdatingSourceVoice updatableVoice)
{
updatingSourceVoices.Add(updatableVoice);
}
}
}
internal void RemoveResourceReference(GCHandle resourceReference)
{
lock (StateLock)
{
resourceHandles.Remove(resourceReference);
if (resourceReference.Target is UpdatingSourceVoice updatableVoice)
{
updatingSourceVoices.Remove(updatableVoice);
}
}
}
protected virtual void Dispose(bool disposing)
{
if (!IsDisposed)
{
Running = false;
if (disposing)
{
Thread.Join();
// dispose all source voices first
foreach (GCHandle handle in resourceHandles)
{
if (handle.Target is SourceVoice voice)
{
voice.Dispose();
}
}
// dispose all submix voices except the faux mastering voice
foreach (GCHandle handle in resourceHandles)
{
if (handle.Target is SubmixVoice voice && voice != fauxMasteringVoice)
{
voice.Dispose();
}
}
// dispose the faux mastering voice
fauxMasteringVoice.Dispose();
// dispose the true mastering voice
FAudio.FAudioVoice_DestroyVoice(trueMasteringVoice);
// destroy all other audio resources
foreach (GCHandle handle in resourceHandles)
{
if (handle.Target is AudioResource resource)
{
resource.Dispose();
}
}
resourceHandles.Clear();
}
FAudio.FAudio_Release(Handle);
IsDisposed = true;
}
}
~AudioDevice()
{
// Do not change this code. Put cleanup code in 'Dispose(bool disposing)' method
Dispose(disposing: false);
}
public void Dispose()
{
// Do not change this code. Put cleanup code in 'Dispose(bool disposing)' method
Dispose(disposing: true);
GC.SuppressFinalize(this);
}
}
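
Review note: an end-to-end sketch of the device API above (not part of this commit; buffer is any loaded AudioBuffer, and since the AudioDevice constructor is internal, the runtime normally creates the device).

TransientVoice voice = device.Obtain<TransientVoice>(buffer.Format);
voice.Submit(buffer);
voice.Play();
// The device's background thread calls Update() on the voice each tick;
// TransientVoice returns itself to the pool once its queue runs dry.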

View File

@ -0,0 +1,135 @@
using System;
using System.Numerics;
using System.Runtime.InteropServices;
namespace Nerfed.Runtime.Audio;
/// <summary>
/// An emitter for 3D spatial audio.
/// </summary>
public class AudioEmitter : AudioResource
{
internal FAudio.F3DAUDIO_EMITTER emitterData;
public float DopplerScale
{
get
{
return emitterData.DopplerScaler;
}
set
{
if (value < 0.0f)
{
throw new ArgumentOutOfRangeException("AudioEmitter.DopplerScale must be greater than or equal to 0.0f");
}
emitterData.DopplerScaler = value;
}
}
public Vector3 Forward
{
get
{
return new Vector3(
emitterData.OrientFront.x,
emitterData.OrientFront.y,
-emitterData.OrientFront.z
);
}
set
{
emitterData.OrientFront.x = value.X;
emitterData.OrientFront.y = value.Y;
emitterData.OrientFront.z = -value.Z;
}
}
public Vector3 Position
{
get
{
return new Vector3(
emitterData.Position.x,
emitterData.Position.y,
-emitterData.Position.z
);
}
set
{
emitterData.Position.x = value.X;
emitterData.Position.y = value.Y;
emitterData.Position.z = -value.Z;
}
}
public Vector3 Up
{
get
{
return new Vector3(
emitterData.OrientTop.x,
emitterData.OrientTop.y,
-emitterData.OrientTop.z
);
}
set
{
emitterData.OrientTop.x = value.X;
emitterData.OrientTop.y = value.Y;
emitterData.OrientTop.z = -value.Z;
}
}
public Vector3 Velocity
{
get
{
return new Vector3(
emitterData.Velocity.x,
emitterData.Velocity.y,
-emitterData.Velocity.z
);
}
set
{
emitterData.Velocity.x = value.X;
emitterData.Velocity.y = value.Y;
emitterData.Velocity.z = -value.Z;
}
}
private static readonly float[] stereoAzimuth = new float[]
{
0.0f, 0.0f
};
private static readonly GCHandle stereoAzimuthHandle = GCHandle.Alloc(
stereoAzimuth,
GCHandleType.Pinned
);
public AudioEmitter(AudioDevice device) : base(device)
{
emitterData = new FAudio.F3DAUDIO_EMITTER();
DopplerScale = 1f;
Forward = Vector3.UnitZ;
Position = Vector3.Zero;
Up = Vector3.UnitY;
Velocity = Vector3.Zero;
/* Unexposed variables, defaults based on XNA behavior */
emitterData.pCone = IntPtr.Zero;
emitterData.ChannelCount = 1;
emitterData.ChannelRadius = 1.0f;
emitterData.pChannelAzimuths = stereoAzimuthHandle.AddrOfPinnedObject();
emitterData.pVolumeCurve = IntPtr.Zero;
emitterData.pLFECurve = IntPtr.Zero;
emitterData.pLPFDirectCurve = IntPtr.Zero;
emitterData.pLPFReverbCurve = IntPtr.Zero;
emitterData.pReverbCurve = IntPtr.Zero;
emitterData.CurveDistanceScaler = 1.0f;
}
}

View File

@ -0,0 +1,97 @@
using System;
using System.Numerics;
namespace Nerfed.Runtime.Audio;
/// <summary>
/// A listener for 3D spatial audio. Usually attached to a camera.
/// </summary>
public class AudioListener : AudioResource
{
internal FAudio.F3DAUDIO_LISTENER listenerData;
public Vector3 Forward
{
get
{
return new Vector3(
listenerData.OrientFront.x,
listenerData.OrientFront.y,
-listenerData.OrientFront.z
);
}
set
{
listenerData.OrientFront.x = value.X;
listenerData.OrientFront.y = value.Y;
listenerData.OrientFront.z = -value.Z;
}
}
public Vector3 Position
{
get
{
return new Vector3(
listenerData.Position.x,
listenerData.Position.y,
-listenerData.Position.z
);
}
set
{
listenerData.Position.x = value.X;
listenerData.Position.y = value.Y;
listenerData.Position.z = -value.Z;
}
}
public Vector3 Up
{
get
{
return new Vector3(
listenerData.OrientTop.x,
listenerData.OrientTop.y,
-listenerData.OrientTop.z
);
}
set
{
listenerData.OrientTop.x = value.X;
listenerData.OrientTop.y = value.Y;
listenerData.OrientTop.z = -value.Z;
}
}
public Vector3 Velocity
{
get
{
return new Vector3(
listenerData.Velocity.x,
listenerData.Velocity.y,
-listenerData.Velocity.z
);
}
set
{
listenerData.Velocity.x = value.X;
listenerData.Velocity.y = value.Y;
listenerData.Velocity.z = -value.Z;
}
}
public AudioListener(AudioDevice device) : base(device)
{
listenerData = new FAudio.F3DAUDIO_LISTENER();
Forward = Vector3.UnitZ;
Position = Vector3.Zero;
Up = Vector3.UnitY;
Velocity = Vector3.Zero;
/* Unexposed variables, defaults based on XNA behavior */
listenerData.pCone = IntPtr.Zero;
}
}

View File

@ -0,0 +1,52 @@
using System;
using System.Runtime.InteropServices;
namespace Nerfed.Runtime.Audio;
public abstract class AudioResource : IDisposable
{
public AudioDevice Device { get; }
public bool IsDisposed { get; private set; }
private GCHandle SelfReference;
protected AudioResource(AudioDevice device)
{
Device = device;
SelfReference = GCHandle.Alloc(this, GCHandleType.Weak);
Device.AddResourceReference(SelfReference);
}
protected virtual void Dispose(bool disposing)
{
if (!IsDisposed)
{
if (disposing)
{
Device.RemoveResourceReference(SelfReference);
SelfReference.Free();
}
IsDisposed = true;
}
}
~AudioResource()
{
#if DEBUG
// If you see this log message, you leaked an audio resource without disposing it!
// We can't clean it up for you because this can cause catastrophic issues.
// You should really fix this when it happens.
Log.Warning($"A resource of type {GetType().Name} was not Disposed.");
#endif
}
public void Dispose()
{
// Do not change this code. Put cleanup code in 'Dispose(bool disposing)' method
Dispose(disposing: true);
GC.SuppressFinalize(this);
}
}

View File

@ -0,0 +1,9 @@
namespace Nerfed.Runtime.Audio;
public enum FilterType
{
None,
LowPass,
BandPass,
HighPass
}

View File

@ -0,0 +1,35 @@
namespace Nerfed.Runtime.Audio;
public enum FormatTag : ushort
{
Unknown = 0,
PCM = 1,
MSADPCM = 2,
IEEE_FLOAT = 3
}
/// <summary>
/// Describes the format of audio data. Usually specified in an audio file's header information.
/// </summary>
public record struct Format
{
public FormatTag Tag;
public ushort Channels;
public uint SampleRate;
public ushort BitsPerSample;
internal FAudio.FAudioWaveFormatEx ToFAudioFormat()
{
ushort blockAlign = (ushort) ((BitsPerSample / 8) * Channels);
return new FAudio.FAudioWaveFormatEx
{
wFormatTag = (ushort) Tag,
nChannels = Channels,
nSamplesPerSec = SampleRate,
wBitsPerSample = BitsPerSample,
nBlockAlign = blockAlign,
nAvgBytesPerSec = blockAlign * SampleRate
};
}
}
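
Review note: a worked example of ToFAudioFormat above. For 16-bit stereo PCM at 48000 Hz, blockAlign = (16 / 8) * 2 = 4 bytes per sample frame and nAvgBytesPerSec = 4 * 48000 = 192000 bytes per second.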

View File

@ -0,0 +1,6 @@
namespace Nerfed.Runtime.Audio;
public interface IPoolable<T>
{
static abstract T Create(AudioDevice device, Format format);
}

View File

@ -0,0 +1,27 @@
namespace Nerfed.Runtime.Audio;
/// <summary>
/// PersistentVoice should be used when you need to maintain a long-term reference to a source voice.
/// </summary>
public class PersistentVoice : SourceVoice, IPoolable<PersistentVoice>
{
public PersistentVoice(AudioDevice device, Format format) : base(device, format)
{
}
public static PersistentVoice Create(AudioDevice device, Format format)
{
return new PersistentVoice(device, format);
}
/// <summary>
/// Adds an AudioBuffer to the voice queue.
/// The voice processes and plays back the buffers in its queue in the order that they were submitted.
/// </summary>
/// <param name="buffer">The buffer to submit to the voice.</param>
/// <param name="loop">Whether the voice should loop this buffer.</param>
public void Submit(AudioBuffer buffer, bool loop = false)
{
Submit(buffer.ToFAudioBuffer(loop));
}
}

View File

@ -0,0 +1,82 @@
using System;
using System.Runtime.InteropServices;
namespace Nerfed.Runtime.Audio;
/// <summary>
/// Use this in conjunction with SourceVoice.SetReverbEffectChain to add reverb to a voice.
/// </summary>
public unsafe class ReverbEffect : SubmixVoice
{
// Defaults based on FAUDIOFX_I3DL2_PRESET_GENERIC
public static FAudio.FAudioFXReverbParameters DefaultParams = new FAudio.FAudioFXReverbParameters
{
WetDryMix = 100.0f,
ReflectionsDelay = 7,
ReverbDelay = 11,
RearDelay = FAudio.FAUDIOFX_REVERB_DEFAULT_REAR_DELAY,
PositionLeft = FAudio.FAUDIOFX_REVERB_DEFAULT_POSITION,
PositionRight = FAudio.FAUDIOFX_REVERB_DEFAULT_POSITION,
PositionMatrixLeft = FAudio.FAUDIOFX_REVERB_DEFAULT_POSITION_MATRIX,
PositionMatrixRight = FAudio.FAUDIOFX_REVERB_DEFAULT_POSITION_MATRIX,
EarlyDiffusion = 15,
LateDiffusion = 15,
LowEQGain = 8,
LowEQCutoff = 4,
HighEQGain = 8,
HighEQCutoff = 6,
RoomFilterFreq = 5000f,
RoomFilterMain = -10f,
RoomFilterHF = -1f,
ReflectionsGain = -26.0200005f,
ReverbGain = 10.0f,
DecayTime = 1.49000001f,
Density = 100.0f,
RoomSize = FAudio.FAUDIOFX_REVERB_DEFAULT_ROOM_SIZE
};
public FAudio.FAudioFXReverbParameters Params { get; private set; }
public ReverbEffect(AudioDevice audioDevice, uint processingStage) : base(audioDevice, 1, audioDevice.DeviceDetails.OutputFormat.Format.nSamplesPerSec, processingStage)
{
/* Init reverb */
IntPtr reverb;
FAudio.FAudioCreateReverb(out reverb, 0);
FAudio.FAudioEffectChain chain = new FAudio.FAudioEffectChain();
FAudio.FAudioEffectDescriptor descriptor = new FAudio.FAudioEffectDescriptor
{
InitialState = 1,
OutputChannels = 1,
pEffect = reverb
};
chain.EffectCount = 1;
chain.pEffectDescriptors = (nint) (&descriptor);
FAudio.FAudioVoice_SetEffectChain(
Handle,
ref chain
);
FAudio.FAPOBase_Release(reverb);
SetParams(DefaultParams);
}
public void SetParams(in FAudio.FAudioFXReverbParameters reverbParams)
{
Params = reverbParams;
fixed (FAudio.FAudioFXReverbParameters* reverbParamsPtr = &reverbParams)
{
FAudio.FAudioVoice_SetEffectParameters(
Handle,
0,
(nint) reverbParamsPtr,
(uint) Marshal.SizeOf<FAudio.FAudioFXReverbParameters>(),
0
);
}
}
}
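
Review note: a usage sketch for the reverb submix above (assumes device and a voice; the processingStage value of 1 is an arbitrary choice).

ReverbEffect reverbEffect = new ReverbEffect(device, 1);
voice.SetReverbEffectChain(reverbEffect);
// Params returns a copy of the struct, so tweak and re-apply:
FAudio.FAudioFXReverbParameters p = reverbEffect.Params;
p.DecayTime = 3f; // longer tail
reverbEffect.SetParams(p);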

View File

@ -0,0 +1,63 @@
namespace Nerfed.Runtime.Audio;
/// <summary>
/// Plays back a series of AudioBuffers in sequence. Set the OnSoundNeeded callback to add AudioBuffers dynamically.
/// </summary>
public class SoundSequence : UpdatingSourceVoice, IPoolable<SoundSequence>
{
public int NeedSoundThreshold = 0;
public delegate void OnSoundNeededFunc();
public OnSoundNeededFunc OnSoundNeeded;
public SoundSequence(AudioDevice device, Format format) : base(device, format)
{
}
public SoundSequence(AudioDevice device, AudioBuffer templateSound) : base(device, templateSound.Format)
{
}
public static SoundSequence Create(AudioDevice device, Format format)
{
return new SoundSequence(device, format);
}
public override void Update()
{
lock (StateLock)
{
if (State != SoundState.Playing) { return; }
if (NeedSoundThreshold > 0)
{
int buffersNeeded = NeedSoundThreshold - (int) BuffersQueued;
for (int i = 0; i < buffersNeeded; i += 1)
{
if (OnSoundNeeded != null)
{
OnSoundNeeded();
}
}
}
}
}
public void EnqueueSound(AudioBuffer buffer)
{
#if DEBUG
if (buffer.Format != Format)
{
Log.Warning("Sound sequence audio format mismatch!");
return;
}
#endif
lock (StateLock)
{
Submit(buffer.ToFAudioBuffer());
}
}
}
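
Review note: a callback-driven usage sketch for the class above (PickNextClip is hypothetical; every clip it returns must share clipFormat).

SoundSequence sequence = device.Obtain<SoundSequence>(clipFormat);
sequence.NeedSoundThreshold = 2; // keep two buffers queued ahead
sequence.OnSoundNeeded += () => sequence.EnqueueSound(PickNextClip());
sequence.Play();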

View File

@ -0,0 +1,8 @@
namespace Nerfed.Runtime.Audio;
public enum SoundState
{
Playing,
Paused,
Stopped
}

View File

@ -0,0 +1,217 @@
using System;
namespace Nerfed.Runtime.Audio;
/// <summary>
/// Emits audio from submitted audio buffers.
/// </summary>
public abstract class SourceVoice : Voice
{
private Format format;
public Format Format => format;
protected bool PlaybackInitiated;
/// <summary>
/// The number of buffers queued in the voice.
/// This includes the buffer that is currently playing!
/// </summary>
public uint BuffersQueued
{
get
{
FAudio.FAudioSourceVoice_GetState(
Handle,
out FAudio.FAudioVoiceState state,
FAudio.FAUDIO_VOICE_NOSAMPLESPLAYED
);
return state.BuffersQueued;
}
}
private SoundState state = SoundState.Stopped;
public SoundState State
{
get
{
if (BuffersQueued == 0)
{
Stop();
}
return state;
}
internal set
{
state = value;
}
}
protected object StateLock = new object();
public SourceVoice(
AudioDevice device,
Format format
) : base(device, format.Channels, device.DeviceDetails.OutputFormat.Format.nChannels)
{
this.format = format;
FAudio.FAudioWaveFormatEx fAudioFormat = format.ToFAudioFormat();
FAudio.FAudio_CreateSourceVoice(
device.Handle,
out handle,
ref fAudioFormat,
FAudio.FAUDIO_VOICE_USEFILTER,
FAudio.FAUDIO_DEFAULT_FREQ_RATIO,
IntPtr.Zero,
IntPtr.Zero, // default sends to mastering voice!
IntPtr.Zero
);
SetOutputVoice(device.MasteringVoice);
}
/// <summary>
/// Starts consumption and processing of audio by the voice.
/// Delivers the result to any connected submix or mastering voice.
/// </summary>
/// <param name="syncGroup">Optional. Denotes that the operation will be pending until AudioDevice.TriggerSyncGroup is called.</param>
public void Play(uint syncGroup = FAudio.FAUDIO_COMMIT_NOW)
{
lock (StateLock)
{
FAudio.FAudioSourceVoice_Start(Handle, 0, syncGroup);
State = SoundState.Playing;
PlaybackInitiated = true; // lets pooled voices (e.g. TransientVoice) know playback has started
}
}
/// <summary>
/// Pauses playback.
/// All source buffers that are queued on the voice and the current cursor position are preserved.
/// </summary>
/// <param name="syncGroup">Optional. Denotes that the operation will be pending until AudioDevice.TriggerSyncGroup is called.</param>
public void Pause(uint syncGroup = FAudio.FAUDIO_COMMIT_NOW)
{
lock (StateLock)
{
FAudio.FAudioSourceVoice_Stop(Handle, 0, syncGroup);
State = SoundState.Paused;
}
}
/// <summary>
/// Stops looping the voice when it reaches the end of the current loop region.
/// If the cursor for the voice is not in a loop region, ExitLoop does nothing.
/// </summary>
/// <param name="syncGroup">Optional. Denotes that the operation will be pending until AudioDevice.TriggerSyncGroup is called.</param>
public void ExitLoop(uint syncGroup = FAudio.FAUDIO_COMMIT_NOW)
{
lock (StateLock)
{
FAudio.FAudioSourceVoice_ExitLoop(Handle, syncGroup);
}
}
/// <summary>
/// Stops playback and removes all pending audio buffers from the voice queue.
/// </summary>
/// <param name="syncGroup">Optional. Denotes that the operation will be pending until AudioDevice.TriggerSyncGroup is called.</param>
public void Stop(uint syncGroup = FAudio.FAUDIO_COMMIT_NOW)
{
lock (StateLock)
{
FAudio.FAudioSourceVoice_Stop(Handle, 0, syncGroup);
FAudio.FAudioSourceVoice_FlushSourceBuffers(Handle);
State = SoundState.Stopped;
}
}
/// <summary>
/// Adds an AudioBuffer to the voice queue.
/// The voice processes and plays back the buffers in its queue in the order that they were submitted.
/// </summary>
/// <param name="buffer">The buffer to submit to the voice.</param>
public void Submit(AudioBuffer buffer)
{
Submit(buffer.ToFAudioBuffer());
}
/// <summary>
/// Calculates positional sound. This must be called continuously to update positional sound.
/// </summary>
/// <param name="listener"></param>
/// <param name="emitter"></param>
public unsafe void Apply3D(AudioListener listener, AudioEmitter emitter)
{
Is3D = true;
emitter.emitterData.CurveDistanceScaler = Device.CurveDistanceScalar;
emitter.emitterData.ChannelCount = SourceChannelCount;
FAudio.F3DAUDIO_DSP_SETTINGS dspSettings = new FAudio.F3DAUDIO_DSP_SETTINGS
{
DopplerFactor = DopplerFactor,
SrcChannelCount = SourceChannelCount,
DstChannelCount = DestinationChannelCount,
pMatrixCoefficients = (nint) pMatrixCoefficients
};
FAudio.F3DAudioCalculate(
Device.Handle3D,
ref listener.listenerData,
ref emitter.emitterData,
FAudio.F3DAUDIO_CALCULATE_MATRIX | FAudio.F3DAUDIO_CALCULATE_DOPPLER,
ref dspSettings
);
UpdatePitch();
FAudio.FAudioVoice_SetOutputMatrix(
Handle,
OutputVoice.Handle,
SourceChannelCount,
DestinationChannelCount,
(nint) pMatrixCoefficients,
0
);
}
/// <summary>
/// Specifies that this source voice can be returned to the voice pool.
/// Holding on to the reference after calling this will cause problems!
/// </summary>
public void Return()
{
Stop();
Device.Return(this);
}
/// <summary>
/// Adds an FAudio buffer to the voice queue.
/// The voice processes and plays back the buffers in its queue in the order that they were submitted.
/// </summary>
/// <param name="buffer">The buffer to submit to the voice.</param>
protected void Submit(FAudio.FAudioBuffer buffer)
{
lock (StateLock)
{
FAudio.FAudioSourceVoice_SubmitSourceBuffer(
Handle,
ref buffer,
IntPtr.Zero
);
}
}
public override void Reset()
{
Stop();
PlaybackInitiated = false;
base.Reset();
}
}
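
Review note: a sketch of the syncGroup parameter threaded through the methods above. Pending operations tagged with the same group fire together on TriggerSyncGroup, which starts voices in lockstep (the group value 1 is arbitrary).

voiceA.Play(syncGroup: 1);
voiceB.Play(syncGroup: 1);
device.TriggerSyncGroup(1); // both voices start on the same audio tick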

View File

@ -0,0 +1,38 @@
using System.Collections.Generic;
namespace Nerfed.Runtime.Audio;
internal class SourceVoicePool
{
private AudioDevice Device;
Dictionary<(System.Type, Format), Queue<SourceVoice>> VoiceLists = new Dictionary<(System.Type, Format), Queue<SourceVoice>>();
public SourceVoicePool(AudioDevice device)
{
Device = device;
}
public T Obtain<T>(Format format) where T : SourceVoice, IPoolable<T>
{
if (!VoiceLists.ContainsKey((typeof(T), format)))
{
VoiceLists.Add((typeof(T), format), new Queue<SourceVoice>());
}
Queue<SourceVoice> list = VoiceLists[(typeof(T), format)];
if (list.Count == 0)
{
list.Enqueue(T.Create(Device, format));
}
return (T) list.Dequeue();
}
public void Return(SourceVoice voice)
{
Queue<SourceVoice> list = VoiceLists[(voice.GetType(), voice.Format)];
list.Enqueue(voice);
}
}

View File

@ -0,0 +1,168 @@
using System;
using System.Runtime.InteropServices;
namespace Nerfed.Runtime.Audio;
/// <summary>
/// Use in conjunction with an AudioDataStreamable object to play back streaming audio data.
/// </summary>
public class StreamingVoice : UpdatingSourceVoice, IPoolable<StreamingVoice>
{
private const int BUFFER_COUNT = 3;
private readonly IntPtr[] buffers;
private int nextBufferIndex = 0;
private uint BufferSize;
public bool Loop { get; set; }
public AudioDataStreamable AudioData { get; protected set; }
public unsafe StreamingVoice(AudioDevice device, Format format) : base(device, format)
{
buffers = new IntPtr[BUFFER_COUNT];
}
public static StreamingVoice Create(AudioDevice device, Format format)
{
return new StreamingVoice(device, format);
}
/// <summary>
/// Loads and prepares an AudioDataStreamable for streaming playback.
/// This automatically calls Load on the given AudioDataStreamable.
/// </summary>
public void Load(AudioDataStreamable data)
{
lock (StateLock)
{
if (AudioData != null)
{
AudioData.Unload();
}
data.Load();
AudioData = data;
InitializeBuffers();
QueueBuffers();
}
}
/// <summary>
/// Unloads AudioDataStreamable from this voice.
/// This automatically calls Unload on the given AudioDataStreamable.
/// </summary>
public void Unload()
{
lock (StateLock)
{
if (AudioData != null)
{
Stop();
AudioData.Unload();
AudioData = null;
}
}
}
public override void Reset()
{
Unload();
base.Reset();
}
public override void Update()
{
lock (StateLock)
{
if (AudioData == null || State != SoundState.Playing)
{
return;
}
QueueBuffers();
}
}
private void QueueBuffers()
{
int buffersNeeded = BUFFER_COUNT - (int) BuffersQueued; // don't get got by uint underflow!
for (int i = 0; i < buffersNeeded; i += 1)
{
AddBuffer();
}
}
private unsafe void AddBuffer()
{
IntPtr buffer = buffers[nextBufferIndex];
nextBufferIndex = (nextBufferIndex + 1) % BUFFER_COUNT;
AudioData.Decode(
(void*) buffer,
(int) BufferSize,
out int filledLengthInBytes,
out bool reachedEnd
);
if (filledLengthInBytes > 0)
{
FAudio.FAudioBuffer buf = new FAudio.FAudioBuffer
{
AudioBytes = (uint) filledLengthInBytes,
pAudioData = buffer,
PlayLength = (
(uint) (filledLengthInBytes /
Format.Channels /
(uint) (Format.BitsPerSample / 8))
)
};
Submit(buf);
}
if (reachedEnd)
{
/* Reached the end of the data: restart if looping, otherwise let the queue drain. */
if (Loop)
{
AudioData.Seek(0);
}
}
}
private unsafe void InitializeBuffers()
{
BufferSize = AudioData.DecodeBufferSize;
for (int i = 0; i < BUFFER_COUNT; i += 1)
{
if (buffers[i] != IntPtr.Zero)
{
NativeMemory.Free((void*) buffers[i]);
}
buffers[i] = (IntPtr) NativeMemory.Alloc(BufferSize);
}
}
protected override unsafe void Dispose(bool disposing)
{
if (!IsDisposed)
{
lock (StateLock)
{
Stop();
for (int i = 0; i < BUFFER_COUNT; i += 1)
{
if (buffers[i] != IntPtr.Zero)
{
NativeMemory.Free((void*) buffers[i]);
}
}
}
}
base.Dispose(disposing);
}
}

View File

@ -0,0 +1,55 @@
using System;
namespace Nerfed.Runtime.Audio;
/// <summary>
/// SourceVoices can send audio to a SubmixVoice for convenient effects processing.
/// Submixes process in order of processingStage, from lowest to highest.
/// A submix that feeds into another submix should therefore have a lower processingStage than the one it feeds.
/// </summary>
public class SubmixVoice : Voice
{
public SubmixVoice(
AudioDevice device,
uint sourceChannelCount,
uint sampleRate,
uint processingStage
) : base(device, sourceChannelCount, device.DeviceDetails.OutputFormat.Format.nChannels)
{
FAudio.FAudio_CreateSubmixVoice(
device.Handle,
out handle,
sourceChannelCount,
sampleRate,
FAudio.FAUDIO_VOICE_USEFILTER,
processingStage,
IntPtr.Zero,
IntPtr.Zero
);
SetOutputVoice(device.MasteringVoice);
}
private SubmixVoice(
AudioDevice device
) : base(device, device.DeviceDetails.OutputFormat.Format.nChannels, device.DeviceDetails.OutputFormat.Format.nChannels)
{
FAudio.FAudio_CreateSubmixVoice(
device.Handle,
out handle,
device.DeviceDetails.OutputFormat.Format.nChannels,
device.DeviceDetails.OutputFormat.Format.nSamplesPerSec,
FAudio.FAUDIO_VOICE_USEFILTER,
int.MaxValue,
IntPtr.Zero, // default sends to mastering voice
IntPtr.Zero
);
OutputVoice = null;
}
internal static SubmixVoice CreateFauxMasteringVoice(AudioDevice device)
{
return new SubmixVoice(device);
}
}
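
Review note: a bus-routing sketch for the class above (a 2-channel, 48 kHz effects bus at processingStage 0; the values are arbitrary).

SubmixVoice sfxBus = new SubmixVoice(device, 2, 48000, 0);
voice.SetOutputVoice(sfxBus); // route a source through the bus instead of straight to mastering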

View File

@ -0,0 +1,28 @@
namespace Nerfed.Runtime.Audio;
/// <summary>
/// TransientVoice is intended for playing one-off sound effects that don't have a long term reference. <br/>
/// It will be automatically returned to the AudioDevice SourceVoice pool once it is done playing back.
/// </summary>
public class TransientVoice : UpdatingSourceVoice, IPoolable<TransientVoice>
{
static TransientVoice IPoolable<TransientVoice>.Create(AudioDevice device, Format format)
{
return new TransientVoice(device, format);
}
public TransientVoice(AudioDevice device, Format format) : base(device, format)
{
}
public override void Update()
{
lock (StateLock)
{
if (PlaybackInitiated && BuffersQueued == 0)
{
Return();
}
}
}
}

View File

@ -0,0 +1,10 @@
namespace Nerfed.Runtime.Audio;
public abstract class UpdatingSourceVoice : SourceVoice
{
protected UpdatingSourceVoice(AudioDevice device, Format format) : base(device, format)
{
}
public abstract void Update();
}

View File

@ -0,0 +1,433 @@
using System;
using System.Runtime.InteropServices;
using EasingFunction = System.Func<float, float>;
namespace Nerfed.Runtime.Audio;
/// <summary>
/// Handles audio playback from audio buffer data. Can be configured with a variety of parameters.
/// </summary>
public abstract unsafe class Voice : AudioResource
{
protected IntPtr handle;
public IntPtr Handle => handle;
public uint SourceChannelCount { get; }
public uint DestinationChannelCount { get; }
protected SubmixVoice OutputVoice;
private ReverbEffect ReverbEffect;
protected byte* pMatrixCoefficients;
public bool Is3D { get; protected set; }
private float dopplerFactor;
/// <summary>
/// The strength of the doppler effect on this voice.
/// </summary>
public float DopplerFactor
{
get => dopplerFactor;
set
{
if (dopplerFactor != value)
{
dopplerFactor = value;
UpdatePitch();
}
}
}
private float volume = 1;
/// <summary>
/// The overall volume level for the voice.
/// </summary>
public float Volume
{
get => volume;
internal set
{
value = MathF.Max(0f, value);
if (volume != value)
{
volume = value;
FAudio.FAudioVoice_SetVolume(Handle, volume, 0);
}
}
}
private float pitch = 0;
/// <summary>
/// The pitch of the voice.
/// </summary>
public float Pitch
{
get => pitch;
internal set
{
value = Math.Clamp(value, -1f, 1f);
if (pitch != value)
{
pitch = value;
UpdatePitch();
}
}
}
private const float MAX_FILTER_FREQUENCY = 1f;
private const float MAX_FILTER_ONEOVERQ = 1.5f;
private FAudio.FAudioFilterParameters filterParameters = new FAudio.FAudioFilterParameters
{
Type = FAudio.FAudioFilterType.FAudioLowPassFilter,
Frequency = 1f,
OneOverQ = 1f
};
/// <summary>
/// The frequency cutoff on the voice filter.
/// </summary>
public float FilterFrequency
{
get => filterParameters.Frequency;
internal set
{
value = System.Math.Clamp(value, 0.01f, MAX_FILTER_FREQUENCY);
if (filterParameters.Frequency != value)
{
filterParameters.Frequency = value;
FAudio.FAudioVoice_SetFilterParameters(
Handle,
ref filterParameters,
0
);
}
}
}
/// <summary>
/// Reciprocal of Q factor.
/// Controls how quickly frequencies beyond the filter frequency are dampened.
/// </summary>
public float FilterOneOverQ
{
get => filterParameters.OneOverQ;
internal set
{
value = System.Math.Clamp(value, 0.01f, MAX_FILTER_ONEOVERQ);
if (filterParameters.OneOverQ != value)
{
filterParameters.OneOverQ = value;
FAudio.FAudioVoice_SetFilterParameters(
Handle,
ref filterParameters,
0
);
}
}
}
private FilterType filterType;
/// <summary>
/// The frequency filter that is applied to the voice.
/// </summary>
public FilterType FilterType
{
get => filterType;
set
{
if (filterType != value)
{
filterType = value;
switch (filterType)
{
case FilterType.None:
filterParameters = new FAudio.FAudioFilterParameters
{
Type = FAudio.FAudioFilterType.FAudioLowPassFilter,
Frequency = 1f,
OneOverQ = 1f
};
break;
case FilterType.LowPass:
filterParameters.Type = FAudio.FAudioFilterType.FAudioLowPassFilter;
filterParameters.Frequency = 1f;
break;
case FilterType.BandPass:
filterParameters.Type = FAudio.FAudioFilterType.FAudioBandPassFilter;
break;
case FilterType.HighPass:
filterParameters.Type = FAudio.FAudioFilterType.FAudioHighPassFilter;
filterParameters.Frequency = 0f;
break;
}
FAudio.FAudioVoice_SetFilterParameters(
Handle,
ref filterParameters,
0
);
}
}
}
protected float pan = 0;
/// <summary>
/// Left-right panning. -1 is hard left pan, 1 is hard right pan.
/// </summary>
public float Pan
{
get => pan;
internal set
{
value = Math.Clamp(value, -1f, 1f);
if (pan != value)
{
pan = value;
if (Is3D) { return; }
SetPanMatrixCoefficients();
FAudio.FAudioVoice_SetOutputMatrix(
Handle,
OutputVoice.Handle,
SourceChannelCount,
DestinationChannelCount,
(nint) pMatrixCoefficients,
0
);
}
}
}
private float reverb;
/// <summary>
/// The wet-dry mix of the reverb effect.
/// Has no effect if SetReverbEffectChain has not been called.
/// </summary>
public unsafe float Reverb
{
get => reverb;
internal set
{
if (ReverbEffect != null)
{
value = MathF.Max(0, value);
if (reverb != value)
{
reverb = value;
float* outputMatrix = (float*) pMatrixCoefficients;
outputMatrix[0] = reverb;
if (SourceChannelCount == 2)
{
outputMatrix[1] = reverb;
}
FAudio.FAudioVoice_SetOutputMatrix(
Handle,
ReverbEffect.Handle,
SourceChannelCount,
1,
(nint) pMatrixCoefficients,
0
);
}
}
#if DEBUG
if (ReverbEffect == null)
{
Log.Warning("Tried to set reverb value before applying a reverb effect");
}
#endif
}
}
public Voice(AudioDevice device, uint sourceChannelCount, uint destinationChannelCount) : base(device)
{
SourceChannelCount = sourceChannelCount;
DestinationChannelCount = destinationChannelCount;
nuint memsize = sizeof(float) * sourceChannelCount * destinationChannelCount; // one float per source/destination channel pair
pMatrixCoefficients = (byte*) NativeMemory.AllocZeroed(memsize);
SetPanMatrixCoefficients();
}
/// <summary>
/// Sets the output voice for this voice.
/// </summary>
/// <param name="send">Where the output should be sent.</param>
public unsafe void SetOutputVoice(SubmixVoice send)
{
OutputVoice = send;
if (ReverbEffect != null)
{
SetReverbEffectChain(ReverbEffect);
}
else
{
FAudio.FAudioSendDescriptor* sendDesc = stackalloc FAudio.FAudioSendDescriptor[1];
sendDesc[0].Flags = 0;
sendDesc[0].pOutputVoice = send.Handle;
FAudio.FAudioVoiceSends sends = new FAudio.FAudioVoiceSends();
sends.SendCount = 1;
sends.pSends = (nint) sendDesc;
FAudio.FAudioVoice_SetOutputVoices(
Handle,
ref sends
);
}
}
/// <summary>
/// Applies a reverb effect chain to this voice.
/// </summary>
public unsafe void SetReverbEffectChain(ReverbEffect reverbEffect)
{
FAudio.FAudioSendDescriptor* sendDesc = stackalloc FAudio.FAudioSendDescriptor[2];
sendDesc[0].Flags = 0;
sendDesc[0].pOutputVoice = OutputVoice.Handle;
sendDesc[1].Flags = 0;
sendDesc[1].pOutputVoice = reverbEffect.Handle;
FAudio.FAudioVoiceSends sends = new FAudio.FAudioVoiceSends();
sends.SendCount = 2;
sends.pSends = (nint) sendDesc;
FAudio.FAudioVoice_SetOutputVoices(
Handle,
ref sends
);
ReverbEffect = reverbEffect;
}
/// <summary>
/// Removes the reverb effect chain from this voice.
/// </summary>
public void RemoveReverbEffectChain()
{
if (ReverbEffect != null)
{
ReverbEffect = null;
reverb = 0;
SetOutputVoice(OutputVoice);
}
}
/// <summary>
/// Resets all voice parameters to defaults.
/// </summary>
public virtual void Reset()
{
RemoveReverbEffectChain();
Volume = 1;
Pan = 0;
Pitch = 0;
FilterType = FilterType.None;
SetOutputVoice(Device.MasteringVoice);
}
// Taken from https://github.com/FNA-XNA/FNA/blob/master/src/Audio/SoundEffectInstance.cs
private unsafe void SetPanMatrixCoefficients()
{
/* Two major things to notice:
* 1. The spec assumes any speaker count >= 2 has Front Left/Right.
* 2. Stereo panning is WAY more complicated than you think.
* The main thing is that hard panning does NOT eliminate an
* entire channel; the two channels are blended on each side.
* -flibit
*/
float* outputMatrix = (float*) pMatrixCoefficients;
if (SourceChannelCount == 1)
{
if (DestinationChannelCount == 1)
{
outputMatrix[0] = 1.0f;
}
else
{
outputMatrix[0] = (pan > 0.0f) ? (1.0f - pan) : 1.0f;
outputMatrix[1] = (pan < 0.0f) ? (1.0f + pan) : 1.0f;
}
}
else
{
if (DestinationChannelCount == 1)
{
outputMatrix[0] = 1.0f;
outputMatrix[1] = 1.0f;
}
else
{
if (pan <= 0.0f)
{
// Left speaker blends left/right channels
outputMatrix[0] = 0.5f * pan + 1.0f;
outputMatrix[1] = 0.5f * -pan;
// Right speaker gets less of the right channel
outputMatrix[2] = 0.0f;
outputMatrix[3] = pan + 1.0f;
}
else
{
// Left speaker gets less of the left channel
outputMatrix[0] = -pan + 1.0f;
outputMatrix[1] = 0.0f;
// Right speaker blends right/left channels
outputMatrix[2] = 0.5f * pan;
outputMatrix[3] = 0.5f * -pan + 1.0f;
}
}
}
}
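// Review note (worked example, not part of this commit): for a stereo source,
// stereo output, and pan = +0.5, the branch above yields
//   left speaker  = 0.50 * L + 0.00 * R   (outputMatrix[0], outputMatrix[1])
//   right speaker = 0.25 * L + 0.75 * R   (outputMatrix[2], outputMatrix[3])
// i.e. hard panning folds both source channels into one speaker rather than
// dropping a channel outright.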
protected void UpdatePitch()
{
float doppler;
float dopplerScale = Device.DopplerScale;
if (!Is3D || dopplerScale == 0.0f)
{
doppler = 1.0f;
}
else
{
doppler = DopplerFactor * dopplerScale;
}
FAudio.FAudioSourceVoice_SetFrequencyRatio(
Handle,
(float) System.Math.Pow(2.0, pitch) * doppler,
0
);
}
protected override unsafe void Dispose(bool disposing)
{
if (!IsDisposed)
{
NativeMemory.Free(pMatrixCoefficients);
FAudio.FAudioVoice_DestroyVoice(Handle);
}
base.Dispose(disposing);
}
}