-
-
Save darktable/2317063 to your computer and use it in GitHub Desktop.
// Copyright (c) 2012 Calvin Rien | |
// http://the.darktable.com | |
// | |
// This software is provided 'as-is', without any express or implied warranty. In | |
// no event will the authors be held liable for any damages arising from the use | |
// of this software. | |
// | |
// Permission is granted to anyone to use this software for any purpose, | |
// including commercial applications, and to alter it and redistribute it freely, | |
// subject to the following restrictions: | |
// | |
// 1. The origin of this software must not be misrepresented; you must not claim | |
// that you wrote the original software. If you use this software in a product, | |
// an acknowledgment in the product documentation would be appreciated but is not | |
// required. | |
// | |
// 2. Altered source versions must be plainly marked as such, and must not be | |
// misrepresented as being the original software. | |
// | |
// 3. This notice may not be removed or altered from any source distribution. | |
// | |
// ============================================================================= | |
// | |
// derived from Gregorio Zanon's script | |
// http://forum.unity3d.com/threads/119295-Writing-AudioListener.GetOutputData-to-wav-problem?p=806734&viewfull=1#post806734 | |
using System; | |
using System.IO; | |
using UnityEngine; | |
using System.Collections.Generic; | |
public static class SavWav {

	// Canonical RIFF/WAVE header size for 16-bit PCM is 44 bytes.
	const int HEADER_SIZE = 44;

	/// <summary>
	/// Saves an AudioClip as a 16-bit PCM WAV file under Application.persistentDataPath.
	/// </summary>
	/// <param name="filename">Target file name; ".wav" is appended if missing.</param>
	/// <param name="clip">Clip whose sample data is written.</param>
	/// <returns>Always true for now.</returns>
	public static bool Save(string filename, AudioClip clip) {
		if (!filename.ToLower().EndsWith(".wav")) {
			filename += ".wav";
		}

		var filepath = Path.Combine(Application.persistentDataPath, filename);

		Debug.Log(filepath);

		// Make sure directory exists if user is saving to sub dir.
		Directory.CreateDirectory(Path.GetDirectoryName(filepath));

		using (var fileStream = CreateEmpty(filepath)) {
			// Write the sample data first, then seek back and fill in the
			// header once the final stream length is known.
			ConvertAndWrite(fileStream, clip);
			WriteHeader(fileStream, clip);
		}

		return true; // TODO: return false if there's a failure saving the file
	}

	/// <summary>
	/// Returns a new clip with leading/trailing samples at or below
	/// <paramref name="min"/> amplitude removed.
	/// </summary>
	public static AudioClip TrimSilence(AudioClip clip, float min) {
		// GetData needs room for ALL interleaved channel data, i.e.
		// samples-per-channel * channels; clip.samples alone truncated
		// stereo clips to half their length.
		var samples = new float[clip.samples * clip.channels];

		clip.GetData(samples, 0);

		return TrimSilence(new List<float>(samples), min, clip.channels, clip.frequency);
	}

	public static AudioClip TrimSilence(List<float> samples, float min, int channels, int hz) {
		return TrimSilence(samples, min, channels, hz, false, false);
	}

	/// <summary>
	/// Trims leading/trailing near-silence from an interleaved sample list and
	/// builds a new AudioClip from the remainder.
	/// </summary>
	public static AudioClip TrimSilence(List<float> samples, float min, int channels, int hz, bool _3D, bool stream) {
		int i;

		// Find the first sample louder than the threshold; drop everything before it.
		for (i = 0; i < samples.Count; i++) {
			if (Mathf.Abs(samples[i]) > min) {
				break;
			}
		}

		samples.RemoveRange(0, i);

		// Find the last sample louder than the threshold; drop everything after it.
		for (i = samples.Count - 1; i > 0; i--) {
			if (Mathf.Abs(samples[i]) > min) {
				break;
			}
		}

		// Keep the loud sample at index i itself. The original removed from i,
		// cutting the last audible sample and throwing when the list was empty.
		samples.RemoveRange(i + 1, samples.Count - i - 1);

		// AudioClip.Create takes the PER-CHANNEL sample count, so divide the
		// interleaved count by the channel count.
		var clip = AudioClip.Create("TempClip", samples.Count / channels, channels, hz, _3D, stream);
		clip.SetData(samples.ToArray(), 0);

		return clip;
	}

	// Creates/overwrites the file and reserves HEADER_SIZE zero bytes so the
	// header can be written over them after the sample data is known.
	static FileStream CreateEmpty(string filepath) {
		var fileStream = new FileStream(filepath, FileMode.Create);
		byte emptyByte = new byte();

		for (int i = 0; i < HEADER_SIZE; i++) { // preparing the header
			fileStream.WriteByte(emptyByte);
		}

		return fileStream;
	}

	// Converts the clip's float samples to little-endian 16-bit PCM and
	// appends them to the stream (which is positioned past the header).
	static void ConvertAndWrite(FileStream fileStream, AudioClip clip) {
		// Buffer must hold samples for every channel (interleaved). Using
		// clip.samples alone is the classic "stereo saved at half duration" bug.
		var samples = new float[clip.samples * clip.channels];

		clip.GetData(samples, 0);

		// Two bytes per sample: a float converted to Int16 is 2 bytes.
		Byte[] bytesData = new Byte[samples.Length * 2];

		const int rescaleFactor = 32767; // to convert float [-1,1] to Int16

		for (int i = 0; i < samples.Length; i++) {
			short sample = (short)(samples[i] * rescaleFactor);
			Byte[] byteArr = BitConverter.GetBytes(sample);
			byteArr.CopyTo(bytesData, i * 2);
		}

		fileStream.Write(bytesData, 0, bytesData.Length);
	}

	// Writes the 44-byte canonical WAV header at the start of the stream.
	// Assumes the sample data has already been written after HEADER_SIZE.
	static void WriteHeader(FileStream fileStream, AudioClip clip) {
		var hz = clip.frequency;
		var channels = clip.channels;
		var samples = clip.samples; // per-channel sample count

		fileStream.Seek(0, SeekOrigin.Begin);

		Byte[] riff = System.Text.Encoding.UTF8.GetBytes("RIFF");
		fileStream.Write(riff, 0, 4);

		// RIFF chunk size = total file size minus the 8-byte RIFF header.
		Byte[] chunkSize = BitConverter.GetBytes(fileStream.Length - 8);
		fileStream.Write(chunkSize, 0, 4);

		Byte[] wave = System.Text.Encoding.UTF8.GetBytes("WAVE");
		fileStream.Write(wave, 0, 4);

		Byte[] fmt = System.Text.Encoding.UTF8.GetBytes("fmt ");
		fileStream.Write(fmt, 0, 4);

		Byte[] subChunk1 = BitConverter.GetBytes(16); // fmt chunk is 16 bytes for PCM
		fileStream.Write(subChunk1, 0, 4);

		UInt16 one = 1;

		Byte[] audioFormat = BitConverter.GetBytes(one); // 1 = uncompressed PCM
		fileStream.Write(audioFormat, 0, 2);

		Byte[] numChannels = BitConverter.GetBytes(channels);
		fileStream.Write(numChannels, 0, 2);

		Byte[] sampleRate = BitConverter.GetBytes(hz);
		fileStream.Write(sampleRate, 0, 4);

		// byteRate = sampleRate * bytesPerSample * number of channels.
		Byte[] byteRate = BitConverter.GetBytes(hz * channels * 2);
		fileStream.Write(byteRate, 0, 4);

		UInt16 blockAlign = (ushort)(channels * 2);
		fileStream.Write(BitConverter.GetBytes(blockAlign), 0, 2);

		UInt16 bps = 16;
		Byte[] bitsPerSample = BitConverter.GetBytes(bps);
		fileStream.Write(bitsPerSample, 0, 2);

		Byte[] datastring = System.Text.Encoding.UTF8.GetBytes("data");
		fileStream.Write(datastring, 0, 4);

		// data chunk size = per-channel samples * channels * 2 bytes per sample.
		Byte[] subChunk2 = BitConverter.GetBytes(samples * channels * 2);
		fileStream.Write(subChunk2, 0, 4);

		// Do not Close() here: the caller's using block disposes the stream.
	}
}
I have used this script to save an audio WAV file. The code above records and saves a WAV file, but when we play it back, the audio quality and modulation of the recorded voice are changed, although it does play the recording.
I need to convert this WAV file to Base64 and pass it to the iSpeech API, but it reports "bad audio data". We contacted the iSpeech support team, and they told us to set sample rate = 16 kHz, bit rate = 16-bit, channels = mono.
How can I set these values while writing a WAV file?
Why is this line commented out in the WriteHeader function:
// fileStream.Close();
Can it cause a memory leak? Can you explain this for me? Thank you.
Is there a nice way to reduce file size?
this code does the job in seconds!
using System;
using System.IO;
using UnityEngine;
using System.Collections.Generic;
using System.Threading;
public class SavWav {

	// Canonical RIFF/WAVE header size for 16-bit PCM is 44 bytes.
	const int HEADER_SIZE = 44;

	// Snapshot of a clip's raw data, so conversion can run without
	// touching the AudioClip again (e.g. off the main thread).
	struct ClipData {
		public int samples;         // samples per channel
		public int channels;        // channel count
		public float[] samplesData; // interleaved float samples
	}

	/// <summary>
	/// Saves an AudioClip as a 16-bit PCM WAV file at the given path.
	/// </summary>
	/// <param name="filename">Target path; ".wav" is appended if missing.</param>
	/// <param name="clip">Clip whose sample data is written.</param>
	/// <returns>Always true for now.</returns>
	public bool Save(string filename, AudioClip clip) {
		if (!filename.ToLower().EndsWith(".wav")) {
			filename += ".wav";
		}

		var filepath = filename;

		Debug.Log(filepath);

		// Make sure directory exists if user is saving to sub dir.
		Directory.CreateDirectory(Path.GetDirectoryName(filepath));

		ClipData clipdata = new ClipData();
		clipdata.samples = clip.samples;
		clipdata.channels = clip.channels;

		// Interleaved buffer: samples-per-channel * channels.
		float[] dataFloat = new float[clip.samples * clip.channels];
		clip.GetData(dataFloat, 0);
		clipdata.samplesData = dataFloat;

		// Both streams are disposed even on failure (the original leaked the
		// MemoryStream). PCM data is buffered in memory, then flushed to disk
		// in one write, and finally the header is filled in.
		using (var fileStream = CreateEmpty(filepath))
		using (MemoryStream memstrm = new MemoryStream()) {
			ConvertAndWrite(memstrm, clipdata);
			memstrm.WriteTo(fileStream);
			WriteHeader(fileStream, clip);
		}

		return true; // TODO: return false if there's a failure saving the file
	}

	/// <summary>
	/// Returns a new clip with leading/trailing samples at or below
	/// <paramref name="min"/> amplitude removed.
	/// </summary>
	public AudioClip TrimSilence(AudioClip clip, float min) {
		// GetData needs room for ALL interleaved channel data; clip.samples
		// alone truncated stereo clips to half their length.
		var samples = new float[clip.samples * clip.channels];

		clip.GetData(samples, 0);

		return TrimSilence(new List<float>(samples), min, clip.channels, clip.frequency);
	}

	public AudioClip TrimSilence(List<float> samples, float min, int channels, int hz) {
		return TrimSilence(samples, min, channels, hz, false, false);
	}

	/// <summary>
	/// Trims leading/trailing near-silence from an interleaved sample list and
	/// builds a new AudioClip from the remainder.
	/// </summary>
	public AudioClip TrimSilence(List<float> samples, float min, int channels, int hz, bool _3D, bool stream) {
		int i;

		// Drop everything before the first sample louder than the threshold.
		for (i = 0; i < samples.Count; i++) {
			if (Mathf.Abs(samples[i]) > min) {
				break;
			}
		}

		samples.RemoveRange(0, i);

		// Drop everything after the last sample louder than the threshold.
		for (i = samples.Count - 1; i > 0; i--) {
			if (Mathf.Abs(samples[i]) > min) {
				break;
			}
		}

		// Keep the loud sample at index i itself (removing from i cut the
		// last audible sample and threw on an empty list).
		samples.RemoveRange(i + 1, samples.Count - i - 1);

		// AudioClip.Create takes the PER-CHANNEL sample count.
		var clip = AudioClip.Create("TempClip", samples.Count / channels, channels, hz, _3D, stream);
		clip.SetData(samples.ToArray(), 0);

		return clip;
	}

	// Creates/overwrites the file and reserves HEADER_SIZE zero bytes so the
	// header can be overwritten after the sample data is written.
	FileStream CreateEmpty(string filepath) {
		var fileStream = new FileStream(filepath, FileMode.Create);
		byte emptyByte = new byte();

		for (int i = 0; i < HEADER_SIZE; i++) { // preparing the header
			fileStream.WriteByte(emptyByte);
		}

		return fileStream;
	}

	// Converts the snapshot's float samples to little-endian 16-bit PCM and
	// appends them to the memory stream.
	void ConvertAndWrite(MemoryStream memStream, ClipData clipData) {
		// The original allocated a fresh array and immediately replaced the
		// reference; just alias the snapshot directly.
		float[] samples = clipData.samplesData;

		Int16[] intData = new Int16[samples.Length];
		// Two bytes per sample: a float converted to Int16 is 2 bytes.
		Byte[] bytesData = new Byte[samples.Length * 2];

		const float rescaleFactor = 32767; // to convert float [-1,1] to Int16

		for (int i = 0; i < samples.Length; i++) {
			intData[i] = (short)(samples[i] * rescaleFactor);
		}

		// Bulk-copy the Int16 array into the byte array (little-endian layout).
		Buffer.BlockCopy(intData, 0, bytesData, 0, bytesData.Length);
		memStream.Write(bytesData, 0, bytesData.Length);
	}

	// Writes the 44-byte canonical WAV header at the start of the stream.
	// Assumes the sample data has already been written after HEADER_SIZE.
	void WriteHeader(FileStream fileStream, AudioClip clip) {
		var hz = clip.frequency;
		var channels = clip.channels;
		var samples = clip.samples; // per-channel sample count

		fileStream.Seek(0, SeekOrigin.Begin);

		Byte[] riff = System.Text.Encoding.UTF8.GetBytes("RIFF");
		fileStream.Write(riff, 0, 4);

		// RIFF chunk size = total file size minus the 8-byte RIFF header.
		Byte[] chunkSize = BitConverter.GetBytes(fileStream.Length - 8);
		fileStream.Write(chunkSize, 0, 4);

		Byte[] wave = System.Text.Encoding.UTF8.GetBytes("WAVE");
		fileStream.Write(wave, 0, 4);

		Byte[] fmt = System.Text.Encoding.UTF8.GetBytes("fmt ");
		fileStream.Write(fmt, 0, 4);

		Byte[] subChunk1 = BitConverter.GetBytes(16); // fmt chunk is 16 bytes for PCM
		fileStream.Write(subChunk1, 0, 4);

		UInt16 one = 1;

		Byte[] audioFormat = BitConverter.GetBytes(one); // 1 = uncompressed PCM
		fileStream.Write(audioFormat, 0, 2);

		Byte[] numChannels = BitConverter.GetBytes(channels);
		fileStream.Write(numChannels, 0, 2);

		Byte[] sampleRate = BitConverter.GetBytes(hz);
		fileStream.Write(sampleRate, 0, 4);

		// byteRate = sampleRate * bytesPerSample * number of channels.
		Byte[] byteRate = BitConverter.GetBytes(hz * channels * 2);
		fileStream.Write(byteRate, 0, 4);

		UInt16 blockAlign = (ushort)(channels * 2);
		fileStream.Write(BitConverter.GetBytes(blockAlign), 0, 2);

		UInt16 bps = 16;
		Byte[] bitsPerSample = BitConverter.GetBytes(bps);
		fileStream.Write(bitsPerSample, 0, 2);

		Byte[] datastring = System.Text.Encoding.UTF8.GetBytes("data");
		fileStream.Write(datastring, 0, 4);

		// data chunk size = per-channel samples * channels * 2 bytes per sample.
		Byte[] subChunk2 = BitConverter.GetBytes(samples * channels * 2);
		fileStream.Write(subChunk2, 0, 4);

		// Do not Close() here: the caller's using block disposes the stream.
	}
}
also I have gathered some libraries and made a package to save audioclip to mp3!
you can find it here: https://github.com/BeatUpir/Unity3D-save-audioClip-to-MP3
Just noting that Byte[] subChunk2 = BitConverter.GetBytes(samples * channels * 2);
is incorrect. The variable samples
comes from clip.samples
, which is the total number of samples with all channels already included. This means that multiplying by channels
will effectively double the expected number of samples if your clip has 2 channels, which is wrong. It should simply be samples * 2
.
The result of this bug is that some audio players will report the wav file as being double its real duration, as they may read only this value and not the size of the data section itself (i.e. the samples). Most of them will stop the clip "midway", when the samples themselves end, because there are no more samples to play.
I have two questions after tried the original code in this post. It worked with saving a WAV file onto my disk. However, I can't trim the silence at the beginning and the end of the wav file. Each wav file saved is with 30 seconds (if Byte[] subChunk2 = BitConverter.GetBytes(samples * channels)), or 1 minute (if Byte[] subChunk2 = BitConverter.GetBytes(samples * channels * 2)). How to solve this problem?
Is there a modified code to convert the source AudioClip into FLAC format? I can only use FLAC format for my application.
Thanks and hope to get help from you guys!
Correct, I recently found this as well. iTunes would play it normal, but Unity would play it twice as long, with the 2nd half silent.
Question: Has anyone had issues with volume being low? I have a version that combines arrays of audio clips:
https://github.com/infinitypbr/AudioClipArrayCombiner
Basically if each "Layer" has 5 clips, the script will export 125 variations, every possible combination of them. It has options for volume & delay as well. It works great with small clips.
But, when I try to combine some song layers, basically 2 minutes each, the result sounds right, but is much quieter than it should be. Any thoughts?
hi
Can I load saved audio into my audio clip?
Really good script.
However, I recommend you set the return type of the Save method to void,
as that method currently always returns true
, which is absolutely non-informative.
There is also an extra empty line in the method, which is useless to me.
And as a C# developer, I virtually die when I read the var
keyword. C# is a strongly-typed language, so go ahead and use it the way it was designed for! (I'm always open to discuss further on this, though)
I would put the comment of CreateDirectory()
on the same line as the method call, to prevent leaving the useless comment alone if ever its associated code was moved around, or copied in another place.
Finally, you probably want the recording file to be not longer than the actual recorded audio, e.g. if you declared the clip to be 120s long with Microphone.Start(deviceName, false, 120, sampleRate);
, the recording file will be 120s long no matter what, even if you only recorded actual audio, let's say, the first 6 seconds. You need to use the TrimSilence()
method in order to cut the recording file right after no audio is detected.
Thus, to fulfill my needs, the Save()
method becomes (I also removed the Debug.Log):
public static void Save(string filename, AudioClip clip, bool makeClipShort = true)
{
if (!filename.ToLower().EndsWith(".wav"))
{
filename += ".wav";
}
string filepath = Path.Combine(Application.persistentDataPath, filename);
Directory.CreateDirectory(Path.GetDirectoryName(filepath)); // Make sure directory exists if user is saving to sub dir.
if (makeClipShort)
{
clip = TrimSilence(clip, 0);
}
using (var fileStream = CreateEmpty(filepath))
{
ConvertAndWrite(fileStream, clip);
WriteHeader(fileStream, clip);
}
}
I'd be happy if this code sample saved you some time.
Cheers!
Hey, thanks for the script. I put together a little repo using this script along with the ability to record and load files if anyone is interested.
https://github.com/EXP-Productions/Unity-Audio_Recording_and_serialization_example/
I'm using the script and it works quite well with one glitch for me. It truncates the files. I can't see anything in the code which limits the file size of the clip. Am I missing something?
Sorry to open this old thread, It records only for 1 sec, if anyone has any idea?
Could this easily be translated to 48/24 wavs? I can change the sample rate easily, but changing the bit-rate to 24 seems more of a challenge?
I have made a fork and optimised this script:
https://gist.github.com/R-WebsterNoble/70614880b0d3940d3b2b741fbbb311a2
This version is 20 times faster (accounting for not writing to disk)
It also provides easy access to data in memory instead of just writing a file.
Any further suggestions would be greatly appreciated!
}
How we can we implement this without using onGUI ? I want to implement this with using UI buttons for VR(Google Cardboard)
I'm having the opposite problem. Saving out the clip only saves the first half. http://oi63.tinypic.com/2ep2ebp.jpg
Here's the fix for length of saved WAV getting truncated by half...
private static byte[] ConvertAndWrite(AudioClip clip, out uint length, out uint samplesAfterTrimming, bool trim)
{
//var samples = new float[clip.samples];
var samples = new float[clip.samples * clip.channels];
clip.GetData(samples, 0);
Excuse me, if I only want to keep the sound part of the recorded audio, remove the mute part, how should I achieve it? @belzecue
Nice one, thank you ♥
When i use this the below code.
savwav.Save("test", myAudioClip);
it says: NullReferenceException: Object reference not set to an instance of an object.
What do I have to do to save the file, and how do I replay it?
How Can I Read Saved clip?
How Can I Read Saved clip?
Unity has native methods for reading the sample data of an audio clip.
I'm still working on this, but here's a script file that enables you to read metadata from a wave file directly. I originally made this to read the cues inside a wave, but I later added the ability to get the sample data as well. I've tested with a small handful of wave files and they have worked well so far, but I'm sure it has some bugs.
`using UnityEngine;
using System;
using System.IO;
namespace Mordi
{
namespace WaveFile
{
/// <summary>
/// Metadata of a wave file.
/// </summary>
[Serializable]
public class Metadata
{
    /// <summary>
    /// Name of the file, including extension.
    /// </summary>
    public string filename;

    /// <summary>
    /// Duration of audio (in seconds).
    /// </summary>
    public float duration;

    /// <summary>
    /// Total file size in bytes.
    /// </summary>
    public uint fileBytes;

    /// <summary>
    /// RIFF type ID. Usually "WAVE".
    /// </summary>
    public string riffTypeID;

    /// <summary>
    /// Compression code. Uncompressed PCM audio will have a value of 1.
    /// </summary>
    public uint compressionCode;

    /// <summary>
    /// Number of audio channels. 1 = Mono, 2 = Stereo.
    /// </summary>
    public uint channelCount;

    /// <summary>
    /// Samples per second.
    /// </summary>
    public uint sampleRate;

    /// <summary>
    /// Average bytes per second. For example, a PCM wave file that has a sampling rate of 44100 Hz, 1 channel, and sampling resolution of 16 bits (2 bytes) per sample, will have an average number of bytes equal to 44100 * 2 * 1 = 88,200.
    /// </summary>
    public uint avgBytesPerSec;

    /// <summary>
    /// Byte-size of sample blocks. For example, a PCM wave that has a sampling resolution of 16 bits (2 bytes) and has 2 channels will record a block of samples in 2 * 2 = 4 bytes.
    /// </summary>
    public uint blockAlign;

    /// <summary>
    /// Significant bits per sample. Defines the sampling resolution of the file. A typical sampling resolution is 16 bits per sample, but could be anything greater than 1.
    /// </summary>
    public uint bitRate;

    /// <summary>
    /// Total number of audio samples.
    /// </summary>
    public uint sampleCount;

    /// <summary>
    /// Cues/markers found in the wave file. Null if the file has no cue chunk.
    /// </summary>
    public Cue[] cues;

    /// <summary>
    /// Logs a human-readable dump of all metadata via Debug.Log.
    /// </summary>
    public void Print() {
        string str;
        str = filename + "\n";
        str += "Duration: " + duration + " s\n";
        str += "Size: " + fileBytes + "\n";
        str += "Riff type ID: " + riffTypeID + "\n";
        str += "Compression code: " + compressionCode + "\n";
        str += "Channel count: " + channelCount + "\n";
        str += "Sample rate: " + sampleRate + "\n";
        str += "Avg bytes per sec: " + avgBytesPerSec + "\n";
        str += "Block align: " + blockAlign + "\n";
        str += "Bitrate: " + bitRate + "\n";
        str += "Sample count: " + sampleCount + "\n";

        // The original ran the foreach unconditionally, throwing a
        // NullReferenceException when cues was null; only enumerate
        // when cues actually exist.
        if (cues == null) {
            str += "No cues";
        } else {
            str += "Cues:\n";
            foreach (Cue c in cues) {
                str += string.Format(" - ID: {0} - Name: {1} - Position: {2} - dataChunkID: {3}\n", c.ID, c.name, c.position, c.dataChunkID);
            }
        }

        Debug.Log(str);
    }
}
/// <summary>
/// A cue (marker) parsed from the cue chunk of a wave file.
/// </summary>
[Serializable]
public struct Cue
{
/// <summary>
/// Unique index of this cue within the file's cue chunk.
/// </summary>
public uint ID;
/// <summary>
/// Identifier-string for the cue/marker (filled in from the LIST/adtl "labl" sub-chunk when present).
/// </summary>
public string name;
/// <summary>
/// The sample on which this cue appears within the audio.
/// </summary>
public uint position;
/// <summary>
/// FOURCC of the chunk the cue refers to, stored as a little-endian uint:
/// "data" when the cue occurs in a data chunk, "slnt" for a silent chunk.
/// </summary>
public uint dataChunkID;
}
/// <summary>
/// Reads metadata from a wave file.
/// </summary>
public static class Reader
{
    /// <summary>
    /// Get metadata from a given wave file.
    /// </summary>
    /// <param name="path">Path to the file, including extension.</param>
    /// <returns>Metadata object, or null if the file is missing or not .wav/.bwf.</returns>
    public static Metadata GetMetadata(string path) {
        // Check if file exists
        if (!File.Exists(path)) {
            Debug.LogError("Couldn't locate file: " + path);
            return null;
        }

        // Check filetype
        string ext = Path.GetExtension(path);
        if (!(ext == ".wav" || ext == ".bwf")) {
            Debug.LogWarning("Only extensions .wav and .bwf are supported for reading metadata: " + path);
            return null;
        }

        Metadata data = new Metadata();
        data.filename = Path.GetFileName(path);

        FileStream fs = new FileStream(path, FileMode.Open);

        // Safety counter so a malformed file can't spin forever.
        int n = 0;
        while (fs.Position < fs.Length) {
            ReadNextChunk(fs, data);
            if (n > 999) {
                Debug.LogError("Cancelled infinite loop upon reading wave file metadata...");
                break;
            }
            n++;
        }

        fs.Close();

        // Calculate duration; guard against a missing/empty fmt chunk so a
        // corrupt file yields 0 instead of a division by zero.
        data.duration = data.sampleRate > 0 ? (float)data.sampleCount / data.sampleRate : 0f;

        return data;
    }

    /// <summary>
    /// Reads the next chunk of a wave file. Reference: https://www.recordingblogs.com/wiki/wave-file-format
    /// </summary>
    /// <param name="fs">FileStream object</param>
    /// <param name="data">Metadata object, filled in as chunks are recognized</param>
    static void ReadNextChunk(FileStream fs, Metadata data) {
        long initialPos = fs.Position;
        string chunkID = GetString(fs, 4);
        uint chunkSize = GetUInt(fs, 4);
        long chunkEndPos = initialPos + chunkSize + 8; // +8 for the chunk ID and size fields

        switch (chunkID.ToUpper()) {
            case "RIFF":
                data.fileBytes = chunkSize + 8;
                data.riffTypeID = GetString(fs, 4);
                break;
            case "FMT ":
                data.compressionCode = GetUInt(fs, 2);
                data.channelCount = GetUInt(fs, 2);
                data.sampleRate = GetUInt(fs, 4);
                data.avgBytesPerSec = GetUInt(fs, 4);
                data.blockAlign = GetUInt(fs, 2);
                data.bitRate = GetUInt(fs, 2);
                fs.Position = chunkEndPos; // Go to end of chunk
                break;
            case "DATA":
                // Bytes per sample frame = channels * (bits-per-sample / 8).
                // The original divided by (channelCount + bitRate / 8), which is
                // wrong for every format (e.g. mono 16-bit gave 3 instead of 2).
                uint frameSize = data.channelCount * (data.bitRate / 8);
                data.sampleCount = frameSize > 0 ? chunkSize / frameSize : 0;
                fs.Position = chunkEndPos; // Go to end of chunk
                break;
            case "CUE ":
                uint cueCount = GetUInt(fs, 4);
                data.cues = new Cue[cueCount];

                // Loop through cue records (24 bytes each; only the first
                // three fields are read here, names come from LIST/adtl).
                for (int i = 0; i < cueCount; i++) {
                    long p = fs.Position;
                    data.cues[i].ID = GetUInt(fs, 4);
                    data.cues[i].position = GetUInt(fs, 4);
                    data.cues[i].dataChunkID = GetUInt(fs, 4);
                    fs.Position = p + 24; // Skip to next cue
                }
                fs.Position = chunkEndPos; // Go to end of chunk
                break;
            case "LIST":
                string listID = GetString(fs, 4).ToUpper();
                if (listID == "ADTL") { // ADTL = Associated Data List
                    uint remainingBytes = chunkSize - 4;
                    string subChunkID;
                    uint subChunkSize;
                    int cueIndex = 0;
                    while (remainingBytes > 0) {
                        subChunkID = GetString(fs, 4); // e.g. "labl"
                        subChunkSize = GetUInt(fs, 4); // sub-chunk size
                        if (subChunkID.ToUpper() == "LABL" && data.cues != null) {
                            data.cues[cueIndex].ID = GetUInt(fs, 4);
                            data.cues[cueIndex].name = GetString(fs, (int)subChunkSize - 4);
                            remainingBytes -= subChunkSize + 8;
                            // Check for uneven number of remaining bytes (which means the next byte is an empty padding)
                            if (remainingBytes % 2 == 1) {
                                remainingBytes -= 1;
                                fs.ReadByte(); // Read the padded byte
                            }
                            cueIndex++;
                        } else {
                            // Account for the 8-byte sub-chunk header that was
                            // just read as well as the skipped payload; the
                            // original only subtracted the payload, which let
                            // the uint counter drift/underflow on unknown
                            // sub-chunks.
                            remainingBytes -= subChunkSize + 8;
                            fs.Seek(subChunkSize, SeekOrigin.Current); // Go to end of subchunk
                        }
                    }
                }
                fs.Position = chunkEndPos; // Go to end of chunk
                break;
            default:
                fs.Position = chunkEndPos; // Go to end of chunk
                break;
        }
    }

    /// <summary>
    /// Read a file and get only the data chunk, downsampled to
    /// <paramref name="arraySize"/> averaged magnitude buckets.
    /// Returns null for compressed audio or if no data chunk is found.
    /// </summary>
    /// <param name="fs">FileStream object reading from a wave file.</param>
    /// <param name="arraySize">Number of averaged buckets to produce.</param>
    static int[] GetDataChunk(FileStream fs, int arraySize) {
        uint channelCount = 0, bitRate = 0, compressionCode = 0;

        // Bounded scan over chunks until the data chunk is hit.
        for (int i = 0; i < 99; i++) {
            long initialPos = fs.Position;
            string chunkID = GetString(fs, 4);
            uint chunkSize = GetUInt(fs, 4);
            long chunkEndPos = initialPos + chunkSize + 8;

            switch (chunkID.ToUpper()) {
                case "RIFF":
                    fs.Position += 4; // skip riff type ID
                    break;
                case "FMT ":
                    compressionCode = GetUInt(fs, 2);
                    channelCount = GetUInt(fs, 2);
                    GetUInt(fs, 4); // sample rate
                    GetUInt(fs, 4); // avg bytes per sec
                    GetUInt(fs, 2); // block align
                    bitRate = GetUInt(fs, 2);
                    fs.Position = chunkEndPos; // Go to end of chunk
                    break;
                case "DATA":
                    if (compressionCode != 1)
                        return null; // only uncompressed PCM is supported

                    // Frame size = channels * (bits / 8); the original used
                    // channelCount + bitRate / 8, which is incorrect.
                    uint frameSize = channelCount * (bitRate / 8);
                    if (frameSize == 0)
                        return null;
                    uint sampleCount = chunkSize / frameSize;

                    // Collapse the samples into arraySize buckets of average
                    // absolute amplitude (e.g. for drawing a waveform).
                    int[] sampleData = new int[arraySize];
                    for (int n = 0; n < sampleData.Length; n++) {
                        int avg = 0;
                        int numberOfCollatedSamples = ((int)sampleCount / sampleData.Length) * (int)channelCount;
                        for (int m = 0; m < numberOfCollatedSamples; m++) {
                            avg += Mathf.Abs(GetInt16(fs, 2));
                        }
                        avg = avg / numberOfCollatedSamples;
                        sampleData[n] = avg;
                    }
                    return sampleData;
                default:
                    fs.Position = chunkEndPos;
                    break;
            }
        }
        return null;
    }

    // Reads num bytes and interprets them as a little-endian unsigned int
    // (ReadBytes zero-pads to 4 bytes when num < 4).
    static uint GetUInt(FileStream fs, int num) {
        return BitConverter.ToUInt32(ReadBytes(fs, num), 0);
    }

    // Reads num bytes and interprets the first two as a little-endian Int16.
    static int GetInt16(FileStream fs, int num) {
        return BitConverter.ToInt16(ReadBytes(fs, num), 0);
    }

    // Reads num bytes as a UTF-8 string, trimming trailing NULs/padding.
    static string GetString(FileStream fs, int num) {
        return System.Text.Encoding.UTF8.GetString(ReadBytes(fs, num)).Trim('\0');
    }

    // Reads num bytes into an array of at least 4 bytes (extra bytes stay 0),
    // so the BitConverter helpers above can always read a full 32-bit value.
    static byte[] ReadBytes(FileStream fs, int num) {
        byte[] bytes = new byte[num < 4 ? 4 : num];
        for (int i = 0; i < num; i++) {
            int b = fs.ReadByte();
            bytes[i] = (byte)b;
        }
        return bytes;
    }
}
}
}
`
The SavWav and the Script attached are both in my assets, I have this script to record my Voice in game, I took the script and put it in a random game object.. when I hit record and then save, I can see the .wav file in the assets folder but it only records for 10 seconds and when I play the file I can't hear anything..
Can anyone please help..
ConvertAndWrite has a bug, please refer to https://docs.unity3d.com/ScriptReference/AudioClip.GetData.html
var samples = new float[clip.samples];
should be fixed as
var samples = new float[clip.samples * clip.channels];
This bug results to stereo files are saved with half duration.
GetData returns data, not 'samples'. In case of mono they are equal, but in case of stereo each sample is 2 bytes of data.
To be correct, you should also rename 'samples' variable to 'data'.
ConvertAndWrite has a bug, please refer to https://docs.unity3d.com/ScriptReference/AudioClip.GetData.html
var samples = new float[clip.samples];
should be fixed asvar samples = new float[clip.samples * clip.channels];
This bug results to stereo files are saved with half duration.
GetData returns data, not 'samples'. In case of mono they are equal, but in case of stereo each sample is 2 bytes of data. To be correct, you should also rename 'samples' variable to 'data'.
Thanks you 👍. Helped me a lot.
Great work!
This code is just what I needed for our current project. Thank you very much!
NIce,Its very helpful,Thx