Java 类android.media.MediaRecorder.AudioSource 实例源码
项目:LittleBitLouder
文件:TOne.java
/**
 * Probes the sample-rate table for a working microphone configuration.
 *
 * @return an initialized AudioRecord, or null if no configuration works
 */
public AudioRecord findAudioRecord() {
    for (int rate : mSampleRates) {
        for (short audioFormat : new short[] { AudioFormat.ENCODING_PCM_16BIT }) {
            for (short channelConfig : new short[] { AudioFormat.CHANNEL_IN_MONO }) {
                try {
                    Log.d("C.TAG", "Attempting rate " + rate + "Hz, bits: " + audioFormat + ", channel: "
                            + channelConfig);
                    // Query the buffer size with the loop's candidate parameters so it
                    // matches the configuration we are about to instantiate (the
                    // original hard-coded mono/16-bit here).
                    int bufferSize = AudioRecord.getMinBufferSize(rate, channelConfig, audioFormat);
                    if (bufferSize != AudioRecord.ERROR_BAD_VALUE) {
                        // BUG FIX: the original passed DEFAULT_RATE to the constructor,
                        // ignoring the candidate rate, so the loop never actually probed
                        // different rates. Use `rate`.
                        AudioRecord recorder = new AudioRecord(AudioSource.MIC, rate, channelConfig, audioFormat, bufferSize);
                        if (recorder.getState() == AudioRecord.STATE_INITIALIZED) {
                            return recorder;
                        }
                        recorder.release(); // free the native resources of the failed attempt
                    }
                } catch (Exception e) {
                    Log.e("C.TAG", rate + "Exception, keep trying.", e);
                }
            }
        }
    }
    return null; // no supported configuration found
}
项目:CXJPadProject
文件:ExtAudioRecorder.java
/**
 * Factory for an ExtAudioRecorder.
 *
 * @param recordingCompressed true for compressed (MediaRecorder/AMR) recording;
 *                            false probes the sample-rate table for a PCM recorder
 * @return the last recorder created (callers should check its state)
 */
@SuppressWarnings("deprecation")
public static ExtAudioRecorder getInstanse(Boolean recordingCompressed) {
    ExtAudioRecorder result = null;
    if (recordingCompressed) {
        // Compressed recording ignores the PCM parameters beyond the source.
        result = new ExtAudioRecorder(false, AudioSource.MIC,
                sampleRates[3], AudioFormat.CHANNEL_CONFIGURATION_MONO,
                AudioFormat.ENCODING_PCM_16BIT);
    } else {
        // BUG FIX: the original always constructed with sampleRates[3]; the loop
        // index was never used, so alternative rates were never actually tried.
        // Also use short-circuit && instead of eager &.
        int i = 0;
        do {
            result = new ExtAudioRecorder(true, AudioSource.MIC,
                    sampleRates[i], AudioFormat.CHANNEL_CONFIGURATION_MONO,
                    AudioFormat.ENCODING_PCM_16BIT);
        } while ((++i < sampleRates.length)
                && !(result.getState() == ExtAudioRecorder.State.INITIALIZING));
    }
    return result;
}
项目:CXJPadProject
文件:ExtAudioRecorder.java
/**
 * Resets the recorder to the INITIALIZING state, as if it was just created.
 * In case the class was in RECORDING state, the recording is stopped. In
 * case of exceptions the class is set to the ERROR state.
 */
public void reset() {
    try {
        if (state != State.ERROR) {
            release(); // stops any in-progress recording and frees the old recorder
            filePath = null; // Reset file path
            cAmplitude = 0; // Reset amplitude
            if (rUncompressed) {
                // NOTE(review): nChannels is 1 (mono) or 2 (stereo), so nChannels + 1
                // yields 2 or 3 — the values of the deprecated
                // AudioFormat.CHANNEL_CONFIGURATION_MONO / _STEREO constants this
                // class is constructed with. Confirm before "fixing" this.
                audioRecorder = new AudioRecord(aSource, sRate,
                        nChannels + 1, aFormat, bufferSize);
            } else {
                // Compressed path: recreate the MediaRecorder (3GP container, AMR-NB).
                mediaRecorder = new MediaRecorder();
                mediaRecorder.setAudioSource(MediaRecorder.AudioSource.MIC);
                mediaRecorder
                        .setOutputFormat(MediaRecorder.OutputFormat.THREE_GPP);
                mediaRecorder
                        .setAudioEncoder(MediaRecorder.AudioEncoder.AMR_NB);
            }
            state = State.INITIALIZING;
        }
    } catch (Exception e) {
        // NOTE(review): e.getMessage() may be null; verify Log.e tolerates a
        // null message on the supported platform versions.
        Log.e(ExtAudioRecorder.class.getName(), e.getMessage());
        state = State.ERROR;
    }
}
项目:assistance-platform-client-sdk-android
文件:LoudnessSensor.java
/** Builds the microphone capture pipeline feeding the given loudness sensor. */
public AudioRecorder(LoudnessSensor sensor) {
    this.mSensor = sensor;
    final int channelConfig = AudioFormat.CHANNEL_IN_MONO;
    final int source = AudioSource.MIC;
    // Size the capture buffer at six times the platform minimum so reads
    // tolerate scheduling jitter without dropping samples.
    final int bufferBytes = 6 * AudioRecord.getMinBufferSize(
            COMMON_AUDIO_FREQUENCY,
            channelConfig,
            AudioFormat.ENCODING_PCM_16BIT);
    // Create the recorder for 16-bit mono PCM at the common frequency.
    audioInput = new AudioRecord(
            source,
            COMMON_AUDIO_FREQUENCY,
            channelConfig,
            AudioFormat.ENCODING_PCM_16BIT,
            bufferBytes);
}
项目:EntboostIM
文件:ExtAudioRecorder.java
/**
 * Factory for an ExtAudioRecorder: compressed (MediaRecorder/AMR) when the
 * flag is set, otherwise probes sample rates from index 3 down to 0 until a
 * PCM recorder reaches the INITIALIZING state.
 * NOTE(review): `result` is not declared locally — it appears to be a shared
 * (likely static) field, so concurrent callers may race on it; verify.
 */
public static ExtAudioRecorder getInstance(Boolean recordingCompressed, VoiceCallback callback) {
    if (recordingCompressed) {
        result = new ExtAudioRecorder(false, AudioSource.MIC,
                sampleRates[3], AudioFormat.CHANNEL_CONFIGURATION_MONO,
                AudioFormat.ENCODING_PCM_16BIT, callback);
    } else {
        // Descending probe: tries sampleRates[3], then [2], [1], [0].
        int i = 3;
        do {
            result = new ExtAudioRecorder(true, AudioSource.MIC,
                    sampleRates[i], AudioFormat.CHANNEL_CONFIGURATION_MONO,
                    AudioFormat.ENCODING_PCM_16BIT, callback);
        } while ((--i >= 0)
                && !(result.getState() == ExtAudioRecorder.State.INITIALIZING));
    }
    return result;
}
项目:EntboostIM
文件:ExtAudioRecorder.java
/**
 * Resets the recorder to the INITIALIZING state, as if it was just created.
 * In case the class was in RECORDING state, the recording is stopped. In
 * case of exceptions the class is set to the ERROR state.
 */
public void reset() {
    try {
        if (state != State.ERROR) {
            release(); // stops any in-progress recording and frees the old recorder
            filePath = null; // Reset file path
            cAmplitude = 0; // Reset amplitude
            if (rUncompressed) {
                // NOTE(review): nChannels is 1 (mono) or 2 (stereo), so nChannels + 1
                // yields 2 or 3 — the values of the deprecated
                // AudioFormat.CHANNEL_CONFIGURATION_MONO / _STEREO constants.
                // Confirm before "fixing" this.
                audioRecorder = new AudioRecord(aSource, sRate,
                        nChannels + 1, aFormat, bufferSize);
            } else {
                // Compressed path: recreate the MediaRecorder (3GP container, AMR-NB).
                mediaRecorder = new MediaRecorder();
                mediaRecorder.setAudioSource(MediaRecorder.AudioSource.MIC);
                mediaRecorder
                        .setOutputFormat(MediaRecorder.OutputFormat.THREE_GPP);
                mediaRecorder
                        .setAudioEncoder(MediaRecorder.AudioEncoder.AMR_NB);
            }
            state = State.INITIALIZING;
        }
    } catch (Exception e) {
        // NOTE(review): e.getMessage() may be null; verify Log4jLog.e accepts that.
        Log4jLog.e(LONG_TAG, e.getMessage());
        state = State.ERROR;
    }
}
项目:woefzela
文件:MainRecordingActivity.java
/**
 * Starts an uncompressed PCM (WAV) recording into {@code fPath} using the
 * microphone at {@code sampleFreq}, mono, 16-bit.
 *
 * @throws IOException declared for callers; the visible body does not throw it directly
 */
public void startRecordingPCM() throws IOException {
    String methodTAG = "startRecordingPCM";
    // Create record object
    recWAV = new RecordingWAV(AudioSource.MIC, sampleFreq, AudioFormat.CHANNEL_IN_MONO, AudioFormat.ENCODING_PCM_16BIT);
    log.logD(TAG, "recWAV.State after constructor is: " + recWAV.getState());
    if (recWAV.state == RecordingWAV.State.ERROR) {
        log.logD(methodTAG, "recWAV.State after constructor is ERROR, thus shutting down. Writing a log.");
        log.logCriticalError(TAG, methodTAG, "recWAV.State after constructor is ERROR, thus shutting down.");
        // NOTE(review): despite the "shutting down" message, execution falls
        // through and still calls prepare()/start() below. Confirm whether
        // logCriticalError() terminates the activity; otherwise an early
        // return appears to be missing here.
    }
    recWAV.setOutputFile(fPath);
    log.logI(TAG, "recWAV.State after setOutputFile() is: " + recWAV.getState());
    recWAV.prepare();
    log.logI(TAG, "recWAV.State after prepare() is: " + recWAV.getState());
    // Green prompt text signals "recording" to the user.
    tPromptString.setTextColor(getResources().getColor(R.color.hltGreen));
    recWAV.start();
    log.logI(TAG, "recWAV.State after start() is: " + recWAV.getState());
}
项目:PARLA
文件:ExtAudioRecorder.java
/**
 * Factory for an ExtAudioRecorder: compressed (MediaRecorder/AMR) when the
 * flag is set, otherwise probes the sample-rate table from index 2 upward.
 *
 * @param recordingCompressed true for compressed recording
 * @return the last recorder created (callers should check its state)
 */
@SuppressWarnings("deprecation")
public static ExtAudioRecorder getInstanse(Boolean recordingCompressed) {
    ExtAudioRecorder result = null;
    if (recordingCompressed) {
        result = new ExtAudioRecorder(false,
                AudioSource.MIC,
                sampleRates[3],
                AudioFormat.CHANNEL_CONFIGURATION_MONO,
                AudioFormat.ENCODING_PCM_16BIT);
    } else {
        int i = 2;
        do {
            result = new ExtAudioRecorder(true,
                    AudioSource.MIC,
                    sampleRates[i],
                    AudioFormat.CHANNEL_CONFIGURATION_MONO,
                    AudioFormat.ENCODING_PCM_16BIT);
        }
        // FIX: short-circuit && instead of eager & — no need to query the
        // recorder state once the rate table is exhausted.
        while ((++i < sampleRates.length) && !(result.getState() == ExtAudioRecorder.State.INITIALIZING));
    }
    return result;
}
项目:PARLA
文件:ExtAudioRecorder.java
/**
 * Resets the recorder to the INITIALIZING state, as if it was just created.
 * In case the class was in RECORDING state, the recording is stopped.
 * In case of exceptions the class is set to the ERROR state.
 */
public void reset() {
    try {
        if (state != State.ERROR) {
            release(); // stops any in-progress recording and frees the old recorder
            filePath = null; // Reset file path
            cAmplitude = 0; // Reset amplitude
            if (rUncompressed) {
                // NOTE(review): nChannels is 1 or 2; nChannels + 1 yields the
                // deprecated CHANNEL_CONFIGURATION_MONO (2) / _STEREO (3)
                // constant values — confirm before "fixing" this.
                audioRecorder = new AudioRecord(aSource, sRate, nChannels + 1, aFormat, bufferSize);
            } else {
                // Compressed path: recreate the MediaRecorder (3GP container, AMR-NB).
                mediaRecorder = new MediaRecorder();
                mediaRecorder.setAudioSource(MediaRecorder.AudioSource.MIC);
                mediaRecorder.setOutputFormat(MediaRecorder.OutputFormat.THREE_GPP);
                mediaRecorder.setAudioEncoder(MediaRecorder.AudioEncoder.AMR_NB);
            }
            state = State.INITIALIZING;
        }
    } catch (Exception e) {
        // NOTE(review): e.getMessage() may be null; verify Log.e tolerates that.
        Log.e(ExtAudioRecorder.class.getName(), e.getMessage());
        state = State.ERROR;
    }
}
项目:WhistleApp
文件:WhistleReceiver.java
/** Capture loop: feeds microphone PCM through a DFT band filter until stopped. */
@Override
public void run() {
    // Configure a microphone recorder using the minimum supported buffer.
    final int bufferSize = AudioRecord.getMinBufferSize(sampleRate, CHANNEL, ENCODING);
    final AudioRecord recorder =
            new AudioRecord(AudioSource.MIC, sampleRate, CHANNEL, ENCODING, bufferSize);
    recorder.startRecording();
    // Pipeline: raw PCM -> DFT filter over 12-22 kHz in 100 steps -> filtered samples.
    final PcmAudioRecordReader pcmIn = new PcmAudioRecordReader(recorder);
    final PcmDftFilter dft = new PcmDftFilter(sampleRate, 12000, 22000, 100);
    data = dft.getData();
    final PcmFilterReader filtered = new PcmFilterReader(pcmIn, dft);
    try {
        // Keep draining samples; reading is what drives the filter.
        while (!stopped) {
            filtered.read();
        }
    } catch (IOException e) {
        e.printStackTrace();
    } finally {
        recorder.stop();
        recorder.release();
    }
}
项目:crowdpp
文件:AudioRecorder.java
/**
 * Factory for an AudioRecorder: compressed (MediaRecorder/AMR) when the flag
 * is set, otherwise probes every entry of the sample-rate table for a WAV
 * (PCM) recorder that reaches the INITIALIZING state.
 *
 * @param recordingCompressed true for compressed recording
 * @return the last recorder created (callers should check its state)
 */
@SuppressWarnings("deprecation")
public static AudioRecorder getInstanse(Boolean recordingCompressed) {
    AudioRecorder result = null;
    if (recordingCompressed) {
        result = new AudioRecorder(false,
                AudioSource.MIC,
                sampleRates[2],
                AudioFormat.CHANNEL_CONFIGURATION_MONO,
                AudioFormat.ENCODING_PCM_16BIT);
    }
    // wav format
    else {
        int i = 0;
        do {
            result = new AudioRecorder(true,
                    AudioSource.MIC,
                    sampleRates[i],
                    AudioFormat.CHANNEL_CONFIGURATION_MONO,
                    AudioFormat.ENCODING_PCM_16BIT);
            // FIX: short-circuit && instead of eager & — skip the state query
            // once the rate table is exhausted.
        } while ((++i < sampleRates.length) && !(result.getState() == AudioRecorder.State.INITIALIZING));
    }
    return result;
}
项目:crowdpp
文件:AudioRecorder.java
/**
 * Resets the recorder to the INITIALIZING state, as if it was just created.
 * In case the class was in RECORDING state, the recording is stopped.
 * In case of exceptions the class is set to the ERROR state.
 */
public void reset() {
    try {
        if (state != State.ERROR) {
            release(); // stops any in-progress recording and frees the old recorder
            filePath = null; // Reset file path
            cAmplitude = 0; // Reset amplitude
            if (rUncompressed) {
                // NOTE(review): nChannels is 1 or 2; nChannels+1 yields the
                // deprecated CHANNEL_CONFIGURATION_MONO (2) / _STEREO (3)
                // constant values — confirm before "fixing" this.
                audioRecorder = new AudioRecord(aSource, sRate, nChannels+1, aFormat, bufferSize);
            }
            else {
                // Compressed path: recreate the MediaRecorder (3GP container, AMR-NB).
                mediaRecorder = new MediaRecorder();
                mediaRecorder.setAudioSource(MediaRecorder.AudioSource.MIC);
                mediaRecorder.setOutputFormat(MediaRecorder.OutputFormat.THREE_GPP);
                mediaRecorder.setAudioEncoder(MediaRecorder.AudioEncoder.AMR_NB);
            }
            state = State.INITIALIZING;
        }
    }
    catch (Exception e) {
        // NOTE(review): e.getMessage() may be null; verify Log.e tolerates that.
        Log.e(AudioRecorder.class.getName(), e.getMessage());
        state = State.ERROR;
    }
}
项目:wav-recorder
文件:ExtAudioRecorder.java
/**
 * Creates an ExtAudioRecorder, probing either the single given sample rate
 * (when non-zero) or the default rate table until an instance reaches the
 * INITIALIZING state.
 *
 * @param handler    owning WAVRecorder handler
 * @param id         recorder identifier
 * @param sampleRate explicit sample rate in Hz, or 0 to probe the defaults
 * @param channels   channel configuration constant
 * @param encoding   PCM encoding constant
 * @return the last recorder attempted (callers should check its state)
 */
public static ExtAudioRecorder getInstance(WAVRecorder handler, String id, int sampleRate, int channels, int encoding)
{
    ExtAudioRecorder result = null;
    int[] processedSampleRates = sampleRates;
    if (0 != sampleRate) {
        // An explicit rate replaces the probe table with a single entry.
        processedSampleRates = new int[] { sampleRate };
    }
    int i = 0;
    do
    {
        result = new ExtAudioRecorder(handler,
                id,
                AudioSource.MIC,
                processedSampleRates[i],
                channels,
                encoding);
        // FIX: short-circuit && instead of eager & — skip the state query once
        // the rate table is exhausted.
    } while ((++i < processedSampleRates.length)
            && !(result.getState() == ExtAudioRecorder.State.INITIALIZING));
    return result;
}
项目:gdk-waveform-sample
文件:WaveformActivity.java
/** Capture loop: streams microphone PCM into the waveform view until stopped. */
@Override
public void run() {
    // Audio capture is latency sensitive; request elevated thread priority.
    android.os.Process.setThreadPriority(android.os.Process.THREAD_PRIORITY_AUDIO);
    final AudioRecord recorder = new AudioRecord(AudioSource.MIC, SAMPLING_RATE,
            AudioFormat.CHANNEL_IN_MONO, AudioFormat.ENCODING_PCM_16BIT, mBufferSize);
    recorder.startRecording();
    // Pump half-buffer chunks into the UI until told to stop.
    while (shouldContinue()) {
        recorder.read(mAudioBuffer, 0, mBufferSize / 2);
        mWaveformView.updateAudioData(mAudioBuffer);
        updateDecibelLevel();
    }
    recorder.stop();
    recorder.release();
}
项目:renderScriptFFT
文件:Mel_Test_Microphone.java
/**
 * Captures one buffer of 8 kHz mono 16-bit PCM from the microphone.
 *
 * @param audioData  destination array for the PCM bytes
 * @param bufferSize number of bytes to read (also used as the record buffer size)
 * @return the filled audioData array
 */
private byte[] fillBuffer(byte[] audioData, int bufferSize) {
    AudioRecord recorder = new AudioRecord(AudioSource.MIC, 8000, AudioFormat.CHANNEL_IN_MONO,
            AudioFormat.ENCODING_PCM_16BIT, bufferSize); // instantiate the AudioRecorder
    // Start recording only if it is not already running.
    if (recorder.getRecordingState() == android.media.AudioRecord.RECORDSTATE_STOPPED)
        recorder.startRecording();
    recorder.read(audioData, 0, bufferSize); // blocking read of PCM bytes into audioData
    // BUG FIX: the original compared getState() (initialization state: 0 or 1)
    // against RECORDSTATE_RECORDING (a recording-state constant, 3), so stop()
    // was unreachable. Query getRecordingState() instead.
    if (recorder.getRecordingState() == android.media.AudioRecord.RECORDSTATE_RECORDING)
        recorder.stop(); // stop the recorder
    // BUG FIX: release the native AudioRecord — the original leaked one
    // instance per call.
    recorder.release();
    return audioData;
}
项目:glassless
文件:WaveformActivity.java
/** Records microphone audio and forwards each chunk to the waveform display. */
@Override
public void run() {
    // Raise priority: audio reads must keep up with the hardware.
    android.os.Process.setThreadPriority(android.os.Process.THREAD_PRIORITY_AUDIO);
    final AudioRecord mic = new AudioRecord(AudioSource.MIC, SAMPLING_RATE,
            AudioFormat.CHANNEL_IN_MONO, AudioFormat.ENCODING_PCM_16BIT, mBufferSize);
    mic.startRecording();
    for (; shouldContinue(); ) {
        // Read half the buffer per pass, then refresh the UI readouts.
        mic.read(mAudioBuffer, 0, mBufferSize / 2);
        mWaveformView.updateAudioData(mAudioBuffer);
        updateDecibelLevel();
    }
    mic.stop();
    mic.release();
}
项目:drumometer
文件:RecorderThread.java
/**
 * Attempts to create an initialized AudioRecord at the configured sample rate
 * and channel configuration.
 *
 * @return an initialized AudioRecord, or null on failure
 */
public AudioRecord findAudioRecord() {
    try {
        int bufferSize = AudioRecord
                .getMinBufferSize(sampleRate, channelConfiguration,
                        AudioFormat.ENCODING_PCM_16BIT);
        if (bufferSize != AudioRecord.ERROR_BAD_VALUE) {
            // check if we can instantiate and have a success
            AudioRecord recorder = new AudioRecord(AudioSource.MIC,
                    sampleRate, channelConfiguration,
                    AudioFormat.ENCODING_PCM_16BIT, bufferSize);
            Log.d("tag", "done1");
            // BUG FIX: the original if-statement had no braces, so the recorder
            // was returned unconditionally — even when it failed to initialize.
            if (recorder.getState() == AudioRecord.STATE_INITIALIZED) {
                Log.d("tag", "done1.6");
                return recorder;
            }
            recorder.release(); // free the failed instance
        }
    } catch (Exception e) {
        Log.d("tag", "done2");
    }
    return null;
}
项目:CSipSimple
文件:Compatibility.java
/**
 * Returns the default microphone audio source as a string-encoded integer,
 * applying device- and API-level-specific workarounds.
 */
public static String getDefaultMicroSource() {
    // Galaxy S II (GT-I9100*) on pre-Honeycomb firmware misbehaves with the
    // voice-communication source — fall back to the plain MIC there.
    final boolean isGalaxyS2 = Build.DEVICE.toUpperCase().startsWith("GT-I9100");
    if (!isCompatible(11) && isGalaxyS2) {
        return Integer.toString(AudioSource.MIC);
    }
    if (isCompatible(10)) {
        // 0x7 is AudioSource.VOICE_COMMUNICATION; the symbolic constant only
        // exists in the public API from level 11, so use the raw value here.
        return Integer.toString(0x7);
    }
    // Historical per-device overrides (VOICE_CALL on some models, CAMCORDER on
    // the Motorola Atrix) were judged too risky or no longer relevant.
    return Integer.toString(AudioSource.DEFAULT);
}
项目:buildAPKsApps
文件:ControllerFactory.java
/**
 * Creates and validates the AudioRecord used for capture.
 * <p>
 * The configuration used here is guaranteed to be supported on all devices:
 * CHANNEL_CONFIGURATION_MONO (the deprecated, API-level-3-compatible spelling
 * of CHANNEL_IN_MONO) with ENCODING_PCM_16BIT. Unlike an AudioTrack buffer, an
 * AudioRecord buffer may safely exceed the minimum, but the minimum works well.
 *
 * @throws InitializationException if the buffer query or recorder creation fails
 */
private void createAudioRecord() throws InitializationException {
    final int audioRecordBufferSizeInBytes = AudioRecord.getMinBufferSize(
            SpeechTrainerConfig.SAMPLE_RATE_HZ, AudioFormat.CHANNEL_CONFIGURATION_MONO,
            AudioFormat.ENCODING_PCM_16BIT);
    if (audioRecordBufferSizeInBytes <= 0) {
        throw new InitializationException("Failed to initialize recording.");
    }
    // CHANNEL_IN_MONO is guaranteed to work on all devices.
    // ENCODING_PCM_16BIT is guaranteed to work on all devices.
    audioRecord = new AudioRecord(AudioSource.MIC, SpeechTrainerConfig.SAMPLE_RATE_HZ,
            AudioFormat.CHANNEL_CONFIGURATION_MONO, AudioFormat.ENCODING_PCM_16BIT,
            audioRecordBufferSizeInBytes);
    if (audioRecord.getState() != AudioRecord.STATE_INITIALIZED) {
        // BUG FIX: release the native resources of the failed instance before
        // dropping the reference; the original leaked it.
        audioRecord.release();
        audioRecord = null;
        throw new InitializationException("Failed to initialize recording.");
    }
}
项目:AppRTC-Android
文件:WebRtcAudioRecord.java
/**
 * Builds an AudioRecord via the Builder API (available from API 23) using the
 * VOICE_COMMUNICATION source and 16-bit PCM.
 */
@TargetApi(23)
private AudioRecord createAudioRecordOnMarshmallowOrHigher(
        int sampleRateInHz, int channelConfig, int bufferSizeInBytes) {
    Logging.d(TAG, "createAudioRecordOnMarshmallowOrHigher");
    // Describe the capture format first, then assemble the recorder around it.
    final AudioFormat captureFormat = new AudioFormat.Builder()
            .setEncoding(AudioFormat.ENCODING_PCM_16BIT)
            .setSampleRate(sampleRateInHz)
            .setChannelMask(channelConfig)
            .build();
    return new AudioRecord.Builder()
            .setAudioSource(AudioSource.VOICE_COMMUNICATION)
            .setAudioFormat(captureFormat)
            .setBufferSizeInBytes(bufferSizeInBytes)
            .build();
}
项目:RecordHelper
文件:AudioRecordingThread.java
/**
 * Recording loop: captures PCM from the VOICE_RECOGNITION source, writes raw
 * data to disk while {@code isRecording} holds, then converts it to WAV.
 */
@Override
public void run() {
    final FileOutputStream out = prepareWriting();
    if (out == null) {
        return;
    }
    // VOICE_RECOGNITION yields a less-processed signal than plain MIC.
    final AudioRecord record = new AudioRecord(AudioSource.VOICE_RECOGNITION,
            SAMPLING_RATE,
            AudioFormat.CHANNEL_IN_MONO,
            AudioFormat.ENCODING_PCM_16BIT,
            bufferSize);
    record.startRecording();
    while (isRecording) {
        final int read = record.read(audioBuffer, 0, bufferSize);
        // Skip error codes and empty reads; retry on the next pass.
        if (read == AudioRecord.ERROR_INVALID_OPERATION
                || read == AudioRecord.ERROR_BAD_VALUE
                || read <= 0) {
            continue;
        }
        proceed();
        write(out);
    }
    record.stop();
    record.release();
    finishWriting(out);
    convertRawToWav(); // wrap the raw PCM in a WAV container
}
项目:Rocket.Chat-android
文件:AudioRecordingThread.java
/**
 * Audio capture thread body: streams PCM from the recognition source to a raw
 * file until stopped, then finalizes the file and produces a WAV.
 */
@Override
public void run() {
    final FileOutputStream output = prepareWriting();
    if (output == null) {
        return;
    }
    // Recognition source avoids aggressive on-device processing (vs. MIC).
    final AudioRecord recorder = new AudioRecord(AudioSource.VOICE_RECOGNITION,
            SAMPLING_RATE,
            AudioFormat.CHANNEL_IN_MONO,
            AudioFormat.ENCODING_PCM_16BIT,
            bufferSize);
    recorder.startRecording();
    while (isRecording) {
        final int count = recorder.read(audioBuffer, 0, bufferSize);
        final boolean badRead = count == AudioRecord.ERROR_INVALID_OPERATION
                || count == AudioRecord.ERROR_BAD_VALUE
                || count <= 0;
        if (badRead) {
            continue; // nothing usable this pass
        }
        proceed();
        write(output);
    }
    recorder.stop();
    recorder.release();
    finishWriting(output);
    convertRawToWav(); // raw PCM -> WAV container
}
项目:talkback
文件:SpeechController.java
/**
* Returns {@code true} if speech should be silenced. Does not prevent
* haptic or auditory feedback from occurring. The controller will run
* utterance completion actions immediately for silenced utterances.
* <p>
* Silences speech in the following cases:
* <ul>
* <li>Speech recognition is active and the user is not using a headset
* </ul>
*/
@SuppressWarnings("deprecation")
private boolean shouldSilenceSpeech(FeedbackItem item) {
    if (item == null) {
        return false;
    }
    // Items explicitly flagged for speech-recognition contexts always speak.
    if (item.hasFlag(FeedbackItem.FLAG_DURING_RECO)) {
        return false;
    }
    // Otherwise, silence while recognition is active and no headset is in use.
    final boolean headsetInUse =
            mAudioManager.isBluetoothA2dpOn() || mAudioManager.isWiredHeadsetOn();
    return AudioSystemCompatUtils.isSourceActive(AudioSource.VOICE_RECOGNITION)
            && !headsetInUse;
}
项目:JABtalk
文件:JTAudioRecorder.java
/**
 * Probes rate/format/channel combinations until an AudioRecord initializes,
 * recording the selected parameters in the corresponding fields.
 *
 * @return an initialized AudioRecord, or null if no combination works
 */
private AudioRecord getAudioRecorder() {
    for (int rate : sampleRates) {
        for (short audioFormat : new short[] { AudioFormat.ENCODING_PCM_16BIT, AudioFormat.ENCODING_PCM_8BIT }) {
            for (short channelConfig : new short[] { AudioFormat.CHANNEL_IN_MONO }) {
                try {
                    bufferSize = AudioRecord.getMinBufferSize(rate, channelConfig, audioFormat);
                    if (bufferSize > 0) {
                        // check if we can instantiate and have a success
                        AudioRecord recorder = new AudioRecord(AudioSource.MIC, rate, channelConfig, audioFormat, bufferSize);
                        if (recorder.getState() == AudioRecord.STATE_INITIALIZED) {
                            // Remember the working configuration for the WAV header.
                            selectedRate = rate;
                            selectedChannel = channelConfig == AudioFormat.CHANNEL_IN_STEREO ? (short)2 : (short)1;
                            selectedBPP = audioFormat == AudioFormat.ENCODING_PCM_16BIT ? (short)16 : (short)8;
                            String format = audioFormat == AudioFormat.ENCODING_PCM_16BIT ? "PCM 16 Bit" : "PCM 8 Bit";
                            String channels = channelConfig == AudioFormat.CHANNEL_IN_STEREO ? "Stereo" : "Mono";
                            String diags = "Audio recorded using following settings: Rate: " + String.valueOf(rate) + " " +
                                    "Audio Format: " + format + " " +
                                    "Channel Config: " + channels;
                            JTApp.logMessage(TAG, JTApp.LOG_SEVERITY_INFO, diags);
                            return recorder;
                        }
                        // BUG FIX: release instances that failed to initialize;
                        // the original leaked one native AudioRecord per failure.
                        recorder.release();
                    }
                } catch (Exception ignored) {
                    // Deliberately ignored: fall through to the next combination.
                }
            }
        }
    }
    return null; // no working configuration found
}
项目:android_bluetooth
文件:DemoUtil.java
/**
 * Searches every sample rate against both PCM encodings and both channel
 * layouts, returning the first combination that yields an initialized
 * recorder and storing its buffer size in {@code gBufferSize}.
 */
public static AudioRecord findAudioRecord() {
    final short[] encodings = {
            AudioFormat.ENCODING_PCM_8BIT,
            AudioFormat.ENCODING_PCM_16BIT };
    final short[] layouts = {
            AudioFormat.CHANNEL_IN_STEREO,
            AudioFormat.CHANNEL_IN_MONO };
    for (int rate : mSampleRates) {
        for (short encoding : encodings) {
            for (short layout : layouts) {
                try {
                    final int bufferSize = AudioRecord.getMinBufferSize(rate,
                            layout, encoding);
                    Log.d(TAG, "findAudioRecord:Attempting rate " + rate + "Hz, bits: "
                            + encoding + ", channel: " + layout
                            + ", bufferSizeInBytes:" + bufferSize);
                    if (bufferSize > 0) {
                        final AudioRecord candidate = new AudioRecord(
                                AudioSource.MIC, rate, layout,
                                encoding, bufferSize);
                        if (candidate.getState() == AudioRecord.STATE_INITIALIZED) {
                            gBufferSize = bufferSize;
                            return candidate;
                        }
                        candidate.release(); // discard the failed attempt
                    }
                } catch (Exception e) {
                    Log.e(TAG, rate + "Exception, keep trying.", e);
                }
            }
        }
    }
    return null;
}
项目:MoST
文件:RecorderThread.java
/**
* Instantiates a new recorder thread.
*
* @param context
* the context
*/
/**
 * Instantiates a new recorder thread.
 *
 * @param context the application context (source of input preferences)
 * @param input   the InputAudio owner fed by this thread
 */
public RecorderThread(MoSTApplication context, InputAudio input) {
    super("MoST InputAudio Recorder Thread");
    // Pull the configured sample rate from the app's input preferences.
    final SharedPreferences prefs =
            context.getSharedPreferences(MoSTApplication.PREF_INPUT, Context.MODE_PRIVATE);
    final int sampleRate =
            prefs.getInt(InputAudio.PREF_KEY_SAMPLE_RATE, InputAudio.PREF_DEFAULT_SAMPLE_RATE);
    // Over-allocate the capture buffer (8x the minimum) to tolerate scheduling jitter.
    _bufferSize = 8 * AudioRecord.getMinBufferSize(sampleRate, CHANNEL_CONFIGURATION, ENCODING);
    _recorder = new AudioRecord(AudioSource.MIC, sampleRate, CHANNEL_CONFIGURATION, ENCODING, _bufferSize);
    _recording = new AtomicBoolean(false);
    _input = input;
}
项目:AndroidRecording
文件:AudioRecordingThread.java
/**
 * Capture loop: reads PCM from the VOICE_RECOGNITION source into the shared
 * audio buffer, persisting each good chunk, until recording is stopped; then
 * converts the raw output to WAV.
 */
@Override
public void run() {
    final FileOutputStream sink = prepareWriting();
    if (sink == null) {
        return;
    }
    final AudioRecord mic = new AudioRecord(AudioSource.VOICE_RECOGNITION, /*AudioSource.MIC*/
            SAMPLING_RATE,
            AudioFormat.CHANNEL_IN_MONO,
            AudioFormat.ENCODING_PCM_16BIT,
            bufferSize);
    mic.startRecording();
    while (isRecording) {
        final int n = mic.read(audioBuffer, 0, bufferSize);
        // Ignore failed or empty reads and try again.
        if (n == AudioRecord.ERROR_INVALID_OPERATION
                || n == AudioRecord.ERROR_BAD_VALUE
                || n <= 0) {
            continue;
        }
        proceed();
        write(sink);
    }
    mic.stop();
    mic.release();
    finishWriting(sink);
    convertRawToWav(); // produce the final WAV from the raw PCM dump
}
项目:CameraV
文件:AACHelper.java
/**
 * Tries to create and initialize the AudioRecord (stored in the {@code recorder}
 * field) at the given sample rate with the configured format and channel.
 *
 * @param rate candidate sample rate in Hz
 * @return the rate on success, -1 if this rate is unsupported
 */
private int initAudioRecord(int rate)
{
    try
    {
        Log.v("===========Attempting rate ", rate + "Hz, bits: " + audioFormat + ", channel: " + channelConfig);
        bufferSize = AudioRecord.getMinBufferSize(rate, channelConfig, audioFormat);
        if (bufferSize != AudioRecord.ERROR_BAD_VALUE)
        {
            // check if we can instantiate and have a success
            recorder = new AudioRecord(AudioSource.MIC, rate, channelConfig, audioFormat, bufferSize);
            if (recorder.getState() == AudioRecord.STATE_INITIALIZED)
            {
                Log.v("===========final rate ", rate + "Hz, bits: " + audioFormat + ", channel: " + channelConfig);
                return rate;
            }
            // NOTE(review): a recorder that failed to initialize is left in the
            // `recorder` field without release() — possible native-resource
            // leak across repeated probe calls; verify.
        }
    }
    catch (Exception e)
    {
        Log.v("error", "" + rate);
    }
    return -1;
}
项目:klammer
文件:Audio.java
/**
 * Capture loop: records 16-bit mono PCM from the VOICE_COMMUNICATION source,
 * encodes it frame-by-frame via the streamer's codec, and sends encoded
 * frames over the network while inPump() stays true.
 */
@Override
public void run() {
    mic = new AudioRecord(
            AudioSource.VOICE_COMMUNICATION,
            SampleRate,
            AudioFormat.CHANNEL_IN_MONO,
            AudioFormat.ENCODING_PCM_16BIT,
            buffer.length * 2); // buffer is short[]; size in bytes is 2 per sample
    if (mic.getState() != AudioRecord.STATE_INITIALIZED) {
        Logg.d(TAG, "couldn't initialize microphone");
        onException();
        return;
    }
    mic.startRecording();
    while (inPump()) {
        mic.read(buffer, 0, buffer.length);
        // Encode the captured buffer one audio frame at a time.
        for (int i = 0; i < buffer.length; i += Codecs.AudioFrameSize) {
            int outlen = streamer.getCodecs().encodeAudioFrame(buffer, i, packet);
            if (outlen > 0) {
                // Only ship frames when the transport is ready; otherwise they
                // are silently dropped.
                if (streamer.getNetwork().isReady()) {
                    streamer.getNetwork().sendAudioFrame(packet, outlen);
                }
            }
        }
    }
    mic.stop();
    mic.release();
}
项目:sample-googleassistant
文件:EmbeddedAssistant.java
/**
* Returns an AssistantManager if all required parameters have been supplied.
*
* @return An inactive AssistantManager. Call {@link EmbeddedAssistant#connect()} to start
* it.
*/
public EmbeddedAssistant build() {
    // Fail fast on missing required configuration before constructing anything.
    if (mEmbeddedAssistant.mRequestCallback == null) {
        throw new NullPointerException("There must be a defined RequestCallback");
    }
    if (mEmbeddedAssistant.mConversationCallback == null) {
        throw new NullPointerException("There must be a defined ConversationCallback");
    }
    if (mEmbeddedAssistant.mUserCredentials == null) {
        throw new NullPointerException("There must be provided credentials");
    }
    if (mSampleRate == 0) {
        throw new NullPointerException("There must be a defined sample rate");
    }
    final int audioEncoding = AudioFormat.ENCODING_PCM_16BIT;
    // Construct audio configurations (16-bit linear PCM in and out at the
    // configured sample rate).
    mEmbeddedAssistant.mAudioInConfig = AudioInConfig.newBuilder()
            .setEncoding(AudioInConfig.Encoding.LINEAR16)
            .setSampleRateHertz(mSampleRate)
            .build();
    mEmbeddedAssistant.mAudioOutConfig = AudioOutConfig.newBuilder()
            .setEncoding(AudioOutConfig.Encoding.LINEAR16)
            .setSampleRateHertz(mSampleRate)
            .setVolumePercentage(mEmbeddedAssistant.mVolume)
            .build();
    // Initialize Audio framework parameters: mono input format plus the
    // platform's minimum capture buffer for it.
    mEmbeddedAssistant.mAudioInputFormat = new AudioFormat.Builder()
            .setChannelMask(AudioFormat.CHANNEL_IN_MONO)
            .setEncoding(audioEncoding)
            .setSampleRate(mSampleRate)
            .build();
    mEmbeddedAssistant.mAudioInputBufferSize = AudioRecord.getMinBufferSize(
            mEmbeddedAssistant.mAudioInputFormat.getSampleRate(),
            mEmbeddedAssistant.mAudioInputFormat.getChannelMask(),
            mEmbeddedAssistant.mAudioInputFormat.getEncoding());
    // Mono output format plus the minimum playback buffer for it.
    mEmbeddedAssistant.mAudioOutputFormat = new AudioFormat.Builder()
            .setChannelMask(AudioFormat.CHANNEL_OUT_MONO)
            .setEncoding(audioEncoding)
            .setSampleRate(mSampleRate)
            .build();
    mEmbeddedAssistant.mAudioOutputBufferSize = AudioTrack.getMinBufferSize(
            mEmbeddedAssistant.mAudioOutputFormat.getSampleRate(),
            mEmbeddedAssistant.mAudioOutputFormat.getChannelMask(),
            mEmbeddedAssistant.mAudioOutputFormat.getEncoding());
    // create new AudioRecord to workaround audio routing issues.
    mEmbeddedAssistant.mAudioRecord = new AudioRecord.Builder()
            .setAudioSource(AudioSource.VOICE_RECOGNITION)
            .setAudioFormat(mEmbeddedAssistant.mAudioInputFormat)
            .setBufferSizeInBytes(mEmbeddedAssistant.mAudioInputBufferSize)
            .build();
    // Route capture to an explicit input device when one was supplied.
    if (mEmbeddedAssistant.mAudioInputDevice != null) {
        boolean result = mEmbeddedAssistant.mAudioRecord.setPreferredDevice(
                mEmbeddedAssistant.mAudioInputDevice);
        if (!result) {
            Log.e(TAG, "failed to set preferred input device");
        }
    }
    return mEmbeddedAssistant;
}
项目:CXJPadProject
文件:ExtAudioRecorder.java
/**
 * Default constructor.
 *
 * Instantiates a new recorder; in case of compressed recording the PCM
 * parameters can be left as 0. In case of errors, no exception is thrown,
 * but the state is set to ERROR.
 *
 * @param uncompressed  true for PCM (AudioRecord/WAV), false for MediaRecorder (3GP/AMR)
 * @param audioSource   MediaRecorder.AudioSource constant
 * @param sampleRate    sample rate in Hz (PCM path only)
 * @param channelConfig deprecated AudioFormat.CHANNEL_CONFIGURATION_* constant
 * @param audioFormat   AudioFormat.ENCODING_PCM_8BIT or _16BIT
 */
@SuppressWarnings("deprecation")
public ExtAudioRecorder(boolean uncompressed, int audioSource,
        int sampleRate, int channelConfig, int audioFormat) {
    try {
        rUncompressed = uncompressed;
        if (rUncompressed) { // RECORDING_UNCOMPRESSED
            // Derive bits-per-sample and channel count from the format constants.
            if (audioFormat == AudioFormat.ENCODING_PCM_16BIT) {
                bSamples = 16;
            } else {
                bSamples = 8;
            }
            if (channelConfig == AudioFormat.CHANNEL_CONFIGURATION_MONO) {
                nChannels = 1;
            } else {
                nChannels = 2;
            }
            aSource = audioSource;
            sRate = sampleRate;
            aFormat = audioFormat;
            // Buffer holds two frame periods (TIMER_INTERVAL ms each) of audio.
            framePeriod = sampleRate * TIMER_INTERVAL / 1000;
            bufferSize = framePeriod * 2 * bSamples * nChannels / 8;
            // Check to make sure buffer size is not smaller than the smallest
            // allowed one.
            if (bufferSize < AudioRecord.getMinBufferSize(sampleRate,
                    channelConfig, audioFormat)) {
                bufferSize = AudioRecord.getMinBufferSize(sampleRate,
                        channelConfig, audioFormat);
                // Set frame period and timer interval accordingly
                framePeriod = bufferSize / (2 * bSamples * nChannels / 8);
                Log.w(ExtAudioRecorder.class.getName(),
                        "Increasing buffer size to "
                                + Integer.toString(bufferSize));
            }
            audioRecorder = new AudioRecord(audioSource, sampleRate,
                    channelConfig, audioFormat, bufferSize);
            if (audioRecorder.getState() != AudioRecord.STATE_INITIALIZED)
                throw new Exception("AudioRecord initialization failed");
            // Periodic position callbacks (once per frame period) drive writes.
            audioRecorder.setRecordPositionUpdateListener(updateListener);
            audioRecorder.setPositionNotificationPeriod(framePeriod);
        } else { // RECORDING_COMPRESSED
            // Compressed path: 3GP container with AMR-NB audio via MediaRecorder.
            mediaRecorder = new MediaRecorder();
            mediaRecorder.setAudioSource(MediaRecorder.AudioSource.MIC);
            mediaRecorder
                    .setOutputFormat(MediaRecorder.OutputFormat.THREE_GPP);
            mediaRecorder
                    .setAudioEncoder(MediaRecorder.AudioEncoder.AMR_NB);
        }
        cAmplitude = 0;
        filePath = null;
        state = State.INITIALIZING;
    } catch (Exception e) {
        if (e.getMessage() != null) {
            Log.e(ExtAudioRecorder.class.getName(), e.getMessage());
        } else {
            Log.e(ExtAudioRecorder.class.getName(),
                    "Unknown error occured while initializing recording");
        }
        state = State.ERROR;
    }
}
项目:guitar_guy
文件:AudioIn.java
/**
 * Capture thread main loop: fills rotating read buffers with PCM samples and
 * queues full buffers for consumers. Pauses (via wait()) while m_fRunning is
 * false, then resumes recording.
 */
@Override
public void run()
{
    // Audio capture is highly latency sensitive.
    android.os.Process
            .setThreadPriority(android.os.Process.THREAD_PRIORITY_URGENT_AUDIO);
    AudioRecord recorder = null;
    int ix = 0;
    try
    { // ... initialize
        recorder =
                new AudioRecord(AudioSource.DEFAULT,
                        AudioIn.SAMPLING_RATE,
                        AudioFormat.CHANNEL_IN_MONO,
                        AudioFormat.ENCODING_PCM_16BIT,
                        AudioIn.READ_BUFFER_SIZE
                                * AudioIn.BYTES_IN_SAMPLE);
        // ... loop
        recorder.setPositionNotificationPeriod(AudioIn.READ_BUFFER_SIZE);
        recorder.setRecordPositionUpdateListener(this);
        int nReadBytes;
        while (true)
        {
            // When paused, stop the hardware and sleep until notified.
            if (!m_fRunning)
            {
                recorder.stop();
                synchronized (this)
                {
                    wait();
                }
            }
            recorder.startRecording();
            // Rotate through the pool of read buffers.
            AudioReadBuffer readBuffer =
                    m_ReadBuffers[ix++
                            % m_ReadBuffers.length];
            synchronized (readBuffer)
            {
                readBuffer.eStatus =
                        EBufferStatus.eBufferStatus_Filling;
                short[] arBuffer = readBuffer.arSamples;
                // Zero the buffer before reading into it.
                for (int i = 0; i < arBuffer.length; i++)
                {
                    arBuffer[i] = 0;
                }
                nReadBytes =
                        recorder.read(arBuffer, 0,
                                arBuffer.length);
                readBuffer.eStatus =
                        EBufferStatus.eBufferStatus_Full;
                // Hand the filled buffer to the consumer queue (may block).
                m_queReadyBuffers.put(readBuffer);
            }
            if ((AudioRecord.ERROR_INVALID_OPERATION == nReadBytes)
                    || (nReadBytes == AudioRecord.ERROR_BAD_VALUE))
            {
                Log.e("AUDIO_IN/ERROR", "Read failed :(");
            }
        }
    }
    catch (Throwable x)
    {
        Log.e("AUDIO/IN", "Error reading voice audio", x);
    }
    finally
    {
        // Only stop a recorder that actually initialized successfully.
        if ((recorder != null)
                && (recorder.getState() == AudioRecord.STATE_INITIALIZED))
        {
            recorder.stop();
        }
    }
}
项目:PARLA
文件:ExtAudioRecorder.java
/**
 * Default constructor.
 * <p/>
 * Instantiates a new recorder; in case of compressed recording the PCM
 * parameters can be left as 0. In case of errors, no exception is thrown,
 * but the state is set to ERROR.
 *
 * @param uncompressed  true for PCM (AudioRecord/WAV), false for MediaRecorder (3GP/AMR)
 * @param audioSource   MediaRecorder.AudioSource constant
 * @param sampleRate    sample rate in Hz (PCM path only)
 * @param channelConfig deprecated AudioFormat.CHANNEL_CONFIGURATION_* constant
 * @param audioFormat   AudioFormat.ENCODING_PCM_8BIT or _16BIT
 */
@SuppressWarnings("deprecation")
public ExtAudioRecorder(boolean uncompressed, int audioSource, int sampleRate, int channelConfig, int audioFormat) {
    try {
        rUncompressed = uncompressed;
        if (rUncompressed) { // RECORDING_UNCOMPRESSED
            // Derive bits-per-sample and channel count from the format constants.
            if (audioFormat == AudioFormat.ENCODING_PCM_16BIT) {
                bSamples = 16;
            } else {
                bSamples = 8;
            }
            if (channelConfig == AudioFormat.CHANNEL_CONFIGURATION_MONO) {
                nChannels = 1;
            } else {
                nChannels = 2;
            }
            aSource = audioSource;
            sRate = sampleRate;
            aFormat = audioFormat;
            // Buffer holds two frame periods (TIMER_INTERVAL ms each) of audio.
            framePeriod = sampleRate * TIMER_INTERVAL / 1000;
            bufferSize = framePeriod * 2 * bSamples * nChannels / 8;
            if (bufferSize < AudioRecord.getMinBufferSize(sampleRate, channelConfig, audioFormat)) { // Check to make sure buffer size is not smaller than the smallest allowed one
                bufferSize = AudioRecord.getMinBufferSize(sampleRate, channelConfig, audioFormat);
                // Set frame period and timer interval accordingly
                framePeriod = bufferSize / (2 * bSamples * nChannels / 8);
                Log.w(ExtAudioRecorder.class.getName(), "Increasing buffer size to " + Integer.toString(bufferSize));
            }
            audioRecorder = new AudioRecord(audioSource, sampleRate, channelConfig, audioFormat, bufferSize);
            if (audioRecorder.getState() != AudioRecord.STATE_INITIALIZED)
                throw new Exception("AudioRecord initialization failed");
            // Periodic position callbacks (once per frame period) drive writes.
            audioRecorder.setRecordPositionUpdateListener(updateListener);
            audioRecorder.setPositionNotificationPeriod(framePeriod);
        } else { // RECORDING_COMPRESSED
            // Compressed path: 3GP container with AMR-NB audio via MediaRecorder.
            mediaRecorder = new MediaRecorder();
            mediaRecorder.setAudioSource(MediaRecorder.AudioSource.MIC);
            mediaRecorder.setOutputFormat(MediaRecorder.OutputFormat.THREE_GPP);
            mediaRecorder.setAudioEncoder(MediaRecorder.AudioEncoder.AMR_NB);
        }
        cAmplitude = 0;
        filePath = null;
        state = State.INITIALIZING;
    } catch (Exception e) {
        if (e.getMessage() != null) {
            Log.e(ExtAudioRecorder.class.getName(), e.getMessage());
        } else {
            Log.e(ExtAudioRecorder.class.getName(), "Unknown error occured while initializing recording");
        }
        state = State.ERROR;
    }
}
项目:auth-client-demo-module-voice
文件:WaveRecorder.java
/**
 * Default constructor. Leaves the recorder in {@link State#INITIALIZING}, except if some kind
 * of error happens.
 *
 * @param sampleRate Audio sampling rate in Hz.
 */
public WaveRecorder(int sampleRate) {
    try {
        // Fixed capture format: 16-bit mono PCM from the microphone.
        bitsPerSample = 16;
        numChannels = 1;
        audioSource = AudioSource.MIC;
        this.sampleRate = sampleRate;
        audioFormat = AudioFormat.ENCODING_PCM_16BIT;
        // Buffer holds two frame periods (TIMER_INTERVAL ms each) of audio.
        framePeriod = sampleRate * TIMER_INTERVAL / 1000;
        bufferSize = framePeriod * 2 * bitsPerSample * numChannels / 8;
        if (bufferSize < AudioRecord.getMinBufferSize(sampleRate,
                AudioFormat.CHANNEL_CONFIGURATION_MONO, AudioFormat.ENCODING_PCM_16BIT)) {
            // increase buffer size if needed
            bufferSize =
                    AudioRecord.getMinBufferSize(sampleRate,
                            AudioFormat.CHANNEL_CONFIGURATION_MONO,
                            AudioFormat.ENCODING_PCM_16BIT);
            // Set frame period and timer interval accordingly
            framePeriod = bufferSize / (2 * bitsPerSample * numChannels / 8);
            Log.w(TAG, "Increasing buffer size to " + bufferSize);
        }
        aRecorder =
                new AudioRecord(audioSource, sampleRate,
                        AudioFormat.CHANNEL_CONFIGURATION_MONO, AudioFormat.ENCODING_PCM_16BIT,
                        bufferSize);
        if (aRecorder.getState() != AudioRecord.STATE_INITIALIZED) {
            throw new Exception("AudioRecord initialization failed");
        }
        // Periodic position callbacks (once per frame period) drive buffered writes.
        aRecorder.setRecordPositionUpdateListener(updateListener);
        aRecorder.setPositionNotificationPeriod(framePeriod);
        fPath = null;
        state = State.INITIALIZING;
    } catch (Exception e) {
        if (e.getMessage() != null) {
            Log.e(TAG, e.getMessage());
        } else {
            Log.e(TAG, "Unknown error occured while initializing recording");
        }
        state = State.ERROR;
    }
}
项目:android-fskmodem
文件:MainActivity.java
/**
 * Sets up the FSK (audio soft-modem) pipeline: config, decoder with a UI
 * callback, and a microphone AudioRecord whose settings mirror the decoder's.
 */
@Override
protected void onCreate(Bundle savedInstanceState) {
    super.onCreate(savedInstanceState);
    setContentView(R.layout.activity_main);
    /// INIT FSK CONFIG
    try {
        mConfig = new FSKConfig(FSKConfig.SAMPLE_RATE_44100, FSKConfig.PCM_16BIT, FSKConfig.CHANNELS_MONO, FSKConfig.SOFT_MODEM_MODE_4, FSKConfig.THRESHOLD_20P);
    } catch (IOException e1) {
        e1.printStackTrace();
    }
    /// INIT FSK DECODER
    // Decoded bytes are appended to the result TextView on the UI thread.
    mDecoder = new FSKDecoder(mConfig, new FSKDecoderCallback() {
        @Override
        public void decoded(byte[] newData) {
            final String text = new String(newData);
            runOnUiThread(new Runnable() {
                public void run() {
                    TextView view = ((TextView) findViewById(R.id.result));
                    view.setText(view.getText() + text);
                }
            });
        }
    });
    ///
    // make sure that the settings of the recorder match the settings of the decoder;
    // most devices can't record anything but 44100 samples in 16-bit PCM format...
    mBufferSize = AudioRecord.getMinBufferSize(FSKConfig.SAMPLE_RATE_44100, AudioFormat.CHANNEL_IN_MONO, AudioFormat.ENCODING_PCM_16BIT);
    // scale up the buffer... reading larger amounts of data
    // minimizes the chance of missing data because of thread priority
    mBufferSize *= 10;
    // again, make sure the recorder settings match the decoder settings
    mRecorder = new AudioRecord(AudioSource.MIC, FSKConfig.SAMPLE_RATE_44100, AudioFormat.CHANNEL_IN_MONO, AudioFormat.ENCODING_PCM_16BIT, mBufferSize);
    if (mRecorder.getState() == AudioRecord.STATE_INITIALIZED) {
        mRecorder.startRecording();
        // start a thread to read the audio data
        Thread thread = new Thread(mRecordFeed);
        thread.setPriority(Thread.MAX_PRIORITY);
        thread.start();
    }
    else {
        Log.i("FSKDecoder", "Please check the recorder settings, something is wrong!");
    }
}
项目:AndroidToArduinoSonicComms
文件:SonicActivity.java
@Override
protected Void doInBackground(Void... arg0) {
    log.v("audioIn thread started");
    // Fixed read size, large enough for several modem windows per read.
    // (AudioRecord.getMinBufferSize with SAMPLE_RATE / CHANNEL_IN_MONO /
    // ENCODING_PCM_16BIT would be the adaptive alternative.)
    final int N = 4096;
    short buffer[] = new short[N];
    log.v("N=%d", N);
    AudioRecord recorder = new AudioRecord(AudioSource.MIC, SAMPLE_RATE,
            AudioFormat.CHANNEL_IN_MONO, AudioFormat.ENCODING_PCM_16BIT, N * 2);
    recorder.startRecording();
    // Precompute the Goertzel coefficient for each input tone.
    final double COEFF[] = new double[IN_TONES.length];
    for (int i = 0; i < IN_TONES.length; i++) {
        COEFF[i] = 2.0 * Math.cos(2 * Math.PI * IN_TONES[i] / SAMPLE_RATE);
    }
    for (int i = 0; i < msgs.length; i++) {
        msgs[i] = new Msg();
    }
    final double ON_THRESHOLD = 8000; // 16384;
    final double OFF_THRESHOLD = ON_THRESHOLD * 1 / 2;
    try {
        // Honor AsyncTask cancellation instead of looping forever.
        while (!isCancelled()) {
            int read = 0;
            while (read < N) {
                int r = recorder.read(buffer, read, N - read);
                if (r < 0) {
                    // read() returns a negative error code (e.g. ERROR_INVALID_OPERATION);
                    // the old unchecked accumulation could spin forever on a dead recorder.
                    log.v("recorder.read failed: %d", r);
                    return null;
                }
                read += r;
            }
            for (int j = 0; j < read / WINDOW_SIZE; j++) {
                stamp++;
                double mag = goertzelSimple(buffer, j * WINDOW_SIZE, COEFF[0]);
                if (mag > maxMag) maxMag = mag;
                if (mag < minMag) minMag = mag;
                if (mag > ON_THRESHOLD) {
                    // log.v("mag = %f",mag);
                    FMSimpleRecv(true);
                } else if (mag < OFF_THRESHOLD) {
                    FMSimpleRecv(false);
                }
            }
        }
    } finally {
        // Always release the microphone, even on error or cancellation;
        // the original leaked it (cleanup was unreachable and commented out).
        recorder.stop();
        recorder.release();
    }
    return null;
}
项目:nextgislogger
文件:AudioEngine.java
/**
 * Builds {@code mAudioRecord}. If the capture parameters are already known they are
 * used verbatim; otherwise every rate/format/channel combination is probed until one
 * initializes, and that combination is remembered in the corresponding fields.
 */
private void createAudioRecord() {
    // Caller already fixed the parameters — trust them.
    if (mSampleRate > 0 && mAudioFormat > 0 && mChannelConfig > 0) {
        mAudioRecord = new AudioRecord(AudioSource.MIC, mSampleRate, mChannelConfig, mAudioFormat, mBufferSize);
        return;
    }

    final int[] candidateRates = { 8000, 11025, 16000, 22050, 32000, 44100, 47250, 48000 };
    final short[] candidateFormats = { AudioFormat.ENCODING_PCM_16BIT, AudioFormat.ENCODING_PCM_8BIT };
    final short[] candidateChannels = { AudioFormat.CHANNEL_IN_MONO, AudioFormat.CHANNEL_IN_STEREO,
            AudioFormat.CHANNEL_CONFIGURATION_MONO, AudioFormat.CHANNEL_CONFIGURATION_STEREO };

    // Probe combinations in order of preference until one works.
    for (int rate : candidateRates) {
        for (short format : candidateFormats) {
            for (short channel : candidateChannels) {
                try {
                    mBufferSize = AudioRecord.getMinBufferSize(rate, channel, format);
                    if (mBufferSize < 0) {
                        // Negative result means the platform rejects this combination.
                        continue;
                    }
                    mBuffer = new short[mBufferSize];
                    mAudioRecord = new AudioRecord(AudioSource.MIC, rate, channel, format, mBufferSize);
                    if (mAudioRecord.getState() == AudioRecord.STATE_INITIALIZED) {
                        // Remember the working combination for subsequent calls.
                        mSampleRate = rate;
                        mAudioFormat = format;
                        mChannelConfig = channel;
                        return;
                    }
                    mAudioRecord.release();
                    mAudioRecord = null;
                } catch (Exception ignored) {
                    // Unsupported combination on this device — try the next one.
                }
            }
        }
    }
}
项目:crowdpp
文件:AudioRecorder.java
/**
* Default constructor
*
* Instantiates a new recorder, in case of compressed recording the parameters can be left as 0.
* In case of errors, no exception is thrown, but the state is set to ERROR
*/
@SuppressWarnings("deprecation")
public AudioRecorder(boolean uncompressed, int audioSource, int sampleRate, int channelConfig, int audioFormat) {
try {
rUncompressed = uncompressed;
if (rUncompressed) {
// RECORDING_UNCOMPRESSED
if (audioFormat == AudioFormat.ENCODING_PCM_16BIT) {
bSamples = 16;
}
else {
bSamples = 8;
}
if (channelConfig == AudioFormat.CHANNEL_CONFIGURATION_MONO) {
nChannels = 1;
}
else {
nChannels = 2;
}
aSource = audioSource;
sRate = sampleRate;
aFormat = audioFormat;
framePeriod = sampleRate * TIMER_INTERVAL / 1000;
bufferSize = framePeriod * 2 * bSamples * nChannels / 8;
if (bufferSize < AudioRecord.getMinBufferSize(sampleRate, channelConfig, audioFormat)) {
// Check to make sure buffer size is not smaller than the smallest allowed one
bufferSize = AudioRecord.getMinBufferSize(sampleRate, channelConfig, audioFormat);
// Set frame period and timer interval accordingly
framePeriod = bufferSize / ( 2 * bSamples * nChannels / 8 );
Log.w(AudioRecorder.class.getName(), "Increasing buffer size to " + Integer.toString(bufferSize));
}
audioRecorder = new AudioRecord(audioSource, sampleRate, channelConfig, audioFormat, bufferSize);
if (audioRecorder.getState() != AudioRecord.STATE_INITIALIZED)
throw new Exception("AudioRecord initialization failed");
audioRecorder.setRecordPositionUpdateListener(updateListener);
audioRecorder.setPositionNotificationPeriod(framePeriod);
}
else {
// RECORDING_COMPRESSED
mediaRecorder = new MediaRecorder();
mediaRecorder.setAudioSource(MediaRecorder.AudioSource.MIC);
mediaRecorder.setOutputFormat(MediaRecorder.OutputFormat.THREE_GPP);
mediaRecorder.setAudioEncoder(MediaRecorder.AudioEncoder.AMR_NB);
}
cAmplitude = 0;
filePath = null;
state = State.INITIALIZING;
} catch (Exception e) {
if (e.getMessage() != null) {
Log.e(AudioRecorder.class.getName(), e.getMessage());
}
else {
Log.e(AudioRecorder.class.getName(), "Unknown error occured while initializing recording");
}
state = State.ERROR;
}
}
项目:opentok-android-sdk-samples
文件:CustomAudioDevice.java
/**
 * Initializes the audio capture path: acquires the audio mode, sizes the record
 * buffer, (re)creates the {@link AudioRecord} with platform effects where
 * available, and starts the capture thread.
 *
 * @return true on success
 * @throws RuntimeException if the AudioRecord cannot be created or initialized
 */
@Override
public boolean initCapturer() {
    // Initialize audio mode for voice communication.
    audioManagerMode.acquireMode(audioManager);
    // Get the minimum buffer size that can be used.
    int minRecBufSize = AudioRecord.getMinBufferSize(
            captureSettings.getSampleRate(),
            NUM_CHANNELS_CAPTURING == 1 ? AudioFormat.CHANNEL_IN_MONO : AudioFormat.CHANNEL_IN_STEREO,
            AudioFormat.ENCODING_PCM_16BIT
    );
    // Double size to be more safe.
    int recBufSize = minRecBufSize * 2;
    // Release any previously created effect/recorder objects.
    if (noiseSuppressor != null) {
        noiseSuppressor.release();
        noiseSuppressor = null;
    }
    if (echoCanceler != null) {
        echoCanceler.release();
        echoCanceler = null;
    }
    if (audioRecord != null) {
        audioRecord.release();
        audioRecord = null;
    }
    try {
        audioRecord = new AudioRecord(AudioSource.VOICE_COMMUNICATION,
                captureSettings.getSampleRate(),
                NUM_CHANNELS_CAPTURING == 1 ? AudioFormat.CHANNEL_IN_MONO
                        : AudioFormat.CHANNEL_IN_STEREO,
                AudioFormat.ENCODING_PCM_16BIT, recBufSize);
        // Attach platform audio effects when the device supports them.
        if (NoiseSuppressor.isAvailable()) {
            noiseSuppressor = NoiseSuppressor.create(audioRecord.getAudioSessionId());
        }
        if (AcousticEchoCanceler.isAvailable()) {
            echoCanceler = AcousticEchoCanceler.create(audioRecord.getAudioSessionId());
        }
    } catch (Exception e) {
        // Chain the original exception as the cause; the old
        // new RuntimeException(e.getMessage()) discarded the stack trace.
        throw new RuntimeException("Audio capture initialization failed", e);
    }
    // Check that the audioRecord is ready to be used.
    if (audioRecord.getState() != AudioRecord.STATE_INITIALIZED) {
        throw new RuntimeException("Audio capture is not initialized " + captureSettings.getSampleRate());
    }
    shutdownCaptureThread = false;
    new Thread(captureThread).start();
    return true;
}
项目:droidkit-webrtc
文件:WebRtcAudioRecord.java
/**
 * Creates the capture {@code AudioRecord} for WebRTC.
 *
 * @param audioSource ignored — capture always uses VOICE_COMMUNICATION (see below)
 * @param sampleRate  capture sampling rate in Hz
 * @return the rough number of buffered samples on success, -1 on failure
 */
@SuppressWarnings("unused")
private int InitRecording(int audioSource, int sampleRate) {
    // Deliberately override the caller's source: VOICE_COMMUNICATION selects
    // the echo-cancellation-tuned capture path.
    audioSource = AudioSource.VOICE_COMMUNICATION;
    // Get the minimum buffer size that can be used.
    int minRecBufSize = AudioRecord.getMinBufferSize(
            sampleRate,
            AudioFormat.CHANNEL_IN_MONO,
            AudioFormat.ENCODING_PCM_16BIT);
    // Double size to be more safe.
    int recBufSize = minRecBufSize * 2;
    // On average half of the samples have been recorded/buffered and the
    // recording interval is 1/100s.
    _bufferedRecSamples = sampleRate / 200;
    // Release any previous instance before creating a new one.
    if (_audioRecord != null) {
        _audioRecord.release();
        _audioRecord = null;
    }
    try {
        _audioRecord = new AudioRecord(
                audioSource,
                sampleRate,
                AudioFormat.CHANNEL_IN_MONO,
                AudioFormat.ENCODING_PCM_16BIT,
                recBufSize);
    } catch (Exception e) {
        // getMessage() may be null; String.valueOf keeps the log call safe.
        DoLog(String.valueOf(e.getMessage()));
        return -1;
    }
    // Check that the audioRecord is ready to be used.
    if (_audioRecord.getState() != AudioRecord.STATE_INITIALIZED) {
        // Release the half-initialized recorder instead of leaking it.
        _audioRecord.release();
        _audioRecord = null;
        return -1;
    }
    return _bufferedRecSamples;
}