Android NDK 读写声卡是通过 AudioRecord 和 AudioTrack 两个类实现的。
AudioTrack:负责声音数据的输出
AudioRecord:负责声音数据的采集
system/media/audio/include/system
├── audio-base.h
├── audio-base-utils.h
├── audio_effect-base.h
├── audio_effect.h
├── audio_effects
├── audio.h
├── audio_policy.h
└── sound_trigger.h
音频源:
// Audio input sources (quoted from system/media/audio/include/system/audio-base.h).
typedef enum {
AUDIO_SOURCE_DEFAULT = 0, // Default input source
AUDIO_SOURCE_MIC = 1, // Microphone audio source
AUDIO_SOURCE_VOICE_UPLINK = 2, // Voice call uplink (Tx) audio source
AUDIO_SOURCE_VOICE_DOWNLINK = 3, // Voice call downlink (Rx) audio source
AUDIO_SOURCE_VOICE_CALL = 4, // Voice call uplink + downlink audio source
AUDIO_SOURCE_CAMCORDER = 5, // Microphone audio source tuned for video recording
AUDIO_SOURCE_VOICE_RECOGNITION = 6, // Microphone audio source tuned for voice recognition
AUDIO_SOURCE_VOICE_COMMUNICATION = 7, // Microphone audio source tuned for voice communications such as VoIP
AUDIO_SOURCE_REMOTE_SUBMIX = 8, // Audio rerouted by the framework (remote submix)
AUDIO_SOURCE_UNPROCESSED = 9, // Unprocessed (raw) microphone source, if supported
AUDIO_SOURCE_VOICE_PERFORMANCE = 10, // Source tuned for capturing live performances
AUDIO_SOURCE_ECHO_REFERENCE = 1997, // Echo canceller reference signal (system use)
AUDIO_SOURCE_FM_TUNER = 1998, // FM tuner output used as an input source
#ifndef AUDIO_NO_SYSTEM_DECLARATIONS
/**
* A low-priority, preemptible audio source for background software
* hotword detection. Same tuning as VOICE_RECOGNITION.
* Used only internally by the framework.
*/
AUDIO_SOURCE_HOTWORD = 1999,
#endif // AUDIO_NO_SYSTEM_DECLARATIONS
} audio_source_t;
// Audio session identifiers: special (non-positive) values; positive values
// are session ids allocated by the framework.
typedef enum {
AUDIO_SESSION_OUTPUT_STAGE = -1, // (-1) effects attached to a particular output stage
AUDIO_SESSION_OUTPUT_MIX = 0, // effects applied to the whole output mix
AUDIO_SESSION_ALLOCATE = 0, // ask the framework to allocate a new session id
AUDIO_SESSION_NONE = 0, // no session id
} audio_session_t;
//音频格式
// Audio formats: the main format lives in the high bits, the sub-format in
// the low bits; aliases combine the two.
typedef enum { // some definitions omitted
AUDIO_FORMAT_INVALID = 0xFFFFFFFFu,
AUDIO_FORMAT_DEFAULT = 0,
AUDIO_FORMAT_PCM = 0x00000000u,
AUDIO_FORMAT_MP3 = 0x01000000u,
AUDIO_FORMAT_AMR_NB = 0x02000000u,
/* Subformats */
AUDIO_FORMAT_PCM_SUB_16_BIT = 0x1u,
AUDIO_FORMAT_PCM_SUB_8_BIT = 0x2u,
AUDIO_FORMAT_PCM_SUB_32_BIT = 0x3u,
AUDIO_FORMAT_PCM_SUB_8_24_BIT = 0x4u,
AUDIO_FORMAT_PCM_SUB_FLOAT = 0x5u,
AUDIO_FORMAT_PCM_SUB_24_BIT_PACKED = 0x6u,
/* Aliases */
AUDIO_FORMAT_PCM_16_BIT = 0x1u, // (PCM | PCM_SUB_16_BIT) 16-bit PCM
AUDIO_FORMAT_PCM_8_BIT = 0x2u, // (PCM | PCM_SUB_8_BIT) 8-bit PCM
AUDIO_FORMAT_PCM_32_BIT = 0x3u, // (PCM | PCM_SUB_32_BIT)
AUDIO_FORMAT_PCM_8_24_BIT = 0x4u, // (PCM | PCM_SUB_8_24_BIT)
AUDIO_FORMAT_PCM_FLOAT = 0x5u, // (PCM | PCM_SUB_FLOAT)
AUDIO_FORMAT_PCM_24_BIT_PACKED = 0x6u, // (PCM | PCM_SUB_24_BIT_PACKED)
AUDIO_FORMAT_AAC_MAIN = 0x4000001u, // (AAC | AAC_SUB_MAIN)
AUDIO_FORMAT_AAC_LC = 0x4000002u, // (AAC | AAC_SUB_LC)
AUDIO_FORMAT_AAC_SSR = 0x4000004u, // (AAC | AAC_SUB_SSR)
} audio_format_t;
// Audio channel masks: OUT_* masks are for playback, IN_* masks for capture;
// composite masks are bitwise ORs of the individual channel bits.
enum { // some definitions omitted
AUDIO_CHANNEL_REPRESENTATION_POSITION = 0x0u,
AUDIO_CHANNEL_REPRESENTATION_INDEX = 0x2u,
AUDIO_CHANNEL_NONE = 0x0u,
AUDIO_CHANNEL_INVALID = 0xC0000000u,
AUDIO_CHANNEL_OUT_FRONT_LEFT = 0x1u,
AUDIO_CHANNEL_OUT_FRONT_RIGHT = 0x2u,
AUDIO_CHANNEL_IN_TOP_RIGHT = 0x400000u,
AUDIO_CHANNEL_IN_VOICE_UPLINK = 0x4000u,
AUDIO_CHANNEL_IN_VOICE_DNLINK = 0x8000u,
AUDIO_CHANNEL_IN_MONO = 0x10u, // IN_FRONT — mono capture
AUDIO_CHANNEL_IN_STEREO = 0xCu, // IN_LEFT | IN_RIGHT — stereo capture
AUDIO_CHANNEL_IN_FRONT_BACK = 0x30u, // IN_FRONT | IN_BACK
AUDIO_CHANNEL_IN_6 = 0xFCu, // IN_LEFT | IN_RIGHT | IN_FRONT | IN_BACK | IN_LEFT_PROCESSED | IN_RIGHT_PROCESSED
AUDIO_CHANNEL_IN_2POINT0POINT2 = 0x60000Cu, // IN_LEFT | IN_RIGHT | IN_TOP_LEFT | IN_TOP_RIGHT
AUDIO_CHANNEL_IN_2POINT1POINT2 = 0x70000Cu, // IN_LEFT | IN_RIGHT | IN_TOP_LEFT | IN_TOP_RIGHT | IN_LOW_FREQUENCY
AUDIO_CHANNEL_IN_3POINT0POINT2 = 0x64000Cu, // IN_LEFT | IN_CENTER | IN_RIGHT | IN_TOP_LEFT | IN_TOP_RIGHT
AUDIO_CHANNEL_IN_3POINT1POINT2 = 0x74000Cu, // IN_LEFT | IN_CENTER | IN_RIGHT | IN_TOP_LEFT | IN_TOP_RIGHT | IN_LOW_FREQUENCY
AUDIO_CHANNEL_IN_5POINT1 = 0x17000Cu, // IN_LEFT | IN_CENTER | IN_RIGHT | IN_BACK_LEFT | IN_BACK_RIGHT | IN_LOW_FREQUENCY
AUDIO_CHANNEL_IN_VOICE_UPLINK_MONO = 0x4010u, // IN_VOICE_UPLINK | IN_MONO
AUDIO_CHANNEL_IN_VOICE_DNLINK_MONO = 0x8010u, // IN_VOICE_DNLINK | IN_MONO
AUDIO_CHANNEL_IN_VOICE_CALL_MONO = 0xC010u, // IN_VOICE_UPLINK_MONO | IN_VOICE_DNLINK_MONO
};
// Input flags: request special properties for the capture path (bit mask).
typedef enum {
AUDIO_INPUT_FLAG_NONE = 0x0, // no special attributes
AUDIO_INPUT_FLAG_FAST = 0x1, // request the low-latency ("fast") input path
AUDIO_INPUT_FLAG_HW_HOTWORD = 0x2, // hardware hotword detection path
AUDIO_INPUT_FLAG_RAW = 0x4, // bypass audio processing (raw capture)
AUDIO_INPUT_FLAG_SYNC = 0x8, // synchronized capture
AUDIO_INPUT_FLAG_MMAP_NOIRQ = 0x10, // mmap, no-IRQ mode
AUDIO_INPUT_FLAG_VOIP_TX = 0x20, // VoIP transmit path
AUDIO_INPUT_FLAG_HW_AV_SYNC = 0x40, // hardware A/V sync
#ifndef AUDIO_NO_SYSTEM_DECLARATIONS // TODO: Expose at HAL interface, remove FRAMEWORK_FLAGS mask
AUDIO_INPUT_FLAG_DIRECT = 0x80, // direct input (framework-internal)
AUDIO_INPUT_FRAMEWORK_FLAGS = AUDIO_INPUT_FLAG_DIRECT,
#endif
} audio_input_flags_t;
// "None" (invalid/unassigned) values for the various audio handle types.
enum {
AUDIO_IO_HANDLE_NONE = 0,
AUDIO_MODULE_HANDLE_NONE = 0,
AUDIO_PORT_HANDLE_NONE = 0,
AUDIO_PATCH_HANDLE_NONE = 0,
};
AudioRecord/AudioTrack 的数据传输模式:
TRANSFER_CALLBACK 通过回调函数传输数据
TRANSFER_OBTAIN 通过 obtainBuffer()/releaseBuffer() 获取、释放缓冲区来传输数据
TRANSFER_SYNC 通过同步的 read()/write() 调用传输数据
TRANSFER_DEFAULT 由框架根据其他参数自动选择传输模式
├── Android.mk
├── include
└── src
└── audio_main.cpp
audio_main.cpp:
#include <stdio.h>
#include <pthread.h>
#include <math.h>
#include <system/audio.h>
#include <media/AudioRecord.h>
#include <media/AudioTrack.h>
using namespace android;
// Global state shared between main() and the AudioRecord/AudioTrack callbacks.
sp<AudioRecord> mAudioRecord; // capture: soundcard -> file
sp<AudioTrack> mAudioTrack; // playback: file -> soundcard
FILE *g_read_pcm = NULL; // PCM file read by the playback path
FILE *g_write_pcm = NULL; // PCM file written by the capture path
audio_channel_mask_t channelmask = AUDIO_CHANNEL_IN_MONO; // mono; switched to OUT_MONO in ndk_audio_write()
audio_format_t audio_format = AUDIO_FORMAT_PCM_16_BIT; // 16-bit PCM samples
int sample_rate = 16000; // 16 kHz sample rate
int min_buf_size = 0; // minimum buffer size in bytes, computed at runtime
// AudioRecord callback: on EVENT_MORE_DATA, append the captured PCM chunk
// to the output file. Other events are logged and ignored.
// @param event AudioRecord event code (EVENT_MORE_DATA carries data)
// @param user  callback cookie (unused)
// @param info  AudioRecord::Buffer* when event is EVENT_MORE_DATA
void read_audio_data(int event, void *user, void *info)
{
    if (event != AudioRecord::EVENT_MORE_DATA) {
        printf("%s: event: %d\n", __FUNCTION__, event);
        return;
    }
    AudioRecord::Buffer *buffer = static_cast<AudioRecord::Buffer *>(info);
    if (buffer->size == 0) {
        return;
    }
    // Guard against a missing output file (e.g. fopen failed): the callback
    // runs on a framework thread and must not dereference NULL.
    if (g_write_pcm == NULL) {
        return;
    }
    //printf("%s: buf size: %d\n", __FUNCTION__, buffer->size);
    fwrite(buffer->raw, buffer->size, 1, g_write_pcm);
}
//read from soundcard and write into file
// Capture path: configure an AudioRecord for the global sample rate / format /
// channel mask and stream microphone data into /data/ndksound.pcm via the
// read_audio_data() callback. Returns 0 on success, -1 on any setup failure.
int ndk_audio_read()
{
    int ret = 0;
    char file[256] = {'\0'};
    size_t frame_count = 0;
    int frame_size = 0;
    String16 strName = String16("reader");
    mAudioRecord = new AudioRecord(strName);
    // Ask the framework for the minimum frame count for these stream params.
    status_t result = AudioRecord::getMinFrameCount(&frame_count, sample_rate,
            audio_format, channelmask);
    if (result == NO_ERROR) {
        int channel_count = popcount(channelmask);
        // bytes = frames * channels * bytes-per-sample (2 for 16-bit PCM)
        min_buf_size = frame_count * channel_count * (audio_format == AUDIO_FORMAT_PCM_16_BIT ? 2 : 1);
    } else if (result == BAD_VALUE) {
        printf("Invalid param when get min frame count\n");
        return -1;
    } else {
        printf("Failed to get min frame count\n");
        return -1;
    }
    min_buf_size *= 2; // double the minimum to prevent "buffer overflow" issues
    if (min_buf_size > 0) {
        printf("get min buf size[%d]\n", min_buf_size);
    } else {
        printf("get min buf size failed\n");
        return -1;
    }
    frame_size = popcount(channelmask) * (audio_format == AUDIO_FORMAT_PCM_16_BIT ? 2 : 1);
    frame_count = min_buf_size / frame_size;
    ret = mAudioRecord->set(
        AUDIO_SOURCE_MIC,               // capture from the microphone
        sample_rate,
        audio_format,
        channelmask,
        frame_count,
        read_audio_data,                // EVENT_MORE_DATA callback
        NULL,                           // callback cookie
        0,                              // notification frames (use default)
        false,                          // threadCanCallJava
        AUDIO_SESSION_ALLOCATE,         // let the framework allocate a session id
        AudioRecord::TRANSFER_CALLBACK, // data delivered through the callback
        AUDIO_INPUT_FLAG_FAST,          // request the low-latency (fast) input path
        getuid(),
        getpid(),
        NULL,                           // no audio attributes
        AUDIO_PORT_HANDLE_NONE);        // no explicit input device selection
    if (ret != NO_ERROR) {
        printf("AudioRecord set failure\n");
        return -1;
    }
    printf("set success\n");
    if (mAudioRecord->initCheck() != NO_ERROR) {
        printf("AudioRecord initialization failed!");
        return -1;
    }
    snprintf(file, sizeof(file), "/data/ndksound.pcm");
    g_write_pcm = fopen(file, "wb");
    if (!g_write_pcm) { // mirror the error handling of ndk_audio_write()
        printf("open file failed\n");
        return -1;
    }
    ret = mAudioRecord->start();
    if (ret != NO_ERROR) {
        printf("Audio Record start failure ret: [%d]", ret);
        return -1; // report the failure instead of claiming success
    }
    return 0;
}
// AudioTrack callback: on EVENT_MORE_DATA, fill the track buffer with the
// next chunk of PCM read from g_read_pcm. Exits the process once the file
// is exhausted. Other events are logged and ignored.
// @param event AudioTrack event code (EVENT_MORE_DATA requests data)
// @param user  callback cookie (unused)
// @param info  AudioTrack::Buffer* when event is EVENT_MORE_DATA
void write_audio_data(int event, void *user, void *info)
{
    if (event != AudioTrack::EVENT_MORE_DATA) {
        printf("soundcard writer event: %d\n", event);
        return;
    }
    AudioTrack::Buffer *buffer = static_cast<AudioTrack::Buffer *>(info);
    if (buffer->size == 0) {
        return;
    }
    // Zero first so a short read near end-of-file plays silence, not garbage.
    memset(buffer->raw, 0, buffer->size);
    // Guard against a missing input file: this runs on a framework thread
    // and must not pass NULL to fread().
    if (g_read_pcm == NULL) {
        printf("%s: input file not open\n", __FUNCTION__);
        exit(1);
    }
    size_t nread = fread(buffer->raw, 1, buffer->size, g_read_pcm);
    if (nread == 0) {
        printf("%s: no more data:%d\n", __FUNCTION__, (int)nread);
        fclose(g_read_pcm); // close the file before terminating
        g_read_pcm = NULL;
        exit(1);
    }
}
//read from file and write into soundcard
// Playback path: configure an AudioTrack for the global sample rate / format
// and stream /data/ndksound.pcm to the soundcard via the write_audio_data()
// callback. Returns 0 on success, -1 on any setup failure.
int ndk_audio_write()
{
    int ret = 0;
    char file[256] = {'\0'};
    size_t frame_count = 0;
    int frame_size = 0;
    mAudioTrack = new AudioTrack();
    status_t result = AudioTrack::getMinFrameCount(&frame_count, AUDIO_STREAM_DEFAULT,
            sample_rate);
    if (result == NO_ERROR) {
        // channelmask still holds the capture mask (IN_MONO) here; popcount
        // is 1 for mono either way, so the byte count comes out the same.
        int channel_count = popcount(channelmask);
        min_buf_size = frame_count * channel_count * (audio_format == AUDIO_FORMAT_PCM_16_BIT ? 2 : 1);
    } else if (result == BAD_VALUE) {
        printf("Invalid param when get min frame count\n");
        return -1;
    } else {
        printf("Failed to get min frame count\n");
        return -1;
    }
    if (min_buf_size > 0) {
        printf("get min buf size[%d]\n", min_buf_size);
    } else {
        printf("get min buf size failed\n");
        return -1;
    }
    // Switch the global mask to an output mask before configuring the track.
    channelmask = AUDIO_CHANNEL_OUT_MONO;
    frame_size = popcount(channelmask) * (audio_format == AUDIO_FORMAT_PCM_16_BIT ? 2 : 1);
    frame_count = min_buf_size / frame_size;
    ret = mAudioTrack->set(
        AUDIO_STREAM_VOICE_CALL,       // stream type
        sample_rate,
        audio_format,
        channelmask,
        frame_count,
        AUDIO_OUTPUT_FLAG_FAST,        // request the low-latency (fast) output path
        write_audio_data,              // EVENT_MORE_DATA callback
        NULL,                          // callback cookie
        0,                             // notification frames (use default)
        0,                             // no shared memory
        false,                         // threadCanCallJava
        AUDIO_SESSION_ALLOCATE,        // let the framework allocate a session id
        AudioTrack::TRANSFER_CALLBACK, // data delivered through the callback
        NULL,                          // no offload info
        -1                             // NOTE(review): sentinel value — confirm against AudioTrack::set() signature
    );
    if (ret != NO_ERROR) {
        printf("mAudioTrack set failure\n");
        return -1;
    }
    printf("set success\n");
    if (mAudioTrack->initCheck() != NO_ERROR) {
        printf("mAudioTrack initialization failed!");
        return -1;
    }
    snprintf(file, sizeof(file), "/data/ndksound.pcm");
    g_read_pcm = fopen(file, "rb");
    if (!g_read_pcm) {
        printf("open file failed\n");
        return -1;
    }
    ret = mAudioTrack->start();
    if (ret != NO_ERROR) {
        printf("Audio Track start failure ret: [%d]", ret);
        return -1;
    }
    printf("start success\n");
    return 0;
}
// Entry point: "./ndk_audio read" captures microphone audio into
// /data/ndksound.pcm; "./ndk_audio write" plays that file back.
// The process runs until killed (audio is driven by framework callbacks).
int main(int argc, char *argv[])
{
    int ret = 0;
    if (argc < 2) {
        printf("usage: %s read|write\n", argv[0]);
        return -1;
    }
    if (0 == strcmp(argv[1], "read")) {
        printf("read soundcard\n");
        ret = ndk_audio_read();
        if (ret < 0) {
            exit(1);
        }
    } else if (0 == strcmp(argv[1], "write")) {
        // Require an explicit "write" instead of treating any other argument
        // (including typos) as a request for playback.
        printf("write soundcard\n");
        ret = ndk_audio_write();
        if (ret < 0) {
            exit(1);
        }
    } else {
        printf("usage: %s read|write\n", argv[0]);
        return -1;
    }
    // Keep the process alive; data flows on framework callback threads.
    while (1) {
        sleep(5);
    }
    // Unreachable with the infinite loop above; the OS closes the files on
    // process termination. Kept for completeness.
    if (g_read_pcm) {
        fclose(g_read_pcm);
    }
    if (g_write_pcm) {
        fclose(g_write_pcm);
    }
    return 0;
}
Android.mk
LOCAL_PATH := $(call my-dir)
include $(CLEAR_VARS)
# Source files for the ndk_audio executable.
LOCAL_SRC_FILES += \
    src/audio_main.cpp
# Include paths for libaudioclient (AudioRecord/AudioTrack) and its deps.
LOCAL_C_INCLUDES += \
    bionic \
    external/stlport/stlport \
    external/libcxx/include \
    frameworks/av/include \
    frameworks/av/media/libaudioclient/include \
    frameworks/native/libs/nativebase/include \
    frameworks/native/libs/math/include \
    frameworks/av/media/ndk/include \
    system/core/include \
    system/core/libprocessgroup/include \
    system/core/base/include \
    system/core/libutils/include \

LOCAL_CFLAGS := -DANDROID -Wall -Wno-implicit-function-declaration
# -Wl,... is a linker option, so pass it through LDFLAGS, not CFLAGS.
LOCAL_LDFLAGS := -Wl,--unresolved-symbols=ignore-all
LOCAL_MODULE := ndk_audio
LOCAL_LDLIBS := -lm -lmediandk -landroid -laudioclient -lstdc++ -lutils
include $(BUILD_EXECUTABLE)
从声卡读声音数据写到文件: ./ndk_audio read
从文件读声音数据写到声卡: ./ndk_audio write
本人是音乐爱好者,从小就特别喜欢那个随着音乐跳动的方框效果,就是这个:arduino上一大把对,我忍你很久了,我就想用mpy做,全网没有,行我自己研究。果然兴趣是最好的老师,我之前有篇博客专门讲音频,有兴趣的可以回顾一下。提到可视化频谱,必然绕不开fft,大学学过这玩意,当时一心玩,老师讲的一个字都么听进去,网上教程简略扫了一下,大该就是把时域转频域的工具,我大mpy居然没有fft函数,奶奶的,先放着。音频信息如何收集?第一种傻瓜式的ADC,模拟转数字,原始粗暴,第二种,I2S库,我之前博客有讲过,数据是PCM编码。然后又去学PCM编码,一学豁然开朗,舒服,以代码为例:audio_in=I2S
我需要在一台没有安装Excel的Linux服务器上读写(->转换)Excel文件。对于Python,存在http://www.python-excel.org/.Ruby有类似的东西吗?可能不需要处理最新的Office格式。只需旧的xls文件就足够了。 最佳答案 我同意Gonzih的观点,并且我经常使用roo。它允许我使用模板文件进行读取、写入和写入。该项目在他们的site上有很好的记录。.我总是使用类似的东西:input=Excel.new(path)output=Array.newinput.default_sheet=inpu
我的表单通过POST接收数据。当我执行putsparams时,我可以看到:{"id"=>"123","id2"=>"456"}现在是命令:putsparams['id']#=>123putsparams[:id]#=>123params['id']='999'putsparams#=>{"id"=>"999","id2"=>"456"}但是当我这样做的时候:params[:id]='888'putsparams我明白了{"id"=>"999","id2"=>"456",:id=>"888"}在IRB中它工作正常:params#=>{"id2"=>"2","id"=>"1"}params
一、概述在之前的一篇博文中,记录了AT24C01、AT24C02芯片的读写驱动,先将之前的相关文章include一下:1.IIC驱动:4位数码管显示模块TM1637芯片C语言驱动程序2.AT24C01/AT24C02读写:AT24C01/AT24C02系列EEPROM芯片单片机读写驱动程序本文记录分享AT24C04、AT24C08、AT24C16芯片的单片机C语言读写驱动程序。二、芯片对比介绍型号容量bit容量byte页数字节/页器件寻址位可寻址器件数WordAddress位数/字节数备注AT24C044k5123216A2A149/1WordAddress使用P0位AT24C088k1024
解决台式机麦克风不可用问题戴尔灵越3880最近因为需要开线上会议,发现戴尔台式机音频只有输出没有输入,也就是只能听见声音,无法输入声音。先后尝试了各种驱动安装更新之类的调试,无果。之后通过戴尔支持解决~这里多说一句,专业的就是专业,问题描述过去,直接给了解决方案,可能是他们遇到的相似问题比较多了,但也告诉我们,有些时候是可以通过这些官方服务解决问题的,比起自己折腾效率要高很多。那就记录一下吧~问题描述:电脑只能输出声音,不能输入声音。1、前提需要准备一只带麦克风的耳机,将耳机插入面板。2、先确定是否可以听到声音,可以通过播放歌曲或者视频。3、然后确认麦克风是否可用,可以通过调用win自带麦克风
以VSTiTriforce为例,由Tweakbench提供。当加载到市场上的任何VST主机时,它允许主机向VSTi发送(大概是MIDI)信号。然后VSTi将处理该信号并输出由VSTi内的软件乐器创建的合成音频。例如,将A4(我相信是MIDI音符)发送到VSTi会导致它合成高于中央C的A。它将音频数据发送回VST主机,然后它可以在我的扬声器上播放或将其保存为.wav或其他一些音频文件格式。假设我有Triforce,我正在尝试用我选择的语言编写一个程序,它可以通过发送要合成的A4纸条与VSTi交互,并自动将其保存到系统上的文件?最终,我希望能够解析整个单轨MIDI文件(使用已经可用于此
我的Ruby程序从stdin读取行并使用puts打印到stdout(终端)。我可以使用RSpec来测试读写吗?我可以像在stdin中编写的那样向我的程序注入(inject)一个字符串,同时检查输出吗?line=STDIN.read.chomp.split另外,我在一个循环中进行读取和写入,直到line[0]被“退出”。我可以在循环运行时进行测试,还是应该调用subject.read_in和subject.write_out? 最佳答案 您可以使用模拟并通过在and_return()方法中列出多个值来多次调用该方法。这些将按照给定的顺
问题的产生经典的单reactor多线程模式采用的是用主线程处理连接事件以及socket读写事件,业务逻辑的处理则是让线程池里的线程各自竞争处理。既然多线程这么方便,为什么不让线程池里的线程也参与到read和send这个过程中呢?在发送数据的过程中,即使TCP的发送缓存满了,我们也可以记录下当前成功发送了多少字节,然后再次注册一个EPOLLOUT事件,只需等待下次可写事件,继续让子线程发送数据即可,岂不是美哉?解释陈硕大佬的解释对于TCP,通常多线程读写同一个socket是错误的设计,因为有shortwrite的可能。假如你加锁,而又发生shortwrite,你是不是要一直等到整条消息发送完才解
特性工作电压范围:6V-14V输出功率:7W(CLASSD,7.4V/4Ω,THD=10%)10W(CLASSD,9V/4Ω,THD=10%)18W(CLASSD,12V/4Ω,THD=10%)最高可达92%效率(12V/8Ω)电平设置工作模式无需输出滤波器差分输入优异的“上电,掉电”噪声抑制过流保护、过热保护、欠压保护 eSOP-8封装典型应用电路很简单:如下是本人的设计。 输入电阻:输入电阻主要是确定增益,即输出功率,所以一定要确定输入信号的幅度,喇叭的幅度,前后使用有效值计算。此设计搭配的喇叭是8R3W,额定功率3W,额定电压4.89V(有效值),最大功率4W。我们先确定输入信号的赋值,
做音频处理(虽然它也可以是图像处理)我有一个一维数字数组。(它们恰好是代表音频样本的16位有符号整数,这个问题同样适用于float或不同大小的整数。)为了匹配不同频率的音频(例如,将44.1kHz样本与22kHz样本混合),我需要拉伸(stretch)或压缩值数组以满足特定长度。将数组减半很简单:每隔一个样本丢弃一次。[231,8143,16341,2000,-9352,...]=>[231,16341,-9352,...]将数组宽度加倍稍微不那么简单:将每个条目加倍(或可选地在相邻的“真实”样本之间执行一些插值)。[231,8143,16341,2000,-9352,...]=>[2