Main Differences Between VOIP Calls and Cellular Calls
VOIP (Voice over Internet Protocol) is a technology for carrying voice communication over the Internet. It works by converting the analog audio signal into a digital signal, compressing it, splitting it into packets, and sending those packets over an IP network. At the destination, the packets are reassembled and decoded back into an analog signal for playback on the receiving party's earpiece.
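To make the "compress, split into packets, reassemble" step concrete, here is a minimal, RTP-like packetization sketch in TypeScript. Every name in it (VoicePacket, packetize, frameBytes) is a hypothetical illustration, not part of any standard VOIP library:

interface VoicePacket {
  seq: number;          // sequence number, used to reorder packets on arrival
  timestampMs: number;  // capture timestamp, used for jitter buffering
  payload: ArrayBuffer; // one frame of compressed audio
}

// Split an encoded audio buffer into fixed-size frames ready to send over an IP network.
function packetize(data: ArrayBuffer, frameBytes: number, startSeq: number): VoicePacket[] {
  const packets: VoicePacket[] = [];
  for (let offset = 0, seq = startSeq; offset < data.byteLength; offset += frameBytes, seq++) {
    packets.push({
      seq,
      timestampMs: Date.now(),
      payload: data.slice(offset, Math.min(offset + frameBytes, data.byteLength))
    });
  }
  return packets;
}

// The receiving side sorts packets by seq, smooths timing with a jitter buffer,
// then decodes and plays the reassembled stream.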
A cellular call is a voice call carried over a mobile network operator's service. Phones and other devices connect through a series of base stations (cell towers), each covering a geographic area; together these areas form the honeycomb-like "cellular" network. Compared with VOIP calls over public Wi-Fi, cellular calls are generally more secure. However, they depend on mobile signal strength: with insufficient signal, or in a coverage dead zone, the call may drop.
How the Audio Call Feature Is Implemented
In an audio call, audio output and audio input must be handled at the same time. It is two-way communication: both parties need to hear each other and respond in real time. At the software level, that means handling audio rendering (output) and audio capture (input) separately, while keeping the two processes efficient and in sync. The sketch below outlines the overall data flow; the rest of this section then shows a concrete implementation on HarmonyOS.
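Before diving into the APIs, a compact sketch of the data flow may help; the two sections that follow implement each direction in detail. NetworkTransport, decode, encode, and callActive below are hypothetical placeholders for whatever codec and transport a real app would use; only the audio.* calls are HarmonyOS APIs:

import audio from '@ohos.multimedia.audio';

// Playback direction: network -> decode -> speaker.
async function playbackLoop(transport: NetworkTransport, renderer: audio.AudioRenderer) {
  while (callActive) {
    const packet = await transport.receive(); // the remote party's encoded audio
    const pcm: ArrayBuffer = decode(packet);  // hypothetical codec step
    await renderer.write(pcm);                // hand PCM to the renderer
  }
}

// Capture direction: microphone -> encode -> network.
async function captureLoop(transport: NetworkTransport, capturer: audio.AudioCapturer) {
  const size = await capturer.getBufferSize();
  while (callActive) {
    const pcm = await capturer.read(size, true); // one buffer of microphone PCM
    await transport.send(encode(pcm));           // hypothetical codec step
  }
}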
Using AudioRenderer to Implement Audio Playback in a Voice Call
import audio from '@ohos.multimedia.audio';
import fs from '@ohos.file.fs';

const TAG = 'VoiceCallDemoForAudioRenderer'; // log tag

export default class VoiceCallDemoForAudioRenderer {
  private renderModel: audio.AudioRenderer | undefined = undefined; // AudioRenderer instance
  private audioStreamInfo = {
    samplingRate: audio.AudioSamplingRate.SAMPLE_RATE_48000, // sampling rate: 48 kHz
    channels: audio.AudioChannel.CHANNEL_2, // two channels (stereo)
    sampleFormat: audio.AudioSampleFormat.SAMPLE_FORMAT_S16LE, // 16-bit little-endian samples
    encodingType: audio.AudioEncodingType.ENCODING_TYPE_RAW // raw (uncompressed) data
  }
  private audioRendererInfo = {
    content: audio.ContentType.CONTENT_TYPE_SPEECH, // content type: speech
    usage: audio.StreamUsage.STREAM_USAGE_VOICE_COMMUNICATION, // stream usage: voice communication
    rendererFlags: 0 // renderer flags: 0 is the default
  }
  private audioRendererOptions = {
    streamInfo: this.audioStreamInfo,
    rendererInfo: this.audioRendererInfo
  }
  // Initialize: create the instance and subscribe to events.
  init() {
    audio.createAudioRenderer(this.audioRendererOptions, (err, renderer) => { // create an AudioRenderer instance
      if (!err) {
        console.info(`${TAG}: creating AudioRenderer success`);
        this.renderModel = renderer;
        this.renderModel.on('stateChange', (state) => { // subscribe to state-change events
          if (state === audio.AudioState.STATE_PREPARED) {
            console.info('audio renderer state is: STATE_PREPARED');
          }
          if (state === audio.AudioState.STATE_RUNNING) {
            console.info('audio renderer state is: STATE_RUNNING');
          }
        });
        this.renderModel.on('markReach', 1000, (position) => { // subscribe to markReach: fires once when 1000 frames have been rendered
          if (position === 1000) {
            console.info('ON Triggered successfully');
          }
        });
      } else {
        console.error(`${TAG}: creating AudioRenderer failed, error: ${err.message}`);
      }
    });
  }
  // Run one pass of audio rendering.
  async start() {
    let stateGroup = [audio.AudioState.STATE_PREPARED, audio.AudioState.STATE_PAUSED, audio.AudioState.STATE_STOPPED];
    if (stateGroup.indexOf(this.renderModel.state) === -1) { // rendering can only be started from STATE_PREPARED, STATE_PAUSED, or STATE_STOPPED
      console.error(TAG + ': start failed');
      return;
    }
    await this.renderModel.start(); // start rendering
    const bufferSize = await this.renderModel.getBufferSize(); // query the buffer size
    // Reading from an audio file is only an example. In real voice-call development,
    // the data to read is the audio transmitted by the remote party.
    let context = getContext(this);
    let path = context.filesDir;
    const filePath = path + '/voice_call_data.wav'; // audio file path
    let file = fs.openSync(filePath, fs.OpenMode.READ_ONLY); // open the audio file
    let stat = await fs.stat(filePath); // query file metadata
    let buf = new ArrayBuffer(bufferSize); // allocate the transfer buffer
    let len = Math.ceil(stat.size / bufferSize); // number of reads needed to consume the whole file
    for (let i = 0; i < len; i++) {
      let options = {
        offset: i * bufferSize,
        length: bufferSize
      };
      let readSize = await fs.read(file.fd, buf, options); // read one chunk of the audio file
      // buf now holds the audio data to be written. It can be preprocessed before
      // calling AudioRenderer.write() to implement custom playback behavior.
      let writeSize = await new Promise((resolve, reject) => {
        this.renderModel.write(buf, (err, writeSize) => { // write into the renderer's buffer
          if (err) {
            reject(err);
          } else {
            resolve(writeSize);
          }
        });
      });
      if (this.renderModel.state === audio.AudioState.STATE_RELEASED) { // the renderer has been released; stop rendering
        fs.close(file);
        await this.renderModel.stop();
      }
      if (this.renderModel.state === audio.AudioState.STATE_RUNNING) {
        if (i === len - 1) { // the file has been fully read; stop rendering
          fs.close(file);
          await this.renderModel.stop();
        }
      }
    }
  }
  // Pause rendering.
  async pause() {
    // Pausing is only possible from STATE_RUNNING.
    if (this.renderModel.state !== audio.AudioState.STATE_RUNNING) {
      console.info('Renderer is not running');
      return;
    }
    await this.renderModel.pause(); // pause rendering
    if (this.renderModel.state === audio.AudioState.STATE_PAUSED) {
      console.info('Renderer is paused.');
    } else {
      console.error('Pausing renderer failed.');
    }
  }
  // Stop rendering.
  async stop() {
    // Stopping is only possible from STATE_RUNNING or STATE_PAUSED.
    if (this.renderModel.state !== audio.AudioState.STATE_RUNNING && this.renderModel.state !== audio.AudioState.STATE_PAUSED) {
      console.info('Renderer is not running or paused.');
      return;
    }
    await this.renderModel.stop(); // stop rendering
    if (this.renderModel.state === audio.AudioState.STATE_STOPPED) {
      console.info('Renderer stopped.');
    } else {
      console.error('Stopping renderer failed.');
    }
  }
  // Destroy the instance and release its resources.
  async release() {
    // release() is only valid while the renderer is not already in STATE_RELEASED.
    if (this.renderModel.state === audio.AudioState.STATE_RELEASED) {
      console.info('Renderer already released');
      return;
    }
    await this.renderModel.release(); // release resources
    if (this.renderModel.state === audio.AudioState.STATE_RELEASED) {
      console.info('Renderer released');
    } else {
      console.error('Renderer release failed.');
    }
  }
}
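Assuming the class above is driven from a page or ability that supplies a valid context, a typical lifecycle might look like the following sketch (playRemoteAudio is a hypothetical caller, not part of the class):

async function playRemoteAudio() {
  const player = new VoiceCallDemoForAudioRenderer();
  player.init(); // creates the renderer and subscribes to events (completes asynchronously)
  // In real code, wait for the stateChange callback to report STATE_PREPARED before starting.
  await player.start();   // renders until the file (or the remote stream) is exhausted
  await player.release(); // frees the underlying audio resources
}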
Using AudioCapturer to Implement Audio Recording in a Voice Call
When using AudioCapturer to record the local side of the call, the capturer's configuration needs one specific setting: for a voice call, the source type must be SOURCE_TYPE_VOICE_COMMUNICATION. This tells the system that you intend to capture voice-call audio, not some other source such as media playback or system sounds.
import audio from '@ohos.multimedia.audio';
import fs from '@ohos.file.fs';

const TAG = 'VoiceCallDemoForAudioCapturer'; // log tag

export default class VoiceCallDemoForAudioCapturer {
  private audioCapturer: audio.AudioCapturer | undefined = undefined; // AudioCapturer instance
  private audioStreamInfo = {
    samplingRate: audio.AudioSamplingRate.SAMPLE_RATE_44100, // sampling rate: 44.1 kHz
    channels: audio.AudioChannel.CHANNEL_1, // mono
    sampleFormat: audio.AudioSampleFormat.SAMPLE_FORMAT_S16LE, // 16-bit little-endian samples
    encodingType: audio.AudioEncodingType.ENCODING_TYPE_RAW // raw (uncompressed) data
  }
  private audioCapturerInfo = {
    source: audio.SourceType.SOURCE_TYPE_VOICE_COMMUNICATION, // source type: voice call
    capturerFlags: 0 // capturer flags: 0 is the default
  }
  private audioCapturerOptions = {
    streamInfo: this.audioStreamInfo,
    capturerInfo: this.audioCapturerInfo
  }
  // Initialize: create the instance and subscribe to events.
  init() {
    audio.createAudioCapturer(this.audioCapturerOptions, (err, capturer) => { // create an AudioCapturer instance
      if (err) {
        console.error(`Invoke createAudioCapturer failed, code is ${err.code}, message is ${err.message}`);
        return;
      }
      console.info(`${TAG}: create AudioCapturer success`);
      this.audioCapturer = capturer;
      this.audioCapturer.on('markReach', 1000, (position) => { // subscribe to markReach: fires once when 1000 frames have been captured
        if (position === 1000) {
          console.info('ON Triggered successfully');
        }
      });
      this.audioCapturer.on('periodReach', 2000, (position) => { // subscribe to periodReach: fires each time another 2000 frames have been captured
        if (position === 2000) {
          console.info('ON Triggered successfully');
        }
      });
    });
  }
  // Run one pass of audio capture.
  async start() {
    let stateGroup = [audio.AudioState.STATE_PREPARED, audio.AudioState.STATE_PAUSED, audio.AudioState.STATE_STOPPED];
    if (stateGroup.indexOf(this.audioCapturer.state) === -1) { // capture can only be started from STATE_PREPARED, STATE_PAUSED, or STATE_STOPPED
      console.error(`${TAG}: start failed`);
      return;
    }
    await this.audioCapturer.start(); // start capturing
    // Writing the captured audio to a file is only an example. In real voice-call development,
    // the locally captured audio must be encoded, packetized, and sent to the remote party over the network.
    let context = getContext(this);
    const path = context.filesDir + '/voice_call_data.wav'; // path where the captured audio is stored
    let file = fs.openSync(path, fs.OpenMode.READ_WRITE | fs.OpenMode.CREATE); // create the file if it does not exist
    let fd = file.fd;
    let bufferSize = await this.audioCapturer.getBufferSize(); // the buffer size does not change, so query it once before the loop
    let numBuffersToCapture = 150; // capture 150 buffers in total
    for (let count = 0; count < numBuffersToCapture; count++) {
      let buffer = await this.audioCapturer.read(bufferSize, true); // read one buffer of captured data
      if (buffer === undefined) {
        console.error(`${TAG}: read buffer failed`);
      } else {
        let options = {
          offset: count * bufferSize,
          length: bufferSize
        };
        let number = fs.writeSync(fd, buffer, options); // append the audio data to the file
        console.info(`${TAG}: write data: ${number}`);
      }
    }
  }
  // Stop capturing.
  async stop() {
    // Stopping is only possible from STATE_RUNNING or STATE_PAUSED.
    if (this.audioCapturer.state !== audio.AudioState.STATE_RUNNING && this.audioCapturer.state !== audio.AudioState.STATE_PAUSED) {
      console.info('Capturer is not running or paused');
      return;
    }
    await this.audioCapturer.stop(); // stop capturing
    if (this.audioCapturer.state === audio.AudioState.STATE_STOPPED) {
      console.info('Capturer stopped');
    } else {
      console.error('Capturer stop failed');
    }
  }
  // Destroy the instance and release its resources.
  async release() {
    // release() is only valid while the capturer is in neither STATE_RELEASED nor STATE_NEW.
    if (this.audioCapturer.state === audio.AudioState.STATE_RELEASED || this.audioCapturer.state === audio.AudioState.STATE_NEW) {
      console.info('Capturer already released');
      return;
    }
    await this.audioCapturer.release(); // release resources
    if (this.audioCapturer.state === audio.AudioState.STATE_RELEASED) {
      console.info('Capturer released');
    } else {
      console.error('Capturer release failed');
    }
  }
}
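To put the two halves together: in a real call, both objects run at the same time with the network in between. The following sketch shows one plausible wiring; VoiceCallSession and the import paths are assumptions made for illustration, not HarmonyOS APIs:

import VoiceCallDemoForAudioRenderer from './VoiceCallDemoForAudioRenderer'; // hypothetical file layout
import VoiceCallDemoForAudioCapturer from './VoiceCallDemoForAudioCapturer'; // hypothetical file layout

// Hypothetical session object tying playback and capture together.
class VoiceCallSession {
  private player = new VoiceCallDemoForAudioRenderer();   // plays the remote party's audio
  private recorder = new VoiceCallDemoForAudioCapturer(); // records the local audio

  startCall() {
    this.player.init();
    this.recorder.init();
    // In real code, wait until both instances report STATE_PREPARED first.
    // The demo start() methods stream in a loop, so let the two directions run concurrently.
    Promise.all([this.player.start(), this.recorder.start()]);
  }

  async endCall() {
    await this.recorder.stop();
    await this.player.stop();
    await this.recorder.release();
    await this.player.release();
  }
}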