abstract boolean | onRecordAudioFrame (int type, int samplesPerChannel, int bytesPerSample, int channels, int samplesPerSec, ByteBuffer buffer, long renderTimeMs, int avsync_type) |
abstract boolean | onPlaybackAudioFrame (int type, int samplesPerChannel, int bytesPerSample, int channels, int samplesPerSec, ByteBuffer buffer, long renderTimeMs, int avsync_type) |
abstract boolean | onMixedAudioFrame (int type, int samplesPerChannel, int bytesPerSample, int channels, int samplesPerSec, ByteBuffer buffer, long renderTimeMs, int avsync_type) |
abstract boolean | onPlaybackAudioFrameBeforeMixing (int userId, int type, int samplesPerChannel, int bytesPerSample, int channels, int samplesPerSec, ByteBuffer buffer, long renderTimeMs, int avsync_type) |
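
These callbacks are normally implemented together on a single observer object and deliver raw audio frames at different stages of the audio pipeline. The following is a minimal pass-through sketch that implements only the four callbacks documented on this page; the real interface may declare additional members, and registering the observer with the engine is not covered here.

import java.nio.ByteBuffer;

import io.agora.rtc2.IAudioFrameObserver;

// Partial sketch of an observer that keeps every frame unchanged. Only the four
// callbacks documented on this page are shown; the real interface may declare
// additional members that a concrete class also has to implement.
public class PassThroughAudioFrameObserver implements IAudioFrameObserver {

    @Override
    public boolean onRecordAudioFrame(int type, int samplesPerChannel, int bytesPerSample,
                                      int channels, int samplesPerSec, ByteBuffer buffer,
                                      long renderTimeMs, int avsync_type) {
        return true; // keep the recorded frame
    }

    @Override
    public boolean onPlaybackAudioFrame(int type, int samplesPerChannel, int bytesPerSample,
                                        int channels, int samplesPerSec, ByteBuffer buffer,
                                        long renderTimeMs, int avsync_type) {
        return true; // keep the playback frame
    }

    @Override
    public boolean onMixedAudioFrame(int type, int samplesPerChannel, int bytesPerSample,
                                     int channels, int samplesPerSec, ByteBuffer buffer,
                                     long renderTimeMs, int avsync_type) {
        return true; // keep the mixed frame
    }

    @Override
    public boolean onPlaybackAudioFrameBeforeMixing(int userId, int type, int samplesPerChannel,
                                                    int bytesPerSample, int channels, int samplesPerSec,
                                                    ByteBuffer buffer, long renderTimeMs, int avsync_type) {
        return true; // keep the per-user frame received before mixing
    }
}
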
◆ onRecordAudioFrame()
abstract boolean io.agora.rtc2.IAudioFrameObserver.onRecordAudioFrame (int type, int samplesPerChannel, int bytesPerSample, int channels, int samplesPerSec, ByteBuffer buffer, long renderTimeMs, int avsync_type)
Occurs when the recorded audio frame is received.
- Parameters
type | The audio frame type. |
samplesPerChannel | The number of samples per channel in the audio frame. |
bytesPerSample | The number of bytes per audio sample. For example, each PCM audio sample usually takes up 16 bits (2 bytes). |
channels | The number of audio channels. If the channel uses stereo, the data is interleaved. |
samplesPerSec | The number of samples per channel per second in the audio frame. |
buffer | The audio frame payload. |
renderTimeMs | The render timestamp in ms. |
avsync_type | The audio/video sync type. |
- Returns
- true: The recorded audio frame is valid and is encoded and sent.
- false: The recorded audio frame is invalid and is not encoded or sent.
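
A hedged usage sketch for this callback, inside an observer class like the one at the top of this page: it scans the recorded frame for a peak sample value before letting the SDK encode and send it. The 16-bit little-endian PCM layout and the Android log call are assumptions made for illustration; the parameter list itself is taken from the signature above.

@Override
public boolean onRecordAudioFrame(int type, int samplesPerChannel, int bytesPerSample,
                                  int channels, int samplesPerSec, ByteBuffer buffer,
                                  long renderTimeMs, int avsync_type) {
    // Work on a duplicate so the original buffer position is left untouched.
    ByteBuffer pcm = buffer.duplicate().order(java.nio.ByteOrder.LITTLE_ENDIAN);
    int peak = 0;
    if (bytesPerSample == 2) {               // assumes 16-bit PCM samples
        while (pcm.remaining() >= 2) {       // samples are interleaved when channels == 2
            peak = Math.max(peak, Math.abs((int) pcm.getShort()));
        }
    }
    android.util.Log.d("AudioObserver", "record peak=" + peak);
    return true;                             // true: keep this frame
}
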
◆ onPlaybackAudioFrame()
abstract boolean io.agora.rtc2.IAudioFrameObserver.onPlaybackAudioFrame (int type, int samplesPerChannel, int bytesPerSample, int channels, int samplesPerSec, ByteBuffer buffer, long renderTimeMs, int avsync_type)
Occurs when the playback audio frame is received.
- Parameters
type | The audio frame type. |
samplesPerChannel | The number of samples per channel in the audio frame. |
bytesPerSample | The number of bytes per audio sample. For example, each PCM audio sample usually takes up 16 bits (2 bytes). |
channels | The number of audio channels. If the channel uses stereo, the data is interleaved. |
samplesPerSec | The number of samples per channel per second in the audio frame. |
buffer | The audio frame payload. |
renderTimeMs | The render timestamp in ms. |
avsync_type | The audio/video sync type. |
- Returns
- true: The playback audio frame is valid and is encoded and sent.
- false: The playback audio frame is invalid and is not encoded or sent.
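
As with the recorded frame, the playback frame can be inspected in place. The sketch below computes an RMS level, for example to drive a level meter; the 16-bit little-endian PCM layout and the Android log call are illustrative assumptions, and the method belongs inside an observer class like the one at the top of this page.

@Override
public boolean onPlaybackAudioFrame(int type, int samplesPerChannel, int bytesPerSample,
                                    int channels, int samplesPerSec, ByteBuffer buffer,
                                    long renderTimeMs, int avsync_type) {
    ByteBuffer pcm = buffer.duplicate().order(java.nio.ByteOrder.LITTLE_ENDIAN);
    long energy = 0;
    int sampleCount = 0;
    while (bytesPerSample == 2 && pcm.remaining() >= 2) { // assumes 16-bit PCM samples
        long s = pcm.getShort();
        energy += s * s;
        sampleCount++;
    }
    double rms = sampleCount > 0 ? Math.sqrt((double) energy / sampleCount) : 0.0;
    android.util.Log.d("AudioObserver", "playback rms=" + rms);
    return true; // keep the playback frame
}
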
◆ onMixedAudioFrame()
abstract boolean io.agora.rtc2.IAudioFrameObserver.onMixedAudioFrame (int type, int samplesPerChannel, int bytesPerSample, int channels, int samplesPerSec, ByteBuffer buffer, long renderTimeMs, int avsync_type)
Occurs when the mixed playback audio frame is received.
- Parameters
type | The audio frame type. |
samplesPerChannel | The number of samples per channel in the audio frame. |
bytesPerSample | The number of bytes per audio sample. For example, each PCM audio sample usually takes up 16 bits (2 bytes). |
channels | The number of audio channels. If the channel uses stereo, the data is interleaved. |
samplesPerSec | The number of samples per channel per second in the audio frame. |
buffer | The audio frame payload. |
renderTimeMs | The render timestamp in ms. |
avsync_type | The audio/video sync type. |
- Returns
- true: The mixed audio data is valid and is encoded and sent.
- false: The mixed audio data is invalid and is not encoded or sent.
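
Because this callback delivers the mixed playback stream in one place, a common use is dumping it to a raw PCM file for offline inspection. The sketch below assumes a hypothetical pcmDumpChannel field (a java.nio.channels.WritableByteChannel opened elsewhere); that field is not part of the SDK, and the method belongs inside an observer class like the one at the top of this page.

// Hypothetical field on the observer class, opened elsewhere:
// private java.nio.channels.WritableByteChannel pcmDumpChannel;

@Override
public boolean onMixedAudioFrame(int type, int samplesPerChannel, int bytesPerSample,
                                 int channels, int samplesPerSec, ByteBuffer buffer,
                                 long renderTimeMs, int avsync_type) {
    try {
        // Write a duplicate so the original buffer position is left untouched.
        pcmDumpChannel.write(buffer.duplicate());
    } catch (java.io.IOException e) {
        android.util.Log.w("AudioObserver", "failed to dump mixed frame", e);
    }
    return true; // keep the mixed frame
}
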
◆ onPlaybackAudioFrameBeforeMixing()
abstract boolean io.agora.rtc2.IAudioFrameObserver.onPlaybackAudioFrameBeforeMixing (int userId, int type, int samplesPerChannel, int bytesPerSample, int channels, int samplesPerSec, ByteBuffer buffer, long renderTimeMs, int avsync_type)
Occurs when the playback audio frame of a remote user is received, before mixing.
- Parameters
userId | The ID of the remote user whose audio frame is received. |
type | The audio frame type. |
samplesPerChannel | The number of samples per channel in the audio frame. |
bytesPerSample | The number of bytes per audio sample. For example, each PCM audio sample usually takes up 16 bits (2 bytes). |
channels | The number of audio channels. If the channel uses stereo, the data is interleaved. |
samplesPerSec | The number of samples per channel per second in the audio frame. |
buffer | The audio frame payload. |
renderTimeMs | The render timestamp in ms. |
avsync_type | The audio/video sync type. |
- Returns
- true: The playback audio frame before mixing is valid and is encoded and sent.
- false: The playback audio frame before mixing is invalid and is not encoded or sent.
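
This is the only callback keyed by user, so per-user bookkeeping naturally lives here. The sketch below records the render timestamp of the latest frame received from each remote user; the lastFrameTimeByUser map is a hypothetical field, not part of the SDK, and the method belongs inside an observer class like the one at the top of this page.

// Hypothetical field on the observer class:
// private final java.util.concurrent.ConcurrentHashMap<Integer, Long> lastFrameTimeByUser =
//         new java.util.concurrent.ConcurrentHashMap<>();

@Override
public boolean onPlaybackAudioFrameBeforeMixing(int userId, int type, int samplesPerChannel,
                                                int bytesPerSample, int channels, int samplesPerSec,
                                                ByteBuffer buffer, long renderTimeMs, int avsync_type) {
    // Remember when the last before-mixing frame arrived from this remote user.
    lastFrameTimeByUser.put(userId, renderTimeMs);
    return true; // keep the per-user frame
}
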