功能很簡單,大致流程為:
需要的前置知識有:
大致流程和普通的解碼類似,在編寫視頻播放器這個功能時,需要注意的地方有兩個:
/**
 * Callbacks fired from the decoding thread as output buffers are produced.
 * Video frames and audio samples are routed to separate methods (see the
 * drain loop: MEDIA_TYPE_VIDEO goes to onImageDecoded, everything else to
 * onSampleDecoded).
 */
public interface OnDecodeListener {
/** One decoded video frame (raw bytes copied out of the codec output buffer). */
void onImageDecoded(byte[] data);
/** One decoded audio buffer (raw PCM bytes). */
void onSampleDecoded(byte[] data);
/** Decoding reached end of stream. */
void onDecodeEnded();
}
複製代碼
也可以加一個 onDecodeError() 的接口,視需要擴展即可。
由於視頻數據量很大,不可能把解碼後的 YUV 數據保存在一個隊列裏,再慢慢拿出來使用 OpenGL 渲染(很容易就 OOM 了),因此必須控制解碼的速率,最簡單的控制方式是和播放同步,如下所示:
// Copy one decoded buffer out of the MediaCodec output buffer array.
ByteBuffer outputBuffer = outputBuffers[outIndex];
outputBuffer.position(bufferInfo.offset);
outputBuffer.limit(bufferInfo.offset + bufferInfo.size);
byte[] data = new byte[bufferInfo.size];
outputBuffer.get(data);
// Throttle decoding to the presentation timeline so frames are handed out at
// playback rate instead of piling up in memory (OOM risk, see article text).
if (mIsDecodeWithPts) {
if (startTime == 0) {
// First frame: record the wall-clock origin; this frame is delivered immediately.
startTime = System.nanoTime();
} else {
// Microseconds of wall-clock time elapsed since the first frame.
passTime = (System.nanoTime() - startTime) / 1000;
if (passTime < bufferInfo.presentationTimeUs) {
// Ahead of schedule: sleep until this buffer's PTS is due.
TimeUnit.MICROSECONDS.sleep(bufferInfo.presentationTimeUs - passTime);
}
}
}
// Route the bytes to the matching callback: video frames vs. audio samples.
if (mediaType == HWCodec.MEDIA_TYPE_VIDEO && listener != null) {
listener.onImageDecoded(data);
} else if (listener != null) {
listener.onSampleDecoded(data);
}
複製代碼
和渲染紋理的流程類似,不同的地方在於需要轉換 YUV 數據為 RGB,而 YUV 數據又有 YUV420P、YUV420SP 等多種格式,因此在轉換 RGB 之前,需要統一 YUV 數據的格式,這裏使用的是 YUV420P。
YUV 數據格式之間的轉換可以自己寫,比如 YUV420SP 轉換為 YUV420P,只需要把最後的 U、V 數據分別逐個放入到一個數組裏即可,但考慮到視頻裁剪、旋轉,以及之後可能用到的各種 YUV 數據處理功能,因此這裏引入了一個 libyuv 的庫,使用非常簡單:
// Normalizes the frame held in |model| to planar I420 using libyuv.
// libyuv::ConvertToI420 argument order: source sample + byte size, destination
// Y/U/V planes with their strides, crop origin (0,0), source width/height,
// crop width/height (full frame here), rotation (none), and the source FourCC
// derived from model->pixelFormat.
// Caller owns the returned Yuv* (allocated with new).
Yuv* convertToI420(AVModel *model) {
...
Yuv *yuv = new Yuv(model->width, model->height);
ConvertToI420(model->image, (size_t) model->imageLen, yuv->bufY, yuv->strideY,
yuv->bufU, yuv->strideU, yuv->bufV, yuv->strideV,
0, 0, model->width, model->height, model->width, model->height,
kRotate0, getFourCC(model->pixelFormat));
return yuv;
}
複製代碼
AVModel、Yuv 是我自定義的兩個類,分別用於保存音視頻數據及相關信息、YUV 數據及相關信息,源碼可見 GitHub。
YUV 轉 RGB 的相關係數可上網查找,fragment shader 示例如下:
#version 300 es
// Fragment shader: samples the three I420 planes and converts YUV -> RGB.
precision highp float;
uniform sampler2D yTexture;
uniform sampler2D uTexture;
uniform sampler2D vTexture;
in vec2 vTexCoord;
layout(location=0) out vec4 fragColor;
void main() {
// Each plane is uploaded as a single-channel (luminance) texture, so the
// value lives in the .r component.
highp float y = texture(yTexture, vTexCoord).r;
// Chroma is stored biased by 0.5; recentre it around zero before mixing.
highp float u = texture(uTexture, vTexCoord).r - 0.5;
highp float v = texture(vTexture, vTexCoord).r - 0.5;
// BT.601-style YUV -> RGB coefficients (see article text).
highp float r = y + 1.402 * v;
highp float g = y - 0.344 * u - 0.714 * v;
highp float b = y + 1.772 * u;
fragColor = vec4(r, g, b, 1.0);
}
複製代碼
OpenGL 關鍵代碼如下:
bool YuvRenderer::doInit() {
std::string *vShader = readShaderFromAsset(mAssetManager, "yuv_renderer.vert");
std::string *fShader = readShaderFromAsset(mAssetManager, "yuv_renderer.frag");
mProgram = loadProgram(vShader->c_str(), fShader->c_str());
mMatrixLoc = glGetUniformLocation(mProgram, "mMatrix");
mSamplerY = glGetUniformLocation(mProgram, "yTexture");
mSamplerU = glGetUniformLocation(mProgram, "uTexture");
mSamplerV = glGetUniformLocation(mProgram, "vTexture");
glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
// 生成三個紋理,分別用於裝載 Y、U、V 數據
glGenTextures(3, mTextures);
glBindTexture(GL_TEXTURE_2D, mTextures[0]);
glTexImage2D(GL_TEXTURE_2D, 0, GL_LUMINANCE, mTexWidth, mTexHeight, 0, GL_LUMINANCE,
GL_UNSIGNED_BYTE, 0);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glBindTexture(GL_TEXTURE_2D, mTextures[1]);
glTexImage2D(GL_TEXTURE_2D, 0, GL_LUMINANCE, mTexWidth / 2, mTexHeight / 2, 0, GL_LUMINANCE,
GL_UNSIGNED_BYTE, 0);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glBindTexture(GL_TEXTURE_2D, mTextures[2]);
glTexImage2D(GL_TEXTURE_2D, 0, GL_LUMINANCE, mTexWidth / 2, mTexHeight / 2, 0, GL_LUMINANCE,
GL_UNSIGNED_BYTE, 0);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
// 緩存頂點座標、紋理座標、索引數據到 VBO 中
glGenBuffers(3, mVboIds);
glBindBuffer(GL_ARRAY_BUFFER, mVboIds[0]);
glBufferData(GL_ARRAY_BUFFER, sizeof(VERTICES), VERTICES, GL_STATIC_DRAW);
glBindBuffer(GL_ARRAY_BUFFER, mVboIds[1]);
glBufferData(GL_ARRAY_BUFFER, sizeof(TEX_COORDS), TEX_COORDS, GL_STATIC_DRAW);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, mVboIds[2]);
glBufferData(GL_ELEMENT_ARRAY_BUFFER, sizeof(INDICES), INDICES, GL_STATIC_DRAW);
// 緩存 VBO 到 VAO 中
glGenVertexArrays(1, &mVaoId);
glBindVertexArray(mVaoId);
glBindBuffer(GL_ARRAY_BUFFER, mVboIds[0]);
glEnableVertexAttribArray(ATTRIB_POSITION);
glVertexAttribPointer(ATTRIB_POSITION, VERTEX_POS_SIZE, GL_FLOAT, GL_FALSE,
sizeof(GLfloat) * VERTEX_POS_SIZE, 0);
glBindBuffer(GL_ARRAY_BUFFER, mVboIds[1]);
glEnableVertexAttribArray(ATTRIB_TEX_COORD);
glVertexAttribPointer(ATTRIB_TEX_COORD, TEX_COORD_SIZE, GL_FLOAT, GL_FALSE,
sizeof(GLfloat) * TEX_COORD_SIZE, 0);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, mVboIds[2]);
glBindVertexArray(0);
glBindBuffer(GL_ARRAY_BUFFER, 0);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0);
glClearColor(1.0f, 1.0f, 1.0f, 1.0f);
delete vShader;
delete fShader;
return true;
}
// Draws the current YUV frame: uploads the three planes into their textures
// and renders the textured quad through the cached VAO.
void YuvRenderer::doDraw() {
    glViewport(0, 0, mWidth, mHeight);
    glClear(GL_COLOR_BUFFER_BIT);
    // Bail out before touching program/uniform/texture state when no frame has
    // been assigned yet (the original checked this only after glUseProgram and
    // the matrix upload, wasting GL state changes on every empty draw).
    if (!mYuv) {
        LOGW("YuvRenderer doDraw failed: yuv data have not assigned");
        return;
    }
    glUseProgram(mProgram);
    glUniformMatrix4fv(mMatrixLoc, 1, GL_FALSE, mMatrix);
    // Upload the Y plane (full resolution) and the U/V planes (half resolution)
    // to texture units 0-2 and point the samplers at them.
    glActiveTexture(GL_TEXTURE0);
    glBindTexture(GL_TEXTURE_2D, mTextures[0]);
    glTexImage2D(GL_TEXTURE_2D, 0, GL_LUMINANCE, mTexWidth, mTexHeight, 0, GL_LUMINANCE,
                 GL_UNSIGNED_BYTE, mYuv->bufY);
    glUniform1i(mSamplerY, 0);
    glActiveTexture(GL_TEXTURE1);
    glBindTexture(GL_TEXTURE_2D, mTextures[1]);
    glTexImage2D(GL_TEXTURE_2D, 0, GL_LUMINANCE, mTexWidth / 2, mTexHeight / 2, 0, GL_LUMINANCE,
                 GL_UNSIGNED_BYTE, mYuv->bufU);
    glUniform1i(mSamplerU, 1);
    glActiveTexture(GL_TEXTURE2);
    glBindTexture(GL_TEXTURE_2D, mTextures[2]);
    glTexImage2D(GL_TEXTURE_2D, 0, GL_LUMINANCE, mTexWidth / 2, mTexHeight / 2, 0, GL_LUMINANCE,
                 GL_UNSIGNED_BYTE, mYuv->bufV);
    glUniform1i(mSamplerV, 2);
    // Draw using the coordinates recorded in the VAO.
    glBindVertexArray(mVaoId);
    glDrawElements(GL_TRIANGLES, INDEX_NUMBER, GL_UNSIGNED_SHORT, 0);
    glBindVertexArray(0);
    glBindTexture(GL_TEXTURE_2D, 0);
}
複製代碼
初始化播放器:
// Creates and starts the OpenSL ES audio player: a 2-buffer Android simple
// buffer queue is the data source, the engine's output mix is the sink.
// Acquires the play, buffer-queue, optional effect-send and volume interfaces,
// registers playerCallback on the buffer queue, and switches the player to the
// PLAYING state. Returns false as soon as any OpenSL call fails (the failing
// result code is logged).
bool BQAudioPlayer::init() {
SLresult result;
// Two rotating buffers: double buffering driven by enqueueSample()/playerCallback().
SLDataLocator_AndroidSimpleBufferQueue locBufq = {SL_DATALOCATOR_ANDROIDSIMPLEBUFFERQUEUE, 2};
// channelMask: one bit per channel; 0 means SL_SPEAKER_FRONT_LEFT | SL_SPEAKER_FRONT_RIGHT.
// NOTE(review): mSampleFormat initializes both bitsPerSample and containerSize,
// and the OpenSL spec expresses samplesPerSec in milliHz — confirm the unit of
// mSampleRate against the caller.
SLDataFormat_PCM formatPcm = {SL_DATAFORMAT_PCM, (SLuint32) mChannels, mSampleRate,
(SLuint32) mSampleFormat, (SLuint32) mSampleFormat,
mChannels == 2 ? 0 : SL_SPEAKER_FRONT_CENTER,
SL_BYTEORDER_LITTLEENDIAN};
if (mSampleRate) {
// Redundant: samplesPerSec was already set from mSampleRate above; kept as-is.
formatPcm.samplesPerSec = mSampleRate;
}
SLDataSource audioSrc = {&locBufq, &formatPcm};
SLDataLocator_OutputMix locOutpuMix = {SL_DATALOCATOR_OUTPUTMIX, mAudioEngine->outputMixObj};
SLDataSink audioSink = {&locOutpuMix, nullptr};
// Effect-send is only usable at the default sample rate, so only 2 of the 3
// interfaces are requested when mSampleRate is non-zero (see mSampleRate ? 2 : 3).
const SLInterfaceID ids[3] = {SL_IID_BUFFERQUEUE, SL_IID_VOLUME, SL_IID_EFFECTSEND};
const SLboolean req[3] = {SL_BOOLEAN_TRUE, SL_BOOLEAN_TRUE, SL_BOOLEAN_TRUE};
result = (*mAudioEngine->engine)->CreateAudioPlayer(mAudioEngine->engine, &mPlayerObj,
&audioSrc, &audioSink,
mSampleRate ? 2 : 3, ids, req);
if (result != SL_RESULT_SUCCESS) {
LOGE("CreateAudioPlayer failed: %d", result);
return false;
}
// Realize synchronously; interfaces can only be fetched after Realize succeeds.
result = (*mPlayerObj)->Realize(mPlayerObj, SL_BOOLEAN_FALSE);
if (result != SL_RESULT_SUCCESS) {
LOGE("mPlayerObj Realize failed: %d", result);
return false;
}
result = (*mPlayerObj)->GetInterface(mPlayerObj, SL_IID_PLAY, &mPlayer);
if (result != SL_RESULT_SUCCESS) {
LOGE("mPlayerObj GetInterface failed: %d", result);
return false;
}
result = (*mPlayerObj)->GetInterface(mPlayerObj, SL_IID_BUFFERQUEUE, &mBufferQueue);
if (result != SL_RESULT_SUCCESS) {
LOGE("mPlayerObj GetInterface failed: %d", result);
return false;
}
// playerCallback fires each time a queued buffer finishes playing; it pairs
// with the mutex handshake in enqueueSample().
result = (*mBufferQueue)->RegisterCallback(mBufferQueue, playerCallback, this);
if (result != SL_RESULT_SUCCESS) {
LOGE("mPlayerObj RegisterCallback failed: %d", result);
return false;
}
mEffectSend = nullptr;
if (mSampleRate == 0) {
// Only requested above when using the default sample rate (3 interfaces).
result = (*mPlayerObj)->GetInterface(mPlayerObj, SL_IID_EFFECTSEND, &mEffectSend);
if (result != SL_RESULT_SUCCESS) {
LOGE("mPlayerObj GetInterface failed: %d", result);
return false;
}
}
result = (*mPlayerObj)->GetInterface(mPlayerObj, SL_IID_VOLUME, &mVolume);
if (result != SL_RESULT_SUCCESS) {
LOGE("mPlayerObj GetInterface failed: %d", result);
return false;
}
// Start playing; actual sound begins once buffers are enqueued.
result = (*mPlayer)->SetPlayState(mPlayer, SL_PLAYSTATE_PLAYING);
if (result != SL_RESULT_SUCCESS) {
LOGE("mPlayerObj SetPlayState failed: %d", result);
return false;
}
return true;
}
複製代碼
以後只須要把 PCM 入隊便可:
// Invoked by OpenSL after one queued audio buffer finishes playing; releases
// the mutex so enqueueSample() may copy and submit the next buffer.
void playerCallback(SLAndroidSimpleBufferQueueItf bq, void *context) {
    // Named cast instead of the original C-style cast.
    auto *player = static_cast<BQAudioPlayer *>(context);
    assert(bq == player->mBufferQueue);
    pthread_mutex_unlock(&player->mMutex);
}
void BQAudioPlayer::enqueueSample(void *data, size_t length) {
// 必須等待一幀音頻播放完畢後才能夠 Enqueue 第二幀音頻
pthread_mutex_lock(&mMutex);
if (mBufSize < length) {
mBufSize = length;
if (mBuffers[0]) {
delete[] mBuffers[0];
}
if (mBuffers[1]) {
delete[] mBuffers[1];
}
mBuffers[0] = new uint8_t[mBufSize];
mBuffers[1] = new uint8_t[mBufSize];
}
memcpy(mBuffers[mIndex], data, length);
(*mBufferQueue)->Enqueue(mBufferQueue, mBuffers[mIndex], length);
mIndex = 1 - mIndex;
}
複製代碼
結束播放:
// Tears down the player under the lock: destroys the OpenSL player object
// (which invalidates every interface obtained from it), frees the engine and
// the sample buffers, then destroys the mutex.
void BQAudioPlayer::release() {
    pthread_mutex_lock(&mMutex);
    if (mPlayerObj) {
        (*mPlayerObj)->Destroy(mPlayerObj);
        // All interfaces below were derived from mPlayerObj and are now stale.
        mPlayerObj = nullptr;
        mPlayer = nullptr;
        mBufferQueue = nullptr;
        mEffectSend = nullptr;
        mVolume = nullptr;
    }
    // delete / delete[] on nullptr are no-ops, so no null checks are needed.
    delete mAudioEngine;
    mAudioEngine = nullptr;
    for (int i = 0; i < 2; ++i) {
        delete[] mBuffers[i];
        mBuffers[i] = nullptr;
    }
    pthread_mutex_unlock(&mMutex);
    pthread_mutex_destroy(&mMutex);
}
複製代碼
相對 OpenSL,AudioTrack 代碼量少不少,設置 AudioTrack:
// Creates a streaming AudioTrack matching the decoded PCM configuration.
private void setupAudioTrack() {
    // Querying the real sample format needs a higher API level, so
    // ENCODING_PCM_16BIT is assumed throughout.
    final int channelLayout = mChannels == 1
            ? AudioFormat.CHANNEL_OUT_MONO
            : AudioFormat.CHANNEL_OUT_STEREO;
    final int minBufferSize = AudioTrack.getMinBufferSize(
            mSampleRate, channelLayout, AudioFormat.ENCODING_PCM_16BIT);
    mAudioTrack = new AudioTrack(AudioManager.STREAM_MUSIC, mSampleRate, channelLayout,
            AudioFormat.ENCODING_PCM_16BIT, minBufferSize, AudioTrack.MODE_STREAM);
}
複製代碼
播放 PCM 數據:
/**
 * Feeds one decoded PCM buffer to the AudioTrack.
 * For MODE_STREAM the track is started once and then fed via write();
 * the original called play() after every write(), and would NPE if the
 * track had already been released.
 */
@Override
public void onSampleDecoded(byte[] data) {
    if (!mIsPlaying || mAudioTrack == null) {
        return;
    }
    if (mAudioTrack.getPlayState() != AudioTrack.PLAYSTATE_PLAYING) {
        mAudioTrack.play();
    }
    // Blocking write: paces this callback to the audio playback rate.
    mAudioTrack.write(data, 0, data.length);
}
複製代碼
結束播放:
// Stops playback and releases the AudioTrack; safe to call repeatedly.
private void releaseAudioTrack() {
    if (mAudioTrack == null) {
        return;
    }
    mAudioTrack.stop();
    mAudioTrack.release();
    mAudioTrack = null;
}
複製代碼
以上,一款簡單的視頻播放器就完成了,固然還有不少細節沒有處理,有興趣的能夠參考 ijkplayer 自行完善。
源碼已上傳到 GitHub。