我自己花了點時間實現了一個使用FFmpeg將Camera2視頻數據推送到RTMP服務的簡單Demo,在這裏分享下,裏面用到的知識不少都是以前博客中用到的,Camera2、YUV數據、FFmpeg編解碼等,難度不大。
定義了三個JNI方法:
/**
 * Thin JNI facade over the native "ffmpeg-handler" library.
 * Exposes the three native entry points used for RTMP push:
 * init → pushCameraData (per frame) → close.
 */
public class FFmpegHandler {

    static {
        // Load the native implementation of the JNI methods declared below.
        System.loadLibrary("ffmpeg-handler");
    }

    /** Initialization-on-demand holder: instance is created lazily, thread-safe. */
    private static class Holder {
        private static final FFmpegHandler INSTANCE = new FFmpegHandler();
    }

    private FFmpegHandler() {
        // Singleton: obtain via getInstance().
    }

    /** @return the process-wide FFmpegHandler instance. */
    public static FFmpegHandler getInstance() {
        return Holder.INSTANCE;
    }

    /** Initialize the native encoder and connect the RTMP output stream. */
    public native int init(String outUrl);

    /** Push one video frame, passing the Y, U and V planes separately. */
    public native int pushCameraData(byte[] buffer, int ylen, byte[] ubuffer, int ulen, byte[] vbuffer, int vlen);

    /** Close the stream and release native resources. */
    public native int close();
}
複製代碼
具體使用可查看《Android音視頻(一) Camera2 API採集數據》。
將ImageReader作爲預覽請求的Target之一,這樣咱們就能夠將拿到的預覽數據在onImageAvailable中進行處理並推送。
// Create an ImageReader that receives 640x480 YUV_420_888 preview frames;
// maxImages = 1, so only one frame is held at a time (dropped frames are acceptable).
mImageReader = ImageReader.newInstance(640, 480,ImageFormat.YUV_420_888, 1);
// Deliver frames to mOnImageAvailableListener on the background handler thread.
mImageReader.setOnImageAvailableListener(mOnImageAvailableListener, mBackgroundHandler);
複製代碼
// Route the preview both to the on-screen surface and to the ImageReader,
// so each frame can be displayed AND captured for encoding.
Surface imageSurface = mImageReader.getSurface();
mPreviewRequestBuilder = mCameraDevice.createCaptureRequest(CameraDevice.TEMPLATE_PREVIEW);
mPreviewRequestBuilder.addTarget(surface);
mPreviewRequestBuilder.addTarget(imageSurface);
複製代碼
將獲取的Image數據解析爲YUV數據,Y、U、V數據分別存儲。具體請看《YUV數據格式與YUV_420_888》。
目前這塊暫時這樣寫着,網上的博客都比較舊了,有點不太合適,我想應該還會有更好的方法,後面再作優化。(或者這塊你有什麼好的處理方法,歡迎留言。)
// Receives each preview frame from the ImageReader, repacks the YUV_420_888
// planes into tightly packed Y/U/V byte arrays (I420 layout), and hands them
// to the native FFmpeg layer for encoding and RTMP push.
private final ImageReader.OnImageAvailableListener mOnImageAvailableListener
= new ImageReader.OnImageAvailableListener() {
@Override
public void onImageAvailable(ImageReader reader) {
// Take only the latest frame; any older queued frame is discarded.
Image image = reader.acquireLatestImage();
if (image == null) {
return;
}
final Image.Plane[] planes = image.getPlanes();
int width = image.getWidth();
int height = image.getHeight();
// Destination buffers: Y is full resolution, U/V are quarter size (4:2:0).
byte[] yBytes = new byte[width * height];
byte uBytes[] = new byte[width * height / 4];
byte vBytes[] = new byte[width * height / 4];
// Write positions into the destination arrays.
int dstIndex = 0;
int uIndex = 0;
int vIndex = 0;
int pixelsStride, rowStride;
for (int i = 0; i < planes.length; i++) {
pixelsStride = planes[i].getPixelStride();
rowStride = planes[i].getRowStride();
ByteBuffer buffer = planes[i].getBuffer();
// If pixelStride == 2 the chroma planes are interleaved: typically the Y
// buffer length is width*height and each U/V buffer is width*height/2 - 1,
// with only every other byte carrying valid data for this plane.
byte[] bytes = new byte[buffer.capacity()];
buffer.get(bytes);
int srcIndex = 0;
if (i == 0) {
// Plane 0 is Y: contiguous per row; copy a row at a time and skip
// any padding bytes between rows (rowStride may exceed width).
for (int j = 0; j < height; j++) {
System.arraycopy(bytes, srcIndex, yBytes, dstIndex, width);
srcIndex += rowStride;
dstIndex += width;
}
} else if (i == 1) {
// Plane 1 is U: step by pixelStride within each row, then skip the
// remainder of the row according to how many bytes the row consumed.
for (int j = 0; j < height / 2; j++) {
for (int k = 0; k < width / 2; k++) {
uBytes[uIndex++] = bytes[srcIndex];
srcIndex += pixelsStride;
}
if (pixelsStride == 2) {
// Interleaved chroma: the inner loop consumed `width` bytes.
srcIndex += rowStride - width;
} else if (pixelsStride == 1) {
// Planar chroma: the inner loop consumed `width/2` bytes.
srcIndex += rowStride - width / 2;
}
}
} else if (i == 2) {
// Plane 2 is V: identical walk to the U plane.
for (int j = 0; j < height / 2; j++) {
for (int k = 0; k < width / 2; k++) {
vBytes[vIndex++] = bytes[srcIndex];
srcIndex += pixelsStride;
}
if (pixelsStride == 2) {
srcIndex += rowStride - width;
} else if (pixelsStride == 1) {
srcIndex += rowStride - width / 2;
}
}
}
}
// Hand the packed planes to the native (FFmpeg) layer for encoding/push.
FFmpegHandler.getInstance().pushCameraData(yBytes, yBytes.length, uBytes, uBytes.length, vBytes, vBytes.length);
// NOTE(review): close() is not in a finally block — an exception above would
// leak the Image and stall the ImageReader (maxImages == 1); consider try/finally.
image.close();
}
};
複製代碼
直播推送的過程總體上就是先將視頻數據編碼,再將編碼後的數據寫入數據流中推送給服務器的過程。
下面初始化的過程就是準備好數據編碼器和一條已經連上服務器的數據流:
/*
 * Initialize the H.264 encoder and open an FLV-over-RTMP output stream.
 *
 * url: RTMP address of the streaming server.
 * Returns 0 on success, -1 on any failure.
 *
 * Uses the file-scope globals width/height (set elsewhere) and fills
 * ofmt_ctx, pCodec, pCodecCtx, video_st, enc_pkt and the yuv_* size globals.
 *
 * NOTE(review): on failure this does not free partially created FFmpeg
 * contexts; callers are expected to call close() — TODO confirm.
 */
JNIEXPORT jint JNICALL Java_com_david_camerapush_ffmpeg_FFmpegHandler_init (JNIEnv *jniEnv, jobject instance, jstring url) {
    const char *out_url = (*jniEnv)->GetStringUTFChars(jniEnv, url, 0);
    int ret;

    /* Cache frame geometry for pushCameraData (I420: chroma is 1/4 of luma). */
    yuv_width = width;
    yuv_height = height;
    y_length = width * height;
    uv_length = width * height / 4;

    /* Output context for the FLV muxer (RTMP carries FLV). */
    ret = avformat_alloc_output_context2(&ofmt_ctx, NULL, "flv", out_url);
    if (ret < 0) {
        LOGE("avformat_alloc_output_context2 error");
        goto fail;  /* BUG FIX: original fell through and dereferenced NULL ofmt_ctx below */
    }

    /* H.264 encoder setup. */
    pCodec = avcodec_find_encoder(AV_CODEC_ID_H264);
    if (!pCodec) {
        LOGE("Can not find encoder!\n");
        goto fail;
    }
    pCodecCtx = avcodec_alloc_context3(pCodec);
    if (!pCodecCtx) {
        LOGE("avcodec_alloc_context3 failed");  /* BUG FIX: allocation was unchecked */
        goto fail;
    }
    pCodecCtx->codec_id = pCodec->id;
    /* Input pixel format: planar YUV 4:2:0, matching the Java-side repacking. */
    pCodecCtx->pix_fmt = AV_PIX_FMT_YUV420P;
    pCodecCtx->codec_type = AVMEDIA_TYPE_VIDEO;
    /* Target frame size in pixels. */
    pCodecCtx->width = width;
    pCodecCtx->height = height;
    /* 15 fps; time base is the reciprocal of the frame rate. */
    pCodecCtx->framerate = (AVRational) {15, 1};
    pCodecCtx->time_base = (AVRational) {1, 15};
    /* Target bitrate (bits/s) and keyframe interval. */
    pCodecCtx->bit_rate = 400000;
    pCodecCtx->gop_size = 50;
    /* Some formats want stream headers to be separate. */
    if (ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER)
        pCodecCtx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
    /* x264 rate-control parameters: quantizer curve compression and QP range. */
    pCodecCtx->qcompress = 0.6;
    pCodecCtx->qmin = 10;
    pCodecCtx->qmax = 51;
    /* No B-frames: lower latency at the cost of compression efficiency. */
    pCodecCtx->max_b_frames = 0;

    AVDictionary *param = NULL;
    if (pCodecCtx->codec_id == AV_CODEC_ID_H264) {
        av_dict_set(&param, "preset", "superfast", 0);  /* x264 speed/quality preset */
        av_dict_set(&param, "tune", "zerolatency", 0);  /* disable lookahead/frame delay */
    }
    ret = avcodec_open2(pCodecCtx, pCodec, &param);
    av_dict_free(&param);  /* BUG FIX: dictionary (unconsumed entries) was leaked */
    if (ret < 0) {
        LOGE("Failed to open encoder!\n");
        goto fail;
    }

    /* Create the output video stream and copy the encoder parameters into it. */
    video_st = avformat_new_stream(ofmt_ctx, pCodec);
    if (video_st == NULL) {
        goto fail;
    }
    /* BUG FIX: was {25, 1} (num/den swapped) — a time base is 1/ticks_per_second.
       The FLV muxer will still override this to 1/1000 in avformat_write_header. */
    video_st->time_base = (AVRational) {1, 25};
    video_st->codecpar->codec_tag = 0;
    ret = avcodec_parameters_from_context(video_st->codecpar, pCodecCtx);
    if (ret < 0) {
        LOGE("avcodec_parameters_from_context failed");  /* BUG FIX: was unchecked */
        goto fail;
    }

    /* Connect to the RTMP server.
       BUG FIX: output streams need AVIO_FLAG_WRITE, not AVIO_FLAG_READ_WRITE. */
    ret = avio_open(&ofmt_ctx->pb, out_url, AVIO_FLAG_WRITE);
    if (ret < 0) {
        LOGE("Failed to open output:%s", av_err2str(ret));
        goto fail;
    }

    /* Write the FLV header; this also fixes up stream time bases. */
    ret = avformat_write_header(ofmt_ctx, NULL);
    if (ret < 0) {
        LOGE("avformat_write_header failed:%s", av_err2str(ret));  /* BUG FIX: was unchecked */
        goto fail;
    }

    av_init_packet(&enc_pkt);
    /* BUG FIX: the UTF chars obtained above were never released (leak per call). */
    (*jniEnv)->ReleaseStringUTFChars(jniEnv, url, out_url);
    return 0;

fail:
    (*jniEnv)->ReleaseStringUTFChars(jniEnv, url, out_url);
    return -1;
}
複製代碼
對YUV數據編碼,並將編碼後的數據寫入準備好的直播流中:
/*
 * Encode one I420 frame (separate Y/U/V planes from Java) and write the
 * resulting H.264 packet into the live stream.
 *
 * Returns 0 on success (including "encoder buffered the frame, no packet
 * yet"), -1 on allocation/encode failure, -2 on packet-receive failure.
 *
 * BUG FIX: the original leaked pFrameYUV, the av_malloc'd buffer and the
 * pinned JNI arrays on every error return; all exits now pass through a
 * single cleanup path (goto-based, CERT MEM12-C style).
 */
JNIEXPORT jint JNICALL Java_com_david_camerapush_ffmpeg_FFmpegHandler_pushCameraData (JNIEnv *jniEnv, jobject instance, jbyteArray yArray, jint yLen, jbyteArray uArray, jint uLen, jbyteArray vArray, jint vLen) {
    jbyte *yin = (*jniEnv)->GetByteArrayElements(jniEnv, yArray, NULL);
    jbyte *uin = (*jniEnv)->GetByteArrayElements(jniEnv, uArray, NULL);
    jbyte *vin = (*jniEnv)->GetByteArrayElements(jniEnv, vArray, NULL);
    int ret = 0;
    int result = 0;
    uint8_t *buffers = NULL;

    /* NOTE(review): allocating a frame + buffer per call is wasteful; these
       could be allocated once in init() and reused — left as-is for clarity. */
    pFrameYUV = av_frame_alloc();
    if (!pFrameYUV) {
        result = -1;
        goto cleanup;
    }
    /* Bytes needed for one YUV420P frame at the encoder's dimensions. */
    int picture_size = av_image_get_buffer_size(pCodecCtx->pix_fmt, pCodecCtx->width,pCodecCtx->height, 1);
    if (picture_size < 0) {
        result = -1;
        goto cleanup;
    }
    buffers = (uint8_t *) av_malloc((size_t) picture_size);
    if (!buffers) {
        result = -1;  /* BUG FIX: allocation was unchecked */
        goto cleanup;
    }
    /* Point pFrameYUV->data[0..2] at the Y/U/V regions inside `buffers`. */
    av_image_fill_arrays(pFrameYUV->data, pFrameYUV->linesize, buffers, pCodecCtx->pix_fmt,pCodecCtx->width, pCodecCtx->height, 1);
    /* Fill the frame with the packed planes received from Java. */
    memcpy(pFrameYUV->data[0], yin, (size_t) yLen); /* Y */
    memcpy(pFrameYUV->data[1], uin, (size_t) uLen); /* U */
    memcpy(pFrameYUV->data[2], vin, (size_t) vLen); /* V */
    pFrameYUV->pts = count;
    pFrameYUV->format = AV_PIX_FMT_YUV420P;
    pFrameYUV->width = yuv_width;
    pFrameYUV->height = yuv_height;

    /* Reset the reusable packet before receiving into it. */
    enc_pkt.data = NULL;
    enc_pkt.size = 0;

    /* Submit the raw frame to the encoder. */
    ret = avcodec_send_frame(pCodecCtx, pFrameYUV);
    if (ret != 0) {
        LOGE("avcodec_send_frame error");
        result = -1;
        goto cleanup;
    }
    /* Fetch the encoded H.264 packet. */
    ret = avcodec_receive_packet(pCodecCtx, &enc_pkt);
    if (ret == AVERROR(EAGAIN)) {
        /* BUG FIX: EAGAIN just means the encoder buffered the frame; treat it
           as success and advance the pts counter so frames stay monotonic. */
        count++;
        result = 0;
        goto cleanup;
    }
    if (ret != 0 || enc_pkt.size <= 0) {
        LOGE("avcodec_receive_packet error %s", av_err2str(ret));
        result = -2;
        goto cleanup;
    }

    /* Rescale timestamps from frame index into the stream time base. */
    enc_pkt.stream_index = video_st->index;
    enc_pkt.pts = count * (video_st->time_base.den) / ((video_st->time_base.num) * fps);
    enc_pkt.dts = enc_pkt.pts;  /* no B-frames, so dts == pts */
    enc_pkt.duration = (video_st->time_base.den) / ((video_st->time_base.num) * fps);
    enc_pkt.pos = -1;

    /* Write the packet into the live stream. */
    ret = av_interleaved_write_frame(ofmt_ctx, &enc_pkt);
    if (ret != 0) {
        LOGE("av_interleaved_write_frame failed");
    }
    count++;

cleanup:
    /* Single exit: release FFmpeg buffers and unpin the Java arrays.
       av_frame_free/av_free tolerate NULL, so partial setup is safe here. */
    av_packet_unref(&enc_pkt);
    av_frame_free(&pFrameYUV);
    av_free(buffers);
    /* JNI_ABORT: the planes were only read, no need to copy back to Java. */
    (*jniEnv)->ReleaseByteArrayElements(jniEnv, yArray, yin, JNI_ABORT);
    (*jniEnv)->ReleaseByteArrayElements(jniEnv, uArray, uin, JNI_ABORT);
    (*jniEnv)->ReleaseByteArrayElements(jniEnv, vArray, vin, JNI_ABORT);
    return result;
}
複製代碼
這是Demo運行後的結果,推送視頻OK,可是可能會有2到3秒的延遲(可能也跟網速有關)。目前就作到這種程度,後面會優化延遲,音頻直播、音視頻同步等都會慢慢加上去。
Tips:
圖片中使用的在Windows下的nginx-rtmp-win32,不須要編譯,點擊exe就能夠運行。