iOS live-stream pushing can reach 30 frames per second, better than Android. With software encoding the phone overheats, so hardware encoding has to be used; video playback is rendered with OpenGL.
The FFmpeg initialization code is as follows:
int init_Code(int width, int height, const char *out_path) {
    av_log_set_callback(custom_log);
    //avcodec_register_all();
    av_register_all();
    avformat_network_init();
    avformat_alloc_output_context2(&ofmt_ctx, NULL, "flv", out_path);

    // Open the output URL; must be done before avformat_write_header() for muxing
    AVOutputFormat *ofmt = ofmt_ctx->oformat;
    if (!(ofmt->flags & AVFMT_NOFILE)) { // Open output URL
        if (avio_open(&ofmt_ctx->pb, out_path, AVIO_FLAG_READ_WRITE) < 0) {
            return -1;
        }
    }
    if (isAudio == 1) {
        if (init_audio_Code() != 0)          // initialize audio stream parameters
            return -1;
    }
    if (isVideo == 1) {
        if (init_video_code(width, height) != 0)  // initialize video stream parameters
            return -1;
    }
    av_dump_format(ofmt_ctx, 0, out_path, 1);
    if (avformat_write_header(ofmt_ctx, NULL) < 0) { // write the file header
        //LOGE("Error occurred when opening output file\n");
        return -1;
    }

    start_thread_encode(); // start the encoding thread
    return 0;
}
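The helpers init_audio_Code() and init_video_code() are not shown in the original. A minimal sketch of what init_video_code() might look like, assuming the same pre-4.0 FFmpeg API (av_register_all era) and that hardware-encoded H.264 packets are muxed as-is, so no encoder is opened here; the stream parameters are illustrative:

// Hypothetical sketch: create the FLV video stream that encodeVideo_h264() writes to.
static int init_video_code(int width, int height) {
    video_st = avformat_new_stream(ofmt_ctx, NULL);
    if (!video_st)
        return -1;
    AVCodecContext *c = video_st->codec;      // deprecated accessor, but matches this era of FFmpeg
    c->codec_type = AVMEDIA_TYPE_VIDEO;
    c->codec_id   = AV_CODEC_ID_H264;         // packets arrive pre-encoded from the hardware encoder
    c->width      = width;
    c->height     = height;
    c->time_base  = (AVRational){1, 30};      // 30 fps capture, per the article
    if (ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER)
        c->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
    return 0;
}

Note that muxing H.264 into FLV also needs the SPS/PPS as extradata; in a VideoToolbox pipeline those come from the format description of the first keyframe.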
Video hardware encoding:
/* Write a hardware-encoded H.264 video packet */
int encodeVideo_h264(uint8_t *in, int64_t time, int size, int keyframe) {
    int ret;
    // After declaring an AVPacket, initialize it with av_init_packet
    av_init_packet(&video_pkt);
    //av_new_packet(&video_pkt, size);
    video_pkt.stream_index = video_st->index;
    video_pkt.data = in;
    video_pkt.size = size;
    video_pkt.pos = -1;
    ptsPacket(video_st, &video_pkt, time);
    if (video_pkt.buf != NULL) { // buf is NULL after av_init_packet, so this branch is normally skipped
        video_pkt.buf->data = in;
        video_pkt.buf->size = size;
    }
    video_pkt.flags = keyframe;  // 1 == AV_PKT_FLAG_KEY
    if (keyframe == 1) {
        //LOGE("hardware encode - keyframe: %lld", time);
    }

    ret = av_interleaved_write_frame(ofmt_ctx, &video_pkt);
    if (ret != 0) {
        printf("----encodeVideo--encodeVideo -ret: %d ", ret);
        //LOGE("----encodeVideo--encodeVideo -ret: %d ", ret);
    }
    av_free_packet(&video_pkt);

    return 0;
}
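ptsPacket() is not shown in the original. A plausible sketch, assuming the `time` argument arrives in milliseconds from the capture callback and must be rescaled into the stream's time base (the helper's convention is an assumption):

// Hypothetical helper: rescale a millisecond timestamp into the stream time base.
static void ptsPacket(AVStream *st, AVPacket *pkt, int64_t time_ms) {
    AVRational ms = {1, 1000};                        // incoming timestamps are in milliseconds
    pkt->pts = av_rescale_q(time_ms, ms, st->time_base);
    pkt->dts = pkt->pts;                              // no B-frames from the hardware encoder
    pkt->duration = 0;                                // let the muxer infer duration
}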
Audio hardware encoding:
/* Write a hardware-encoded AAC audio packet */
int encodeAudio_AAC(uint8_t *in, int64_t time, int size) {
    if (isAudio == 0)
        return 0;
    av_init_packet(&audio_pkt);
    audio_pkt.stream_index = audio_st->index; // identifies which stream this AVPacket belongs to
    audio_pkt.data = in;
    audio_pkt.size = size;
    audio_pkt.pts = time;
    audio_pkt.dts = time;
    //audio_pkt.pos = -1;
    audio_pkt.flags = 1;
    //audio_pkt.duration = 10;
    int ret = av_interleaved_write_frame(ofmt_ctx, &audio_pkt);
    if (ret != 0) {
        //LOGE("----encodeAudio---ret: %d size:%d ,time:%lld ", ret, size, time);
    }
    return 0;
}
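Here the pts and dts are taken directly from `time`, which works because the FLV muxer's time base is 1/1000 (milliseconds). Each AAC frame carries 1024 PCM samples, so if the caller derives `time` by counting encoded frames, the step per frame is 1024 * 1000 / sampleRate milliseconds. A small illustrative calculation (names and bookkeeping are assumptions, not from the original):

// Hypothetical pts bookkeeping for AAC in an FLV mux (time base 1/1000):
// at 44100 Hz each AAC frame advances the clock by 1024 * 1000 / 44100 ≈ 23.2 ms.
static int64_t audio_frame_index = 0;

static int64_t next_aac_pts_ms(int sample_rate) {
    return audio_frame_index++ * 1024LL * 1000 / sample_rate;
}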
Initializing the camera:
- (void)initCamera:(BOOL)type
{
    NSError *deviceError;
    AVCaptureDeviceInput *inputCameraDevice;
    if (type == false) {
        inputCameraDevice = [AVCaptureDeviceInput deviceInputWithDevice:cameraDeviceB error:&deviceError];
    } else {
        inputCameraDevice = [AVCaptureDeviceInput deviceInputWithDevice:cameraDeviceF error:&deviceError];
    }
    AVCaptureVideoDataOutput *outputVideoDevice = [[AVCaptureVideoDataOutput alloc] init];

    NSString *key = (NSString *)kCVPixelBufferPixelFormatTypeKey;
    NSNumber *val = [NSNumber numberWithUnsignedInt:kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange];
    NSDictionary *videoSettings = [NSDictionary dictionaryWithObject:val forKey:key];
    outputVideoDevice.videoSettings = videoSettings;
    [outputVideoDevice setSampleBufferDelegate:self queue:dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_HIGH, 0)];
    captureSession = [[AVCaptureSession alloc] init];
    [captureSession addInput:inputCameraDevice];
    [captureSession addOutput:outputVideoDevice];
    [captureSession beginConfiguration];

    [captureSession setSessionPreset:[NSString stringWithString:AVCaptureSessionPreset352x288]];
    connectionVideo = [outputVideoDevice connectionWithMediaType:AVMediaTypeVideo];
#if TARGET_OS_IPHONE
    [self setRelativeVideoOrientation];

    NSNotificationCenter *notify = [NSNotificationCenter defaultCenter];
    [notify addObserver:self
               selector:@selector(statusBarOrientationDidChange:)
                   name:@"StatusBarOrientationDidChange"
                 object:nil];
#endif

    [captureSession commitConfiguration];
    recordLayer = [AVCaptureVideoPreviewLayer layerWithSession:captureSession];
    [recordLayer setVideoGravity:AVLayerVideoGravityResizeAspect];
}
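The sample-buffer delegate that feeds captured frames into the encoder is not shown in the original. A minimal sketch, assuming the NV12 pixel buffer's planes are handed to a hypothetical bridge function encodeFrameNV12() into the encode pipeline:

// Hypothetical delegate method: extract the NV12 (420YpCbCr8BiPlanarVideoRange)
// pixel buffer from the sample buffer and pass its planes to the encoder.
- (void)captureOutput:(AVCaptureOutput *)output
didOutputSampleBuffer:(CMSampleBufferRef)sampleBuffer
       fromConnection:(AVCaptureConnection *)connection
{
    CVImageBufferRef pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer);
    CVPixelBufferLockBaseAddress(pixelBuffer, kCVPixelBufferLock_ReadOnly);

    uint8_t *yPlane  = (uint8_t *)CVPixelBufferGetBaseAddressOfPlane(pixelBuffer, 0); // luma
    uint8_t *uvPlane = (uint8_t *)CVPixelBufferGetBaseAddressOfPlane(pixelBuffer, 1); // interleaved chroma
    size_t width  = CVPixelBufferGetWidth(pixelBuffer);
    size_t height = CVPixelBufferGetHeight(pixelBuffer);

    // encodeFrameNV12() is an assumed entry point into the hardware-encode path.
    // encodeFrameNV12(yPlane, uvPlane, (int)width, (int)height);

    CVPixelBufferUnlockBaseAddress(pixelBuffer, kCVPixelBufferLock_ReadOnly);
}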
Setting the audio format parameters:
- (void)setupAudioFormat:(UInt32)inFormatID SampleRate:(int)sampleRate
{
    // Zero out the struct first
    memset(&_recordFormat, 0, sizeof(_recordFormat));
    // Set the sample rate; the system default could be used for testing //TODO:
    // The sample rate is the number of frames sampled per second
    _recordFormat.mSampleRate = sampleRate; //[[AVAudioSession sharedInstance] sampleRate];
    UInt32 size = sizeof(_recordFormat.mSampleRate);
    //AudioSessionGetProperty(kAudioSessionProperty_CurrentHardwareSampleRate,
    //                        &size, &_recordFormat.mSampleRate);
    size = sizeof(_recordFormat.mChannelsPerFrame);
    //AudioSessionGetProperty(kAudioSessionProperty_CurrentHardwareInputNumberChannels,
    //                        &size, &_recordFormat.mChannelsPerFrame);

    _recordFormat.mFormatID = inFormatID;
    if (inFormatID == kAudioFormatLinearPCM) {
        // 16 bits = 2 bytes, so one frame in one channel is 2 bytes; multiplied by
        // the channel count, this gives the bytes per frame across all channels.
        // With mFramesPerPacket = 1, one packet is exactly one frame of data.
        _recordFormat.mFramesPerPacket = 1;
        _recordFormat.mSampleRate = sampleRate; // e.g. 16000.0
        // Bits per sample point, per channel
        _recordFormat.mBitsPerChannel = 16;
        _recordFormat.mChannelsPerFrame = 2; // 1: mono; 2: stereo
        _recordFormat.mFramesPerPacket = 1;
        _recordFormat.mBytesPerFrame = (_recordFormat.mBitsPerChannel / 8) * _recordFormat.mChannelsPerFrame;
        _recordFormat.mBytesPerPacket = _recordFormat.mBytesPerFrame * _recordFormat.mFramesPerPacket;
        _recordFormat.mFormatFlags = kLinearPCMFormatFlagIsSignedInteger | kLinearPCMFormatFlagIsPacked;
    }
}
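startRecording below hardcodes a 4096-byte buffer but also references a commented-out ComputeRecordBufferSize helper. A plausible sketch of that computation, assuming a target buffer duration in seconds (the selector name is taken from the commented-out call; its body is an assumption):

// Hypothetical buffer sizing: with 16-bit stereo PCM, mBytesPerFrame = (16/8) * 2 = 4,
// so 0.064 s at 16 kHz needs ceil(0.064 * 16000) * 4 = 4096 bytes.
- (int)ComputeRecordBufferSize:(const AudioStreamBasicDescription *)format
                           sss:(float)seconds
{
    int frames = (int)ceil(seconds * format->mSampleRate); // frames needed for the duration
    return frames * format->mBytesPerFrame;                // bytes for those frames
}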
Starting the recording:
- (void)startRecording
{
    UInt32 size;
    NSError *error = nil;
    // Set the audio session category. AVAudioSessionCategoryRecord is enough for
    // recording only; use AVAudioSessionCategoryPlayAndRecord if playback is also
    // needed. This choice matters.
    BOOL ret = [[AVAudioSession sharedInstance] setCategory:AVAudioSessionCategoryRecord error:&error];
    if (!ret) {
        NSLog(@"Failed to set the audio session category");
        return;
    }
    // Activate the audio session
    ret = [[AVAudioSession sharedInstance] setActive:YES error:&error];
    if (!ret) {
        NSLog(@"Failed to activate the audio session");
        return;
    }
    // Create the audio input queue; inputBufferHandler is the callback function
    AudioQueueNewInput(&_recordFormat, inputBufferHandler, (__bridge void *)(self), NULL, kCFRunLoopCommonModes, 0, &_audioQueue);
    size = sizeof(_recordFormat);
    //AudioQueueGetProperty(_audioQueue, kAudioQueueProperty_StreamDescription,
    //                      &_recordFormat, &size);
    // Estimate the buffer size. Whatever size is set here is the size of the
    // inBuffer delivered to the callback, so this matters.
    //int frames = (int)ceil(kDefaultBufferDurationSeconds * _recordFormat.mSampleRate); // smallest integer >= the expression
    int bufferByteSize = 4096; // frames * _recordFormat.mBytesPerFrame;
    //bufferByteSize = [self ComputeRecordBufferSize:&_recordFormat sss:kDefaultBufferDurationSeconds];
    NSLog(@"Buffer size: %d", bufferByteSize);
    AudioQueueBufferRef _audioBuffers[kNumberAudioQueueBuffers]; // was a fixed [3]; must match the loop count
    // Allocate and enqueue the buffers
    for (int i = 0; i < kNumberAudioQueueBuffers; i++) {
        AudioQueueAllocateBuffer(_audioQueue, bufferByteSize, &_audioBuffers[i]);
        AudioQueueEnqueueBuffer(_audioQueue, _audioBuffers[i], 0, NULL); // add _audioBuffers[i] to the queue
    }
    // Start recording
    AudioQueueStart(_audioQueue, NULL);
}
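The inputBufferHandler callback named above is not shown in the original. A minimal sketch, assuming the captured PCM is forwarded to the hardware AAC encoder (whose output then goes through encodeAudio_AAC()) and the buffer is re-enqueued so capture continues; the bridge call is hypothetical:

// Hypothetical AudioQueue input callback: forward the captured PCM and
// hand the buffer back to the queue.
static void inputBufferHandler(void *inUserData,
                               AudioQueueRef inAQ,
                               AudioQueueBufferRef inBuffer,
                               const AudioTimeStamp *inStartTime,
                               UInt32 inNumPackets,
                               const AudioStreamPacketDescription *inPacketDesc)
{
    if (inNumPackets > 0) {
        // Assumed bridge into the encode path: pass the raw PCM bytes to the
        // AAC encoder; its output packets are then written via encodeAudio_AAC().
        // encodePCM(inBuffer->mAudioData, inBuffer->mAudioDataByteSize);
    }
    // Re-enqueue so the queue can keep filling buffers
    AudioQueueEnqueueBuffer(inAQ, inBuffer, 0, NULL);
}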