Metal video: first pass
Right now there is only some image corruption, but it shows up on the UIImage path too, so it is probably a problem in the AVFrame-to-CVPixelBuffer conversion.
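A quick way to narrow that down (logStrides below is my own diagnostic sketch, not project code): software H.264 decoding usually hands back planar YUV420P while the pixel buffer here is bi-planar NV12, and both sides can pad their rows, so log the pixel format and the per-plane strides before copying:

#include <stdio.h>
#include <CoreVideo/CoreVideo.h>
#include <libavutil/frame.h>
#include <libavutil/pixdesc.h>

/* Print the source pixel format plus per-plane stride of the decoded frame
   vs the destination CVPixelBuffer; any mismatch means a plain whole-plane
   memcpy will shear or discolor the image. */
static void logStrides(const AVFrame *frame, CVPixelBufferRef pb)
{
    printf("frame format: %s\n", av_get_pix_fmt_name(frame->format));
    for (size_t plane = 0; plane < CVPixelBufferGetPlaneCount(pb); plane++)
        printf("plane %zu: src linesize %d vs dst bytesPerRow %zu\n",
               plane, frame->linesize[plane],
               CVPixelBufferGetBytesPerRowOfPlane(pb, plane));
}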
Looks like more coffee is needed. First get software-decoded audio and video OK, then RTP live intercom? The video path still needs optimizing with ASIO; tomorrow, get this into a state that can be worked on at the office.
Audio: play PCM with an Audio Unit, feeding it raw PCM data. To get test input, dump an AAC file to raw s16le PCM:
ffmpeg -i test.aac -f s16le test.pcm
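For the playback side, a minimal sketch of a RemoteIO Audio Unit pulling that PCM, assuming the dump above is 44.1 kHz mono s16le; feedPCM is a hypothetical hook you would back with your own decoded-PCM ring buffer:

#import <AudioToolbox/AudioToolbox.h>
#include <string.h>

extern size_t feedPCM(void *dst, size_t maxBytes);  /* hypothetical: fill dst from the PCM queue */

static OSStatus renderCallback(void *inRefCon,
                               AudioUnitRenderActionFlags *ioActionFlags,
                               const AudioTimeStamp *inTimeStamp,
                               UInt32 inBusNumber,
                               UInt32 inNumberFrames,
                               AudioBufferList *ioData)
{
    AudioBuffer *buf = &ioData->mBuffers[0];
    size_t got = feedPCM(buf->mData, buf->mDataByteSize);
    if (got < buf->mDataByteSize)                        /* underrun: pad with silence */
        memset((char *)buf->mData + got, 0, buf->mDataByteSize - got);
    return noErr;
}

AudioUnit setupPCMPlayer(void)
{
    AudioComponentDescription desc = {0};
    desc.componentType         = kAudioUnitType_Output;
    desc.componentSubType      = kAudioUnitSubType_RemoteIO;
    desc.componentManufacturer = kAudioUnitManufacturer_Apple;

    AudioComponent comp = AudioComponentFindNext(NULL, &desc);
    AudioUnit unit;
    AudioComponentInstanceNew(comp, &unit);

    /* describe the raw stream: signed 16-bit LE, packed, mono, 44.1 kHz
       (assumed; must match whatever ffmpeg actually produced) */
    AudioStreamBasicDescription fmt = {0};
    fmt.mSampleRate       = 44100;
    fmt.mFormatID         = kAudioFormatLinearPCM;
    fmt.mFormatFlags      = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked;
    fmt.mChannelsPerFrame = 1;
    fmt.mBitsPerChannel   = 16;
    fmt.mBytesPerFrame    = 2;
    fmt.mFramesPerPacket  = 1;
    fmt.mBytesPerPacket   = 2;
    AudioUnitSetProperty(unit, kAudioUnitProperty_StreamFormat,
                         kAudioUnitScope_Input, 0, &fmt, sizeof(fmt));

    /* the unit pulls data on its own thread through this callback */
    AURenderCallbackStruct cb = { renderCallback, NULL };
    AudioUnitSetProperty(unit, kAudioUnitProperty_SetRenderCallback,
                         kAudioUnitScope_Input, 0, &cb, sizeof(cb));

    AudioUnitInitialize(unit);
    AudioOutputUnitStart(unit);
    return unit;
}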
decode.h
#ifndef decode_h
#define decode_h

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <libavcodec/avcodec.h>
// #import "LYShaderTypes.h"
@import UIKit;       /* UIImage, CVPixelBufferRef */
@import MetalKit;
@import GLKit;

#define INBUF_SIZE 4096

int playFile(char *path);
UIImage *uiImageFromPixelBuffer(CVPixelBufferRef p);
CVPixelBufferRef createCVPixelBufferFromAVFrame(AVFrame *frame);

#endif /* decode_h */
decode.m
//
//  decode.m
//  IosFmg
//
//  Created by wangt on 2022/3/2.
//
#import <Foundation/Foundation.h>
#import "decode.h"
#include <unistd.h>   /* usleep */

static void pgm_save(unsigned char *buf, int wrap, int xsize, int ysize,
                     char *filename)
{
    FILE *f;
    int i;

    f = fopen(filename, "wb");   /* binary mode for the PGM payload */
    fprintf(f, "P5\n%d %d\n%d\n", xsize, ysize, 255);
    for (i = 0; i < ysize; i++)
        fwrite(buf + i * wrap, 1, xsize, f);
    fclose(f);
}

CVPixelBufferPoolRef pixelBufferPoolRef;

#define _CFToString(obj) ((__bridge NSString *)obj)

BOOL setupCVPixelBufferIfNeed(AVFrame *frame)
{
    if (!pixelBufferPoolRef) {
        NSMutableDictionary *pixelBufferAttributes = [[NSMutableDictionary alloc] init];
        if (frame->color_range == AVCOL_RANGE_MPEG) {
            pixelBufferAttributes[_CFToString(kCVPixelBufferPixelFormatTypeKey)] =
                @(kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange);
        } else {
            pixelBufferAttributes[_CFToString(kCVPixelBufferPixelFormatTypeKey)] =
                @(kCVPixelFormatType_420YpCbCr8BiPlanarFullRange);
        }
        pixelBufferAttributes[_CFToString(kCVPixelBufferMetalCompatibilityKey)] = @(TRUE);
        pixelBufferAttributes[_CFToString(kCVPixelBufferWidthKey)] = @(frame->width);
        pixelBufferAttributes[_CFToString(kCVPixelBufferHeightKey)] = @(frame->height);
        /// bytes per row (alignment)
        pixelBufferAttributes[_CFToString(kCVPixelBufferBytesPerRowAlignmentKey)] = @(frame->linesize[0]);
        // pixelBufferAttributes[_CFToString(kCVPixelBufferIOSurfacePropertiesKey)] = @{};
        CVReturn cvRet = CVPixelBufferPoolCreate(kCFAllocatorDefault,
                                                 NULL,
                                                 (__bridge CFDictionaryRef)pixelBufferAttributes,
                                                 &pixelBufferPoolRef);
        if (cvRet != kCVReturnSuccess) {
            NSLog(@"create cv buffer pool failed: %d", cvRet);
            return NO;
        }
    }
    return YES;
}

CVPixelBufferRef createCVPixelBufferFromAVFrame(AVFrame *frame)
{
    if (!setupCVPixelBufferIfNeed(frame))
        return NULL;

    CVPixelBufferRef _pixelBufferRef;
    CVReturn cvRet = CVPixelBufferPoolCreatePixelBuffer(kCFAllocatorDefault,
                                                        pixelBufferPoolRef,
                                                        &_pixelBufferRef);
    if (cvRet != kCVReturnSuccess) {
        NSLog(@"create cv buffer failed: %d", cvRet);
        return NULL;
    }

    CVPixelBufferLockBaseAddress(_pixelBufferRef, 0);

    /* Copy row by row: frame->linesize and the buffer's bytes-per-row may both
       be padded and need not match, so a whole-plane memcpy (as in the first
       version of this code) shears the image. */

    /* copy Y */
    size_t yBytesPerRowSize = CVPixelBufferGetBytesPerRowOfPlane(_pixelBufferRef, 0);
    uint8_t *yBase = CVPixelBufferGetBaseAddressOfPlane(_pixelBufferRef, 0);
    for (int row = 0; row < frame->height; row++)
        memcpy(yBase + row * yBytesPerRowSize,
               frame->data[0] + row * frame->linesize[0],
               frame->width);

    /* copy chroma */
    size_t uvBytesPerRowSize = CVPixelBufferGetBytesPerRowOfPlane(_pixelBufferRef, 1);
    uint8_t *uvBase = CVPixelBufferGetBaseAddressOfPlane(_pixelBufferRef, 1);
    if (frame->format == AV_PIX_FMT_NV12) {
        /* already bi-planar: copy the interleaved CbCr rows directly */
        for (int row = 0; row < frame->height / 2; row++)
            memcpy(uvBase + row * uvBytesPerRowSize,
                   frame->data[1] + row * frame->linesize[1],
                   frame->width);
    } else {
        /* software H.264 decoding normally yields planar YUV420P
           (data[1] = U, data[2] = V) while the pool buffer is bi-planar NV12,
           so interleave U and V here; treating data[1] as interleaved CbCr is
           the likely cause of the corruption noted at the top */
        for (int row = 0; row < frame->height / 2; row++) {
            uint8_t *dst = uvBase + row * uvBytesPerRowSize;
            const uint8_t *u = frame->data[1] + row * frame->linesize[1];
            const uint8_t *v = frame->data[2] + row * frame->linesize[2];
            for (int col = 0; col < frame->width / 2; col++) {
                dst[2 * col]     = u[col];
                dst[2 * col + 1] = v[col];
            }
        }
    }

    CVPixelBufferUnlockBaseAddress(_pixelBufferRef, 0);
    return _pixelBufferRef;
}

UIImage *uiImageFromPixelBuffer(CVPixelBufferRef p)
{
    CIImage *ciImage = [CIImage imageWithCVPixelBuffer:p];
    CIContext *context = [CIContext contextWithOptions:@{kCIContextUseSoftwareRenderer : @(YES)}];
    CGRect rect = CGRectMake(0, 0, CVPixelBufferGetWidth(p), CVPixelBufferGetHeight(p));
    CGImageRef videoImage = [context createCGImage:ciImage fromRect:rect];
    UIImage *image = [UIImage imageWithCGImage:videoImage];
    CGImageRelease(videoImage);
    return image;
}

CVPixelBufferRef pixelBuffer1;
int gBreak = 0;

static void decode(AVCodecContext *dec_ctx, AVFrame *frame, AVPacket *pkt,
                   const char *filename)
{
    char buf[1024];
    int ret;

    ret = avcodec_send_packet(dec_ctx, pkt);
    if (ret < 0) {
        fprintf(stderr, "Error sending a packet for decoding\n");
        exit(1);
    }

    while (ret >= 0) {
        ret = avcodec_receive_frame(dec_ctx, frame);
        if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
            return;
        else if (ret < 0) {
            fprintf(stderr, "Error during decoding\n");
            exit(1);
        }

        printf("saving frame %3d\n", dec_ctx->frame_number);
        fflush(stdout);

        /* the picture is allocated by the decoder. no need to free it */

        /* publish the frame for the renderer, releasing the previous buffer
           so the pool does not run dry (see the hand-off sketch below for a
           thread-safe version) */
        if (pixelBuffer1)
            CVPixelBufferRelease(pixelBuffer1);
        pixelBuffer1 = createCVPixelBufferFromAVFrame(frame);
        if (pixelBuffer1 != NULL) {
            //gBreak = 1;
            //break;
            printf("pixelBuffer1 not null \n");
        }

        snprintf(buf, sizeof(buf), "%s-%d", filename, dec_ctx->frame_number);
        //pgm_save(frame->data[0], frame->linesize[0],
        //         frame->width, frame->height, buf);
    }
}

/* alternative UIImage preview path (myUiv is a UIImageView):
    //myUiv.image = uiImageFromPixelBuffer(pixelBuffer1);
    dispatch_async(dispatch_get_main_queue(), ^{
        myUiv.image = uiImageFromPixelBuffer(pixelBuffer1);
    });
    //[NSThread sleepForTimeInterval:0.03];
*/

//int main1(int argc, char **argv)
int playFile(char *path)
{
    const char *filename, *outfilename;
    const AVCodec *codec;
    AVCodecParserContext *parser;
    AVCodecContext *c = NULL;
    FILE *f;
    AVFrame *frame;
    uint8_t inbuf[INBUF_SIZE + AV_INPUT_BUFFER_PADDING_SIZE];
    uint8_t *data;
    size_t data_size;
    int ret;
    AVPacket *pkt;

    /* the original example read the paths from argv:
       if (argc <= 2) { fprintf(stderr, "Usage: %s <input file> <output file>\n", argv[0]); ... } */
    filename = path;
    outfilename = "out";

    pkt = av_packet_alloc();
    if (!pkt)
        exit(1);

    /* set end of buffer to 0 (this ensures that no overreading happens
       for damaged MPEG streams) */
    memset(inbuf + INBUF_SIZE, 0, AV_INPUT_BUFFER_PADDING_SIZE);

    /* find the H.264 video decoder */
    //codec = avcodec_find_decoder(AV_CODEC_ID_MPEG1VIDEO);
    codec = avcodec_find_decoder(AV_CODEC_ID_H264);
    if (!codec) {
        fprintf(stderr, "Codec not found\n");
        exit(1);
    }

    parser = av_parser_init(codec->id);
    if (!parser) {
        fprintf(stderr, "parser not found\n");
        exit(1);
    }

    c = avcodec_alloc_context3(codec);
    if (!c) {
        fprintf(stderr, "Could not allocate video codec context\n");
        exit(1);
    }

    /* For some codecs, such as msmpeg4 and mpeg4, width and height
       MUST be initialized there because this information is not
       available in the bitstream. */

    /* open it */
    if (avcodec_open2(c, codec, NULL) < 0) {
        fprintf(stderr, "Could not open codec\n");
        exit(1);
    }

    f = fopen(filename, "rb");
    if (!f) {
        fprintf(stderr, "Could not open %s\n", filename);
        exit(1);
    }

    frame = av_frame_alloc();
    if (!frame) {
        fprintf(stderr, "Could not allocate video frame\n");
        exit(1);
    }

    while (!feof(f)) {
        /* read raw data from the input file */
        data_size = fread(inbuf, 1, INBUF_SIZE, f);
        if (!data_size)
            break;

        /* use the parser to split the data into frames */
        data = inbuf;
        while (data_size > 0) {
            ret = av_parser_parse2(parser, c, &pkt->data, &pkt->size,
                                   data, data_size, AV_NOPTS_VALUE, AV_NOPTS_VALUE, 0);
            if (ret < 0) {
                fprintf(stderr, "Error while parsing\n");
                exit(1);
            }
            data += ret;
            data_size -= ret;

            if (pkt->size)
                decode(c, frame, pkt, outfilename);

            usleep(30 * 1000);   /* crude ~30 ms pacing per parsed packet */
            //if (gBreak) break;
        }
        //if (gBreak) break;
    }

    /* flush the decoder */
    decode(c, frame, NULL, outfilename);

    fclose(f);

    av_parser_close(parser);
    avcodec_free_context(&c);
    av_frame_free(&frame);
    av_packet_free(&pkt);

    return 0;
}
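One caveat with the global pixelBuffer1: the pthread decode thread writes and releases it while the render loop reads it, so a release can pull the buffer out from under the renderer. A minimal hand-off sketch (publishPixelBuffer / takeLatestPixelBuffer are illustrative names, not project code):

#import <CoreVideo/CoreVideo.h>
#import <os/lock.h>

static os_unfair_lock gFrameLock = OS_UNFAIR_LOCK_INIT;
static CVPixelBufferRef latestPixelBuffer = NULL;

/* decoder thread: publish a new frame, dropping any frame not yet rendered */
void publishPixelBuffer(CVPixelBufferRef pb)
{
    os_unfair_lock_lock(&gFrameLock);
    if (latestPixelBuffer)
        CVPixelBufferRelease(latestPixelBuffer);
    latestPixelBuffer = pb;   /* ownership moves into the slot */
    os_unfair_lock_unlock(&gFrameLock);
}

/* render thread: take the latest frame; caller releases it after drawing */
CVPixelBufferRef takeLatestPixelBuffer(void)
{
    os_unfair_lock_lock(&gFrameLock);
    CVPixelBufferRef pb = latestPixelBuffer;
    latestPixelBuffer = NULL;
    os_unfair_lock_unlock(&gFrameLock);
    return pb;
}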
ViewController.m (excerpt)
extern CVPixelBufferRef pixelBuffer1;
char gPath[512];

void *start(void *data)
{
    NSLog(@"mythread--> %@", [NSThread currentThread]);
    playFile(gPath);
    return NULL;
}

@implementation ViewController

- (void)viewDidLoad {
    [super viewDidLoad];
    // Do any additional setup after loading the view, typically from a nib.
    //--
    NSString *videoUrl = [[NSBundle mainBundle] pathForResource:@"sintel" ofType:@"h264"];
    const char *path = [videoUrl UTF8String];
    strcpy(gPath, path);
    //playFile(path);
    //--
    pthread_t thread;
    // create a thread that starts running immediately, so decoding stays off the main thread
    pthread_create(&thread, NULL, start, NULL);

    ....
    ...

    [self setupTextureWithEncoder:renderEncoder buffer:pixelBuffer1];
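setupTextureWithEncoder:buffer: is not shown in this note; a sketch of what it usually looks like, wrapping the NV12 buffer's two planes as Metal textures through a CVMetalTextureCache. Here _textureCache, textureFromPlane, and the fragment texture indices are my assumptions; the kCVPixelBufferMetalCompatibilityKey already set in decode.m is what makes the pool's buffers usable with the cache.

#import <MetalKit/MetalKit.h>
#import <CoreVideo/CVMetalTextureCache.h>

/* _textureCache is assumed created once, e.g. in viewDidLoad:
   CVMetalTextureCacheCreate(kCFAllocatorDefault, NULL, self.mtkView.device,
                             NULL, &_textureCache); */

static id<MTLTexture> textureFromPlane(CVMetalTextureCacheRef cache,
                                       CVPixelBufferRef pb,
                                       size_t plane, MTLPixelFormat fmt)
{
    size_t w = CVPixelBufferGetWidthOfPlane(pb, plane);
    size_t h = CVPixelBufferGetHeightOfPlane(pb, plane);
    CVMetalTextureRef ref = NULL;
    if (CVMetalTextureCacheCreateTextureFromImage(kCFAllocatorDefault, cache, pb,
            NULL, fmt, w, h, plane, &ref) != kCVReturnSuccess)
        return nil;
    id<MTLTexture> tex = CVMetalTextureGetTexture(ref);
    CFRelease(ref);
    return tex;
}

- (void)setupTextureWithEncoder:(id<MTLRenderCommandEncoder>)encoder
                         buffer:(CVPixelBufferRef)pixelBuffer
{
    if (!pixelBuffer) return;
    /* plane 0 = Y (one 8-bit channel), plane 1 = interleaved CbCr
       (two 8-bit channels at half resolution) */
    id<MTLTexture> y  = textureFromPlane(_textureCache, pixelBuffer, 0, MTLPixelFormatR8Unorm);
    id<MTLTexture> uv = textureFromPlane(_textureCache, pixelBuffer, 1, MTLPixelFormatRG8Unorm);
    if (y && uv) {
        [encoder setFragmentTexture:y  atIndex:0];  /* indices assumed; must match */
        [encoder setFragmentTexture:uv atIndex:1];  /* the YUV->RGB fragment shader */
    }
}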