“我报名参与金石方案1期挑战——瓜分10万奖池,这是我的第1篇文章,点击检查活动详情”

简易实现1对1画面实时传输到网页显现

效果图

音视频(5)客户端与网页进行画面实时传输实现简易1对1视频-客户端开发

一、打开/预览手机摄像头

使用Jetpack包下的CameraX库快速预览
CameraX 是 Jetpack 的新增库。利用该库,能够更轻松地向应用添加相机功能。该库提供了很多兼容性修复程序和解决方法,有助于在众多设备上打造一致的开发者体验。

1.1 引进库

def camerax_version = "1.1.0"
implementation "androidx.camera:camera-core:${camerax_version}"
implementation "androidx.camera:camera-camera2:${camerax_version}"
implementation "androidx.camera:camera-lifecycle:${camerax_version}"
implementation "androidx.camera:camera-video:${camerax_version}"
implementation "androidx.camera:camera-view:${camerax_version}"
implementation "androidx.camera:camera-extensions:${camerax_version}"

1.2 动态请求摄像头权限

xml权限清单

<uses-permission android:name="android.permission.CAMERA"/>

请求权限代码

ActivityCompat.requestPermissions(this, new String[]{
    Manifest.permission.CAMERA
}, 0);

1.3 预览摄像头

xml写入预览View

<androidx.camera.view.PreviewView
android:id="@+id/view_finder"
android:layout_width="match_parent"
android:layout_height="match_parent"/>

java预览固定代码,camerax库简化了camera2代码,曾经的camera2代码比较繁琐。

camera2预览代码能够跳转 Camera2录制视频(音视频组成)及其YUV数据提取(一)- 相机预览

// Preview surface and CameraX provider plumbing.
private PreviewView viewFinder;
private ListenableFuture<ProcessCameraProvider> cameraProviderFuture;
// Single-thread executor that runs the per-frame image-analysis callback off the UI thread.
private Executor executor = Executors.newSingleThreadExecutor();
// Requested analysis resolution; CameraX picks the closest supported size.
public static final int w = 640;
public static final int h = 640;
@Override
protected void onCreate(Bundle savedInstanceState) {
    //...
    viewFinder = findViewById(R.id.viewFinder);
    viewFinder.setImplementationMode(PreviewView.ImplementationMode.COMPATIBLE);
    cameraProviderFuture = ProcessCameraProvider.getInstance(this);
    // Bind the preview once the camera provider finishes initializing (on the main thread).
    cameraProviderFuture.addListener(() -> {
    try {
        ProcessCameraProvider cameraProvider = cameraProviderFuture.get();
        bindPreview(cameraProvider);
    } catch (ExecutionException | InterruptedException e) {
        // NOTE(review): exception is swallowed — the camera silently fails to start.
    }
}, ContextCompat.getMainExecutor(this));
}
// Builds the Preview and ImageAnalysis use cases and binds both to this
// activity's lifecycle using the back-facing camera.
void bindPreview(ProcessCameraProvider cameraProvider) {
    Preview preview = new Preview.Builder()
            .build();
    CameraSelector cameraSelector = new CameraSelector.Builder()
            .requireLensFacing(CameraSelector.LENS_FACING_BACK)
            .build();
    preview.setSurfaceProvider(viewFinder.getSurfaceProvider());
    // Analysis stream: target w x h (closest supported size is used),
    // YUV_420_888 output, dropping stale frames so the analyzer always
    // receives the most recent one.
    ImageAnalysis imageAnalysis =
            new ImageAnalysis.Builder()
                    .setTargetResolution(new Size(w, h))
                    .setOutputImageFormat(ImageAnalysis.OUTPUT_IMAGE_FORMAT_YUV_420_888)
                    .setBackpressureStrategy(ImageAnalysis.STRATEGY_KEEP_ONLY_LATEST)
                    .build();
    imageAnalysis.setAnalyzer(executor, new ImageAnalysis.Analyzer() {
        @Override
        public void analyze(@NonNull ImageProxy imageProxy) {
            int rotationDegrees = imageProxy.getImageInfo().getRotationDegrees();
            Image image = imageProxy.getImage();
            // Per-frame processing goes here.
            // The ImageProxy MUST be closed or the camera stops delivering frames.
            imageProxy.close();
        }
    });
    Camera camera = cameraProvider.bindToLifecycle((LifecycleOwner) this, cameraSelector, preview, imageAnalysis);
}

ImageAnalysis图画剖析类 用于设置相机输出的图画特点设置

  • setTargetResolution 设置目标图像大小(注意当没有合适的尺寸时,会输出最接近的宽高的图像)
  • setOutputImageFormat 设置输出图像数据类型,YUV420格式和RGB格式
  • setBackpressureStrategy 图像生成策略,STRATEGY_KEEP_ONLY_LATEST 默认策略即可

上面代码参照官方文档的用户示例所给出的 -> CameraX预览示例

二、编译FFmpeg so库到Android项目中集成

需求先下载NDK 本环境 mac ndk版别 r20b

2.1 下载libx264库源码 (用于视频流编码H.264裸流)

2.1.1 下载

git clone https://code.videolan.org/videolan/x264.git

源码下载链接 -> libX264官方源码

2.1.2 编译so库

  1. 进入源码文件目录,创立可履行shell脚本文件(*.sh)
  2. 编写脚本 文章链接 -> 音视频(2) – 编译libx264库
  3. 履行.sh 等待生成

音视频(5)客户端与网页进行画面实时传输实现简易1对1视频-客户端开发

音视频(5)客户端与网页进行画面实时传输实现简易1对1视频-客户端开发

2.2 下载fdk_aac库 (用于音频流编码AAC)

2.2.1 下载

源码下载链接 -> fdk_aac库源码下载地址

2.2.2 编译so库

  1. 进入源码文件目录,创立可履行shell脚本文件(*.sh)
  2. 编写脚本 文章链接 -> 音视频(3) – 编译fdk_aac库
  3. 履行.sh 等待生成

音视频(5)客户端与网页进行画面实时传输实现简易1对1视频-客户端开发

音视频(5)客户端与网页进行画面实时传输实现简易1对1视频-客户端开发

2.3 下载libyuv库源码 (用于yuv数据旋转缩放转化等操作)

独自引进 不作为FFmpeg集成

2.3.1 下载

git clone https://github.com/lemenkov/libyuv.git

2.3.2 编译

编译可参阅文章 -> libyuv 库编译

音视频(5)客户端与网页进行画面实时传输实现简易1对1视频-客户端开发

音视频(5)客户端与网页进行画面实时传输实现简易1对1视频-客户端开发

2.4 下载FFmpeg源码

2.4.1 下载

git clone https://git.ffmpeg.org/ffmpeg.git ffmpeg

2.4.2 编译FFmpeg集成fdk-aac和x264编码库

可跳转检查文章 -> 音视频(4) – FFmpeg编译集成libx264+fdk_aac库

2.5 集成到项目

  • app/目录下创立CMakeLists.txt用于链接c/c++源码和so文件
  • app/目录下创立libs

音视频(5)客户端与网页进行画面实时传输实现简易1对1视频-客户端开发

  • app/src/main创立cpp目录用于引进编译后的生成的include文件中的c/c++源码
  • app/src/main创立jniLibs目录用于引进ffmpeg/libx264/libyuv等编译生成的so文件

音视频(5)客户端与网页进行画面实时传输实现简易1对1视频-客户端开发

2.6 CMakeLists.txt链接文件代码

cmake_minimum_required(VERSION 3.10.2)

# Declares and names the project. Must come immediately after
# cmake_minimum_required so the toolchain/languages are configured before
# any target is created (the original called project() after add_library).
project("cameraxtest")

# Registers a prebuilt shared library shipped at
# src/main/jniLibs/<ABI>/lib<name>.so as an IMPORTED target so it can be
# linked by plain name below.
function(add_prebuilt_library name)
    add_library(${name} SHARED IMPORTED)
    set_target_properties(${name} PROPERTIES
            IMPORTED_LOCATION
            ${CMAKE_SOURCE_DIR}/src/main/jniLibs/${CMAKE_ANDROID_ARCH_ABI}/lib${name}.so)
endfunction()

# libyuv, libx264, and the FFmpeg component libraries.
foreach(prebuilt
        yuv
        x264
        avcodec
        avfilter
        avformat
        avutil
        swresample
        swscale
        postproc
        avdevice)
    add_prebuilt_library(${prebuilt})
endforeach()

# The JNI bridge built from our own C++ source.
add_library(native-lib
        SHARED
        src/main/cpp/native-lib.cpp)

# Headers generated by the ffmpeg/libyuv builds, scoped to native-lib
# instead of the directory-wide include_directories().
target_include_directories(native-lib PRIVATE
        src/main/cpp/include
        src/main/cpp/libyuv)

# NDK logging library (__android_log_print).
find_library(log-lib log)

target_link_libraries(native-lib PRIVATE
        avcodec
        avfilter
        avformat
        avutil
        swresample
        swscale
        postproc
        avdevice
        x264
        yuv
        ${log-lib})

2.7 build.gradle设置

android {
  // ...
   defaultConfig {
      // ...
      // Arguments for the CMake build of the native library.
       externalNativeBuild {
           cmake {
               cppFlags ''
               abiFilters "armeabi-v7a"
           }
       }
       // ABI(s) the NDK compiles native code for.
       ndk {
           abiFilters "armeabi-v7a"
       }
   }
 //...
 // Location of the CMakeLists.txt and the CMake version to use.
   externalNativeBuild {
       cmake {
           path file('CMakeLists.txt')
           version '3.10.2'
       }
   }
   sourceSets {
       main {
           jniLibs.srcDirs = ['libs']
       }
   }
}

2.7 加载动态库

MainActivity中加载

// Used to load the 'native-lib' library on application startup.
// Used to load the 'native-lib' library on application startup.
// NOTE: load order matters — x264 (and fdk_aac) must be loaded before the
// FFmpeg libraries because FFmpeg's H.264 encoder depends on libx264.
static {
    System.loadLibrary("yuv");
    System.loadLibrary("x264");
    System.loadLibrary("native-lib");
    System.loadLibrary("avcodec");
    System.loadLibrary("avfilter");
    System.loadLibrary("avformat");
    System.loadLibrary("avutil");
    System.loadLibrary("swresample");
    System.loadLibrary("swscale");
    System.loadLibrary("postproc");
    System.loadLibrary("avdevice");
}

注意加载顺序:x264 要加载在 ffmpeg 库前面,因为 ffmpeg 中的 H264 编码器依赖于 x264;同理 fdk_aac 也是。

三、Java层YUV数据解析

需求了解YUV各种数据格局 参阅链接 ->

  • Camera2录制视频(音视频组成)及其YUV数据提取(二)- YUV提取及图画转化
  • YUV 简介及使用

本次仅提取YUV420P中的一种I420格局
数据流格式:YYYYYY UUUU VVVV。依次存放:Y 分量存放 width * height 个,U 分量存放 width * height * 0.25 个,V 分量与 U 分量数量相同,总 length 一共是 w * h * 1.5。

// Repacks the camera's YUV_420_888 Image into a contiguous I420 buffer
// (YYYY... UU... VV...). Honors both pixelStride (pixel packing within a
// row) and rowStride (per-row padding); the original per-byte indexing
// produced corrupt output whenever rowStride != width.
private byte[] convertYuvBuffer(Image image) {
    int w = image.getWidth();
    int h = image.getHeight();
    byte[] outBuffer = new byte[w * h * 3 / 2];
    int offset = 0;
    for (int p = 0; p < 3; p++) {
        Image.Plane plane = image.getPlanes()[p]; // 0 = Y, 1 = U, 2 = V
        ByteBuffer buf = plane.getBuffer();
        int rowStride = plane.getRowStride();
        int pixelStride = plane.getPixelStride();
        // Chroma planes are subsampled by 2 in both dimensions for 4:2:0.
        int planeW = (p == 0) ? w : w / 2;
        int planeH = (p == 0) ? h : h / 2;
        for (int row = 0; row < planeH; row++) {
            int base = row * rowStride;
            for (int col = 0; col < planeW; col++) {
                outBuffer[offset++] = buf.get(base + col * pixelStride);
            }
        }
    }
    return outBuffer;
}
void bindPreview(ProcessCameraProvider cameraProvider) {
    //... code omitted
    // Receive every frame the camera delivers.
     imageAnalysis.setAnalyzer(executor, new ImageAnalysis.Analyzer() {
        @Override
        public void analyze(@NonNull ImageProxy imageProxy) {
            int rotationDegrees = imageProxy.getImageInfo().getRotationDegrees();
            Image image = imageProxy.getImage();
            // Repack the planes into one contiguous I420 byte buffer.
            byte[] i420 = convertYuvBuffer(image);
            imageProxy.close();
        }
    });
    //... code omitted
}

Image 的 getPlanes() 返回一个数组,该数组的 [0] [1] [2] 在输出格式为 YUV420_888 时分别对应 Y 分量数据、U 分量数据、V 分量数据。

四、YUV编码H.264

需求c/c++基础,了解JNI

Java层代码

// Initializes the native H.264 encoder (must run before yuvToH264).
public native void init();
// Rotates an I420 buffer via libyuv; width and height swap afterwards.
public native byte[] libyuvI420Roate90(byte[] i420, int w, int h);
// Callback invoked from C after encoding; data holds the H.264 bitstream.
public void getData(byte[] data,int w,int h) {
    // data is encoded H.264
}
 imageAnalysis.setAnalyzer(executor, new ImageAnalysis.Analyzer() {
        @Override
        public void analyze(@NonNull ImageProxy imageProxy) {
            int rotationDegrees = imageProxy.getImageInfo().getRotationDegrees();
            Image image = imageProxy.getImage();
            int width = image.getWidth();
            int height = image.getHeight();
            // Repack planes into contiguous I420.
            byte[] i420 = convertYuvBuffer(image);
            // Rotate so the image is upright; dimensions swap.
            byte[] i420_Roate270 = libyuvI420Roate90(i420, width, height);
            // Initialize the encoder lazily on the first frame.
            if (isFirst) {
                init();
                isFirst = false;
            }
            // Encode the frame (result arrives via getData callback).
            yuvToH264(i420_Roate270);
            imageProxy.close();
        }
    });

注意这里需要用 libyuv 库对 yuv 进行旋转,因为 android 摄像头输出的图像是旋转过的,此时的 width = height,height = width,所以需要进行90度的旋转之后才是正确的图像,width 和 height 也才是正确的。

C/C++层代码 ./native-lib.cpp

#include <jni.h>
#include <string>
#include "android/log.h"
#include <unistd.h>
#include <libyuv/convert_argb.h>
//ffmpeg是c代码,需求cpp文件需求兼容c代码
extern "C" {
    #include "libavcodec/avcodec.h"
    #include "libavfilter/avfilter.h"
    #include "libavformat/avformat.h"
    #include "libavutil/avutil.h"
    #include "libavutil/ffversion.h"
    #include "libswresample/swresample.h"
    #include "libswscale/swscale.h"
    #include "libpostproc/postprocess.h"
    #include "libavutil/imgutils.h"
    #include "libyuv.h"
    #include <libavutil/time.h>
    #include <libavutil/opt.h>
}
// Rotates an I420 frame by `degree` (0, 90, 180, or 270) with libyuv.
// src and dst each hold width*height*3/2 bytes; for 90/270 the logical
// width/height of dst are swapped relative to src.
// (The original only handled 90/270 and silently left dst untouched for
// any other value — generalized here with the correct dst strides.)
void rotateI420(jbyte *src_i420_data, jint width, jint height, jbyte *dst_i420_data, jint degree) {
    // I420 plane layout: Y (w*h) | U (w/2 * h/2) | V (w/2 * h/2).
    jint src_i420_y_size = width * height;
    jint src_i420_u_size = (width >> 1) * (height >> 1);
    jbyte *src_i420_y_data = src_i420_data;
    jbyte *src_i420_u_data = src_i420_data + src_i420_y_size;
    jbyte *src_i420_v_data = src_i420_data + src_i420_y_size + src_i420_u_size;
    jbyte *dst_i420_y_data = dst_i420_data;
    jbyte *dst_i420_u_data = dst_i420_data + src_i420_y_size;
    jbyte *dst_i420_v_data = dst_i420_data + src_i420_y_size + src_i420_u_size;
    // Destination rows are `height` pixels wide after a 90/270 rotation,
    // unchanged for 0/180.
    jint dst_stride_y =
            (degree == libyuv::kRotate90 || degree == libyuv::kRotate270) ? height : width;
    libyuv::I420Rotate((const uint8_t *) src_i420_y_data, width,
                       (const uint8_t *) src_i420_u_data, width >> 1,
                       (const uint8_t *) src_i420_v_data, width >> 1,
                       (uint8_t *) dst_i420_y_data, dst_stride_y,
                       (uint8_t *) dst_i420_u_data, dst_stride_y >> 1,
                       (uint8_t *) dst_i420_v_data, dst_stride_y >> 1,
                       width, height,
                       (libyuv::RotationMode) degree);
}
//H.264 encoder (resolved by avcodec_find_encoder in init)
AVCodec *out_h264_codec = nullptr;
//encoder parameter table
AVCodecParameters *out_h264_codec_params = nullptr;
//encoder context
AVCodecContext *out_h264_codec_ctx = nullptr;
//frame that receives the raw YUV input data
AVFrame *out_h264_frame = nullptr;
//packet that receives the encoded output data
AVPacket *out_h264_packet = av_packet_alloc();
//byte size of one I420 image (set in init)
size_t img_size;
//buffer holding the image handed to the encoder
uint8_t *out_picture_buf = nullptr;
//real width/height of the image (after rotation)
int h, w;
//frame counter, used as pts
uint64_t q = 0;
//number of luma (Y) bytes per frame: width * height
uint64_t y_size = 0;
extern "C"
JNIEXPORT void JNICALL
Java_com_exp_cameraxtest_MainActivity_init(JNIEnv *env, jobject thiz) {
    //allocate and fill the codec parameter table
    out_h264_codec_params = avcodec_parameters_alloc();
    out_h264_codec_params->codec_type = AVMEDIA_TYPE_VIDEO;
    out_h264_codec_params->width = w;
    out_h264_codec_params->height = h;
    //target bitrate — higher means clearer output
    out_h264_codec_params->bit_rate = 4000000;
    //look up the H.264 encoder (supplied by the linked libx264)
    out_h264_codec = avcodec_find_encoder(AV_CODEC_ID_H264);
    if(out_h264_codec == NULL) {
        return;
    }
    //allocate the encoder context
    out_h264_codec_ctx = avcodec_alloc_context3(out_h264_codec);
    //copy the parameter table into the context
    avcodec_parameters_to_context(out_h264_codec_ctx,out_h264_codec_params);
    if(out_h264_codec_ctx == NULL) {
        return;
    }
    //configure the encoder context
    out_h264_codec_ctx->codec_id = AV_CODEC_ID_H264;
    //stream type: video
    out_h264_codec_ctx->codec_type = AVMEDIA_TYPE_VIDEO;
    //input pixel format: planar YUV 4:2:0
    out_h264_codec_ctx->pix_fmt = AV_PIX_FMT_YUV420P;
    out_h264_codec_ctx->width = w;
    out_h264_codec_ctx->height = h;
    //time base 1/60 (fps)
    out_h264_codec_ctx->time_base.num = 1;
    out_h264_codec_ctx->time_base.den = 60;
    //target bitrate
    out_h264_codec_ctx->bit_rate = 4000000;
    //GOP size 15: moderate — too large causes blocking on fast motion
    out_h264_codec_ctx->gop_size = 15;
    //no B-frames (lower latency, I/P frames only)
    out_h264_codec_ctx->max_b_frames = 0;
    if(out_h264_codec_ctx->codec_id == AV_CODEC_ID_H264) {
        //typical x264 quantizer range
        out_h264_codec_ctx->qmin = 10;
        out_h264_codec_ctx->qmax = 51;
        out_h264_codec_ctx->qcompress = (float) 0.6;
        //slower preset = better quality per bit; "slow" is a middle ground
        av_opt_set(out_h264_codec_ctx->priv_data, "preset", "slow", 0);
        //zero-latency tuning for real-time streaming
        av_opt_set(out_h264_codec_ctx->priv_data, "tune", "zerolatency", 0);
    }
    //open the encoder
    if(avcodec_open2(out_h264_codec_ctx,out_h264_codec,NULL) < 0 ) {
        return;
    }
    //allocate the input frame
    out_h264_frame = av_frame_alloc();
    out_h264_frame->width = out_h264_codec_ctx->width;
    out_h264_frame->height = out_h264_codec_ctx->height;
    out_h264_frame->format = out_h264_codec_ctx->pix_fmt;
    //bytes needed for one YUV420P image at this width/height
    img_size = (size_t) av_image_get_buffer_size(AV_PIX_FMT_YUV420P,out_h264_codec_ctx->width,out_h264_codec_ctx->height,1);
    //allocate one image worth of memory
    out_picture_buf = (uint8_t*)av_malloc(img_size);
    //point frame->data/linesize into out_picture_buf (no copy; the frame
    //aliases this buffer from now on)
    av_image_fill_arrays(
            out_h264_frame->data,out_h264_frame->linesize,
            out_picture_buf,out_h264_codec_ctx->pix_fmt,out_h264_codec_ctx->width,
            out_h264_codec_ctx->height,1
            );
    y_size = out_h264_codec_ctx->width * out_h264_codec_ctx->height;
    //pre-size the packet generously so encoded output always fits
    av_new_packet(out_h264_packet,(int)(img_size * 3));
}
extern "C"
JNIEXPORT jbyteArray JNICALL
Java_com_exp_cameraxtest_MainActivity_libyuvI420Roate90(JNIEnv *env, jobject thiz, jbyteArray i420,jint wq, jint hq) {
    // Rotates an I420 buffer 90 degrees and returns the rotated copy.
    // Fixes from the original: the return type was declared `void` while
    // `return array;` was used (Java declares byte[]); the pinned arrays
    // were never released; and a redundant third array/copy was made.
    jbyte *src = env->GetByteArrayElements(i420, nullptr);
    const jsize len = wq * hq * 3 / 2;
    jbyteArray out = env->NewByteArray(len);
    jbyte *dst = env->GetByteArrayElements(out, nullptr);
    // Rotate 90 degrees directly into the result array's storage.
    rotateI420(src, wq, hq, dst, 90);
    // Commit dst back into the Java array; source is read-only (JNI_ABORT).
    env->ReleaseByteArrayElements(out, dst, 0);
    env->ReleaseByteArrayElements(i420, src, JNI_ABORT);
    // After rotation the logical width and height swap.
    w = hq;
    h = wq;
    LOGD(TAG,"%d x %d",w,h);
    return out;
}
extern "C"
JNIEXPORT void JNICALL
Java_com_exp_cameraxtest_MainActivity_yuvToH264(JNIEnv *env, jobject thiz,jbyteArray yuv420) {
    // Encodes one I420 frame to H.264 and delivers each resulting packet to
    // the Java callback getData(byte[], int, int).
    // Fixes from the original: the pinned input array was never released
    // (leak per frame); NewByteArray local refs were never deleted inside
    // the loop (local-ref table overflow on long sessions); the class/method
    // lookup was repeated for every packet.
    jbyte *bytess = env->GetByteArrayElements(yuv420, nullptr);
    // Copy the frame into the buffer that out_h264_frame aliases.
    memcpy(out_picture_buf, (uint8_t *) bytess, img_size);
    // Input is read-only: release the pin without copying back.
    env->ReleaseByteArrayElements(yuv420, bytess, JNI_ABORT);
    // I420 plane offsets: Y | U at y_size | V at y_size * 5/4.
    out_h264_frame->data[0] = out_picture_buf;
    out_h264_frame->data[1] = out_picture_buf + y_size;
    out_h264_frame->data[2] = out_picture_buf + y_size * 5 / 4;
    // Monotonic pts from the frame counter.
    out_h264_frame->pts = q;
    q++;
    // Push the raw frame into the encoder.
    if (avcodec_send_frame(out_h264_codec_ctx, out_h264_frame) >= 0) {
        // Resolve the Java callback once per call, not once per packet.
        jclass jazz = env->GetObjectClass(thiz);
        jmethodID jid = env->GetMethodID(jazz, "getData", "([BII)V");
        // Drain every packet the encoder has ready.
        while (avcodec_receive_packet(out_h264_codec_ctx, out_h264_packet) >= 0) {
            out_h264_packet->pos = -1;
            // Copy the encoded bytes into a fresh Java array.
            jbyteArray array = env->NewByteArray(out_h264_packet->size);
            env->SetByteArrayRegion(array, 0, out_h264_packet->size, (jbyte *) out_h264_packet->data);
            // Hand the H.264 data plus the frame dimensions to Java.
            env->CallVoidMethod(thiz, jid, array, w, h);
            // Drop the local ref immediately; JNI only auto-frees on return.
            env->DeleteLocalRef(array);
            // Reset the packet for reuse on the next iteration.
            av_packet_unref(out_h264_packet);
        }
        env->DeleteLocalRef(jazz);
    }
}

这儿用到了c调用java办法的过程,其间GetMethodID的签名需求用jdk指令去获取,或者能够百度一下java办法签名规矩能够自行拼接

流程

  1. 初始化/设置编码器参数,初始化frame 初始化packet
  2. 设置图片内存巨细与frame->data内存对其av_image_get_buffer_sizeav_image_fill_arrays
  3. 先将yuv旋转90度 利用livyuv库libyuv::I420Rotate
  4. 扔进编码器avcodec_send_frame编码后通过avcodec_receive_packet取出packet
  5. 拿到当时classGetObjectClass,获取java办法签名GetMethodID,call javaCallVoidMethod