通过CameraX
的Analyzer
方法得到的图片对象是ImageProxy
对象,而OpenCV
的处理对象必须是Mat
。
在处理这几个转换过程中碰见过的问题有:
1. ImageProxy 如何转Mat?
1. 部分手机摄像头数据需要进行90°到270°的纠正,该如何处理?
1. Mat如何转Bitmap 给到View进行显示?
如果你也有以上的疑问,那么本篇内容应该会给你一点参考价值。
前期如何初始化CameraX
并获取ImageProxy
就不说了。我们从得到ImageProxy
对象开始处理转换吧。
前期由于项目需求,处理过ImageProxy
转Bitmap
。而OpenCV SDK
本身就提供了Bitmap
转Mat
的函数:Utils.bitmapToMat(Bitmap bmp, Mat mat)
。
所以开始的时候,没有自己造轮子,直接使用了现有的转换方法。下面贴一下相关代码:
/**
* 用于转换Bitmap 的工具类
*/
public class BitmapUtils {
/**
* Converts a YUV_420_888 image from CameraX API to a bitmap.
*/
@RequiresApi(VERSION_CODES.KITKAT)
@Nullable
@ExperimentalGetImage
public static Bitmap getBitmap(ImageProxy image) {
ByteBuffer nv21Buffer =
yuv420ThreePlanesToNV21(image.getImage().getPlanes(), image.getWidth(), image.getHeight());
return getBitmap(nv21Buffer, image.getWidth(), image.getHeight(), image.getImageInfo().getRotationDegrees());
}
/**
* Converts NV21 format byte buffer to bitmap.
*/
@Nullable
public static Bitmap getBitmap(ByteBuffer data, int width, int height, int rotation) {
data.rewind();
byte[] imageInBuffer = new byte[data.limit()];
data.get(imageInBuffer, 0, imageInBuffer.length);
try {
YuvImage image =
new YuvImage(
imageInBuffer, ImageFormat.NV21, width, height, null);
ByteArrayOutputStream stream = new ByteArrayOutputStream();
image.compressToJpeg(new Rect(0, 0, width, height), 80, stream);
Bitmap bmp = BitmapFactory.decodeByteArray(stream.toByteArray(), 0, stream.size());
stream.close();
return rotateBitmap(bmp, rotation, false, false);
} catch (Exception e) {
Log.e("VisionProcessorBase", "Error: " + e.getMessage());
}
return null;
}
/**
* Rotates a bitmap if it is converted from a bytebuffer.
*/
private static Bitmap rotateBitmap(
Bitmap bitmap, int rotationDegrees, boolean flipX, boolean flipY) {
Matrix matrix = new Matrix();
// Rotate the image back to straight.
matrix.postRotate(rotationDegrees);
// Mirror the image along the X or Y axis.
matrix.postScale(flipX ? -1.0f : 1.0f, flipY ? -1.0f : 1.0f);
Bitmap rotatedBitmap =
Bitmap.createBitmap(bitmap, 0, 0, bitmap.getWidth(), bitmap.getHeight(), matrix, true);
// Recycle the old bitmap if it has changed.
if (rotatedBitmap != bitmap) {
bitmap.recycle();
}
return rotatedBitmap;
}
/**
* Converts YUV_420_888 to NV21 bytebuffer.
*
* <p>The NV21 format consists of a single byte array containing the Y, U and V values. For an
* image of size S, the first S positions of the array contain all the Y values. The remaining
* positions contain interleaved V and U values. U and V are subsampled by a factor of 2 in both
* dimensions, so there are S/4 U values and S/4 V values. In summary, the NV21 array will contain
* S Y values followed by S/4 VU values: YYYYYYYYYYYYYY(...)YVUVUVUVU(...)VU
*
* <p>YUV_420_888 is a generic format that can describe any YUV image where U and V are subsampled
* by a factor of 2 in both dimensions. {@link Image#getPlanes} returns an array with the Y, U and
* V planes. The Y plane is guaranteed not to be interleaved, so we can just copy its values into
* the first part of the NV21 array. The U and V planes may already have the representation in the
* NV21 format. This happens if the planes share the same buffer, the V buffer is one position
* before the U buffer and the planes have a pixelStride of 2. If this is case, we can just copy
* them to the NV21 array.
*/
@RequiresApi(VERSION_CODES.KITKAT)
private static ByteBuffer yuv420ThreePlanesToNV21(
Plane[] yuv420888planes, int width, int height) {
int imageSize = width * height;
byte[] out = new byte[imageSize + 2 * (imageSize / 4)];
if (areUVPlanesNV21(yuv420888planes, width, height)) {
// Copy the Y values.
yuv420888planes[0].getBuffer().get(out, 0, imageSize);
ByteBuffer uBuffer = yuv420888planes[1].getBuffer();
ByteBuffer vBuffer = yuv420888planes[2].getBuffer();
// Get the first V value from the V buffer, since the U buffer does not contain it.
vBuffer.get(out, imageSize, 1);
// Copy the first U value and the remaining VU values from the U buffer.
uBuffer.get(out, imageSize + 1, 2 * imageSize / 4 - 1);
} else {
// Fallback to copying the UV values one by one, which is slower but also works.
// Unpack Y.
unpackPlane(yuv420888planes[0], width, height, out, 0, 1);
// Unpack U.
unpackPlane(yuv420888planes[1], width, height, out, imageSize + 1, 2);
// Unpack V.
unpackPlane(yuv420888planes[2], width, height, out, imageSize, 2);
}
return ByteBuffer.wrap(out);
}
/**
* Checks if the UV plane buffers of a YUV_420_888 image are in the NV21 format.
*/
@RequiresApi(VERSION_CODES.KITKAT)
private static boolean areUVPlanesNV21(Plane[] planes, int width, int height) {
int imageSize = width * height;
ByteBuffer uBuffer = planes[1].getBuffer();
ByteBuffer vBuffer = planes[2].getBuffer();
// Backup buffer properties.
int vBufferPosition = vBuffer.position();
int uBufferLimit = uBuffer.limit();
// Advance the V buffer by 1 byte, since the U buffer will not contain the first V value.
vBuffer.position(vBufferPosition + 1);
// Chop off the last byte of the U buffer, since the V buffer will not contain the last U value.
uBuffer.limit(uBufferLimit - 1);
// Check that the buffers are equal and have the expected number of elements.
boolean areNV21 =
(vBuffer.remaining() == (2 * imageSize / 4 - 2)) && (vBuffer.compareTo(uBuffer) == 0);
// Restore buffers to their initial state.
vBuffer.position(vBufferPosition);
uBuffer.limit(uBufferLimit);
return areNV21;
}
/**
* Unpack an image plane into a byte array.
*
* <p>The input plane data will be copied in 'out', starting at 'offset' and every pixel will be
* spaced by 'pixelStride'. Note that there is no row padding on the output.
*/
@TargetApi(VERSION_CODES.KITKAT)
private static void unpackPlane(
Plane plane, int width, int height, byte[] out, int offset, int pixelStride) {
ByteBuffer buffer = plane.getBuffer();
buffer.rewind();
// Compute the size of the current plane.
// We assume that it has the aspect ratio as the original image.
int numRow = (buffer.limit() + plane.getRowStride() - 1) / plane.getRowStride();
if (numRow == 0) {
return;
}
int scaleFactor = height / numRow;
int numCol = width / scaleFactor;
// Extract the data in the output buffer.
int outputPos = offset;
int rowStart = 0;
for (int row = 0; row < numRow; row++) {
int inputPos = rowStart;
for (int col = 0; col < numCol; col++) {
out[outputPos] = buffer.get(inputPos);
outputPos += pixelStride;
inputPos += plane.getPixelStride();
}
rowStart += plane.getRowStride();
}
}
}
直接调用 BitmapUtils.getBitmap()
将ImageProxy
对象传进去即可。转换完毕后的Bitmap
是进行过角度校正的。也就是说竖着拍摄时相机旋转90°等问题,进行了matrix.postRotate
矩阵变化,将照片角度进行了纠正。
将得到的Bitmap对象给到OpenCV的Util对象进行转换得到Mat对象即可,实例:
Bitmap bitmap = BitmapUtils.getBitmap(imageProxy);
Mat mat = new Mat();
Utils.bitmapToMat(bitmap, mat); // 注意 这个Bitmap对象只能是 ARGB_8888 和RGB_565 类型的。如果转换失败是会崩溃出现异常的
到这里我们ImageProxy
就可以转为Mat
对象了。
如果我们想将Mat
在转换为Bitmap
,实例:
Bitmap bitmap = Bitmap.createBitmap(mat.width(), mat.height(), Bitmap.Config.ARGB_8888);
Utils.matToBitmap(mat, bitmap);
就可以得到该Bitmap
对象了。
但是这种效率明显不太高,因为要先生成Bitmap对象再进行转换。那么有没有直接转换的方法呢?
当然有。
我们参考OpenCV SDK
中的JavaCamera2View
的内部私有类JavaCamera2Frame
的实现效果,它就是将Image
对象转为了Mat
对象。
那么ImageProxy
也是有Image
对象的:ImageProxy.getImage()
。
我们直接复制一份改改代码权限得到:
/**
 * Converts an {@code android.media.Image} in YUV_420_888 format into OpenCV {@code Mat}
 * objects (grayscale via {@link #gray()} or RGBA via {@link #rgba()}).
 *
 * <p>Adapted from the private {@code JavaCamera2Frame} inner class inside OpenCV's
 * {@code JavaCamera2View}, with access widened so it can be fed from
 * {@code ImageProxy.getImage()}.
 *
 * <p>Call {@link #release()} when finished to free the cached Mats. The source Image is
 * NOT closed by this class — the caller still owns it.
 */
public class ImageUtil {
public ImageUtil(Image image) {
super();
mImage = image;
mRgba = new Mat();
mGray = new Mat();
}
// Source frame; not owned by this class — the caller must close it.
private Image mImage;
// Cached conversion results, released in release().
private Mat mRgba;
private Mat mGray;
/**
 * Returns the luminance (Y) plane as a single-channel 8-bit Mat.
 *
 * <p>NOTE(review): the Mat wraps the Y plane's ByteBuffer directly (no copy), so it is
 * presumably only valid while the underlying Image is still open — confirm before
 * caching the result past close().
 */
public Mat gray() {
Image.Plane[] planes = mImage.getPlanes();
int w = mImage.getWidth();
int h = mImage.getHeight();
// Y plane must be tightly packed per pixel for the zero-copy wrap below.
assert (planes[0].getPixelStride() == 1);
ByteBuffer y_plane = planes[0].getBuffer();
int y_plane_step = planes[0].getRowStride();
mGray = new Mat(h, w, CvType.CV_8UC1, y_plane, y_plane_step);
return mGray;
}
/**
 * Converts the frame to a 4-channel RGBA Mat.
 *
 * <p>Two paths: if the chroma planes are interleaved (pixelStride == 2, i.e. the U and V
 * buffers overlap as semi-planar NV12/NV21), a two-plane color conversion is used.
 * Otherwise the three planes are repacked row-by-row into a contiguous I420 buffer and
 * converted from that.
 */
public Mat rgba() {
Image.Plane[] planes = mImage.getPlanes();
int w = mImage.getWidth();
int h = mImage.getHeight();
int chromaPixelStride = planes[1].getPixelStride();
if (chromaPixelStride == 2) { // Chroma channels are interleaved
assert (planes[0].getPixelStride() == 1);
assert (planes[2].getPixelStride() == 2);
ByteBuffer y_plane = planes[0].getBuffer();
int y_plane_step = planes[0].getRowStride();
ByteBuffer uv_plane1 = planes[1].getBuffer();
int uv_plane1_step = planes[1].getRowStride();
ByteBuffer uv_plane2 = planes[2].getBuffer();
int uv_plane2_step = planes[2].getRowStride();
Mat y_mat = new Mat(h, w, CvType.CV_8UC1, y_plane, y_plane_step);
Mat uv_mat1 = new Mat(h / 2, w / 2, CvType.CV_8UC2, uv_plane1, uv_plane1_step);
Mat uv_mat2 = new Mat(h / 2, w / 2, CvType.CV_8UC2, uv_plane2, uv_plane2_step);
// The relative address of the U and V buffers decides NV12 vs NV21:
// U one byte before V => NV12; V one byte before U => NV21.
long addr_diff = uv_mat2.dataAddr() - uv_mat1.dataAddr();
if (addr_diff > 0) {
assert (addr_diff == 1);
Imgproc.cvtColorTwoPlane(y_mat, uv_mat1, mRgba, Imgproc.COLOR_YUV2RGBA_NV12);
} else {
assert (addr_diff == -1);
Imgproc.cvtColorTwoPlane(y_mat, uv_mat2, mRgba, Imgproc.COLOR_YUV2RGBA_NV21);
}
return mRgba;
} else { // Chroma channels are not interleaved
// Repack Y, U, V into one contiguous I420 buffer: w*h luma + 2 * (w/2 * h/2) chroma.
byte[] yuv_bytes = new byte[w * (h + h / 2)];
ByteBuffer y_plane = planes[0].getBuffer();
ByteBuffer u_plane = planes[1].getBuffer();
ByteBuffer v_plane = planes[2].getBuffer();
int yuv_bytes_offset = 0;
int y_plane_step = planes[0].getRowStride();
if (y_plane_step == w) {
// No row padding: copy the whole luma plane in one go.
y_plane.get(yuv_bytes, 0, w * h);
yuv_bytes_offset = w * h;
} else {
// Row padding present: copy row by row, skipping the padding bytes.
int padding = y_plane_step - w;
for (int i = 0; i < h; i++) {
y_plane.get(yuv_bytes, yuv_bytes_offset, w);
yuv_bytes_offset += w;
if (i < h - 1) {
y_plane.position(y_plane.position() + padding);
}
}
assert (yuv_bytes_offset == w * h);
}
int chromaRowStride = planes[1].getRowStride();
int chromaRowPadding = chromaRowStride - w / 2;
if (chromaRowPadding == 0) {
// When the row stride of the chroma channels equals their width, we can copy
// the entire channels in one go
u_plane.get(yuv_bytes, yuv_bytes_offset, w * h / 4);
yuv_bytes_offset += w * h / 4;
v_plane.get(yuv_bytes, yuv_bytes_offset, w * h / 4);
} else {
// When not equal, we need to copy the channels row by row
for (int i = 0; i < h / 2; i++) {
u_plane.get(yuv_bytes, yuv_bytes_offset, w / 2);
yuv_bytes_offset += w / 2;
if (i < h / 2 - 1) {
u_plane.position(u_plane.position() + chromaRowPadding);
}
}
for (int i = 0; i < h / 2; i++) {
v_plane.get(yuv_bytes, yuv_bytes_offset, w / 2);
yuv_bytes_offset += w / 2;
if (i < h / 2 - 1) {
v_plane.position(v_plane.position() + chromaRowPadding);
}
}
}
Mat yuv_mat = new Mat(h + h / 2, w, CvType.CV_8UC1);
yuv_mat.put(0, 0, yuv_bytes);
Imgproc.cvtColor(yuv_mat, mRgba, Imgproc.COLOR_YUV2RGBA_I420, 4);
return mRgba;
}
}
/** Releases the cached Mats. Does not close the source Image. */
public void release() {
mRgba.release();
mGray.release();
}
}
使用实例:
ImageUtil util = new ImageUtil(imageProxy.getImage());
Mat rgb = util.rgba();
//TODO 处理我们要对Mat做的业务计算
util.release();
imageProxy.close();
但是,直接转换后会发现图片的角度不对,需要进行旋转。否则竖着拍摄就是横着的照片了。简单的方法就是直接对Mat进行角度处理。
在ImageUtil中添加:
/**
 * Rotates the given Mat in place to compensate for the camera sensor orientation.
 *
 * <p>Fixes a compile error in the original: the body referenced {@code rptation}
 * (a typo) instead of the {@code rotation} parameter.
 *
 * @param rotation clockwise rotation in degrees, as reported by
 *                 {@code imageProxy.getImageInfo().getRotationDegrees()};
 *                 expected values are 0, 90, 180 or 270 (0 is a no-op)
 * @param mat      the image to rotate; modified in place
 */
public void rotation(int rotation, Mat mat) {
    if (rotation == 90) {
        // Transpose then flip around the Y axis == rotate 90° clockwise.
        Core.transpose(mat, mat);
        Core.flip(mat, mat, 1);
    } else if (rotation == 180) {
        // Flip around the X axis then the Y axis == rotate 180°.
        Core.flip(mat, mat, 0);
        Core.flip(mat, mat, 1);
    } else if (rotation == 270) {
        // Transpose then flip around the X axis == rotate 90° counter-clockwise.
        Core.transpose(mat, mat);
        Core.flip(mat, mat, 0);
    }
}
而rotation的角度值,在ImageProxy中有。获取方法为:int rotation = imageProxy.getImageInfo().getRotationDegrees();
所以完整实例为:
int rotation = imageProxy.getImageInfo().getRotationDegrees();
ImageUtil util = new ImageUtil(imageProxy.getImage());
Mat rgb = util.rgba();
util.rotation(rotation, rgb);
//TODO 处理我们要对Mat做的业务计算
util.release(); //要释放资源哦
imageProxy.close();
到这里,转换的就结束了。