I have a question that I hope you can help me with.
I want to create an Android application in Unity. The application activates the device's camera and shows the image on the screen. For this I want to rely on native C++ code that uses OpenCV.
I have the code written, but when I run the application I see the scene but not the camera image, and I have the feeling that it is because OpenCV's VideoCapture is not working properly on Android. Can you help me? I attach the code:
C++:
#include <opencv2/opencv.hpp>
#include <algorithm>

using namespace cv;
using namespace std;

// Globals used by the plugin.
VideoCapture camera;
Rect trueRect;
int midX, midY, wi, he;

// extern "C" keeps the names unmangled so that Unity's DllImport
// can find the entry points.
extern "C" __declspec(dllexport) void iniciar(int& widt, int& heigh) {
    camera.open(0);
    if (!camera.isOpened())
    {
        return;
    }
    widt = (int)camera.get(CAP_PROP_FRAME_WIDTH);
    heigh = (int)camera.get(CAP_PROP_FRAME_HEIGHT);
    trueRect.x = 5;
    trueRect.y = 5;
    trueRect.width = 100;
    trueRect.height = 100;
    midX = 1;
    midY = 1;
    wi = 0;
    he = 0;
}
extern "C" __declspec(dllexport) void video(unsigned char* arr) {
    Mat frame;
    Mat resi;
    Mat dst; // dst image
    camera >> frame;                        // grab the next frame
    if (frame.empty()) {
        return;
    }
    flip(frame, dst, 1);                    // mirror the image horizontally
    //resize(dst, resi, Size(width, height));
    cvtColor(dst, dst, COLOR_BGR2RGB);      // OpenCV delivers BGR; Unity expects RGB
    copy(dst.datastart, dst.dataend, arr);  // copy the pixels into the managed buffer
}
C#:
using UnityEngine;
using System.Runtime.InteropServices;

public class camara : MonoBehaviour {

    [DllImport("NativoPrincipio")]
    public static extern void video(byte[] img);

    [DllImport("NativoPrincipio")]
    public static extern void iniciar(ref int widt, ref int heigh);

    WebCamTexture back;
    Texture2D textura;
    byte[] imgData;
    int width = 0;
    int height = 0;

    // Use this for initialization
    void Start () {
        back = new WebCamTexture();
        //GetComponent<Renderer>().material.mainTexture = back;
        //back.Play();
        iniciar(ref width, ref height);
    }

    // Update is called once per frame
    void Update ()
    {
        // If the native side failed to open the camera, width and height stay 0.
        if (width == 0 || height == 0) {
            return;
        }
        // RGB24 is 3 bytes per pixel, so the buffer must be width * height * 3
        // (the original width * height * 4 did not match the RGB data the plugin copies).
        imgData = new byte[width * height * 3];
        video(imgData);
        // Note: allocating a new Texture2D every frame is wasteful; it could be
        // created once in Start and reused.
        textura = new Texture2D(width, height, TextureFormat.RGB24, false);
        textura.LoadRawTextureData(imgData);
        textura.Apply();
        GetComponent<Renderer>().material.mainTexture = textura;
        imgData = null;
        textura = null;
    }
}