I'm trying to recognize two different objects using two different cascade classifiers. I modified the face detection sample, but I have a problem: when I try to recognize the object from object1.xml it draws both rectangles, and when I try to recognize the object from object2.xml it doesn't draw anything...
Where is the problem?
private BaseLoaderCallback mLoaderCallback = new BaseLoaderCallback(this) {
@Override
public void onManagerConnected(int status) {
switch (status) {
case LoaderCallbackInterface.SUCCESS:
{
Log.i(TAG, "OpenCV loaded successfully");
// Load native library after(!) OpenCV initialization
System.loadLibrary("detection_based_tracker");
try {
// load cascade file from application resources
InputStream is = getResources().openRawResource(R.raw.object1);
File cascadeDir = getDir("cascade", Context.MODE_PRIVATE);
mCascadeFile = new File(cascadeDir, "object1.xml");
FileOutputStream os = new FileOutputStream(mCascadeFile);
/* By me */
InputStream is2 = getResources().openRawResource(R.raw.object2);
mCascadeFile2 = new File(cascadeDir, "object2.xml");
FileOutputStream os2 = new FileOutputStream(mCascadeFile2);
byte[] buffer2 = new byte[4096];
int bytesRead2;
while ((bytesRead2 = is2.read(buffer2)) != -1) {
os2.write(buffer2, 0, bytesRead2);
}
is2.close();
os2.close();
/* ............... */
byte[] buffer = new byte[4096];
int bytesRead;
while ((bytesRead = is.read(buffer)) != -1) {
os.write(buffer, 0, bytesRead);
}
is.close();
os.close();
mJavaDetector = new CascadeClassifier(mCascadeFile.getAbsolutePath());
/* By me */
mJavaDetector2 = new CascadeClassifier(mCascadeFile2.getAbsolutePath());
/* ........ */
if (mJavaDetector.empty() || mJavaDetector2.empty()) {
Log.e(TAG, "Failed to load cascade classifier");
mJavaDetector = null;
/* By me */
mJavaDetector2 = null;
} else
Log.i(TAG, "Loaded cascade classifier from " + mCascadeFile.getAbsolutePath() + mCascadeFile2.getAbsolutePath());
mNativeDetector = new DetectionBasedTracker(mCascadeFile.getAbsolutePath(), 0);
/**/
mNativeDetector2 = new DetectionBasedTracker(mCascadeFile2.getAbsolutePath(), 0);
cascadeDir.delete();
} catch (IOException e) {
e.printStackTrace();
Log.e(TAG, "Failed to load cascade. Exception thrown: " + e);
}
mOpenCvCameraView.enableView();
} break;
default:
{
super.onManagerConnected(status);
} break;
}
}
};
public Mat onCameraFrame(CvCameraViewFrame inputFrame) {
mRgba = inputFrame.rgba();
mGray = inputFrame.gray();
if (mAbsoluteFaceSize == 0) {
int height = mGray.rows();
if (Math.round(height * mRelativeFaceSize) > 0) {
mAbsoluteFaceSize = Math.round(height * mRelativeFaceSize);
}
mNativeDetector.setMinFaceSize(mAbsoluteFaceSize);
/* ...... */
mNativeDetector2.setMinFaceSize(mAbsoluteFaceSize);
}
MatOfRect faces = new MatOfRect();
/* ....... */
MatOfRect faces2 = new MatOfRect();
if (mDetectorType == JAVA_DETECTOR) {
if (mJavaDetector != null && mJavaDetector2 != null) {
mJavaDetector.detectMultiScale(mGray, faces, 1.1, 2, 2, new Size(mAbsoluteFaceSize, mAbsoluteFaceSize), new Size());
/* ......... */
mJavaDetector2.detectMultiScale(mGray, faces2, 1.1, 2, 2, new Size(mAbsoluteFaceSize, mAbsoluteFaceSize), new Size());
}
}
else if (mDetectorType == NATIVE_DETECTOR) {
if (mNativeDetector != null && mNativeDetector2 != null){
mNativeDetector.detect(mGray, faces);
/* ......... */
mNativeDetector2.detect(mGray, faces2);
}
}
else {
Log.e(TAG, "Detection method is not selected!");
}
Rect[] facesArray = faces.toArray();
for (int i = 0; i < facesArray.length; i++){
Core.rectangle(mRgba, facesArray[i].tl(), facesArray[i].br(), FACE_RECT_COLOR, 3);
Core.putText(mRgba, "Obj1", facesArray[i].tl(), 1, 2, new Scalar(255, 0, 0, 255), 3);
}
/* ...... */
Rect[] facesArray2 = faces2.toArray();
for (int i = 0; i < facesArray2.length; i++){
Core.rectangle(mRgba, facesArray2[i].tl(), facesArray2[i].br(), FACE_RECT_COLOR, 3);
Core.putText(mRgba, "Obj2", facesArray2[i].tl(), 1, 2, new Scalar(255, 0, 0, 255), 3);
}
return mRgba;
}
Or is there a way to detect different objects from the same .xml?
SOLVED
I updated my code in this way:
private BaseLoaderCallback mLoaderCallback = new BaseLoaderCallback(this) {
@Override
public void onManagerConnected(int status) {
switch (status) {
case LoaderCallbackInterface.SUCCESS:
{
Log.i(TAG, "OpenCV loaded successfully");
// Load native library after(!) OpenCV initialization
System.loadLibrary("detection_based_tracker");
try {
// load cascade file from application resources
InputStream is = getResources().openRawResource(R.raw.lbpcascade_frontalface);
File cascadeDir = getDir("cascade", Context.MODE_PRIVATE);
mCascadeFile = new File(cascadeDir, "lbpcascade_frontalface.xml");
FileOutputStream os = new FileOutputStream(mCascadeFile);
byte[] buffer = new byte[4096];
int bytesRead;
while ((bytesRead = is.read(buffer)) != -1) {
os.write(buffer, 0, bytesRead);
}
is.close();
os.close();
/* By me */
InputStream is2 = getResources().openRawResource(R.raw.haarcascade_frontalface_alt);
File cascadeDir2 = getDir("cascade2", Context.MODE_PRIVATE);
mCascadeFile2 = new File(cascadeDir2, "haarcascade_frontalface_alt.xml");
FileOutputStream os2 = new FileOutputStream(mCascadeFile2);
byte[] buffer2 = new byte[4096];
int bytesRead2;
while ((bytesRead2 = is2.read(buffer2)) != -1) {
os2.write(buffer2, 0, bytesRead2);
}
is2.close();
os2.close();
/* ............... */
mJavaDetector = new CascadeClassifier(mCascadeFile.getAbsolutePath());
if (mJavaDetector.empty()) {
Log.e(TAG, "Failed to load cascade classifier");
mJavaDetector = null;
} else
Log.i(TAG, "Loaded cascade classifier from " + mCascadeFile.getAbsolutePath());
/* By me */
mJavaDetector2 = new CascadeClassifier(mCascadeFile2.getAbsolutePath());
/* ........ */
if (mJavaDetector2.empty()) {
Log.e(TAG, "Failed to load cascade classifier" + mCascadeFile.getAbsolutePath());
mJavaDetector2 = null;
} else
Log.i(TAG, "Loaded cascade classifier from " + mCascadeFile.getAbsolutePath() + mCascadeFile2.getAbsolutePath());
mNativeDetector = new DetectionBasedTracker(mCascadeFile.getAbsolutePath(), 0);
/**/
mNativeDetector2 = new DetectionBasedTracker(mCascadeFile2.getAbsolutePath(), 0);
cascadeDir.delete();
cascadeDir2.delete();
} catch (IOException e) {
e.printStackTrace();
Log.e(TAG, "Failed to load cascade. Exception thrown: " + e);
}
mOpenCvCameraView.enableView();
} break;
default:
{
super.onManagerConnected(status);
} break;
}
}
};
public Mat onCameraFrame(CvCameraViewFrame inputFrame) {
mRgba = inputFrame.rgba();
mGray = inputFrame.gray();
if (mAbsoluteFaceSize == 0) {
int height = mGray.rows();
if (Math.round(height * mRelativeFaceSize) > 0) {
mAbsoluteFaceSize = Math.round(height * mRelativeFaceSize);
}
mNativeDetector.setMinFaceSize(mAbsoluteFaceSize);
/* ...... */
mNativeDetector2.setMinFaceSize(mAbsoluteFaceSize);
}
MatOfRect faces = new MatOfRect();
/* ....... */
MatOfRect faces2 = new MatOfRect();
if (mDetectorType == JAVA_DETECTOR) {
if (mJavaDetector != null && mJavaDetector2 != null) {
mJavaDetector.detectMultiScale(mGray, faces, 1.1, 2, 2, new Size(mAbsoluteFaceSize, mAbsoluteFaceSize), new Size());
/* ......... */
mJavaDetector2.detectMultiScale(mGray, faces2, 1.1, 2, 2, new Size(mAbsoluteFaceSize, mAbsoluteFaceSize), new Size());
}
}
else if (mDetectorType == NATIVE_DETECTOR) {
if (mNativeDetector != null && mNativeDetector2 != null){
mNativeDetector.detect(mGray, faces);
/* ......... */
mNativeDetector2.detect(mGray, faces2);
}
}
else {
Log.e(TAG, "Detection method is not selected!");
}
Rect[] facesArray = faces.toArray();
for (int i = 0; i < facesArray.length; i++){
Core.rectangle(mRgba, facesArray[i].tl(), facesArray[i].br(), FACE_RECT_COLOR, 3);
Core.putText(mRgba, "Pista Ciclabile", facesArray[i].tl(), 1, 2, new Scalar(255, 0, 0, 255), 3);
}
/* ...... */
Rect[] facesArray2 = faces2.toArray();
for (int j = 0; j < facesArray2.length; j++){
Core.rectangle(mRgba, facesArray2[j].tl(), facesArray2[j].br(), FACE_RECT_COLOR, 3);
Core.putText(mRgba, "Viso", facesArray2[j].tl(), 1, 2, new Scalar(255, 0, 0, 255), 3);
}
return mRgba;
}
And this is the result: http://i43.tinypic.com/15eqvl.png
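For reference, the same two-cascade pattern in the OpenCV C++ API, as a minimal sketch (the file names, input image and minimum detection size are placeholders, not taken from the project above):
#include <opencv2/objdetect/objdetect.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <vector>
#include <cstdio>

int main()
{
    // Each object needs its own classifier, loaded from its own cascade file.
    cv::CascadeClassifier det1, det2;
    if (!det1.load("object1.xml") || !det2.load("object2.xml")) {
        std::printf("Failed to load a cascade\n");
        return 1;
    }

    cv::Mat frame = cv::imread("input.png"), gray;
    cv::cvtColor(frame, gray, cv::COLOR_BGR2GRAY);

    // Run both detectors on the same grayscale image; each fills its own result vector.
    std::vector<cv::Rect> hits1, hits2;
    det1.detectMultiScale(gray, hits1, 1.1, 2, 0, cv::Size(30, 30));
    det2.detectMultiScale(gray, hits2, 1.1, 2, 0, cv::Size(30, 30));

    for (size_t i = 0; i < hits1.size(); i++)
        cv::rectangle(frame, hits1[i], cv::Scalar(0, 255, 0), 3);
    for (size_t i = 0; i < hits2.size(); i++)
        cv::rectangle(frame, hits2[i], cv::Scalar(0, 0, 255), 3);

    cv::imwrite("output.png", frame);
    return 0;
}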
I'm attempting to render video using the Microsoft sample DX11VideoRenderer found at: https://github.com/Microsoft/Windows-classic-samples/tree/master/Samples/DX11VideoRenderer
From my extensive research it seems that using DirectX 11 with hardware acceleration is the most up-to-date method (least likely to be deprecated) and offers the best performance.
There are 2 similar functions within Presenter.cpp that process frames, but I cannot figure out what the difference is between them. ProcessFrameUsingD3D11() uses VideoProcessorBlt() to actually do the render. The mystery is that ProcessFrameUsingXVP() does not use this function, so how does it actually do the render? Or is it doing something else entirely? Also, it appears that my implementation is using ProcessFrameUsingXVP() based on the value of the variable m_useXVP, which is by default set to '1'. Here is the code sample:
if (m_useXVP)
{
BOOL bInputFrameUsed = FALSE;
hr = ProcessFrameUsingXVP( pCurrentType, pSample, pTexture2D, rcDest, ppOutputSample, &bInputFrameUsed );
if (SUCCEEDED(hr) && !bInputFrameUsed)
{
*pbProcessAgain = TRUE;
}
}
else
{
hr = ProcessFrameUsingD3D11( pTexture2D, pEVTexture2D, dwViewIndex, dwEVViewIndex, rcDest, *punInterlaceMode, ppOutputSample );
LONGLONG hnsDuration = 0;
LONGLONG hnsTime = 0;
DWORD dwSampleFlags = 0;
if (ppOutputSample != NULL && *ppOutputSample != NULL)
{
if (SUCCEEDED(pSample->GetSampleDuration(&hnsDuration)))
{
(*ppOutputSample)->SetSampleDuration(hnsDuration);
}
if (SUCCEEDED(pSample->GetSampleTime(&hnsTime)))
{
(*ppOutputSample)->SetSampleTime(hnsTime);
}
if (SUCCEEDED(pSample->GetSampleFlags(&dwSampleFlags)))
{
(*ppOutputSample)->SetSampleFlags(dwSampleFlags);
}
}
}
The reason for setting m_useXVP is also a mystery to me, and I cannot find an answer in my research. It uses a registry key that does not exist on my particular Windows 10 PC, so the value is not modified.
const TCHAR* lpcszInVP = TEXT("XVP");
const TCHAR* lpcszREGKEY = TEXT("SOFTWARE\\Microsoft\\Scrunch\\CodecPack\\MSDVD");
if(0 == RegOpenKeyEx(HKEY_CURRENT_USER, lpcszREGKEY, 0, KEY_READ, &hk))
{
dwData = 0;
cbData = sizeof(DWORD);
if (0 == RegQueryValueEx(hk, lpcszInVP, 0, &cbType, (LPBYTE)&dwData, &cbData))
{
m_useXVP = dwData;
}
}
So since my PC does not have this key, the code is defaulting to using ProcessFrameUsingXVP(). Here is the definition:
HRESULT DX11VideoRenderer::CPresenter::ProcessFrameUsingXVP(IMFMediaType* pCurrentType, IMFSample* pVideoFrame, ID3D11Texture2D* pTexture2D, RECT rcDest, IMFSample** ppVideoOutFrame, BOOL* pbInputFrameUsed)
{
HRESULT hr = S_OK;
ID3D11VideoContext* pVideoContext = NULL;
ID3D11Texture2D* pDXGIBackBuffer = NULL;
IMFSample* pRTSample = NULL;
IMFMediaBuffer* pBuffer = NULL;
IMFAttributes* pAttributes = NULL;
D3D11_VIDEO_PROCESSOR_CAPS vpCaps = { 0 };
do
{
if (!m_pDX11VideoDevice)
{
hr = m_pD3D11Device->QueryInterface(__uuidof(ID3D11VideoDevice), (void**)&m_pDX11VideoDevice);
if (FAILED(hr))
{
break;
}
}
hr = m_pD3DImmediateContext->QueryInterface(__uuidof(ID3D11VideoContext), (void**)&pVideoContext);
if (FAILED(hr))
{
break;
}
// remember the original rectangles
RECT TRectOld = m_rcDstApp;
RECT SRectOld = m_rcSrcApp;
UpdateRectangles(&TRectOld, &SRectOld);
//Update destination rect with current client rect
m_rcDstApp = rcDest;
D3D11_TEXTURE2D_DESC surfaceDesc;
pTexture2D->GetDesc(&surfaceDesc);
BOOL fTypeChanged = FALSE;
if (!m_pVideoProcessorEnum || !m_pSwapChain1 || m_imageWidthInPixels != surfaceDesc.Width || m_imageHeightInPixels != surfaceDesc.Height)
{
SafeRelease(m_pVideoProcessorEnum);
SafeRelease(m_pSwapChain1);
m_imageWidthInPixels = surfaceDesc.Width;
m_imageHeightInPixels = surfaceDesc.Height;
fTypeChanged = TRUE;
D3D11_VIDEO_PROCESSOR_CONTENT_DESC ContentDesc;
ZeroMemory(&ContentDesc, sizeof(ContentDesc));
ContentDesc.InputFrameFormat = D3D11_VIDEO_FRAME_FORMAT_INTERLACED_TOP_FIELD_FIRST;
ContentDesc.InputWidth = surfaceDesc.Width;
ContentDesc.InputHeight = surfaceDesc.Height;
ContentDesc.OutputWidth = surfaceDesc.Width;
ContentDesc.OutputHeight = surfaceDesc.Height;
ContentDesc.Usage = D3D11_VIDEO_USAGE_PLAYBACK_NORMAL;
hr = m_pDX11VideoDevice->CreateVideoProcessorEnumerator(&ContentDesc, &m_pVideoProcessorEnum);
if (FAILED(hr))
{
break;
}
m_rcSrcApp.left = 0;
m_rcSrcApp.top = 0;
m_rcSrcApp.right = m_uiRealDisplayWidth;
m_rcSrcApp.bottom = m_uiRealDisplayHeight;
if (m_b3DVideo)
{
hr = m_pVideoProcessorEnum->GetVideoProcessorCaps(&vpCaps);
if (FAILED(hr))
{
break;
}
if (vpCaps.FeatureCaps & D3D11_VIDEO_PROCESSOR_FEATURE_CAPS_STEREO)
{
m_bStereoEnabled = TRUE;
}
DXGI_MODE_DESC1 modeFilter = { 0 };
modeFilter.Format = DXGI_FORMAT_B8G8R8A8_UNORM;
modeFilter.Width = surfaceDesc.Width;
modeFilter.Height = surfaceDesc.Height;
modeFilter.Stereo = m_bStereoEnabled;
DXGI_MODE_DESC1 matchedMode;
if (m_bFullScreenState)
{
hr = m_pDXGIOutput1->FindClosestMatchingMode1(&modeFilter, &matchedMode, m_pD3D11Device);
if (FAILED(hr))
{
break;
}
}
hr = m_pXVP->GetAttributes(&pAttributes);
if (FAILED(hr))
{
break;
}
hr = pAttributes->SetUINT32(MF_ENABLE_3DVIDEO_OUTPUT, (0 != m_vp3DOutput) ? MF3DVideoOutputType_Stereo : MF3DVideoOutputType_BaseView);
if (FAILED(hr))
{
break;
}
}
}
// now create the input and output media types - these need to reflect
// the src and destination rectangles that we have been given.
RECT TRect = m_rcDstApp;
RECT SRect = m_rcSrcApp;
UpdateRectangles(&TRect, &SRect);
const BOOL fDestRectChanged = !EqualRect(&TRect, &TRectOld);
const BOOL fSrcRectChanged = !EqualRect(&SRect, &SRectOld);
if (!m_pSwapChain1 || fDestRectChanged)
{
hr = UpdateDXGISwapChain();
if (FAILED(hr))
{
break;
}
}
if (fTypeChanged || fSrcRectChanged || fDestRectChanged)
{
// stop streaming to avoid multiple start\stop calls internally in XVP
hr = m_pXVP->ProcessMessage(MFT_MESSAGE_NOTIFY_END_STREAMING, 0);
if (FAILED(hr))
{
break;
}
if (fTypeChanged)
{
hr = SetXVPOutputMediaType(pCurrentType, DXGI_FORMAT_B8G8R8A8_UNORM);
if (FAILED(hr))
{
break;
}
}
if (fDestRectChanged)
{
hr = m_pXVPControl->SetDestinationRectangle(&m_rcDstApp);
if (FAILED(hr))
{
break;
}
}
if (fSrcRectChanged)
{
hr = m_pXVPControl->SetSourceRectangle(&SRect);
if (FAILED(hr))
{
break;
}
}
hr = m_pXVP->ProcessMessage(MFT_MESSAGE_NOTIFY_BEGIN_STREAMING, 0);
if (FAILED(hr))
{
break;
}
}
m_bCanProcessNextSample = FALSE;
// Get Backbuffer
hr = m_pSwapChain1->GetBuffer(0, __uuidof(ID3D11Texture2D), (void**)&pDXGIBackBuffer);
if (FAILED(hr))
{
break;
}
// create the output media sample
hr = MFCreateSample(&pRTSample);
if (FAILED(hr))
{
break;
}
hr = MFCreateDXGISurfaceBuffer(__uuidof(ID3D11Texture2D), pDXGIBackBuffer, 0, FALSE, &pBuffer);
if (FAILED(hr))
{
break;
}
hr = pRTSample->AddBuffer(pBuffer);
if (FAILED(hr))
{
break;
}
if (m_b3DVideo && 0 != m_vp3DOutput)
{
SafeRelease(pBuffer);
hr = MFCreateDXGISurfaceBuffer(__uuidof(ID3D11Texture2D), pDXGIBackBuffer, 1, FALSE, &pBuffer);
if (FAILED(hr))
{
break;
}
hr = pRTSample->AddBuffer(pBuffer);
if (FAILED(hr))
{
break;
}
}
DWORD dwStatus = 0;
MFT_OUTPUT_DATA_BUFFER outputDataBuffer = {};
outputDataBuffer.pSample = pRTSample;
hr = m_pXVP->ProcessOutput(0, 1, &outputDataBuffer, &dwStatus);
if (hr == MF_E_TRANSFORM_NEED_MORE_INPUT)
{
//call process input on the MFT to deliver the YUV video sample
// and the call process output to extract of newly processed frame
hr = m_pXVP->ProcessInput(0, pVideoFrame, 0);
if (FAILED(hr))
{
break;
}
*pbInputFrameUsed = TRUE;
hr = m_pXVP->ProcessOutput(0, 1, &outputDataBuffer, &dwStatus);
if (FAILED(hr))
{
break;
}
}
else
{
*pbInputFrameUsed = FALSE;
}
if (ppVideoOutFrame != NULL)
{
*ppVideoOutFrame = pRTSample;
(*ppVideoOutFrame)->AddRef();
}
} while (FALSE);
SafeRelease(pAttributes);
SafeRelease(pBuffer);
SafeRelease(pRTSample);
SafeRelease(pDXGIBackBuffer);
SafeRelease(pVideoContext);
return hr;
}
And here is the definition of ProcessFrameUsingD3D11():
HRESULT DX11VideoRenderer::CPresenter::ProcessFrameUsingD3D11( ID3D11Texture2D* pLeftTexture2D, ID3D11Texture2D* pRightTexture2D, UINT dwLeftViewIndex, UINT dwRightViewIndex,
RECT rcDest, UINT32 unInterlaceMode, IMFSample** ppVideoOutFrame )
{
HRESULT hr = S_OK;
ID3D11VideoContext* pVideoContext = NULL;
ID3D11VideoProcessorInputView* pLeftInputView = NULL;
ID3D11VideoProcessorInputView* pRightInputView = NULL;
ID3D11VideoProcessorOutputView* pOutputView = NULL;
ID3D11Texture2D* pDXGIBackBuffer = NULL;
ID3D11RenderTargetView* pRTView = NULL;
IMFSample* pRTSample = NULL;
IMFMediaBuffer* pBuffer = NULL;
D3D11_VIDEO_PROCESSOR_CAPS vpCaps = {0};
LARGE_INTEGER lpcStart,lpcEnd;
do
{
if (!m_pDX11VideoDevice)
{
hr = m_pD3D11Device->QueryInterface(__uuidof(ID3D11VideoDevice), (void**)&m_pDX11VideoDevice);
if (FAILED(hr))
{
break;
}
}
hr = m_pD3DImmediateContext->QueryInterface(__uuidof( ID3D11VideoContext ), (void**)&pVideoContext);
if (FAILED(hr))
{
break;
}
// remember the original rectangles
RECT TRectOld = m_rcDstApp;
RECT SRectOld = m_rcSrcApp;
UpdateRectangles(&TRectOld, &SRectOld);
//Update destination rect with current client rect
m_rcDstApp = rcDest;
D3D11_TEXTURE2D_DESC surfaceDesc;
pLeftTexture2D->GetDesc(&surfaceDesc);
if (!m_pVideoProcessorEnum || !m_pVideoProcessor || m_imageWidthInPixels != surfaceDesc.Width || m_imageHeightInPixels != surfaceDesc.Height)
{
SafeRelease(m_pVideoProcessorEnum);
SafeRelease(m_pVideoProcessor);
m_imageWidthInPixels = surfaceDesc.Width;
m_imageHeightInPixels = surfaceDesc.Height;
D3D11_VIDEO_PROCESSOR_CONTENT_DESC ContentDesc;
ZeroMemory( &ContentDesc, sizeof( ContentDesc ) );
ContentDesc.InputFrameFormat = D3D11_VIDEO_FRAME_FORMAT_INTERLACED_TOP_FIELD_FIRST;
ContentDesc.InputWidth = surfaceDesc.Width;
ContentDesc.InputHeight = surfaceDesc.Height;
ContentDesc.OutputWidth = surfaceDesc.Width;
ContentDesc.OutputHeight = surfaceDesc.Height;
ContentDesc.Usage = D3D11_VIDEO_USAGE_PLAYBACK_NORMAL;
hr = m_pDX11VideoDevice->CreateVideoProcessorEnumerator(&ContentDesc, &m_pVideoProcessorEnum);
if (FAILED(hr))
{
break;
}
UINT uiFlags;
DXGI_FORMAT VP_Output_Format = DXGI_FORMAT_B8G8R8A8_UNORM;
hr = m_pVideoProcessorEnum->CheckVideoProcessorFormat(VP_Output_Format, &uiFlags);
if (FAILED(hr) || 0 == (uiFlags & D3D11_VIDEO_PROCESSOR_FORMAT_SUPPORT_OUTPUT))
{
hr = MF_E_UNSUPPORTED_D3D_TYPE;
break;
}
m_rcSrcApp.left = 0;
m_rcSrcApp.top = 0;
m_rcSrcApp.right = m_uiRealDisplayWidth;
m_rcSrcApp.bottom = m_uiRealDisplayHeight;
DWORD index;
hr = FindBOBProcessorIndex(&index); // GG This may not be needed. BOB is something to do with deinterlacing.
if (FAILED(hr))
{
break;
}
hr = m_pDX11VideoDevice->CreateVideoProcessor(m_pVideoProcessorEnum, index, &m_pVideoProcessor);
if (FAILED(hr))
{
break;
}
if (m_b3DVideo)
{
hr = m_pVideoProcessorEnum->GetVideoProcessorCaps(&vpCaps);
if (FAILED(hr))
{
break;
}
if (vpCaps.FeatureCaps & D3D11_VIDEO_PROCESSOR_FEATURE_CAPS_STEREO)
{
m_bStereoEnabled = TRUE;
}
DXGI_MODE_DESC1 modeFilter = { 0 };
modeFilter.Format = DXGI_FORMAT_B8G8R8A8_UNORM;
modeFilter.Width = surfaceDesc.Width;
modeFilter.Height = surfaceDesc.Height;
modeFilter.Stereo = m_bStereoEnabled;
DXGI_MODE_DESC1 matchedMode;
if (m_bFullScreenState)
{
hr = m_pDXGIOutput1->FindClosestMatchingMode1(&modeFilter, &matchedMode, m_pD3D11Device);
if (FAILED(hr))
{
break;
}
}
}
}
// now create the input and output media types - these need to reflect
// the src and destination rectangles that we have been given.
RECT TRect = m_rcDstApp;
RECT SRect = m_rcSrcApp;
UpdateRectangles(&TRect, &SRect);
const BOOL fDestRectChanged = !EqualRect(&TRect, &TRectOld);
if (!m_pSwapChain1 || fDestRectChanged)
{
hr = UpdateDXGISwapChain();
if (FAILED(hr))
{
break;
}
}
m_bCanProcessNextSample = FALSE;
// Get Backbuffer
hr = m_pSwapChain1->GetBuffer(0, __uuidof(ID3D11Texture2D), (void**)&pDXGIBackBuffer);
if (FAILED(hr))
{
break;
}
// create the output media sample
hr = MFCreateSample(&pRTSample);
if (FAILED(hr))
{
break;
}
hr = MFCreateDXGISurfaceBuffer(__uuidof(ID3D11Texture2D), pDXGIBackBuffer, 0, FALSE, &pBuffer);
if (FAILED(hr))
{
break;
}
hr = pRTSample->AddBuffer(pBuffer);
if (FAILED(hr))
{
break;
}
// GG For 3D - don't need.
if (m_b3DVideo && 0 != m_vp3DOutput)
{
SafeRelease(pBuffer);
hr = MFCreateDXGISurfaceBuffer(__uuidof(ID3D11Texture2D), pDXGIBackBuffer, 1, FALSE, &pBuffer);
if (FAILED(hr))
{
break;
}
hr = pRTSample->AddBuffer(pBuffer);
if (FAILED(hr))
{
break;
}
}
QueryPerformanceCounter(&lpcStart);
QueryPerformanceCounter(&lpcEnd);
//
// Create Output View of Output Surfaces.
//
D3D11_VIDEO_PROCESSOR_OUTPUT_VIEW_DESC OutputViewDesc;
ZeroMemory( &OutputViewDesc, sizeof( OutputViewDesc ) );
if (m_b3DVideo && m_bStereoEnabled)
{
OutputViewDesc.ViewDimension = D3D11_VPOV_DIMENSION_TEXTURE2DARRAY;
}
else
{
OutputViewDesc.ViewDimension = D3D11_VPOV_DIMENSION_TEXTURE2D;
}
OutputViewDesc.Texture2D.MipSlice = 0;
OutputViewDesc.Texture2DArray.MipSlice = 0;
OutputViewDesc.Texture2DArray.FirstArraySlice = 0;
if (m_b3DVideo && 0 != m_vp3DOutput)
{
OutputViewDesc.Texture2DArray.ArraySize = 2; // STEREO
}
QueryPerformanceCounter(&lpcStart);
hr = m_pDX11VideoDevice->CreateVideoProcessorOutputView(pDXGIBackBuffer, m_pVideoProcessorEnum, &OutputViewDesc, &pOutputView);
if (FAILED(hr))
{
break;
}
D3D11_VIDEO_PROCESSOR_INPUT_VIEW_DESC InputLeftViewDesc;
ZeroMemory( &InputLeftViewDesc, sizeof( InputLeftViewDesc ) );
InputLeftViewDesc.FourCC = 0;
InputLeftViewDesc.ViewDimension = D3D11_VPIV_DIMENSION_TEXTURE2D;
InputLeftViewDesc.Texture2D.MipSlice = 0;
InputLeftViewDesc.Texture2D.ArraySlice = dwLeftViewIndex;
hr = m_pDX11VideoDevice->CreateVideoProcessorInputView(pLeftTexture2D, m_pVideoProcessorEnum, &InputLeftViewDesc, &pLeftInputView);
if (FAILED(hr))
{
break;
}
if (m_b3DVideo && MFVideo3DSampleFormat_MultiView == m_vp3DOutput && pRightTexture2D)
{
D3D11_VIDEO_PROCESSOR_INPUT_VIEW_DESC InputRightViewDesc;
ZeroMemory( &InputRightViewDesc, sizeof( InputRightViewDesc ) );
InputRightViewDesc.FourCC = 0;
InputRightViewDesc.ViewDimension = D3D11_VPIV_DIMENSION_TEXTURE2D;
InputRightViewDesc.Texture2D.MipSlice = 0;
InputRightViewDesc.Texture2D.ArraySlice = dwRightViewIndex;
hr = m_pDX11VideoDevice->CreateVideoProcessorInputView(pRightTexture2D, m_pVideoProcessorEnum, &InputRightViewDesc, &pRightInputView);
if (FAILED(hr))
{
break;
}
}
QueryPerformanceCounter(&lpcEnd);
QueryPerformanceCounter(&lpcStart);
SetVideoContextParameters(pVideoContext, &SRect, &TRect, unInterlaceMode);
// Enable/Disable Stereo
if (m_b3DVideo)
{
pVideoContext->VideoProcessorSetOutputStereoMode(m_pVideoProcessor, m_bStereoEnabled);
D3D11_VIDEO_PROCESSOR_STEREO_FORMAT vpStereoFormat = D3D11_VIDEO_PROCESSOR_STEREO_FORMAT_SEPARATE;
if (MFVideo3DSampleFormat_Packed_LeftRight == m_vp3DOutput)
{
vpStereoFormat = D3D11_VIDEO_PROCESSOR_STEREO_FORMAT_HORIZONTAL;
}
else if (MFVideo3DSampleFormat_Packed_TopBottom == m_vp3DOutput)
{
vpStereoFormat = D3D11_VIDEO_PROCESSOR_STEREO_FORMAT_VERTICAL;
}
pVideoContext->VideoProcessorSetStreamStereoFormat(m_pVideoProcessor,
0, m_bStereoEnabled, vpStereoFormat, TRUE, TRUE, D3D11_VIDEO_PROCESSOR_STEREO_FLIP_NONE, 0);
}
QueryPerformanceCounter(&lpcEnd);
QueryPerformanceCounter(&lpcStart);
D3D11_VIDEO_PROCESSOR_STREAM StreamData;
ZeroMemory( &StreamData, sizeof( StreamData ) );
StreamData.Enable = TRUE;
StreamData.OutputIndex = 0;
StreamData.InputFrameOrField = 0;
StreamData.PastFrames = 0;
StreamData.FutureFrames = 0;
StreamData.ppPastSurfaces = NULL;
StreamData.ppFutureSurfaces = NULL;
StreamData.pInputSurface = pLeftInputView;
StreamData.ppPastSurfacesRight = NULL;
StreamData.ppFutureSurfacesRight = NULL;
if (m_b3DVideo && MFVideo3DSampleFormat_MultiView == m_vp3DOutput && pRightTexture2D)
{
StreamData.pInputSurfaceRight = pRightInputView;
}
hr = pVideoContext->VideoProcessorBlt(m_pVideoProcessor, pOutputView, 0, 1, &StreamData );
if (FAILED(hr))
{
break;
}
QueryPerformanceCounter(&lpcEnd);
if (ppVideoOutFrame != NULL)
{
*ppVideoOutFrame = pRTSample;
(*ppVideoOutFrame)->AddRef();
}
}
while (FALSE);
SafeRelease(pBuffer);
SafeRelease(pRTSample);
SafeRelease(pDXGIBackBuffer);
SafeRelease(pOutputView);
SafeRelease(pLeftInputView);
SafeRelease(pRightInputView);
SafeRelease(pVideoContext);
return hr;
}
One last note, the documentation states that:
Specifically, this sample shows how to:
Decode the video using the Media Foundation APIs
Render the decoded video using the DirectX 11 APIs
Output the video stream to multi-monitor displays
I cannot find anything that does decoding unless by some MF magic chant phrase that I haven't stumbled across yet. But it's not a showstopper because I can stick an H.264 decoder MFT in front no problem. I would just like to clarify the documentation.
Any help would be much appreciated. Thank you!
There are 2 similar functions within Presenter.cpp that process frames but I cannot figure out what the difference is between them. ProcessFrameUsingD3D11() uses VideoProcessorBlt() to actually do the render.
The functions are not rendering - they are two ways to scale video frames. Scaling might be done with a readily available Media Foundation transform managed internally by the renderer's presenter, or with the help of a Direct3D 11 video processor. Both actually utilize Direct3D 11, so the two methods are close to one another and are just one step in the rendering process.
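To illustrate the XVP path: m_pXVP is, as far as I can tell from the sample, the stock Video Processor MFT (CLSID_VideoProcessorMFT), and the presenter drives it through the ordinary IMFTransform calls - the blit happens inside the MFT rather than in the presenter. A minimal sketch of that pattern (transform creation and media-type setup omitted):
#include <mfapi.h>
#include <mfidl.h>
#include <mftransform.h>
#include <mferror.h>

// Feed one input sample to the video processor MFT and pull one scaled/converted
// output sample. This is the same ProcessInput/ProcessOutput sequence that
// ProcessFrameUsingXVP() uses; any VideoProcessorBlt() call happens internally.
HRESULT ConvertOneSample(IMFTransform *pXVP, IMFSample *pIn, IMFSample *pOut)
{
    MFT_OUTPUT_DATA_BUFFER buf = {};
    buf.pSample = pOut;              // caller-provided sample backed by a DXGI surface
    DWORD dwStatus = 0;

    HRESULT hr = pXVP->ProcessOutput(0, 1, &buf, &dwStatus);
    if (hr == MF_E_TRANSFORM_NEED_MORE_INPUT)
    {
        hr = pXVP->ProcessInput(0, pIn, 0);                   // deliver the video frame
        if (SUCCEEDED(hr))
            hr = pXVP->ProcessOutput(0, 1, &buf, &dwStatus);  // pull the processed frame
    }
    return hr;
}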
I cannot find anything that does decoding unless by some MF magic chant phrase that I haven't stumbled across yet.
There is no decoding, and the list of sink video formats in StreamSink.cpp suggests that by listing only uncompressed video formats. The renderer presents frames carried by Direct3D 11 textures, which in turn assumes that a decoder, especially a hardware decoder such as a DXVA2-based one, already supplies decoded textures at the renderer input.
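So a decoder has to be placed upstream of the renderer, as the question already anticipates. A minimal sketch (assuming Media Foundation has already been started with MFStartup) of locating an H.264 decoder MFT whose uncompressed output the renderer can accept:
#include <mfapi.h>
#include <mfidl.h>
#include <mftransform.h>
#include <mferror.h>

// Find an H.264 decoder MFT (hardware ones sort first) that outputs NV12.
// Media-type negotiation and sample flow are omitted.
HRESULT CreateH264Decoder(IMFTransform **ppDecoder)
{
    MFT_REGISTER_TYPE_INFO inType  = { MFMediaType_Video, MFVideoFormat_H264 };
    MFT_REGISTER_TYPE_INFO outType = { MFMediaType_Video, MFVideoFormat_NV12 };

    IMFActivate **ppActivate = NULL;
    UINT32 count = 0;
    HRESULT hr = MFTEnumEx(MFT_CATEGORY_VIDEO_DECODER,
                           MFT_ENUM_FLAG_HARDWARE | MFT_ENUM_FLAG_SYNCMFT |
                           MFT_ENUM_FLAG_ASYNCMFT | MFT_ENUM_FLAG_SORTANDFILTER,
                           &inType, &outType, &ppActivate, &count);
    if (SUCCEEDED(hr) && count == 0)
        hr = MF_E_TOPO_CODEC_NOT_FOUND;
    if (SUCCEEDED(hr))
        hr = ppActivate[0]->ActivateObject(IID_PPV_ARGS(ppDecoder));

    for (UINT32 i = 0; i < count; i++)
        ppActivate[i]->Release();
    CoTaskMemFree(ppActivate);
    return hr;
}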
I found a way to enumerate other programs' handles, but now I have a problem: I cannot see handles of type Process. I need to check which programs hold open handles to my process.
When I check the output, the handles show up as "unnamed", and I don't know how to fix that.
Should I do this via a driver, or is there another way to do it without a driver?
pid = _wtoi(argv[1]);
if (!(processHandle = OpenProcess(PROCESS_DUP_HANDLE, FALSE, pid)))
{
printf("Could not open PID %d! (Don't try to open a system process.)\n", pid);
return 1;
}
handleInfo = (PSYSTEM_HANDLE_INFORMATION)malloc(handleInfoSize);
/* NtQuerySystemInformation won't give us the correct buffer size,
so we guess by doubling the buffer size. */
while ((status = NtQuerySystemInformation(
SystemHandleInformation,
handleInfo,
handleInfoSize,
NULL
)) == STATUS_INFO_LENGTH_MISMATCH)
handleInfo = (PSYSTEM_HANDLE_INFORMATION)realloc(handleInfo, handleInfoSize *= 2);
/* NtQuerySystemInformation stopped giving us STATUS_INFO_LENGTH_MISMATCH. */
if (!NT_SUCCESS(status))
{
printf("NtQuerySystemInformation failed!\n");
return 1;
}
for (i = 0; i < handleInfo->HandleCount; i++)
{
SYSTEM_HANDLE handle = handleInfo->Handles[i];
HANDLE dupHandle = NULL;
POBJECT_TYPE_INFORMATION objectTypeInfo;
PVOID objectNameInfo;
UNICODE_STRING objectName;
ULONG returnLength;
/* Check if this handle belongs to the PID the user specified. */
if (handle.ProcessId != pid)
continue;
/* Duplicate the handle so we can query it. */
if (!NT_SUCCESS(NtDuplicateObject(
processHandle,
handle.Handle,
GetCurrentProcess(),
&dupHandle,
0,
0,
0
)))
{
printf("[%#x] Error!\n", handle.Handle);
continue;
}
/* Query the object type. */
objectTypeInfo = (POBJECT_TYPE_INFORMATION)malloc(0x1000);
if (!NT_SUCCESS(NtQueryObject(
dupHandle,
ObjectTypeInformation,
objectTypeInfo,
0x1000,
NULL
)))
{
printf("[%#x] Error!\n", handle.Handle);
CloseHandle(dupHandle);
continue;
}
/* Query the object name (unless it has an access of
0x0012019f, on which NtQueryObject could hang). */
if (handle.GrantedAccess == 0x0012019f)
{
/* We have the type, so display that. */
printf(
"[%#x] %.*S: (did not get name)\n",
handle.Handle,
objectTypeInfo->Name.Length / 2,
objectTypeInfo->Name.Buffer
);
free(objectTypeInfo);
CloseHandle(dupHandle);
continue;
}
objectNameInfo = malloc(0x1000);
if (!NT_SUCCESS(NtQueryObject(
dupHandle,
ObjectNameInformation,
objectNameInfo,
0x1000,
&returnLength
)))
{
/* Reallocate the buffer and try again. */
objectNameInfo = realloc(objectNameInfo, returnLength);
if (!NT_SUCCESS(NtQueryObject(
dupHandle,
ObjectNameInformation,
objectNameInfo,
returnLength,
NULL
)))
{
/* We have the type name, so just display that. */
printf(
"[%#x] %.*S: (could not get name)\n",
handle.Handle,
objectTypeInfo->Name.Length / 2,
objectTypeInfo->Name.Buffer
);
free(objectTypeInfo);
free(objectNameInfo);
CloseHandle(dupHandle);
continue;
}
}
/* Cast our buffer into an UNICODE_STRING. */
objectName = *(PUNICODE_STRING)objectNameInfo;
/* Print the information! */
if (objectName.Length)
{
/* The object has a name. */
printf(
"[%#x] %.*S: %.*S\n",
handle.Handle,
objectTypeInfo->Name.Length / 2,
objectTypeInfo->Name.Buffer,
objectName.Length / 2,
objectName.Buffer
);
}
else
{
/* Print something else. */
printf(
"[%#x] %.*S: (unnamed)\n",
handle.Handle,
objectTypeInfo->Name.Length / 2,
objectTypeInfo->Name.Buffer
);
}
free(objectTypeInfo);
free(objectNameInfo);
CloseHandle(dupHandle);
}
free(handleInfo);
CloseHandle(processHandle);
return 0;
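The code below takes a different route to the same goal: it queries SystemExtendedHandleInformation, finds the kernel object behind a handle to the current process, and then rescans the whole system handle table for other processes that hold a handle to that same object, printing their PID, handle value and granted access: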
void SearchMyProcessHandles()
{
ULONG UniqueProcessId = GetCurrentProcessId();
if (HANDLE hProcess = OpenProcess(MAXIMUM_ALLOWED, FALSE, UniqueProcessId))
{
NTSTATUS status;
union {
PSYSTEM_HANDLE_INFORMATION_EX pshi;
PVOID buf;
};
ULONG cb = 0x10000;
do
{
status = STATUS_INSUFFICIENT_RESOURCES;
if (buf = new UCHAR[cb += PAGE_SIZE])
{
if (0 <= (status = ZwQuerySystemInformation(SystemExtendedHandleInformation, buf, cb, &cb)))
{
if (ULONG_PTR NumberOfHandles = pshi->NumberOfHandles)
{
SYSTEM_HANDLE_TABLE_ENTRY_INFO_EX* Handles = pshi->Handles;
do
{
if (Handles->UniqueProcessId == UniqueProcessId &&
Handles->HandleValue == (ULONG_PTR)hProcess)
{
PVOID Object = Handles->Object;
Handles = pshi->Handles;
NumberOfHandles = pshi->NumberOfHandles;
do
{
if (Handles->Object == Object &&
Handles->UniqueProcessId != UniqueProcessId)
{
DbgPrint("%p %p %08x\n",
Handles->UniqueProcessId,
Handles->HandleValue,
Handles->GrantedAccess);
}
} while (Handles++, --NumberOfHandles);
break;
}
} while (Handles++, --NumberOfHandles);
}
}
delete [] buf;
}
} while (status == STATUS_INFO_LENGTH_MISMATCH);
CloseHandle(hProcess);
}
}
I developed the code below:
extern "C"
{
#include <libavutil/imgutils.h>
#include <libavutil/opt.h>
#include <libavcodec/avcodec.h>
#include <libavutil/mathematics.h>
#include <libavutil/samplefmt.h>
#include <libavutil/timestamp.h>
#include <libavformat/avformat.h>
#include <libavfilter/avfiltergraph.h>
#include <libswscale/swscale.h>
}
#include <stdio.h>
static AVFormatContext *fmt_ctx = NULL;
static int frame_index = 0;
static int j = 0, nbytes=0;
uint8_t *video_outbuf = NULL;
static AVPacket *pAVPacket=NULL;
static int value=0;
static AVFrame *pAVFrame=NULL;
static AVFrame *outFrame=NULL;
static AVStream *video_st=NULL;
static AVFormatContext *outAVFormatContext=NULL;
static AVCodec *outAVCodec=NULL;
static AVOutputFormat *output_format=NULL;
static AVCodecContext *video_dec_ctx = NULL, *audio_dec_ctx;
static AVCodecContext *outAVCodecContext=NULL;
static int width, height;
static enum AVPixelFormat pix_fmt;
static AVStream *video_stream = NULL, *audio_stream = NULL;
static const char *src_filename = NULL;
static const char *video_dst_filename = NULL;
static const char *audio_dst_filename = NULL;
static FILE *video_dst_file = NULL;
static FILE *audio_dst_file = NULL;
static uint8_t *video_dst_data[4] = {NULL};
static int video_dst_linesize[4];
static int video_dst_bufsize;
static int video_stream_idx = -1, audio_stream_idx = -1;
static AVPacket *pkt=NULL;
static AVPacket *pkt1=NULL;
static AVFrame *frame = NULL;
//static AVPacket pkt;
static int video_frame_count = 0;
static int audio_frame_count = 0;
static int refcount = 0;
AVCodec *codec;
static struct SwsContext *sws_ctx;
AVCodecContext *c= NULL;
int i, out_size, size, x, y, outbuf_size;
AVFrame *picture;
uint8_t *outbuf, *picture_buf;
int video_outbuf_size;
int w, h;
AVPixelFormat pixFmt;
uint8_t *data[4];
int linesize[4];
static int open_codec_context(int *stream_idx,
AVCodecContext **dec_ctx, AVFormatContext
*fmt_ctx, enum AVMediaType type)
{
int ret, stream_index;
AVStream *st;
AVCodec *dec = NULL;
AVDictionary *opts = NULL;
ret = av_find_best_stream(fmt_ctx, type, -1, -1, NULL, 0);
if (ret < 0) {
printf("Could not find %s stream in input file '%s'\n",
av_get_media_type_string(type), src_filename);
return ret;
} else {
stream_index = ret;
st = fmt_ctx->streams[stream_index];
/* find decoder for the stream */
dec = avcodec_find_decoder(st->codecpar->codec_id);
if (!dec) {
printf("Failed to find %s codec\n",
av_get_media_type_string(type));
return AVERROR(EINVAL);
}
/* Allocate a codec context for the decoder */
*dec_ctx = avcodec_alloc_context3(dec);
if (!*dec_ctx) {
printf("Failed to allocate the %s codec context\n",
av_get_media_type_string(type));
return AVERROR(ENOMEM);
}
/* Copy codec parameters from input stream to output codec context */
if ((ret = avcodec_parameters_to_context(*dec_ctx, st->codecpar)) < 0) {
printf("Failed to copy %s codec parameters to decoder context\n",
av_get_media_type_string(type));
return ret;
}
/* Init the decoders, with or without reference counting */
av_dict_set(&opts, "refcounted_frames", refcount ? "1" : "0", 0);
if ((ret = avcodec_open2(*dec_ctx, dec, &opts)) < 0) {
printf("Failed to open %s codec\n",
av_get_media_type_string(type));
return ret;
}
*stream_idx = stream_index;
}
return 0;
}
int main (int argc, char **argv)
{
int ret = 0, got_frame;
src_filename = argv[1];
video_dst_filename = argv[2];
audio_dst_filename = argv[3];
av_register_all();
avcodec_register_all();
printf("Registered all\n");
/* open input file, and allocate format context */
if (avformat_open_input(&fmt_ctx, src_filename, NULL, NULL) < 0) {
printf("Could not open source file %s\n", src_filename);
exit(1);
}
/* retrieve stream information */
if (avformat_find_stream_info(fmt_ctx, NULL) < 0) {
printf("Could not find stream information\n");
exit(1);
}
if (open_codec_context(&video_stream_idx, &video_dec_ctx, fmt_ctx,
AVMEDIA_TYPE_VIDEO) >= 0) {
video_stream = fmt_ctx->streams[video_stream_idx];
avformat_alloc_output_context2(&outAVFormatContext, NULL, NULL,
video_dst_filename);
if (!outAVFormatContext)
{
printf("\n\nError : avformat_alloc_output_context2()");
return -1;
}
}
if (open_codec_context(&audio_stream_idx, &audio_dec_ctx, fmt_ctx,
AVMEDIA_TYPE_AUDIO) >= 0) {
audio_stream = fmt_ctx->streams[audio_stream_idx];
audio_dst_file = fopen(audio_dst_filename, "wb");
if (!audio_dst_file) {
printf("Could not open destination file %s\n", audio_dst_filename);
ret = 1;
goto end;
}
}
/* dump input information to stderr */
av_dump_format(fmt_ctx, 0, src_filename, 0);
if (!audio_stream && !video_stream) {
printf("Could not find audio or video stream in the input, aborting\n");
ret = 1;
goto end;
}
output_format = av_guess_format(NULL, video_dst_filename, NULL);
if( !output_format )
{
printf("\n\nError : av_guess_format()");
return -1;
}
video_st = avformat_new_stream(outAVFormatContext ,NULL);
if( !video_st )
{
printf("\n\nError : avformat_new_stream()");
return -1;
}
outAVCodecContext = avcodec_alloc_context3(outAVCodec);
if( !outAVCodecContext )
{
printf("\n\nError : avcodec_alloc_context3()");
return -1;
}
outAVCodecContext = video_st->codec;
outAVCodecContext->codec_id = AV_CODEC_ID_MPEG4; // AV_CODEC_ID_H264 // AV_CODEC_ID_MPEG1VIDEO
outAVCodecContext->codec_type = AVMEDIA_TYPE_VIDEO;
outAVCodecContext->pix_fmt = AV_PIX_FMT_YUV420P;
outAVCodecContext->bit_rate = 400000; // 2500000
outAVCodecContext->width = 1920;
//outAVCodecContext->width = 500;
outAVCodecContext->height = 1080;
//outAVCodecContext->height = 500;
outAVCodecContext->gop_size = 3;
outAVCodecContext->max_b_frames = 2;
outAVCodecContext->time_base.num = 1;
outAVCodecContext->time_base.den = 30; // 15fps
if (outAVCodecContext->codec_id == AV_CODEC_ID_H264)
{
av_opt_set(outAVCodecContext->priv_data, "preset", "slow", 0);
}
outAVCodec = avcodec_find_encoder(AV_CODEC_ID_MPEG4);
if( !outAVCodec )
{
printf("\n\nError : avcodec_find_encoder()");
return -1;
}
/* Some container formats (like MP4) require global headers to be present.
Mark the encoder so that it behaves accordingly. */
if ( outAVFormatContext->oformat->flags & AVFMT_GLOBALHEADER)
{
outAVCodecContext->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
}
value = avcodec_open2(outAVCodecContext, outAVCodec, NULL);
if( value < 0)
{
printf("\n\nError : avcodec_open2()");
return -1;
}
/* create empty video file */
if ( !(outAVFormatContext->flags & AVFMT_NOFILE) )
{
if( avio_open2(&outAVFormatContext->pb , video_dst_filename,
AVIO_FLAG_WRITE ,NULL, NULL) < 0 )
{
printf("\n\nError : avio_open2()");
}
}
if(!outAVFormatContext->nb_streams)
{
printf("\n\nError : Output file dose not contain any stream");
return -1;
}
/* imp: mp4 container or some advanced container file required header information */
value = avformat_write_header(outAVFormatContext , NULL);
if(value < 0)
{
printf("\n\nError : avformat_write_header()");
return -1;
}
printf("\n\nOutput file information :\n\n");
av_dump_format(outAVFormatContext , 0 ,video_dst_filename ,1);
int flag;
int frameFinished;
value = 0;
pAVPacket = (AVPacket *)av_malloc(sizeof(AVPacket));
av_init_packet(pAVPacket);
pAVFrame = av_frame_alloc();
if( !pAVFrame )
{
printf("\n\nError : av_frame_alloc()");
return -1;
}
outFrame = av_frame_alloc(); // Allocate an AVFrame and set its fields to default values.
if( !outFrame )
{
printf("\n\nError : av_frame_alloc()");
return -1;
}
nbytes = av_image_get_buffer_size(outAVCodecContext->pix_fmt, outAVCodecContext->width, outAVCodecContext->height, 32);
video_outbuf = (uint8_t*)av_malloc(nbytes);
if( video_outbuf == NULL )
{
printf("\n\nError : av_malloc()");
}
value = av_image_fill_arrays(outFrame->data, outFrame->linesize, video_outbuf, AV_PIX_FMT_YUV420P, outAVCodecContext->width, outAVCodecContext->height, 1); // returns: the size in bytes required for src
if(value < 0)
{
printf("\n\nError : av_image_fill_arrays()");
}
SwsContext* swsCtx_ ;
// Allocate and return swsContext.
// a pointer to an allocated context, or NULL in case of error
// Deprecated : Use sws_getCachedContext() instead.
swsCtx_ = sws_getContext(video_dec_ctx->width,
video_dec_ctx->height,
video_dec_ctx->pix_fmt,
video_dec_ctx->width,
video_dec_ctx->height,
video_dec_ctx->pix_fmt,
SWS_BICUBIC, NULL, NULL, NULL);
AVPacket outPacket;
int got_picture;
while( av_read_frame( fmt_ctx , pAVPacket ) >= 0 )
{
if(pAVPacket->stream_index == video_stream_idx)
{
value = avcodec_decode_video2(video_dec_ctx , pAVFrame ,
&frameFinished , pAVPacket );
if( value < 0)
{
printf("Error : avcodec_decode_video2()");
}
if(frameFinished)// Frame successfully decoded :)
{
sws_scale(swsCtx_, pAVFrame->data, pAVFrame->linesize, 0, video_dec_ctx->height, outFrame->data, outFrame->linesize);
// sws_scale(swsCtx_, pAVFrame->data, pAVFrame->linesize, 0, video_dec_ctx->height, outFrame->data, outFrame->linesize);
av_init_packet(&outPacket);
outPacket.data = NULL; // packet data will be allocated by the encoder
outPacket.size = 0;
avcodec_encode_video2(outAVCodecContext, &outPacket, outFrame, &got_picture);
if(got_picture)
{
if(outPacket.pts != AV_NOPTS_VALUE)
outPacket.pts = av_rescale_q(outPacket.pts, video_st->codec->time_base, video_st->time_base);
if(outPacket.dts != AV_NOPTS_VALUE)
outPacket.dts = av_rescale_q(outPacket.dts, video_st->codec->time_base, video_st->time_base);
printf("Write frame %3d (size= %2d)\n", j++, outPacket.size/1000);
if(av_write_frame(outAVFormatContext, &outPacket) != 0)
{
printf("\n\nError : av_write_frame()");
}
av_packet_unref(&outPacket);
} // got_picture
av_packet_unref(&outPacket);
} // frameFinished
}
}// End of while-loop
value = av_write_trailer(outAVFormatContext);
if( value < 0)
{
printf("\n\nError : av_write_trailer()");
}
//THIS WAS ADDED LATER
av_free(video_outbuf);
end:
avcodec_free_context(&video_dec_ctx);
avcodec_free_context(&audio_dec_ctx);
avformat_close_input(&fmt_ctx);
if (video_dst_file)
fclose(video_dst_file);
if (audio_dst_file)
fclose(audio_dst_file);
//av_frame_free(&frame);
av_free(video_dst_data[0]);
return ret < 0;
}
The problem with the above code is that it rotates the video to the left by 90 degrees.
Snapshot of the video given as input to the above program.
Snapshot of the output video. It is rotated by 90 degrees to the left.
I compiled the program using the command below:
g++ -D__STDC_CONSTANT_MACROS -Wall -g ScreenRecorder.cpp -I/home/harry/Documents/compressor/ffmpeg-3.3/ -I/root/android-ndk-r14b/platforms/android-21/arch-x86_64/usr/include/ -c -o ScreenRecorder.o -w
And linked it using the command below:
g++ -Wall -g ScreenRecorder.o -I/home/harry/Documents/compressor/ffmpeg-3.3/ -I/root/android-ndk-r14b/platforms/android-21/arch-x86_64/usr/include/ -L/usr/lib64 -L/lib64 -L/usr/lib/gcc/x86_64-redhat-linux/4.4.7/ -L/home/harry/Documents/compressor/ffmpeg-3.3/ffmpeg-build -L/root/android-ndk-r14b/platforms/android-21/arch-x86_64/usr/lib64 -o ScreenRecorder.exe -lavformat -lavcodec -lavutil -lavdevice -lavfilter -lswscale -lx264 -lswresample -lm -lpthread -ldl -lstdc++ -lc -lrt
The program is run using the command below:
./ScreenRecorder.exe vertical.MOV videoH.mp4 audioH.mp3
Note:
- The source video was taken from an iPhone and is in .mov format.
- The output video is stored in an .mp4 file.
Can anyone please tell me why it is rotating the video by 90 degrees?
One thing I noticed in the dump is shown below:
Duration: 00:00:06.04, start: 0.000000, bitrate: 17087 kb/s
Stream #0:0(und): Video: h264 (High) (avc1 / 0x31637661), yuv420p(tv, bt709), 1920x1080, 17014 kb/s, 29.98 fps, 29.97 tbr, 600 tbn, 1200 tbc (default)
Metadata:
rotate : 90
creation_time : 2017-07-09T10:56:42.000000Z
handler_name : Core Media Data Handler
encoder : H.264
Side data:
displaymatrix: rotation of -90.00 degrees
It says displaymatrix: rotation of -90.00 degrees. Is that responsible for rotating the video by 90 degrees?
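That side data is the source's rotation hint: players rotate the stored frames on playback, but the transcode above does not carry the hint over, which would explain the 90-degree difference. A minimal sketch (assuming FFmpeg 3.x with libavutil/display.h) of reading the angle from the input stream:
// Read the rotation angle stored in the stream's display-matrix side data.
#include <libavformat/avformat.h>
#include <libavutil/display.h>

static double get_stream_rotation(AVStream *st)
{
    int sd_size = 0;
    uint8_t *sd = av_stream_get_side_data(st, AV_PKT_DATA_DISPLAYMATRIX, &sd_size);
    if (sd && sd_size >= 9 * (int)sizeof(int32_t))
        return av_display_rotation_get((int32_t *)sd); // e.g. -90.0 for this file
    return 0.0;                                        // no rotation hint present
}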
I solved it by assigning the metadata of the input stream as the metadata of the output stream, by adding:
outAVFormatContext->metadata = fmt_ctx->metadata;
outAVFormatContext->streams[video_stream_idx]->metadata = fmt_ctx->streams[video_stream_idx]->metadata;
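For reference, the same fix written with av_dict_copy(), which duplicates the dictionaries instead of sharing the input context's pointers (a minimal sketch; return values not checked, and it is set before avformat_write_header()):
// Copy container- and stream-level metadata (including the "rotate" tag)
// from the input to the output.
av_dict_copy(&outAVFormatContext->metadata, fmt_ctx->metadata, 0);
av_dict_copy(&outAVFormatContext->streams[video_stream_idx]->metadata,
             fmt_ctx->streams[video_stream_idx]->metadata, 0);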
I am trying to import a video in JavaCV, but cvCreateFileCapture("Filename") is returning a null value. I have also tried using OpenCVFrameGrabber("File name"), and it is also not working properly.
Here is my code :
public class SmokeDetectionInStoredVideo {
private static final int IMG_SCALE = 2;
private static final int width = 640;
private static final int height = 480;
public static void main(String args[]){
System.loadLibrary(Core.NATIVE_LIBRARY_NAME);
CvMemStorage storage = CvMemStorage.create();
IplImage img1=null,imghsv=null,imgbin=null;
CvSeq contour1;
CvSeq contour2;
double areaMax = 00, areaC = 0.0;
imghsv = IplImage.create(width/IMG_SCALE, height/IMG_SCALE, 8, 3);
imgbin = IplImage.create(width/IMG_SCALE, height/IMG_SCALE, 8, 1);
try{
CvCapture capture = cvCreateFileCapture("Red.mp4");//cvCreateCameraCapture(0);
if(capture.isNull()){
System.out.println("Error reading file");
}
IplImage grabbedImage = cvQueryFrame(capture);
OpenCVFrameConverter.ToIplImage converter=new OpenCVFrameConverter.ToIplImage();
CanvasFrame canvasFrame = new CanvasFrame("Actual Video");
CanvasFrame canvasBinFrame = new CanvasFrame("Contour Video");
canvasFrame.setCanvasSize(grabbedImage.width(), grabbedImage.height());
canvasBinFrame.setCanvasSize(grabbedImage.width(), grabbedImage.height());
imghsv = IplImage.create(grabbedImage.width(), grabbedImage.height(), 8, 3);
imgbin = IplImage.create(grabbedImage.width(), grabbedImage.height(), 8, 1);
while (canvasFrame.isVisible() && (img1 = cvQueryFrame(capture)) != null) {
cvCvtColor(img1, imghsv, CV_RGB2HSV);
//canvasFrame.showImage(converter.convert(imghsv));
cvInRangeS(img1, cvScalar(220, 220, 220, 0), cvScalar(235, 235, 235, 0), imgbin);
contour1 = new CvSeq();
cvFindContours(imgbin, storage, contour1, Loader.sizeof(CvContour.class), CV_RETR_LIST, CV_LINK_RUNS);
contour2 = contour1;
while (contour1 != null && !contour1.isNull()) {
areaC = cvContourArea(contour1, CV_WHOLE_SEQ, 1);
if(areaC>areaMax){
areaMax = areaC;
}
contour1 = contour1.h_next();
}
while (contour2 != null && !contour2.isNull()){
areaC = cvContourArea(contour2, CV_WHOLE_SEQ, 1);
if(areaC>areaMax){
cvDrawContours(imgbin, contour1, CV_RGB(0, 0, 0), CV_RGB(0, 0, 0), 0, CV_FILLED, 8);
}
contour2 = contour2.h_next();
}
canvasBinFrame.showImage(converter.convert(imgbin));
canvasFrame.showImage(converter.convert(img1));
}
canvasFrame.dispose();
cvReleaseMemStorage(storage);
cvReleaseImage(img1);
cvReleaseImage(imgbin);
cvReleaseImage(imghsv);
}
catch (Exception e) {
System.out.println("Error while processing video");
// TODO: handle exception
}
}
}
Is there any other way of importing the video in JavaCV?
Are you sure that you have this file in your class path? Can you try the following to check if the file exists and show the output please?
File file = new File("Red.mp4");
System.out.println(file.exists());
I have searched for a way to retrieve information from a digitally signed PE file. I need the publisher, publisher link, issuer name and subject name. I need WinAPI / C / C++ code (functions), and I need a fast method; I don't need to check whether the signature is valid or not.
Here is code that I wrote for a project of mine that will do this. It returns the details in a struct of type NSIGINFO. Feel free to use it - no attribution necessary, but I would appreciate it if you would leave the copyright intact.
If there are any functions missing (I had to consolidate things from a couple of different places, so I may have missed something) please let me know and I'll make the necessary tweaks.
Let me know how this works for you. Good luck.
The header file, NAuthenticode.h:
// NAuthenticode.h: Functions for checking signatures in files
//
// Copyright (c) 2008-2012, Nikolaos D. Bougalis <nikb#bougalis.net>
#ifndef B82FBB5B_C0F8_43A5_9A31_619BB690706C
#define B82FBB5B_C0F8_43A5_9A31_619BB690706C
#include <wintrust.h>
#include <softpub.h>
#include <imagehlp.h>
struct NSIGINFO
{
LONG lValidationResult;
LPTSTR lpszPublisher;
LPTSTR lpszPublisherEmail;
LPTSTR lpszPublisherUrl;
LPTSTR lpszAuthority;
LPTSTR lpszFriendlyName;
LPTSTR lpszProgramName;
LPTSTR lpszPublisherLink;
LPTSTR lpszMoreInfoLink;
LPTSTR lpszSignature;
LPTSTR lpszSerial;
BOOL bHasSigTime;
SYSTEMTIME stSigTime;
};
VOID NCertFreeSigInfo(NSIGINFO *pSigInfo);
BOOL NVerifyFileSignature(LPCTSTR lpszFileName, NSIGINFO *pSigInfo, HANDLE hHandle = INVALID_HANDLE_VALUE);
BOOL NCertGetNameString(PCCERT_CONTEXT pCertContext, DWORD dwType,
DWORD dwFlags, LPTSTR *lpszNameString);
BOOL NCheckFileCertificates(HANDLE hFile,
VOID (*pCallback)(PCCERT_CONTEXT, LPVOID), PVOID pParam);
#endif
The implementation, NAuthenticode.cpp:
// NAuthenticode.cpp: Various routines related to validating file signatures
//
// Copyright (c) 2008-2012, Nikolaos D. Bougalis <nikb#bougalis.net>
#include "stdafx.h"
#include "NAuthenticode.h"
//////////////////////////////////////////////////////////////////////////
#pragma comment(lib, "crypt32")
#pragma comment(lib, "imagehlp")
#pragma comment(lib, "wintrust")
//////////////////////////////////////////////////////////////////////////
#define SIG_ENCODING (X509_ASN_ENCODING | PKCS_7_ASN_ENCODING)
//////////////////////////////////////////////////////////////////////////
// Some utility functions
LPVOID NHeapAlloc(SIZE_T dwBytes)
{
if(dwBytes == 0)
return NULL;
return HeapAlloc(GetProcessHeap(), HEAP_ZERO_MEMORY, dwBytes);
}
//////////////////////////////////////////////////////////////////////////
LPVOID NHeapFree(LPVOID lpMem)
{
if(lpMem != NULL)
HeapFree(GetProcessHeap(), 0, lpMem);
return NULL;
}
//////////////////////////////////////////////////////////////////////////
LPSTR NConvertW2A(LPCWSTR lpszString, int nLen, UINT nCodePage)
{
ASSERT(lpszString != NULL);
int ret = WideCharToMultiByte(nCodePage, 0, lpszString, nLen, NULL, 0, NULL, NULL);
if(ret <= 0)
return NULL;
LPSTR lpszOutString = (LPSTR)NHeapAlloc((ret + 1) * sizeof(CHAR));
if(lpszOutString == NULL)
return NULL;
ret = WideCharToMultiByte(nCodePage, 0, lpszString, nLen, lpszOutString, ret, NULL, NULL);
if(ret <= 0)
lpszOutString = (LPSTR)NHeapFree(lpszOutString);
return lpszOutString;
}
//////////////////////////////////////////////////////////////////////////
LPWSTR NDupString(LPCWSTR lpszString, int nLen)
{
if(nLen == -1)
nLen = (int)wcslen(lpszString);
LPWSTR lpszOutString = (LPWSTR)NHeapAlloc((2 + nLen) * sizeof(WCHAR));
if((lpszOutString != NULL) && (nLen != 0))
wcsncpy(lpszOutString, lpszString, nLen + 1);
return lpszOutString;
}
//////////////////////////////////////////////////////////////////////////
LPTSTR NConvertW2T(LPCWSTR lpszString, int nLen, UINT nCodePage)
{
ASSERT(lpszString != NULL);
#ifndef UNICODE
return (LPTSTR)NConvertW2A(lpszString, nLen, nCodePage);
#else
return (LPTSTR)NDupString(lpszString, nLen);
#endif
}
//////////////////////////////////////////////////////////////////////////
LPWSTR NConvertA2W(LPCSTR lpszString, int nLen, UINT nCodePage)
{
ASSERT(lpszString != NULL);
int ret = MultiByteToWideChar(nCodePage, 0, lpszString, nLen, NULL, 0);
if(ret <= 0)
return NULL;
LPWSTR lpszOutString = (LPWSTR)NHeapAlloc((ret + 1) * sizeof(WCHAR));
if(lpszOutString == NULL)
return NULL;
ret = MultiByteToWideChar(nCodePage, 0, lpszString, nLen, lpszOutString, ret);
if(ret <= 0)
lpszOutString = (LPWSTR)NHeapFree(lpszOutString);
return lpszOutString;
}
//////////////////////////////////////////////////////////////////////////
LPWSTR NConvertT2W(LPCTSTR lpszString, int nLen, UINT nCodePage)
{
ASSERT(lpszString != NULL);
#ifndef UNICODE
return NConvertA2W((LPCSTR)lpszString, nLen, nCodePage);
#else
return NDupString((LPWSTR)lpszString, nLen);
#endif
}
//////////////////////////////////////////////////////////////////////////
VOID NCertFreeSigInfo(NSIGINFO *pSigInfo)
{
if(pSigInfo == NULL)
return;
__try
{ // Be extra careful
if(pSigInfo->lpszPublisher)
pSigInfo->lpszPublisher = (LPTSTR)NHeapFree(pSigInfo->lpszPublisher);
if(pSigInfo->lpszPublisherEmail)
pSigInfo->lpszPublisherEmail = (LPTSTR)NHeapFree(pSigInfo->lpszPublisherEmail);
if(pSigInfo->lpszPublisherUrl)
pSigInfo->lpszPublisherUrl = (LPTSTR)NHeapFree(pSigInfo->lpszPublisherUrl);
if(pSigInfo->lpszAuthority)
pSigInfo->lpszAuthority = (LPTSTR)NHeapFree(pSigInfo->lpszAuthority);
if(pSigInfo->lpszProgramName)
pSigInfo->lpszProgramName = (LPTSTR)NHeapFree(pSigInfo->lpszPublisher);
if(pSigInfo->lpszPublisherLink)
pSigInfo->lpszPublisherLink = (LPTSTR)NHeapFree(pSigInfo->lpszPublisher);
if(pSigInfo->lpszMoreInfoLink)
pSigInfo->lpszMoreInfoLink = (LPTSTR)NHeapFree(pSigInfo->lpszMoreInfoLink);
if(pSigInfo->lpszSignature)
pSigInfo->lpszSignature = (LPTSTR)NHeapFree(pSigInfo->lpszSignature);
if(pSigInfo->lpszSerial)
pSigInfo->lpszSerial = (LPTSTR)NHeapFree(pSigInfo->lpszSerial);
}
__except(EXCEPTION_EXECUTE_HANDLER)
{
}
}
//////////////////////////////////////////////////////////////////////////
static BOOL NCertGetNameString(PCCERT_CONTEXT pCertContext, DWORD dwType, DWORD dwFlags, LPTSTR *lpszNameString)
{
if(pCertContext == NULL)
return FALSE;
DWORD dwData = CertGetNameString(pCertContext, dwType, 0, NULL, NULL, 0);
if(dwData == 0)
return FALSE;
*lpszNameString = (LPTSTR)NHeapAlloc((dwData + 1) * sizeof(TCHAR));
if(*lpszNameString == NULL)
return FALSE;
dwData = CertGetNameString(pCertContext, dwType, dwFlags, NULL, *lpszNameString, dwData);
if(dwData == 0)
{
NHeapFree(*lpszNameString);
return FALSE;
}
return TRUE;
}
//////////////////////////////////////////////////////////////////////////
static BOOL NCryptDecodeObject(__in LPCSTR lpszObjectId, __in_bcount(cbEncoded) const BYTE *pbEncoded, __in DWORD cbEncoded,
__inout DWORD &dwBuffer, __out void *pBuffer = NULL, __in DWORD dwFlags = 0)
{
if(((pBuffer == NULL) && (dwBuffer != 0)) || ((dwBuffer == 0) && (pBuffer != NULL)))
{ // What? You're passing a NULL pointer an a non-zero size? You so crazy!!!!
ASSERT(FALSE);
SetLastError(ERROR_INVALID_PARAMETER);
return FALSE;
}
return CryptDecodeObject(SIG_ENCODING, lpszObjectId, pbEncoded, cbEncoded, dwFlags, pBuffer, &dwBuffer);
}
//////////////////////////////////////////////////////////////////////////
static BOOL NCryptDecodeObject(__in LPCSTR lpszObjectId, __in PCRYPT_ATTR_BLOB pObject,
__inout DWORD &dwBuffer, __out void *pBuffer = NULL, __in DWORD dwFlags = 0)
{
if((pObject == NULL) || ((dwBuffer == 0) && (pBuffer != NULL)) || ((dwBuffer != 0) && (pBuffer == NULL)))
{
SetLastError(ERROR_INVALID_PARAMETER);
return FALSE;
}
return CryptDecodeObject(SIG_ENCODING, lpszObjectId, pObject->pbData, pObject->cbData, dwFlags, pBuffer, &dwBuffer);
}
//////////////////////////////////////////////////////////////////////////
static BOOL WGetSignTimestamp(PCRYPT_ATTRIBUTES pAttributes, SYSTEMTIME &stTime, LPCSTR lpszObjId)
{
if((pAttributes == NULL) || (pAttributes->cAttr == 0) || (lpszObjId == NULL) || (*lpszObjId == 0))
return FALSE;
for(DWORD dwAttr = 0; dwAttr < pAttributes->cAttr; dwAttr++)
{
if(strcmp(lpszObjId, pAttributes->rgAttr[dwAttr].pszObjId) == 0)
{
DWORD dwSize = sizeof(FILETIME);
FILETIME ftCert;
if(NCryptDecodeObject(lpszObjId, &pAttributes->rgAttr[dwAttr].rgValue[0], dwSize, (PVOID)&ftCert))
{
FILETIME ftLocal;
if(FileTimeToLocalFileTime(&ftCert, &ftLocal) && FileTimeToSystemTime(&ftLocal, &stTime))
return TRUE;
}
}
}
return FALSE;
}
//////////////////////////////////////////////////////////////////////////
static BOOL NVerifyFileSignatureWorker(LPWSTR lpszFileName, WINTRUST_DATA &wtData, NSIGINFO *pSigInfo)
{
if(pSigInfo != NULL)
memset(pSigInfo, 0, sizeof(NSIGINFO));
GUID guidAction = WINTRUST_ACTION_GENERIC_VERIFY_V2;
BOOL bVerified = FALSE;
LONG lRet = WinVerifyTrust((HWND)INVALID_HANDLE_VALUE, &guidAction, &wtData);
if(lRet != 0)
{
if(pSigInfo != NULL)
pSigInfo->lValidationResult = lRet;
return FALSE;
}
if(pSigInfo == NULL)
return TRUE;
HCERTSTORE hStore = NULL;
HCRYPTMSG hMsg = NULL;
if(!CryptQueryObject(CERT_QUERY_OBJECT_FILE, lpszFileName, CERT_QUERY_CONTENT_FLAG_PKCS7_SIGNED_EMBED, CERT_QUERY_FORMAT_FLAG_BINARY, 0, NULL, NULL, NULL, &hStore, &hMsg, NULL))
return FALSE;
PCMSG_SIGNER_INFO pSignerInfo = NULL, pCounterSignerInfo = NULL;
DWORD dwSignerInfo = 0, dwCounterSignerInfo = 0;
if(CryptMsgGetParam(hMsg, CMSG_SIGNER_INFO_PARAM, 0, NULL, &dwSignerInfo) && (dwSignerInfo != 0))
pSignerInfo = (PCMSG_SIGNER_INFO)NHeapAlloc(dwSignerInfo);
if((pSignerInfo != NULL) && CryptMsgGetParam(hMsg, CMSG_SIGNER_INFO_PARAM, 0, (PVOID)pSignerInfo, &dwSignerInfo))
{
for(DWORD dwAttr = 0; dwAttr < pSignerInfo->AuthAttrs.cAttr; dwAttr++)
{
if((strcmp(SPC_SP_OPUS_INFO_OBJID, pSignerInfo->AuthAttrs.rgAttr[dwAttr].pszObjId) != 0))
continue;
PSPC_SP_OPUS_INFO pOpus = NULL;
DWORD dwData = 0;
if(NCryptDecodeObject(SPC_SP_OPUS_INFO_OBJID, &pSignerInfo->AuthAttrs.rgAttr[dwAttr].rgValue[0], dwData) && (dwData != 0))
pOpus = (PSPC_SP_OPUS_INFO)NHeapAlloc(dwData);
if((pOpus != NULL) && NCryptDecodeObject(SPC_SP_OPUS_INFO_OBJID, &pSignerInfo->AuthAttrs.rgAttr[dwAttr].rgValue[0], dwData, (PVOID)pOpus))
{
pSigInfo->lpszProgramName = NConvertW2T(pOpus->pwszProgramName);
if(pOpus->pPublisherInfo != NULL)
{
switch(pOpus->pPublisherInfo->dwLinkChoice)
{
case SPC_URL_LINK_CHOICE:
pSigInfo->lpszPublisherLink = NConvertW2T(pOpus->pPublisherInfo->pwszUrl);
break;
case SPC_FILE_LINK_CHOICE:
pSigInfo->lpszPublisherLink = NConvertW2T(pOpus->pPublisherInfo->pwszFile);
break;
}
}
if(pOpus->pMoreInfo != NULL)
{
switch (pOpus->pMoreInfo->dwLinkChoice)
{
case SPC_URL_LINK_CHOICE:
pSigInfo->lpszMoreInfoLink = NConvertW2T(pOpus->pMoreInfo->pwszUrl);
break;
case SPC_FILE_LINK_CHOICE:
pSigInfo->lpszMoreInfoLink = NConvertW2T(pOpus->pMoreInfo->pwszFile);
break;
}
}
}
if(pOpus != NULL)
NHeapFree(pOpus);
break;
}
CERT_INFO ci;
ci.Issuer = pSignerInfo->Issuer;
ci.SerialNumber = pSignerInfo->SerialNumber;
PCCERT_CONTEXT pCertContext = CertFindCertificateInStore(hStore, SIG_ENCODING, 0, CERT_FIND_SUBJECT_CERT, (PVOID)&ci, NULL);
if(pCertContext != NULL)
{
if(pCertContext->pCertInfo->SerialNumber.cbData != 0)
{
pSigInfo->lpszSerial = (LPTSTR)NHeapAlloc(((pCertContext->pCertInfo->SerialNumber.cbData * 2) + 1) * sizeof(TCHAR));
if(pSigInfo->lpszSerial != NULL)
{
LPTSTR lpszPointer = pSigInfo->lpszSerial;
for(DWORD dwCount = pCertContext->pCertInfo->SerialNumber.cbData; dwCount != 0; dwCount--)
lpszPointer += _stprintf(lpszPointer, _T("%02X"), pCertContext->pCertInfo->SerialNumber.pbData[dwCount - 1]);
}
}
if(!NCertGetNameString(pCertContext, CERT_NAME_FRIENDLY_DISPLAY_TYPE, CERT_NAME_ISSUER_FLAG, &pSigInfo->lpszFriendlyName))
pSigInfo->lpszFriendlyName = NULL;
if(!NCertGetNameString(pCertContext, CERT_NAME_SIMPLE_DISPLAY_TYPE, CERT_NAME_ISSUER_FLAG, &pSigInfo->lpszAuthority))
pSigInfo->lpszAuthority = NULL;
if(!NCertGetNameString(pCertContext, CERT_NAME_SIMPLE_DISPLAY_TYPE, 0, &pSigInfo->lpszPublisher))
pSigInfo->lpszPublisher = NULL;
if(!NCertGetNameString(pCertContext, CERT_NAME_URL_TYPE, 0, &pSigInfo->lpszPublisherUrl))
pSigInfo->lpszPublisherUrl = NULL;
if(!NCertGetNameString(pCertContext, CERT_NAME_EMAIL_TYPE, 0, &pSigInfo->lpszPublisherEmail))
pSigInfo->lpszPublisherEmail = NULL;
CertFreeCertificateContext(pCertContext);
}
for(DWORD dwAttr = 0, dwData; dwAttr < pSignerInfo->AuthAttrs.cAttr; dwAttr++)
{
if((strcmp(szOID_RSA_signingTime, pSignerInfo->AuthAttrs.rgAttr[dwAttr].pszObjId) == 0) && (pSignerInfo->AuthAttrs.rgAttr[dwAttr].cValue != 0))
{
FILETIME ftCert;
dwData = sizeof(FILETIME);
if(NCryptDecodeObject(szOID_RSA_signingTime, &pSignerInfo->AuthAttrs.rgAttr[dwAttr].rgValue[0], dwData, (PVOID)&ftCert))
{
FILETIME ftLocal;
if(!FileTimeToLocalFileTime(&ftCert, &ftLocal))
{
if(!FileTimeToSystemTime(&ftLocal, &pSigInfo->stSigTime))
memset(&pSigInfo->stSigTime, 0, sizeof(SYSTEMTIME));
}
}
}
}
for(DWORD dwAttr = 0; dwAttr < pSignerInfo->UnauthAttrs.cAttr; dwAttr++)
{
if(strcmp(pSignerInfo->UnauthAttrs.rgAttr[dwAttr].pszObjId, szOID_RSA_counterSign) == 0)
{
if(NCryptDecodeObject(PKCS7_SIGNER_INFO, &pSignerInfo->UnauthAttrs.rgAttr[dwAttr].rgValue[0], dwCounterSignerInfo) && (dwCounterSignerInfo != 0))
pCounterSignerInfo = (PCMSG_SIGNER_INFO)NHeapAlloc(dwCounterSignerInfo);
if((pCounterSignerInfo != NULL) && !NCryptDecodeObject(PKCS7_SIGNER_INFO, &pSignerInfo->UnauthAttrs.rgAttr[dwAttr].rgValue[0], dwCounterSignerInfo, pCounterSignerInfo))
pCounterSignerInfo = (PCMSG_SIGNER_INFO)NHeapFree(pCounterSignerInfo);
break;
}
}
if(pCounterSignerInfo != NULL)
{
pSigInfo->bHasSigTime = WGetSignTimestamp(&pCounterSignerInfo->AuthAttrs, pSigInfo->stSigTime, szOID_RSA_signingTime);
if(!pSigInfo->bHasSigTime)
memset(&pSigInfo->stSigTime, 0, sizeof(SYSTEMTIME));
}
}
if(pSignerInfo != NULL)
NHeapFree(pSignerInfo);
if(pCounterSignerInfo != NULL)
NHeapFree(pCounterSignerInfo);
if(hStore != NULL)
CertCloseStore(hStore, 0);
if(hMsg != NULL)
CryptMsgClose(hMsg);
return TRUE;
}
//////////////////////////////////////////////////////////////////////////
BOOL NVerifyFileSignature(LPCTSTR lpszFileName, NSIGINFO *pSigInfo, HANDLE hHandle)
{
if(pSigInfo != NULL)
memset(pSigInfo, 0, sizeof(NSIGINFO));
if(lpszFileName == NULL)
return FALSE;
if((lpszFileName[0] != 0) && (_tcsnicmp(lpszFileName, _T("\\??\\"), 4) == 0))
lpszFileName += 4;
if(lpszFileName[0] == 0)
return FALSE;
LPWSTR lpwszFileName = NConvertT2W(lpszFileName);
if(lpwszFileName == NULL)
return FALSE;
BOOL bOK = FALSE;
__try
{ // be very careful...
WINTRUST_FILE_INFO wtFileInfo;
memset(&wtFileInfo, 0, sizeof(WINTRUST_FILE_INFO));
wtFileInfo.cbStruct = sizeof(WINTRUST_FILE_INFO);
wtFileInfo.pcwszFilePath = lpwszFileName;
if(hHandle != INVALID_HANDLE_VALUE)
wtFileInfo.hFile = hHandle;
WINTRUST_DATA wtData;
memset(&wtData, 0, sizeof(WINTRUST_DATA));
wtData.cbStruct = sizeof(WINTRUST_DATA);
wtData.dwUIChoice = WTD_UI_NONE;
wtData.fdwRevocationChecks = WTD_REVOKE_WHOLECHAIN;
wtData.dwUnionChoice = WTD_CHOICE_FILE;
wtData.pFile = &wtFileInfo;
if(NVerifyFileSignatureWorker(lpwszFileName, wtData, pSigInfo))
bOK = TRUE;
}
__except(EXCEPTION_EXECUTE_HANDLER)
{
if(pSigInfo != NULL)
{
if(pSigInfo->lpszPublisher)
pSigInfo->lpszPublisher = (LPTSTR)NHeapFree(pSigInfo->lpszPublisher);
if(pSigInfo->lpszAuthority)
pSigInfo->lpszAuthority = (LPTSTR)NHeapFree(pSigInfo->lpszAuthority);
if(pSigInfo->lpszProgramName)
pSigInfo->lpszProgramName = (LPTSTR)NHeapFree(pSigInfo->lpszPublisher);
if(pSigInfo->lpszPublisherLink)
pSigInfo->lpszPublisherLink = (LPTSTR)NHeapFree(pSigInfo->lpszPublisher);
if(pSigInfo->lpszMoreInfoLink)
pSigInfo->lpszMoreInfoLink = (LPTSTR)NHeapFree(pSigInfo->lpszMoreInfoLink);
if(pSigInfo->lpszSignature)
pSigInfo->lpszSignature = (LPTSTR)NHeapFree(pSigInfo->lpszSignature);
if(pSigInfo->lpszSerial)
pSigInfo->lpszSerial = (LPTSTR)NHeapFree(pSigInfo->lpszSerial);
}
bOK = FALSE;
}
NHeapFree(lpwszFileName);
return bOK;
}
//////////////////////////////////////////////////////////////////////////
BOOL NCheckFileCertificates(HANDLE hFile, VOID (*pCallback)(PCCERT_CONTEXT, LPVOID), PVOID pParam)
{
DWORD dwCerts = 0;
if(!ImageEnumerateCertificates(hFile, CERT_SECTION_TYPE_ANY, &dwCerts, NULL, 0))
return FALSE;
for(DWORD dwCount = 0; dwCount < dwCerts; dwCount++)
{
WIN_CERTIFICATE wcHdr;
memset(&wcHdr, 0, sizeof(WIN_CERTIFICATE));
wcHdr.dwLength = 0;
wcHdr.wRevision = WIN_CERT_REVISION_1_0;
if(!ImageGetCertificateHeader(hFile, dwCount, &wcHdr))
return FALSE;
DWORD dwLen = sizeof(WIN_CERTIFICATE) + wcHdr.dwLength;
WIN_CERTIFICATE *pWinCert = (WIN_CERTIFICATE *)NHeapAlloc(dwLen);
if(pWinCert == NULL)
return FALSE;
if(!ImageGetCertificateData(hFile, dwCount, pWinCert, &dwLen))
{ // problem getting certificate, return failure
NHeapFree(pWinCert);
return FALSE;
}
// extract the PKCS7 signed data
CRYPT_VERIFY_MESSAGE_PARA cvmp;
memset(&cvmp, 0, sizeof(CRYPT_VERIFY_MESSAGE_PARA));
cvmp.cbSize = sizeof(CRYPT_VERIFY_MESSAGE_PARA);
cvmp.dwMsgAndCertEncodingType = SIG_ENCODING;
PCCERT_CONTEXT pCertContext = NULL;
if(!CryptVerifyMessageSignature(&cvmp, dwCount, pWinCert->bCertificate, pWinCert->dwLength, NULL, NULL, &pCertContext))
{
NHeapFree(pWinCert);
return FALSE;
}
// Now, pass this context on to our callback function (if any)
if(pCallback != NULL)
pCallback(pCertContext, pParam);
if(!CertFreeCertificateContext(pCertContext))
{
NHeapFree(pWinCert);
return FALSE;
}
NHeapFree(pWinCert);
}
return TRUE;
}
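A minimal usage sketch of the API above (the path is a placeholder; only a few NSIGINFO fields are printed, and NCertFreeSigInfo() releases the strings afterwards):
#include <windows.h>
#include <tchar.h>
#include <stdio.h>
#include "NAuthenticode.h"

int _tmain()
{
    NSIGINFO si;
    if (NVerifyFileSignature(_T("C:\\path\\to\\signed.exe"), &si))
    {
        _tprintf(_T("Publisher: %s\n"), si.lpszPublisher ? si.lpszPublisher : _T("(none)"));
        _tprintf(_T("Issuer   : %s\n"), si.lpszAuthority ? si.lpszAuthority : _T("(none)"));
        _tprintf(_T("Link     : %s\n"), si.lpszPublisherLink ? si.lpszPublisherLink : _T("(none)"));
        NCertFreeSigInfo(&si);
    }
    else
    {
        _tprintf(_T("Not signed or verification failed (0x%08lX)\n"), (unsigned long)si.lValidationResult);
    }
    return 0;
}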
Microsoft provides a way to do it in this support link: How To Get Information from Authenticode Signed Executables
You can use the WinVerifyTrust() API to verify an Authenticode signed
executable.
Although a signature is verified, a program may also have to do the
following:
Determine the details of the certificate that signed the
executable.
Determine the date and time that the file was time
stamped.
Retrieve the URL link associated with the file.
Retrieve the timestamp certificate.
This article demonstrates how to use
CryptQueryObject() API to retrieve detailed information from an
Authenticode signed executable.
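Following that article's approach, here is a minimal sketch (an illustrative example, not the article's code) that skips trust verification entirely - matching the "fast, no validity check" requirement - and pulls the signing certificate out of an embedded Authenticode signature to read its subject and issuer names:
#include <windows.h>
#include <wincrypt.h>
#include <stdio.h>
#pragma comment(lib, "crypt32")

// Open the embedded PKCS#7 signature of a PE file, locate the signing
// certificate and print its subject and issuer. Serial number, timestamp,
// counter-signature and detailed error handling are omitted.
BOOL PrintSignerInfo(LPCWSTR lpszFile)
{
    HCERTSTORE hStore = NULL;
    HCRYPTMSG  hMsg   = NULL;
    DWORD dwEncoding = 0, dwContentType = 0, dwFormatType = 0;

    if (!CryptQueryObject(CERT_QUERY_OBJECT_FILE, lpszFile,
                          CERT_QUERY_CONTENT_FLAG_PKCS7_SIGNED_EMBED,
                          CERT_QUERY_FORMAT_FLAG_BINARY, 0,
                          &dwEncoding, &dwContentType, &dwFormatType,
                          &hStore, &hMsg, NULL))
        return FALSE; // no embedded Authenticode signature

    DWORD cbSigner = 0;
    CryptMsgGetParam(hMsg, CMSG_SIGNER_INFO_PARAM, 0, NULL, &cbSigner);
    PCMSG_SIGNER_INFO pSigner = (PCMSG_SIGNER_INFO)LocalAlloc(LPTR, cbSigner);
    BOOL bOK = pSigner && CryptMsgGetParam(hMsg, CMSG_SIGNER_INFO_PARAM, 0, pSigner, &cbSigner);

    if (bOK)
    {
        CERT_INFO ci = { 0 };
        ci.Issuer = pSigner->Issuer;
        ci.SerialNumber = pSigner->SerialNumber;
        PCCERT_CONTEXT pCert = CertFindCertificateInStore(hStore,
            X509_ASN_ENCODING | PKCS_7_ASN_ENCODING, 0, CERT_FIND_SUBJECT_CERT, &ci, NULL);
        if (pCert)
        {
            WCHAR szName[256];
            CertGetNameStringW(pCert, CERT_NAME_SIMPLE_DISPLAY_TYPE, 0, NULL, szName, 256);
            wprintf(L"Subject: %s\n", szName);
            CertGetNameStringW(pCert, CERT_NAME_SIMPLE_DISPLAY_TYPE, CERT_NAME_ISSUER_FLAG, NULL, szName, 256);
            wprintf(L"Issuer : %s\n", szName);
            CertFreeCertificateContext(pCert);
        }
    }
    if (pSigner) LocalFree(pSigner);
    CertCloseStore(hStore, 0);
    CryptMsgClose(hMsg);
    return bOK;
}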