ADTF_DISPLAY_TOOLBOX  3.8.0 (ADTF 3.14.3)
Source Code for Demo 3D Video View Mixin
Location
./src/examples/src/videoviewmixin
This example shows:
  • how to create a video screen in the scene
  • how to show a video on that screen
  • how to create a separate control that shows the properties of objects
  • how to make objects selectable (pickable)
Header
#pragma once
#include <adtf_utils.h>
class cCamera
{
public:
adtf::util::cString m_strName;
tInt m_nResX;
tInt m_nResY;
tFloat64 m_fPosX, m_fPosY, m_fPosZ;
tFloat64 m_fRotX, m_fRotY, m_fRotZ;
tFloat64 m_fFocalX, m_fFocalY;
tFloat64 m_fSkew;
tFloat64 m_fPrincipalX, m_fPrincipalY;
tFloat64 m_fRadX, m_fRadY;
tFloat64 m_fTangX, m_fTangY;
public:
cCamera();
~cCamera();
tResult ParseConfig(const adtf::util::cFilename& strFile);
};
#pragma once
#include <osgGA/CameraManipulator>
class cCameraManipulator: public osgGA::CameraManipulator
{
protected:
osg::NodePath m_oNodePath;
osg::Matrixd m_oToADTFMatrix;
osg::Matrixd m_oFromADTFMatrix;
public:
cCameraManipulator();
void setTrackNode(osg::Node* node);
void setByMatrix(const osg::Matrixd& matrix);
void setByInverseMatrix(const osg::Matrixd& matrix);
osg::Matrixd getMatrix() const;
osg::Matrixd getInverseMatrix() const;
protected:
void computeNodeCenterAndRotation(osg::Vec3d& center, osg::Quat& rotation) const;
};
#pragma once
#include <adtf_utils.h>
#include <osg/Drawable>
class cGCLDrawable: public osg::Drawable
{
protected:
adtf::util::cMemoryBlock m_oCommands;
mutable adtf::disptb::graphicslib::cGCL m_oGCL;
mutable adtf::disptb::graphicslib::cGLCanvas m_oCanvas;
tInt m_nWidth;
tInt m_nHeight;
mutable tBool m_bInitialized;
public:
cGCLDrawable(tInt nWidth, tInt nHeight);
virtual osg::Object* cloneType() const { return new cGCLDrawable(0, 0); }
virtual osg::Object* clone(const osg::CopyOp& /*copyop*/) const { return new cGCLDrawable(m_nWidth, m_nHeight); }
tResult UpdateCommands(const tVoid* pCommands, tInt nSize);
virtual void drawImplementation(osg::RenderInfo& /*renderInfo*/) const;
};
#pragma once
#include <adtf_utils.h>
#define MEDIA_TYPE_PROJECTIONSTRUCT (MEDIA_TYPE_USER + 0x2712)
#define MEDIA_SUBTYPE_CAMERA_PROJECTION 0x0001
struct tCameraProjection
{
tInt32 nSize;
tUInt32 nCounter;
tFloat32 f32PosX;
tFloat32 f32PosY;
tFloat32 f32PosZ;
tFloat32 f32RotX;
tFloat32 f32RotY;
tFloat32 f32RotZ;
tFloat32 f32PrincipalX;
tFloat32 f32PrincipalY;
tFloat32 f32FocalX;
tFloat32 f32FocalY;
};
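The calibration payload is a plain struct that a producer fills field by field before transmitting it as a sample. A minimal, hedged sketch with illustrative values (the counter handling and the sample transmission itself are not part of this example):

tCameraProjection sProjection = {};
sProjection.nSize = sizeof(tCameraProjection); // payload size, useful for consistency checks
sProjection.nCounter = 0;                      // running counter maintained by the producer
sProjection.f32PosX = 0.0f;                    // camera position in scene coordinates
sProjection.f32PosY = 0.0f;
sProjection.f32PosZ = 1.5f;
sProjection.f32RotX = 0.0f;                    // rotation angles in radians
sProjection.f32RotY = 0.0f;
sProjection.f32RotZ = 0.0f;
sProjection.f32PrincipalX = 160.0f;            // principal point in pixels
sProjection.f32PrincipalY = 120.0f;
sProjection.f32FocalX = 320.0f;                // focal lengths in pixels
sProjection.f32FocalY = 320.0f;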
#pragma once
#include <adtf_utils.h>
namespace videoviewmixin
{
struct stream_meta_type_camera_calibration
{
static constexpr const tChar* const MetaTypeName = "videoviewmixin/camera_calibration";
};
}
#pragma once
#include "camera.h"
#include "gcldrawable.h"
#include "projection_types.h"
#include <osg/Camera>
#include <osgViewer/Viewer>
#include <osgGA/GUIEventHandler>
#include <osgGA/CameraManipulator>
#ifndef _USE_MATH_DEFINES
#define _USE_MATH_DEFINES
#endif
#include <cmath>
#ifndef M_PI
namespace
{
const double M_PI = std::acos(-1.0);
const double M_PI_2 = M_PI/2.0;
}
#endif
using namespace adtf::ucom;
using namespace adtf::base;
using namespace adtf::streaming;
using namespace adtf::filter;
using namespace adtf::disptb::mixinlib;
using namespace adtf::disptb::graphicslib;
class cVideoViewMixin;
struct cExtDrawCallback : virtual public osg::Camera::DrawCallback
{
cVideoViewMixin* m_pMixin;
osg::ref_ptr<DrawCallback> m_pPrevious;
cExtDrawCallback() {}
cExtDrawCallback(cVideoViewMixin* pMixin, osg::Camera::DrawCallback* pPrevious): m_pMixin(pMixin), m_pPrevious(pPrevious) {}
cExtDrawCallback(const osg::Camera::DrawCallback&, const osg::CopyOp&) {}
META_Object(scene3d, cExtDrawCallback)
virtual void operator () (osg::RenderInfo& renderInfo) const;
virtual void operator () (const osg::Camera& /*camera*/) const {}
};
class cVideoViewMixin : public cMixin,
public osgGA::GUIEventHandler
{
friend struct cExtDrawCallback;
public:
ADTF_CLASS_ID_NAME(cVideoViewMixin, "demo_video_view.3d_mixin.disptb.cid", "Demo 3D Video View Mixin");
public:
protected:
property_variable<tBool> m_bShowCameraSymbol = tTrue;
property_variable<tBool> m_bKeepAspect = tTrue;
property_variable<adtf::util::cFilename> m_strCameraConfigFile;
property_variable<tFloat32> m_fVideoScreenDistance = 20.0;
//@todo dynamic
property_variable<tFloat64> m_fFarClipping = 1000.0;
//Injected DrawCallback
osg::ref_ptr<cExtDrawCallback> m_pExtDrawCallback;
struct tVideo
{
ISampleReader* pVideoReader;
ISampleReader* pCalibrationReader;
ISampleReader* pGCLReader;
osg::ref_ptr<osg::PositionAttitudeTransform> pTransform;
tBitmapFormat sFormat;
osg::ref_ptr<osg::Image> pImage;
GLenum nPixelType;
GLint nBitmapFormatInternal;
GLenum nBitmapFormat;
object_ptr<const ISample> pCurSample;
cImage oConvertImg;
osg::ref_ptr<osg::Geode> pCam;
osg::ref_ptr<osg::Geode> pVideo;
osg::ref_ptr<osg::Geometry> pVideoGeometry;
osg::ref_ptr<osg::StateSet> pVideoState;
tBool bShowVideo;
tBool bCameraView;
tFloat64 fScreenDist;
osg::Vec3d vPosition;
cCamera oCamera;
tBool bUseGCL = tFalse;
osg::ref_ptr<cGCLDrawable> pGCL;
osg::ref_ptr<osg::Camera> pGCLCam;
tInt nId;
tBool bConvertToRGB;
tBool bFormatChanged;
tUInt32 ui32VideoChannel;
tUInt32 ui32CalibChannel;
tUInt32 ui32GCLChannel;
};
tVideo m_sVideo;
osgViewer::Viewer* m_pViewer = nullptr;
object_ptr<IViewer> m_pIViewer;
osg::ref_ptr<osgGA::CameraManipulator> m_pOldManip;
osg::Matrixd m_oOldProjectionMatrix;
tBool m_bUseFBO = tFalse;
tBool m_bSceneReady = tFalse;
public:
cVideoViewMixin();
~cVideoViewMixin() override;
tResult InitScene() override;
tVoid ClearScene() override;
tResult ProcessInput(ISampleReader* pReader, const iobject_ptr<const ISample>& pSample) override;
tResult AcceptType(ISampleReader* pReader, const iobject_ptr<const IStreamType>& pType) override;
tResult AddMenuItemForPick(IMenu& oMenu, const tNodePath& sNodePath) override;
tResult AddGlobalMenuItem(IMenu& oMenu) override;
tResult HandleMenuEvent(const tChar* strMenuText, tVoid* pvEventData) override;
protected:
tResult SetUpCamera();
tResult Calibrate(const tCameraProjection* pCalib);
tResult UpdateCamAndScreen();
tResult UpdateGCLCam(tInt nVW, tInt nVH);
tResult ShowVideo(tBool bShow);
tResult SetCameraView(tBool bCameraView);
tResult CreateVideoWall();
public:
virtual bool handle(const osgGA::GUIEventAdapter& ea, osgGA::GUIActionAdapter& aa, osg::Object*, osg::NodeVisitor*);
};
Implementation
#include "camera.h"
#ifndef _USE_MATH_DEFINES
#define _USE_MATH_DEFINES
#endif
#include <cmath>
#ifndef M_PI
namespace
{
const double M_PI = std::acos(-1.0);
const double M_PI_2 = M_PI/2.0;
}
#endif
#ifndef DEG2RAD
#define DEG2RAD(angle) ((angle)*M_PI/180.0)
#endif
#ifndef RAD2DEG
#define RAD2DEG(angle) ((angle)*180.0/M_PI)
#endif
using namespace adtf::util;
cCamera::cCamera()
{
m_nResX = 0;
m_nResY = 0;
m_fPosX = m_fPosY = 0.0;
m_fPosZ = 1.5;
m_fRotX = m_fRotY = m_fRotZ = 0.0;
m_fFocalX = m_fFocalY = 0.0;
m_fPrincipalX = 0.0;
m_fPrincipalY = 0.0;
m_fRadX = m_fRadY = 0.0;
m_fTangX = m_fTangY = 0.0;
m_fSkew = 0.0;
}
cCamera::~cCamera()
{
}
tResult cCamera::ParseConfig(const cFilename& strFile)
{
cFile oFile;
RETURN_IF_FAILED(oFile.Open(strFile, cFile::OM_Read));
cRegularExpression oExpRes(".*Resolution:\\s+([0-9]+)\\s*x\\s*([0-9]+).*", tFalse);
cRegularExpression oExpPos(".*Position:\\s+\\(\\s*([-0-9]*[.,][0-9]+)\\s*([-0-9]*[.,][0-9]+)\\s*([-0-9]*[.,][0-9]+)\\s*\\).*", tFalse);
cRegularExpression oExpRotWithUnit(".*Rotation:\\s+\\(\\s*([-0-9]*[.,][0-9]+)"
"\\s*([-0-9]*[.,][0-9]+)\\s*([-0-9]*[.,][0-9]+)\\s*\\)"
".*\\[(.*)\\].*", tFalse);
cRegularExpression oExpRot(".*Rotation:\\s+\\(\\s*([-0-9]*[.,][0-9]+)"
"\\s*([-0-9]*[.,][0-9]+)\\s*([-0-9]*[.,][0-9]+)\\s*\\)",
tFalse);
cRegularExpression oExpFoc(".*Focal length:\\s+\\(\\s*([-0-9]*[.,][0-9]+)\\s*([-0-9]*[.,][0-9]+)\\s*\\).*", tFalse);
cRegularExpression oExpSkew(".*Skew parameter:\\s+([-0-9]*[.,][0-9]+).*", tFalse);
cRegularExpression oExpPrin(".*Principal point:\\s+\\(\\s*([-0-9]*[.,][0-9]+)\\s*([-0-9]*[.,][0-9]+)\\s*\\).*", tFalse);
cRegularExpression oExpRad(".*Radial distortion:\\s+\\(\\s*([-0-9]*[.,][0-9]+)\\s*([-0-9]*[.,][0-9]+)\\s*\\).*", tFalse);
cRegularExpression oExpTan(".*Tangential distortion:\\s+\\(\\s*([-0-9]*[.,][0-9]+)\\s*([-0-9]*[.,][0-9]+)\\s*\\).*", tFalse);
cString strLine;
while (!oFile.IsEof())
{
oFile.ReadLine(strLine);
tInt nX, nY;
cString strUnit;
cString strX, strY, strZ;
if (oExpRes.FullMatch(strLine, &nX, &nY))
{
m_nResX = nX;
m_nResY = nY;
}
else if (oExpPos.FullMatch(strLine, &strX, &strY, &strZ))
{
m_fPosX = cString(strX).AsFloat64();
m_fPosY = cString(strY).AsFloat64();
m_fPosZ = cString(strZ).AsFloat64();
}
else if (oExpRotWithUnit.FullMatch(strLine, &strX, &strY, &strZ, &strUnit))
{
strUnit.ToLower();
if(strUnit != "rad")
{
// the default interpretation is degrees
m_fRotX = DEG2RAD(cString(strX).AsFloat64());
m_fRotY = DEG2RAD(cString(strY).AsFloat64());
m_fRotZ = DEG2RAD(cString(strZ).AsFloat64());
}
else
{
// interpretation is rad
m_fRotX = cString(strX).AsFloat64();
m_fRotY = cString(strY).AsFloat64();
m_fRotZ = cString(strZ).AsFloat64();
}
}
else if (oExpRot.FullMatch(strLine, &strX, &strY, &strZ))
{
// the default interpretation is degrees
m_fRotX = DEG2RAD(cString(strX).AsFloat64());
m_fRotY = DEG2RAD(cString(strY).AsFloat64());
m_fRotZ = DEG2RAD(cString(strZ).AsFloat64());
}
else if (oExpFoc.FullMatch(strLine, &strX, &strY))
{
m_fFocalX = cString(strX).AsFloat64();
m_fFocalY = cString(strY).AsFloat64();
}
else if (oExpSkew.FullMatch(strLine, &strX))
{
m_fSkew = cString(strX).AsFloat64();
}
else if (oExpPrin.FullMatch(strLine, &strX, &strY))
{
m_fPrincipalX = cString(strX).AsFloat64();
m_fPrincipalY = cString(strY).AsFloat64();
}
else if (oExpRad.FullMatch(strLine, &strX, &strY))
{
m_fRadX = cString(strX).AsFloat64();
m_fRadY = cString(strY).AsFloat64();
}
else if (oExpTan.FullMatch(strLine, &strX, &strY))
{
m_fTangX = cString(strX).AsFloat64();
m_fTangY = cString(strY).AsFloat64();
}
}
oFile.Close();
RETURN_NOERROR;
}
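The regular expressions above define a simple line-based configuration format. A file like the following illustrative sample would be parsed successfully; rotation values are interpreted as degrees unless the line carries a unit tag such as [rad]:

Resolution:            640 x 480
Position:              (0.5 0.0 1.5)
Rotation:              (0.0 0.0 90.0) [deg]
Focal length:          (640.0 640.0)
Skew parameter:        0.0
Principal point:       (320.0 240.0)
Radial distortion:     (0.0 0.0)
Tangential distortion: (0.0 0.0)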
#include "cameramanipulator.h"
#ifndef _USE_MATH_DEFINES
#define _USE_MATH_DEFINES
#endif
#include <cmath>
#ifndef M_PI
namespace
{
const double M_PI = std::acos(-1.0);
const double M_PI_2 = M_PI/2.0;
}
#endif
cCameraManipulator::cCameraManipulator()
{
// the 3d scene coordinate system differs from the default osg system: its x-axis points into the screen
// and its z-axis upwards
m_oToADTFMatrix = osg::Matrixd::rotate(M_PI_2, osg::Vec3(0.0, 0.0, 1.0),
0.0, osg::Vec3(0.0, 1.0, 0.0),
-M_PI_2, osg::Vec3(1.0, 0.0, 0.0));
m_oFromADTFMatrix = osg::Matrixd::inverse(m_oToADTFMatrix);
}
void cCameraManipulator::setTrackNode(osg::Node* node)
{
osg::NodePathList nodePaths = node->getParentalNodePaths();
if (!nodePaths.empty())
{
m_oNodePath = nodePaths.front();
}
}
void cCameraManipulator::computeNodeCenterAndRotation(osg::Vec3d& nodeCenter, osg::Quat& nodeRotation) const
{
osg::Matrixd localToWorld = osg::computeLocalToWorld(m_oNodePath);
nodeCenter = osg::Vec3d(m_oNodePath.back()->getBound().center())*localToWorld;
// scale the matrix to get rid of any scales before we extract the rotation.
double sx = 1.0/sqrt(localToWorld(0,0)*localToWorld(0,0) + localToWorld(1,0)*localToWorld(1,0) + localToWorld(2,0)*localToWorld(2,0));
double sy = 1.0/sqrt(localToWorld(0,1)*localToWorld(0,1) + localToWorld(1,1)*localToWorld(1,1) + localToWorld(2,1)*localToWorld(2,1));
double sz = 1.0/sqrt(localToWorld(0,2)*localToWorld(0,2) + localToWorld(1,2)*localToWorld(1,2) + localToWorld(2,2)*localToWorld(2,2));
localToWorld = localToWorld * osg::Matrixd::scale(sx,sy,sz);
nodeRotation = localToWorld.getRotate();
}
void cCameraManipulator::setByMatrix(const osg::Matrixd& /*matrix*/)
{
}
void cCameraManipulator::setByInverseMatrix(const osg::Matrixd& /*matrix*/)
{
}
osg::Matrixd cCameraManipulator::getMatrix() const
{
osg::Vec3d nodeCenter;
osg::Quat nodeRotation;
computeNodeCenterAndRotation(nodeCenter, nodeRotation);
return m_oFromADTFMatrix * osg::Matrixd::rotate(90, osg::Vec3(1.0, 1.0, 0.0)) * osg::Matrixd::rotate(nodeRotation) * osg::Matrix::translate(nodeCenter);
}
osg::Matrixd cCameraManipulator::getInverseMatrix() const
{
osg::Vec3d nodeCenter;
osg::Quat nodeRotation;
computeNodeCenterAndRotation(nodeCenter, nodeRotation);
return osg::Matrixd::translate(-nodeCenter) * osg::Matrixd::rotate(nodeRotation.inverse()) * m_oToADTFMatrix;
}
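The two matrices above implement the axis remapping described in the constructor. A small standalone sketch (assuming only OSG is available) that prints where the ADTF forward and up axes end up in OSG coordinates:

#include <osg/Matrixd>
#include <cmath>
#include <iostream>

int main()
{
    const double fPiHalf = std::acos(-1.0) / 2.0;
    // same construction as in cCameraManipulator::cCameraManipulator()
    osg::Matrixd oToADTF = osg::Matrixd::rotate( fPiHalf, osg::Vec3d(0.0, 0.0, 1.0),
                                                 0.0,     osg::Vec3d(0.0, 1.0, 0.0),
                                                -fPiHalf, osg::Vec3d(1.0, 0.0, 0.0));
    // osg multiplies row vectors from the left: v' = v * M
    osg::Vec3d oForward = osg::Vec3d(1.0, 0.0, 0.0) * oToADTF; // ADTF x-axis (into the scene)
    osg::Vec3d oUp      = osg::Vec3d(0.0, 0.0, 1.0) * oToADTF; // ADTF z-axis (upwards)
    std::cout << "x -> " << oForward.x() << " " << oForward.y() << " " << oForward.z() << std::endl;
    std::cout << "z -> " << oUp.x()      << " " << oUp.y()      << " " << oUp.z()      << std::endl;
    return 0;
}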
#include "gcldrawable.h"
using namespace adtf::disptb::graphicslib;
#define GCL_FONT_NAME "Arial"
#define GCL_FONT_SIZE 12
cGCLDrawable::cGCLDrawable(tInt nWidth, tInt nHeight)
{
m_nWidth = nWidth;
m_nHeight = nHeight;
m_oGCL.Create();
m_oCanvas.Create(nWidth, nHeight, 24);
m_bInitialized = tFalse;
setUseDisplayList(false);
}
tResult cGCLDrawable::UpdateCommands(const tVoid* pCommands, tInt nSize)
{
return m_oCommands.Set(pCommands, nSize);
}
void cGCLDrawable::drawImplementation(osg::RenderInfo& /*renderInfo*/) const
{
if (!m_bInitialized)
{
m_oCanvas.Font(m_oCanvas.CreateFont(GCL_FONT_NAME, GCL_FONT_SIZE));
m_bInitialized = tTrue;
}
cRect oOutputRect(0, 0, m_nWidth, m_nHeight);
m_oGCL.Process(&m_oCanvas, m_oCommands.GetPtr(), m_oCommands.GetSize(), m_nWidth, m_nHeight, oOutputRect);
}
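From the outside only the constructor and UpdateCommands() are needed; the draw traversal then replays the buffered commands through the canvas. A minimal, hedged usage sketch (pGeode, pCommands and nSize are assumed to exist):

osg::ref_ptr<cGCLDrawable> pDrawable = new cGCLDrawable(640, 480);
pGeode->addDrawable(pDrawable.get()); // the scene graph now renders the GCL overlay
// whenever a new GCL sample arrives:
pDrawable->UpdateCommands(pCommands, nSize); // raw GCL command memory, copied into m_oCommands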
#include "videoviewmixin.h"
#include "cameramanipulator.h"
#include "streammetatypecameracalibration.h"
#include <osg/FrameBufferObject>
#include <osg/GLExtensions>
#include <osg/PositionAttitudeTransform>
#include <osg/ShapeDrawable>
#include <osg/Texture2D>
#include <osg/Depth>
#define MIN(a, b) ((a) < (b) ? (a) : (b))
ADTF_PLUGIN_VERSION("Demo 3D Video View Mixin Plugin",
disptb,
DISPTB_VERSION_MAJOR,
DISPTB_VERSION_MINOR,
DISPTB_VERSION_PATCH,
cVideoViewMixin)
cVideoViewMixin::cVideoViewMixin()
{
m_bShowCameraSymbol.SetDescription("If enabled, a camera symbol will be added to the scene.");
RegisterPropertyVariable("show_camera_symbol", m_bShowCameraSymbol);
m_bKeepAspect.SetDescription("If enabled, the aspect ratio of the window will be kept in camera view.");
RegisterPropertyVariable("camera_window_aspect", m_bKeepAspect);
m_fFarClipping.SetDescription("Far clipping distance used in camera view.");
RegisterPropertyVariable("camera_view_clipping_distance", m_fFarClipping);
m_strCameraConfigFile.SetDescription("Configuration settings for the camera.");
RegisterPropertyVariable("camera_config_file", m_strCameraConfigFile);
m_fVideoScreenDistance.SetDescription("Video screen distance.");
RegisterPropertyVariable("video_screen_distance", m_fVideoScreenDistance);
m_sVideo.pVideoReader = CreateInputPin("video", stream_meta_type_image());
adtf::streaming::set_description(*this, "video", "Input pin for the video data.");
m_sVideo.pCalibrationReader = CreateInputPin("calib", videoviewmixin::stream_meta_type_camera_calibration());
adtf::streaming::set_description(*this, "calib", "Input pin for camera calibration data.");
m_sVideo.pGCLReader = CreateInputPin("gcl", stream_meta_type_gcl());
adtf::streaming::set_description(*this, "gcl", "Input pin for GCL commands.");
SetDescription("Use this Mixin to create a canvas for showing a video in the scene.");
adtf::streaming::set_help_link(*this, "$(ADTF_DISPLAY_TOOLBOX_DIR)/doc/displaytoolbox_html/page_3d_videoview_mixin_example_readme.html");
ref(); // hold an osg reference so that osg::ref_ptr handling cannot delete this ADTF-managed object
}
cVideoViewMixin::~cVideoViewMixin()
{
unref_nodelete(); // release the osg reference without deleting the object
}
tResult cVideoViewMixin::InitScene()
{
RETURN_IF_FAILED(cMixin::InitScene());
RETURN_IF_FAILED(GetSceneGraph().GetObject(m_pIViewer));
m_pViewer = m_pIViewer->GetOsgViewer();
// these have already been initialized by cMixin
m_bUseFBO = osg::GLExtensions::Get(m_pIViewer->GetContextId(), true)->isFrameBufferObjectSupported;
if (m_bUseFBO)
{
LOG_INFO("using FBOs for rendering to texture in GCL mode");
}
else
{
LOG_INFO("using framebuffer for rendering to texture in GCL mode");
}
m_sVideo.bCameraView = tFalse;
m_sVideo.bShowVideo = tTrue;
m_sVideo.sFormat =
{
320,
240,
8,
IImage::PF_GREYSCALE_8,
320,
320 * 240,
0
};
CreateVideoWall();
m_pViewer->addEventHandler(this);
m_pExtDrawCallback = new cExtDrawCallback(this, m_pViewer->getCamera()->getInitialDrawCallback());
m_pViewer->getCamera()->setInitialDrawCallback(m_pExtDrawCallback);
m_bSceneReady = tTrue;
RETURN_NOERROR;
}
tVoid cVideoViewMixin::ClearScene()
{
if (GetRoot())
{
GetRoot()->removeChildren(0, GetRoot()->getNumChildren());
}
//Make sure to defuse the ExtDrawCallback
if (m_pExtDrawCallback)
{
m_pExtDrawCallback->m_pPrevious = nullptr;
}
if (m_pViewer)
{
m_pViewer->removeEventHandler(this);
}
}
tResult cVideoViewMixin::CreateVideoWall()
{
m_sVideo.bFormatChanged = tFalse;
tBool bUseGCL = m_sVideo.bUseGCL;
tBool bOldShowVideo = m_sVideo.bShowVideo;
tBool bOldCameraView = m_sVideo.bCameraView;
if (bOldCameraView)
{
SetCameraView(tFalse);
}
// clean up
GetRoot()->removeChild(m_sVideo.pTransform);
m_sVideo.pTransform = nullptr;
m_sVideo.pCam = nullptr;
m_sVideo.pGCL = nullptr;
m_sVideo.pGCLCam = nullptr;
m_sVideo.pImage = nullptr;
m_sVideo.pVideo = nullptr;
m_sVideo.pVideoGeometry = nullptr;
m_sVideo.pVideoState = nullptr;
// (re)build
// get camera config
m_sVideo.oCamera.m_nResX = 0;
if (m_strCameraConfigFile->IsNotEmpty())
{
RETURN_IF_FAILED(m_sVideo.oCamera.ParseConfig(m_strCameraConfigFile));
}
m_sVideo.bConvertToRGB = tFalse;
if (m_sVideo.sFormat.nPixelFormat == cImage::PF_UNKNOWN)
{
LOG_WARNING("%s: the input pixel format is unkown, trying to guess it.");
m_sVideo.sFormat.nPixelFormat = cImage::GuessPixelFormat(&m_sVideo.sFormat);
if (m_sVideo.sFormat.nPixelFormat == cImage::PF_UNKNOWN)
{
LOG_WARNING("Unable to guess pixel format, waiting for media type change.");
RETURN_NOERROR;
}
}
if (m_sVideo.sFormat.nPixelFormat == cImage::PF_YUV420P_888 &&
m_sVideo.sFormat.nBitsPerPixel == 24)
{
m_sVideo.bConvertToRGB = tTrue;
m_sVideo.nPixelType = GL_UNSIGNED_BYTE;
m_sVideo.nBitmapFormat = GL_BGR;
m_sVideo.nBitmapFormatInternal = GL_RGB;
}
else
{
if (IS_FAILED(cGLTexture::CalcOpenGLTextureFormat(&m_sVideo.sFormat,
&m_sVideo.nPixelType,
&m_sVideo.nBitmapFormat,
&m_sVideo.nBitmapFormatInternal)))
{
LOG_WARNING("Unsupported PixelFormat %d with %d bits per pixel, waiting for media type change.", m_sVideo.sFormat.nPixelFormat, m_sVideo.sFormat.nBitsPerPixel);
RETURN_NOERROR;
}
}
m_sVideo.pImage = new osg::Image;
//add a pixel buffer object to the image for texture reuse (see the OSG documentation)
m_sVideo.pImage->setPixelBufferObject(new osg::PixelBufferObject(m_sVideo.pImage.get()));
//set up a default camera if necessary
if (m_sVideo.oCamera.m_nResX == 0)
{
m_sVideo.oCamera.m_nResX = m_sVideo.sFormat.nWidth;
m_sVideo.oCamera.m_nResY = m_sVideo.sFormat.nHeight;
m_sVideo.oCamera.m_fPrincipalX = m_sVideo.oCamera.m_nResX / 2.0;
m_sVideo.oCamera.m_fPrincipalY = m_sVideo.oCamera.m_nResY / 2.0;
m_sVideo.oCamera.m_fFocalX = m_sVideo.oCamera.m_fFocalY = m_sVideo.oCamera.m_nResX;
}
//now create our subgraph
m_sVideo.pTransform = new osg::PositionAttitudeTransform;
GetRoot()->addChild(m_sVideo.pTransform);
// add a node for camera tracking
m_sVideo.pTransform->addChild(new osg::Node);
//now add a little camera
{
m_sVideo.pCam = new osg::Geode;
if (m_bShowCameraSymbol)
{
m_sVideo.pTransform->addChild(m_sVideo.pCam.get());
}
osg::Vec4 oColor(1.0f, 0.0f, 0.0f, 1.0f);
osg::Box* pCamBox = new osg::Box(osg::Vec3(0, 0, 0), 1.0f);
osg::ShapeDrawable* pCamBoxDrawable = new osg::ShapeDrawable(pCamBox);
pCamBoxDrawable->setColor(oColor);
m_sVideo.pCam->addDrawable(pCamBoxDrawable);
osg::Cone* pCamCone = new osg::Cone(osg::Vec3(0.75f, 0, 0), 0.6f, 1.0f);
pCamCone->setRotation(osg::Quat(-M_PI_2, osg::Vec3(0, 1, 0)));
osg::ShapeDrawable* pCamConeDrawable = new osg::ShapeDrawable(pCamCone);
pCamConeDrawable->setColor(oColor);
m_sVideo.pCam->addDrawable(pCamConeDrawable);
}
// add our tv
{
m_sVideo.bCameraView = tFalse;
m_sVideo.bShowVideo = tTrue;
m_sVideo.pVideo = new osg::Geode;
m_sVideo.pTransform->addChild(m_sVideo.pVideo.get());
m_sVideo.pVideoGeometry = new osg::Geometry;
m_sVideo.pVideo->addDrawable(m_sVideo.pVideoGeometry.get());
osg::Vec4Array* pColors = new osg::Vec4Array;
osg::Vec3Array* pNormals = new osg::Vec3Array;
m_sVideo.fScreenDist = m_fVideoScreenDistance;
pNormals->push_back(osg::Vec3(-1, 0, 0));
//bright white
pColors->push_back(osg::Vec4(1, 1, 1, 1));
m_sVideo.pVideoGeometry->addPrimitiveSet(new osg::DrawArrays(osg::PrimitiveSet::QUADS, 0, 4));
m_sVideo.pVideoGeometry->setColorArray(pColors, osg::Array::BIND_PER_PRIMITIVE_SET);
m_sVideo.pVideoGeometry->setNormalArray(pNormals, osg::Array::BIND_PER_PRIMITIVE_SET);
UpdateCamAndScreen();
m_sVideo.pVideoState = new osg::StateSet();
m_sVideo.pVideoState->setMode(GL_LIGHTING, osg::StateAttribute::OFF);
// Associate this state set with the Geode that contains
// the tv:
m_sVideo.pVideo->setStateSet(m_sVideo.pVideoState.get());
}
// create texture
{
// create video texture
osg::Texture2D* pVideoTexture;
pVideoTexture = new osg::Texture2D;
pVideoTexture->setDataVariance(osg::Object::DYNAMIC);
pVideoTexture->setImage(m_sVideo.pImage.get());
pVideoTexture->setResizeNonPowerOfTwoHint(tFalse);
// In GCL Mode we need no mipmapping when using FBOs
if (bUseGCL && m_bUseFBO)
{
// this would speed up the default osg::Texture2D, but with subload
// callback we get the speedup even for non power of 2
// textures + mipmapping (if supported by hardware)
pVideoTexture->setFilter(osg::Texture::MIN_FILTER, osg::Texture::NEAREST);
}
tInt nWidth = m_sVideo.sFormat.nWidth;
tInt nHeight = m_sVideo.sFormat.nHeight;
osg::Viewport* pViewport = m_pViewer->getCamera()->getViewport();
tInt nVW = pViewport->width();
tInt nVH = pViewport->height();
// if we use GCL we render the video plus the GCL objects to a texture first
// and then render it to the screen
if (bUseGCL)
{
// create texture to render to
osg::Texture2D* pCamTex = new osg::Texture2D;
pCamTex->setTextureSize(nWidth, nHeight);
pCamTex->setInternalFormat(GL_RGBA);
// create camera which will render our video plus the gcl stuff
m_sVideo.pGCLCam = new osg::Camera;
m_sVideo.pGCLCam->setClearColor(osg::Vec4(0.0, 0.0, 0.0, 1.0));
m_sVideo.pGCLCam->setProjectionMatrixAsOrtho2D(0, nWidth, nHeight, 0); //(0,0) top left
m_sVideo.pGCLCam->setReferenceFrame(osg::Transform::ABSOLUTE_RF);
m_sVideo.pGCLCam->setViewport(0, 0, nWidth, nHeight);
m_sVideo.pGCLCam->setRenderOrder(osg::Camera::PRE_RENDER);
//fallback is the framebuffer, but this requires the framebuffer to be at least as large as the texture,
//otherwise the scene will be clipped (a view matrix that scales the scene appropriately is set up in UpdateGCLCam)
if (m_bUseFBO)
{
m_sVideo.pGCLCam->setRenderTargetImplementation(osg::Camera::FRAME_BUFFER_OBJECT);
}
m_sVideo.pGCLCam->attach(osg::Camera::COLOR_BUFFER, pCamTex, 0, 0, true);
//add video quad
{
osg::Geode* pVideoGeode = new osg::Geode;
m_sVideo.pGCLCam->addChild(pVideoGeode);
osg::Geometry* pGeom = new osg::Geometry;
pVideoGeode->addDrawable(pGeom);
osg::StateSet* pState = new osg::StateSet;
pState->setMode(GL_LIGHTING, osg::StateAttribute::OFF);
pState->setMode(GL_CULL_FACE, osg::StateAttribute::OFF);
pVideoGeode->setStateSet(pState);
osg::Vec2Array* pCoords = new osg::Vec2Array;
pCoords->push_back(osg::Vec2(0, 0));
pCoords->push_back(osg::Vec2(0, m_sVideo.sFormat.nHeight));
pCoords->push_back(osg::Vec2(m_sVideo.sFormat.nWidth, m_sVideo.sFormat.nHeight));
pCoords->push_back(osg::Vec2(m_sVideo.sFormat.nWidth, 0));
pGeom->setVertexArray(pCoords);
osg::Vec4Array* pColors = new osg::Vec4Array;
pColors->push_back(osg::Vec4d(1.0, 1.0, 1.0, 1.0));
pGeom->setColorArray(pColors, osg::Array::BIND_OVERALL);
pGeom->addPrimitiveSet(new osg::DrawArrays(osg::PrimitiveSet::QUADS, 0, 4));
pState->setTextureAttributeAndModes(0, pVideoTexture, osg::StateAttribute::ON); // uses our video texture
osg::Vec2Array* pVidTexCoords = new osg::Vec2Array(4);
tFloat64 fX = m_sVideo.sFormat.nWidth / (tFloat64) nWidth;
tFloat64 fY = m_sVideo.sFormat.nHeight / (tFloat64) nHeight;
(*pVidTexCoords)[1].set(0.0, fY);
(*pVidTexCoords)[0].set(0.0, 0.0);
(*pVidTexCoords)[3].set(fX, 0.0);
(*pVidTexCoords)[2].set(fX, fY);
pGeom->setTexCoordArray(0, pVidTexCoords);
}
//add gcl node which uses cGLCanvas in its draw routine
{
osg::Geode* pGCLGeode = new osg::Geode;
m_sVideo.pGCLCam->addChild(pGCLGeode);
m_sVideo.pGCL = new cGCLDrawable(m_sVideo.sFormat.nWidth, m_sVideo.sFormat.nHeight);
pGCLGeode->addDrawable(m_sVideo.pGCL.get());
osg::StateSet* pState = new osg::StateSet;
pState->setMode(GL_LIGHTING, osg::StateAttribute::OFF);
pState->setMode(GL_CULL_FACE, osg::StateAttribute::OFF);
pState->setMode(GL_BLEND, osg::StateAttribute::ON);
pState->setAttribute(new osg::Depth(osg::Depth::ALWAYS), osg::StateAttribute::ON);
pGCLGeode->setStateSet(pState);
}
// add camera to scene
m_sVideo.pTransform->addChild(m_sVideo.pGCLCam.get());
// set the generated texture as the texture for the screen
m_sVideo.pVideoState->setTextureAttributeAndModes(0, pCamTex, osg::StateAttribute::ON);
if (m_bUseFBO)
{
UpdateGCLCam(nWidth, nHeight);
}
else
{
UpdateGCLCam(nVW, nVH);
}
}
else
{
// use the video texture directly for the screen
m_sVideo.pVideoState->setTextureAttributeAndModes(0, pVideoTexture, osg::StateAttribute::ON);
//set texture coordinates
osg::Vec2Array* pTexCoords = new osg::Vec2Array(4);
tFloat64 fX = m_sVideo.sFormat.nWidth / (tFloat64) nWidth;
tFloat64 fY = m_sVideo.sFormat.nHeight / (tFloat64) nHeight;
(*pTexCoords)[0].set(fX, 0.0);
(*pTexCoords)[1].set(fX, fY);
(*pTexCoords)[2].set(0.0, fY);
(*pTexCoords)[3].set(0.0, 0.0);
m_sVideo.pVideoGeometry->setTexCoordArray(0, pTexCoords);
}
}
ShowVideo(bOldShowVideo);
SetCameraView(bOldCameraView);
RETURN_NOERROR;
}
tResult cVideoViewMixin::ProcessInput(ISampleReader* pReader, const iobject_ptr<const ISample>& pSample)
{
if (m_sVideo.bFormatChanged)
{
RETURN_NOERROR;
}
if (pReader == m_sVideo.pVideoReader && m_sVideo.pImage)
{
sample_data<tUInt8> pImageData(pSample);
auto pSrcBuffer = pImageData.GetDataPtr();
if (m_sVideo.bConvertToRGB)
{
m_sVideo.oConvertImg.Attach(const_cast<tUInt8*>(pSrcBuffer), &m_sVideo.sFormat);
RETURN_IF_FAILED(m_sVideo.oConvertImg.ChangePixelFormat(cImage::PF_RGB_888));
pSrcBuffer = m_sVideo.oConvertImg.GetBitmap();
}
m_sVideo.pImage->setImage(m_sVideo.sFormat.nWidth,
m_sVideo.sFormat.nHeight,
1,
m_sVideo.nBitmapFormatInternal,
m_sVideo.nBitmapFormat,
m_sVideo.nPixelType,
const_cast<tUInt8*>(pSrcBuffer),
osg::Image::NO_DELETE,
4);
m_sVideo.pCurSample = pSample; //do not free data until next call
}
else if (pReader == m_sVideo.pCalibrationReader)
{
sample_data<tCameraProjection> oProjection(pSample);
Calibrate(oProjection.GetDataPtr());
}
else if (pReader == m_sVideo.pGCLReader)
{
if (!m_sVideo.bUseGCL)
{
m_sVideo.bUseGCL = tTrue;
m_sVideo.bFormatChanged = tTrue;
}
else
{
sample_data<tUInt8> oData(pSample);
m_sVideo.pGCL->UpdateCommands(oData.GetDataPtr(), oData.GetDataSize());
}
}
RETURN_NOERROR;
}
tResult cVideoViewMixin::AddMenuItemForPick(IMenu& oMenu, const tNodePath& sNodePath)
{
RETURN_IF_FAILED(cMixin::AddMenuItemForPick(oMenu, sNodePath));
oMenu.AddSeparator();
auto pVideoMenu = oMenu.AddMenu("Video");
auto pMenuItem = pVideoMenu->AddMenuItem("Show Video");
RETURN_IF_POINTER_NULL(pMenuItem);
pMenuItem->SetCheckable(tTrue);
pMenuItem->SetChecked(m_sVideo.bShowVideo);
pMenuItem->SetEventHandler(this);
if (!m_sVideo.bCameraView)
{
pVideoMenu->AddMenuItem("Camera View")->SetEventHandler(this);
}
RETURN_NOERROR;
}
tResult cVideoViewMixin::AddGlobalMenuItem(IMenu& oMenu)
{
RETURN_IF_FAILED(cMixin::AddGlobalMenuItem(oMenu));
if (m_pOldManip.get())
{
oMenu.AddMenuItem("Reset Camera")->SetEventHandler(this);
}
if (m_sVideo.bShowVideo)
{
oMenu.AddMenuItem("Hide Video")->SetEventHandler(this);
}
else
{
oMenu.AddMenuItem("Show Video")->SetEventHandler(this);
}
if (m_bShowCameraSymbol)
{
oMenu.AddMenuItem("Hide Camera Symbols")->SetEventHandler(this);
}
else
{
oMenu.AddMenuItem("Show Camera Symbols")->SetEventHandler(this);
}
RETURN_NOERROR;
}
#define IF_FORMAT(__stream_type_format, __image_format)\
if (sFormat.m_strFormatName == __stream_type_format::FormatName)\
{\
sBitmapFormat.nPixelFormat = __image_format;\
}
#define ELSE_IF_FORMAT(__stream_type_format, __image_format) else IF_FORMAT(__stream_type_format, __image_format)
tBitmapFormat get_bitmap_format_from_image_format(const tStreamImageFormat& sFormat)
{
tBitmapFormat sBitmapFormat;
sBitmapFormat.nSize = sFormat.m_szMaxByteSize;
sBitmapFormat.nWidth = sFormat.m_ui32Width;
sBitmapFormat.nHeight = sFormat.m_ui32Height;
sBitmapFormat.nPaletteSize = 0;
sBitmapFormat.nBitsPerPixel = stream_image_format_get_generic_pixel_size(sFormat);
sBitmapFormat.nBytesPerLine = sBitmapFormat.nWidth * (sBitmapFormat.nBitsPerPixel / 8);
IF_FORMAT(stream_image_format::GREYSCALE_8, IImage::PF_GREYSCALE_8)
ELSE_IF_FORMAT(stream_image_format::GREYSCALE_16, IImage::PF_GREYSCALE_16)
ELSE_IF_FORMAT(stream_image_format::GREYSCALE_24, IImage::PF_GREYSCALE_24)
ELSE_IF_FORMAT(stream_image_format::GREYSCALE_32, IImage::PF_GREYSCALE_32)
ELSE_IF_FORMAT(stream_image_format::RGB_8, IImage::PF_RGB_8)
ELSE_IF_FORMAT(stream_image_format::RGB_24, IImage::PF_RGB_888)
ELSE_IF_FORMAT(stream_image_format::RGB_32, IImage::PF_RGBA_8888)
ELSE_IF_FORMAT(stream_image_format::RGB_555, IImage::PF_RGB_555)
ELSE_IF_FORMAT(stream_image_format::RGB_565, IImage::PF_RGB_565)
ELSE_IF_FORMAT(stream_image_format::BGR_24, IImage::PF_BGR_888)
ELSE_IF_FORMAT(stream_image_format::BGR_32, IImage::PF_BGRA_8888)
ELSE_IF_FORMAT(stream_image_format::ABGR_32, IImage::PF_ABGR_8888)
ELSE_IF_FORMAT(stream_image_format::ARGB_32, IImage::PF_ARGB_8888)
ELSE_IF_FORMAT(stream_image_format::BGRA_32, IImage::PF_BGRA_8888)
ELSE_IF_FORMAT(stream_image_format::RGBA_32, IImage::PF_RGBA_8888)
ELSE_IF_FORMAT(stream_image_format::YUV420P, IImage::PF_YUV420P_888)
return sBitmapFormat;
}
tResult cVideoViewMixin::AcceptType(ISampleReader* pReader, const iobject_ptr<const IStreamType>& pType)
{
RETURN_IF_FAILED(cMixin::AcceptType(pReader, pType));
if (pReader == m_sVideo.pVideoReader)
{
tStreamImageFormat sFormat;
RETURN_IF_FAILED(adtf::streaming::get_stream_type_image_format(sFormat, *pType.Get()));
if (sFormat == tStreamImageFormat())
{
RETURN_ERROR_DESC(ERR_INVALID_TYPE, "Default image format is not accepted. Waiting for full image format.");
}
m_sVideo.sFormat = get_bitmap_format_from_image_format(sFormat);
m_sVideo.bFormatChanged = tTrue;
}
RETURN_NOERROR;
}
tResult cVideoViewMixin::HandleMenuEvent(const tChar* strMenuText, tVoid* /*pvEventData*/)
{
std::string strHelper(strMenuText);
m_pIViewer->Lock();
if (strHelper == "Camera View")
{
SetCameraView(tTrue);
}
else if (strHelper == "Reset Camera")
{
SetCameraView(tFalse);
}
else if (strHelper == "Hide Camera Symbols")
{
m_bShowCameraSymbol = tFalse;
m_sVideo.pTransform->removeChild(m_sVideo.pCam.get());
}
else if (strHelper == "Show Camera Symbols")
{
m_bShowCameraSymbol = tTrue;
m_sVideo.pTransform->addChild(m_sVideo.pCam.get());
}
else if (strHelper == "Hide Video")
{
ShowVideo(tFalse);
}
else if (strHelper == "Show Video")
{
ShowVideo(tTrue);
}
m_pIViewer->Unlock();
RETURN_NOERROR;
}
tResult cVideoViewMixin::SetUpCamera()
{
tFloat64 fZNear = 1.0;
tFloat64 fZFar = m_fFarClipping;
if (!m_bKeepAspect)
{
// Init projection matrix (original from scene video display)
tFloat fQ = fZFar / (fZFar - fZNear);
osg::Matrixd oMatrix;
tFloat64* pMat = oMatrix.ptr();
pMat[0] = 2.0 * m_sVideo.oCamera.m_fFocalX / m_sVideo.oCamera.m_nResX;
pMat[1] = 0.0;
pMat[2] = 0.0;
pMat[3] = 0.0;
pMat[4] = 0.0;
pMat[5] = 2.0 * m_sVideo.oCamera.m_fFocalY / m_sVideo.oCamera.m_nResY;
pMat[6] = 0.0;
pMat[7] = 0.0;
pMat[8] = -2.0 * ((tFloat64) (m_sVideo.oCamera.m_fPrincipalX) / (m_sVideo.oCamera.m_nResX) - 0.5);
pMat[9] = 2.0 * ((tFloat64) m_sVideo.oCamera.m_fPrincipalY / (m_sVideo.oCamera.m_nResY) - 0.5);
pMat[10] = fQ;
pMat[11] = -1.0;
pMat[12] = 0.0;
pMat[13] = 0.0;
pMat[14] = -fQ * fZNear;
pMat[15] = 0.0;
m_pViewer->getCamera()->setProjectionMatrix(oMatrix);
}
else
{
//keep aspect ratio of the window
osg::Viewport* pViewport = m_pViewer->getCamera()->getViewport();
tFloat64 fAspect = pViewport->width() / pViewport->height();
tFloat fQ = fZFar / (fZFar - fZNear);
// Init projection matrix (original from scene video display)
osg::Matrixd oMatrix;
tFloat64* pMat = oMatrix.ptr();
pMat[0] = 2.0 * m_sVideo.oCamera.m_fFocalX / (m_sVideo.oCamera.m_nResY * fAspect);
pMat[1] = 0.0;
pMat[2] = 0.0;
pMat[3] = 0.0;
pMat[4] = 0.0;
pMat[5] = 2.0 * m_sVideo.oCamera.m_fFocalY / m_sVideo.oCamera.m_nResY;
pMat[6] = 0.0;
pMat[7] = 0.0;
pMat[8] = -2.0 * ((tFloat64) (m_sVideo.oCamera.m_fPrincipalX) / (m_sVideo.oCamera.m_nResX) - 0.5);
pMat[9] = 2.0 * ((tFloat64) m_sVideo.oCamera.m_fPrincipalY / (m_sVideo.oCamera.m_nResY) - 0.5);
pMat[10] = fQ;
pMat[11] = -1.0;
pMat[12] = 0.0;
pMat[13] = 0.0;
pMat[14] = -fQ * fZNear;
pMat[15] = 0.0;
m_pViewer->getCamera()->setProjectionMatrix(oMatrix);
}
m_sVideo.bCameraView = tTrue;
RETURN_NOERROR;
}
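Written out, both branches load the same pinhole-to-OpenGL mapping (OSG stores matrices row-major and multiplies row vectors from the left, so pMat[0..3] is the first row). With focal lengths $f_x, f_y$, principal point $(c_x, c_y)$, image size $w \times h$ and $q = z_f / (z_f - z_n)$ the matrix reads

$$P = \begin{pmatrix} \frac{2 f_x}{w} & 0 & 0 & 0 \\ 0 & \frac{2 f_y}{h} & 0 & 0 \\ -2\left(\frac{c_x}{w} - \frac{1}{2}\right) & 2\left(\frac{c_y}{h} - \frac{1}{2}\right) & q & -1 \\ 0 & 0 & -q z_n & 0 \end{pmatrix}$$

where the aspect-keeping branch substitutes $h \cdot \mathit{aspect}$ for $w$ in the first entry.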
tResult cVideoViewMixin::Calibrate(const tCameraProjection* pCalib)
{
m_sVideo.oCamera.m_fFocalX = pCalib->f32FocalX;
m_sVideo.oCamera.m_fFocalY = pCalib->f32FocalY;
m_sVideo.oCamera.m_fPosX = pCalib->f32PosX;
m_sVideo.oCamera.m_fPosY = pCalib->f32PosY;
m_sVideo.oCamera.m_fPosZ = pCalib->f32PosZ;
m_sVideo.oCamera.m_fPrincipalX = pCalib->f32PrincipalX;
m_sVideo.oCamera.m_fPrincipalY = pCalib->f32PrincipalY;
m_sVideo.oCamera.m_fRotX = pCalib->f32RotX;
m_sVideo.oCamera.m_fRotY = pCalib->f32RotY;
m_sVideo.oCamera.m_fRotZ = pCalib->f32RotZ;
m_sVideo.oCamera.m_nResX = m_sVideo.sFormat.nWidth;
m_sVideo.oCamera.m_nResY = m_sVideo.sFormat.nHeight;
UpdateCamAndScreen();
if (m_sVideo.bCameraView)
{
SetUpCamera();
}
RETURN_NOERROR;
}
tResult cVideoViewMixin::UpdateCamAndScreen()
{
tFloat64 fScreenDist = m_sVideo.bCameraView ? m_fFarClipping * 0.95 : m_sVideo.fScreenDist;
m_sVideo.vPosition.x() = m_sVideo.oCamera.m_fPosX;
m_sVideo.vPosition.y() = m_sVideo.oCamera.m_fPosY;
m_sVideo.vPosition.z() = m_sVideo.oCamera.m_fPosZ;
m_sVideo.pTransform->setPosition(m_sVideo.vPosition);
m_sVideo.pTransform->setAttitude(osg::Quat(m_sVideo.oCamera.m_fRotX, osg::Vec3(1, 0, 0),
m_sVideo.oCamera.m_fRotZ, osg::Vec3(0, 0, 1),
m_sVideo.oCamera.m_fRotY, osg::Vec3(0, 1, 0)));
//calculate screen position
tFloat64 fDown = m_sVideo.oCamera.m_nResY - m_sVideo.oCamera.m_fPrincipalY;
tFloat64 fUp = m_sVideo.oCamera.m_nResY - fDown;
tFloat64 fUpV = fUp / m_sVideo.oCamera.m_fFocalY;
tFloat64 fDownV = fDown / m_sVideo.oCamera.m_fFocalY;
tFloat64 fRight = m_sVideo.oCamera.m_nResX - m_sVideo.oCamera.m_fPrincipalX;
tFloat64 fLeft = m_sVideo.oCamera.m_nResX - fRight;
tFloat64 fRightV = fRight / m_sVideo.oCamera.m_fFocalX;
tFloat64 fLeftV = fLeft / m_sVideo.oCamera.m_fFocalX;
osg::Vec3Array* pCoords = new osg::Vec3Array;
pCoords->push_back(osg::Vec3(fScreenDist,
fScreenDist * -fRightV,
fScreenDist * fUpV));
pCoords->push_back(osg::Vec3(fScreenDist,
fScreenDist * -fRightV,
fScreenDist * -fDownV));
pCoords->push_back(osg::Vec3(fScreenDist,
fScreenDist * fLeftV,
fScreenDist * -fDownV));
pCoords->push_back(osg::Vec3(fScreenDist,
fScreenDist * fLeftV,
fScreenDist * fUpV));
m_sVideo.pVideoGeometry->setVertexArray(pCoords);
m_sVideo.pVideoGeometry->dirtyDisplayList();
RETURN_NOERROR;
}
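A quick sanity check with the default camera set up in CreateVideoWall (320x240, principal point (160, 120), focal lengths 320): fDown = 240 - 120 = 120, fUp = 120, so fUpV = fDownV = 120/320 = 0.375 and fRightV = fLeftV = 160/320 = 0.5. At the default screen distance of 20 the quad therefore spans ±10 sideways and ±7.5 vertically at x = 20, which exactly fills the camera's field of view.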
bool cVideoViewMixin::handle(const osgGA::GUIEventAdapter & ea, osgGA::GUIActionAdapter & /*aa*/, osg::Object *, osg::NodeVisitor *)
{
if (ea.getEventType() & osgGA::GUIEventAdapter::KEYDOWN)
{
m_pIViewer->Lock();
tBool bSetup = tTrue;
switch (ea.getKey())
{
// case 'a' : sVideo.fScreenModY += 0.05; break;
// case 'd' : sVideo.fScreenModY -= 0.05; break;
// case 'w' : sVideo.fScreenModZ += 0.05; break;
// case 'x' : sVideo.fScreenModZ -= 0.05; break;
//
// case 'q' : sVideo.fScreenPosY += 0.05; break;
// case 'y' : sVideo.fScreenPosY -= 0.05; break;
//
// case 'e' : sVideo.fScreenPosX += 0.05; break;
// case 'c' : sVideo.fScreenPosX -= 0.05; break;
//
// case 'r' : sVideo.oCamera.m_fFocalY += 50;break;
// case 'v' : sVideo.oCamera.m_fFocalY -= 50;break;
//
// case 't' : sVideo.fScreenModX += 0.5; break;
// case 'b' : sVideo.fScreenModX -= 0.5; break;
//
// case 'z' : sVideo.oCamera.m_fRotY += 0.002; break;
// case 'h' : sVideo.oCamera.m_fRotY -= 0.002; break;
// case 'u' : sVideo.oCamera.m_fRotX += 0.002; break;
// case 'j' : sVideo.oCamera.m_fRotX -= 0.002; break;
// case 'i' : sVideo.oCamera.m_fRotZ += 0.002; break;
// case 'k' : sVideo.oCamera.m_fRotZ -= 0.002; break;
case 'p' :
{
SetCameraView(tTrue);
}
break;
default:
bSetup = tFalse;
break;
}
if (bSetup)
{
UpdateCamAndScreen();
SetUpCamera();
}
m_pIViewer->Unlock();
}
if (ea.getEventType() & osgGA::GUIEventAdapter::RESIZE)
{
if (m_sVideo.pGCLCam.get())
{
UpdateGCLCam(ea.getWindowWidth(), ea.getWindowHeight());
}
}
return false;
}
tResult cVideoViewMixin::UpdateGCLCam(tInt nVW, tInt nVH)
{
tInt nWidth = m_sVideo.sFormat.nWidth;
tInt nHeight = m_sVideo.sFormat.nHeight;
osg::Matrixd oVMatrix;
if (nVW < nWidth && m_sVideo.sFormat.nWidth > nVW)
{
oVMatrix(0,0) = nVW / (tFloat64) m_sVideo.sFormat.nWidth; //scale down to fb size
}
if (nVH < nHeight && m_sVideo.sFormat.nHeight > nVH)
{
oVMatrix(1,1) = nVH / (tFloat64) m_sVideo.sFormat.nHeight; //scale down to fb size
}
//move it down (FB region is in the lower left)
oVMatrix(3,1) = nHeight - MIN(nVH, m_sVideo.sFormat.nHeight) < 0 ? 0 : nHeight - MIN(nVH, m_sVideo.sFormat.nHeight);
m_sVideo.pGCLCam->setViewMatrix(oVMatrix);
osg::Vec2Array* pTexCoords = new osg::Vec2Array(4);
tFloat64 fX = MIN(nVW, m_sVideo.sFormat.nWidth) / (tFloat64) nWidth;
tFloat64 fY = MIN(nVH, m_sVideo.sFormat.nHeight) / (tFloat64) nHeight;
(*pTexCoords)[0].set(fX, fY);
(*pTexCoords)[1].set(fX, 0.0);
(*pTexCoords)[2].set(0.0, 0.0);
(*pTexCoords)[3].set(0.0, fY);
m_sVideo.pVideoGeometry->setTexCoordArray(0, pTexCoords);
RETURN_NOERROR;
}
tResult cVideoViewMixin::ShowVideo(tBool bShow)
{
if (bShow == m_sVideo.bShowVideo)
{
RETURN_NOERROR;
}
if (!bShow)
{
m_sVideo.pTransform->removeChild(m_sVideo.pVideo.get());
m_sVideo.pTransform->removeChild(m_sVideo.pGCLCam.get());
}
else
{
if (m_sVideo.pGCLCam.get())
{
m_sVideo.pTransform->addChild(m_sVideo.pGCLCam.get());
}
m_sVideo.pTransform->addChild(m_sVideo.pVideo.get());
}
m_sVideo.bShowVideo = bShow;
RETURN_NOERROR;
}
tResult cVideoViewMixin::SetCameraView(tBool bCameraView)
{
if (bCameraView == m_sVideo.bCameraView)
{
RETURN_NOERROR;
}
m_sVideo.bCameraView = bCameraView;
if (!bCameraView) // restore normal camera
{
if (m_pOldManip.get())
{
m_pViewer->getCamera()->setProjectionMatrix(m_oOldProjectionMatrix);
if (m_bShowCameraSymbol)
{
m_sVideo.pTransform->addChild(m_sVideo.pCam.get());
}
m_pViewer->setCameraManipulator(m_pOldManip.get());
m_pOldManip.release();
}
}
else
{
if (!m_pOldManip.get())
{
m_pOldManip = m_pViewer->getCameraManipulator();
m_oOldProjectionMatrix = m_pViewer->getCamera()->getProjectionMatrix();
auto pTracker = new cCameraManipulator;
pTracker->setTrackNode(m_sVideo.pTransform->getChild(0));
m_pViewer->setCameraManipulator(pTracker);
}
m_sVideo.pTransform->removeChild(m_sVideo.pCam.get());
SetUpCamera();
}
RETURN_NOERROR;
}
void cExtDrawCallback::operator () (osg::RenderInfo& renderInfo) const
{
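// deferred rebuild: the format change is applied here, inside the draw traversal,
// where a current GL context is available for (re)creating textures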
if (m_pMixin->m_sVideo.bFormatChanged)
{
m_pMixin->CreateVideoWall();
}
if (m_pPrevious)
{
(*m_pPrevious)(renderInfo);
}
}