Merge pull request #864 from erwincoumans/master

add pybullet getCameraImage, replacing renderImage, cleaner API

erwincoumans, 2016-11-17 18:18:10 -08:00, committed by GitHub
commit d9ebe381e9
11 changed files with 664 additions and 261 deletions
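The new API splits rendering into explicit matrix construction plus a single getCameraImage call. A minimal sketch of the new call sequence (hypothetical scene setup; assumes a pybullet build that includes this commit):

import pybullet

pybullet.connect(pybullet.DIRECT)
pybullet.loadURDF("r2d2.urdf")

viewMatrix = pybullet.computeViewMatrix(cameraEyePosition=[1, 1, 1], cameraTargetPosition=[0, 0, 0], cameraUpVector=[0, 0, 1])
projectionMatrix = pybullet.computeProjectionMatrixFOV(fov=60, aspect=320./240., nearVal=0.01, farVal=100)
#getCameraImage returns a 5-tuple: width, height, RGBA pixels, depth buffer, segmentation mask
img_arr = pybullet.getCameraImage(320, 240, viewMatrix, projectionMatrix)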

Binary file not shown.

View File

@ -1232,209 +1232,244 @@ void b3RequestCameraImageSetCameraMatrices(b3SharedMemoryCommandHandle commandHa
command->m_updateFlags |= REQUEST_PIXEL_ARGS_HAS_CAMERA_MATRICES;
}
void b3RequestCameraImageSetLightDirection(b3SharedMemoryCommandHandle commandHandle, const float lightDirection[3])
{
struct SharedMemoryCommand* command = (struct SharedMemoryCommand*) commandHandle;
b3Assert(command);
b3Assert(command->m_type == CMD_REQUEST_CAMERA_IMAGE_DATA);
for (int i = 0; i<3; i++)
{
command->m_requestPixelDataArguments.m_lightDirection[i] = lightDirection[i];
}
command->m_updateFlags |= REQUEST_PIXEL_ARGS_SET_LIGHT_DIRECTION;
}
void b3ComputeViewMatrixFromPositions(const float cameraPosition[3], const float cameraTargetPosition[3], const float cameraUp[3], float viewMatrix[16])
{
b3Vector3 eye = b3MakeVector3(cameraPosition[0], cameraPosition[1], cameraPosition[2]);
b3Vector3 center = b3MakeVector3(cameraTargetPosition[0], cameraTargetPosition[1], cameraTargetPosition[2]);
b3Vector3 up = b3MakeVector3(cameraUp[0], cameraUp[1], cameraUp[2]);
b3Vector3 f = (center - eye).normalized();
b3Vector3 u = up.normalized();
b3Vector3 s = (f.cross(u)).normalized();
u = s.cross(f);
viewMatrix[0 * 4 + 0] = s.x;
viewMatrix[1 * 4 + 0] = s.y;
viewMatrix[2 * 4 + 0] = s.z;
viewMatrix[0 * 4 + 1] = u.x;
viewMatrix[1 * 4 + 1] = u.y;
viewMatrix[2 * 4 + 1] = u.z;
viewMatrix[0 * 4 + 2] = -f.x;
viewMatrix[1 * 4 + 2] = -f.y;
viewMatrix[2 * 4 + 2] = -f.z;
viewMatrix[0 * 4 + 3] = 0.f;
viewMatrix[1 * 4 + 3] = 0.f;
viewMatrix[2 * 4 + 3] = 0.f;
viewMatrix[3 * 4 + 0] = -s.dot(eye);
viewMatrix[3 * 4 + 1] = -u.dot(eye);
viewMatrix[3 * 4 + 2] = f.dot(eye);
viewMatrix[3 * 4 + 3] = 1.f;
}
void b3ComputeViewMatrixFromYawPitchRoll(const float cameraTargetPosition[3], float distance, float yaw, float pitch, float roll, int upAxis, float viewMatrix[16])
{
b3Vector3 camUpVector;
b3Vector3 camForward;
b3Vector3 camPos;
b3Vector3 camTargetPos = b3MakeVector3(cameraTargetPosition[0], cameraTargetPosition[1], cameraTargetPosition[2]);
b3Vector3 eyePos = b3MakeVector3(0, 0, 0);
int forwardAxis(-1);
{
switch (upAxis)
{
case 1:
{
forwardAxis = 0;
eyePos[forwardAxis] = -distance;
camForward = b3MakeVector3(eyePos[0], eyePos[1], eyePos[2]);
if (camForward.length2() < B3_EPSILON)
{
camForward.setValue(1.f, 0.f, 0.f);
}
else
{
camForward.normalize();
}
b3Scalar rollRad = roll * b3Scalar(0.01745329251994329547);
b3Quaternion rollRot(camForward, rollRad);
camUpVector = b3QuatRotate(rollRot, b3MakeVector3(0, 1, 0));
//gLightPos = b3MakeVector3(-50.f,100,30);
break;
}
case 2:
{
forwardAxis = 1;
eyePos[forwardAxis] = -distance;
camForward = b3MakeVector3(eyePos[0], eyePos[1], eyePos[2]);
if (camForward.length2() < B3_EPSILON)
{
camForward.setValue(1.f, 0.f, 0.f);
}
else
{
camForward.normalize();
}
b3Scalar rollRad = roll * b3Scalar(0.01745329251994329547);
b3Quaternion rollRot(camForward, rollRad);
camUpVector = b3QuatRotate(rollRot, b3MakeVector3(0, 0, 1));
//gLightPos = b3MakeVector3(-50.f,30,100);
break;
}
default:
{
//b3Assert(0);
return;
}
};
}
b3Scalar yawRad = yaw * b3Scalar(0.01745329251994329547);// rads per deg
b3Scalar pitchRad = pitch * b3Scalar(0.01745329251994329547);// rads per deg
b3Quaternion pitchRot(camUpVector, pitchRad);
b3Vector3 right = camUpVector.cross(camForward);
b3Quaternion yawRot(right, -yawRad);
eyePos = b3Matrix3x3(pitchRot) * b3Matrix3x3(yawRot) * eyePos;
camPos = eyePos;
camPos += camTargetPos;
float camPosf[4] = { camPos[0],camPos[1],camPos[2],0 };
float camPosTargetf[4] = { camTargetPos[0],camTargetPos[1],camTargetPos[2],0 };
float camUpf[4] = { camUpVector[0],camUpVector[1],camUpVector[2],0 };
b3ComputeViewMatrixFromPositions(camPosf, camPosTargetf, camUpf,viewMatrix);
}
void b3ComputeProjectionMatrix(float left, float right, float bottom, float top, float nearVal, float farVal, float projectionMatrix[16])
{
projectionMatrix[0 * 4 + 0] = (float(2) * nearVal) / (right - left);
projectionMatrix[0 * 4 + 1] = float(0);
projectionMatrix[0 * 4 + 2] = float(0);
projectionMatrix[0 * 4 + 3] = float(0);
projectionMatrix[1 * 4 + 0] = float(0);
projectionMatrix[1 * 4 + 1] = (float(2) * nearVal) / (top - bottom);
projectionMatrix[1 * 4 + 2] = float(0);
projectionMatrix[1 * 4 + 3] = float(0);
projectionMatrix[2 * 4 + 0] = (right + left) / (right - left);
projectionMatrix[2 * 4 + 1] = (top + bottom) / (top - bottom);
projectionMatrix[2 * 4 + 2] = -(farVal + nearVal) / (farVal - nearVal);
projectionMatrix[2 * 4 + 3] = float(-1);
projectionMatrix[3 * 4 + 0] = float(0);
projectionMatrix[3 * 4 + 1] = float(0);
projectionMatrix[3 * 4 + 2] = -(float(2) * farVal * nearVal) / (farVal - nearVal);
projectionMatrix[3 * 4 + 3] = float(0);
}
void b3ComputeProjectionMatrixFOV(float fov, float aspect, float nearVal, float farVal, float projectionMatrix[16])
{
float yScale = 1.0 / tan((3.14159265358979 / 180.0) * fov / 2);
float xScale = yScale / aspect;
projectionMatrix[0 * 4 + 0] = xScale;
projectionMatrix[0 * 4 + 1] = float(0);
projectionMatrix[0 * 4 + 2] = float(0);
projectionMatrix[0 * 4 + 3] = float(0);
projectionMatrix[1 * 4 + 0] = float(0);
projectionMatrix[1 * 4 + 1] = yScale;
projectionMatrix[1 * 4 + 2] = float(0);
projectionMatrix[1 * 4 + 3] = float(0);
projectionMatrix[2 * 4 + 0] = 0;
projectionMatrix[2 * 4 + 1] = 0;
projectionMatrix[2 * 4 + 2] = (nearVal + farVal) / (nearVal - farVal);
projectionMatrix[2 * 4 + 3] = float(-1);
projectionMatrix[3 * 4 + 0] = float(0);
projectionMatrix[3 * 4 + 1] = float(0);
projectionMatrix[3 * 4 + 2] = (float(2) * farVal * nearVal) / (nearVal - farVal);
projectionMatrix[3 * 4 + 3] = float(0);
}
void b3RequestCameraImageSetViewMatrix2(b3SharedMemoryCommandHandle commandHandle, const float cameraTargetPosition[3], float distance, float yaw, float pitch, float roll, int upAxis)
{
struct SharedMemoryCommand* command = (struct SharedMemoryCommand*) commandHandle;
b3Assert(command);
b3Assert(command->m_type == CMD_REQUEST_CAMERA_IMAGE_DATA);
b3ComputeViewMatrixFromYawPitchRoll(cameraTargetPosition, distance, yaw, pitch, roll, upAxis, command->m_requestPixelDataArguments.m_viewMatrix);
command->m_updateFlags |= REQUEST_PIXEL_ARGS_HAS_CAMERA_MATRICES;
}
void b3RequestCameraImageSetViewMatrix(b3SharedMemoryCommandHandle commandHandle, const float cameraPosition[3], const float cameraTargetPosition[3], const float cameraUp[3])
{
struct SharedMemoryCommand* command = (struct SharedMemoryCommand*) commandHandle;
b3Assert(command);
b3Assert(command->m_type == CMD_REQUEST_CAMERA_IMAGE_DATA);
b3ComputeViewMatrixFromPositions(cameraPosition, cameraTargetPosition, cameraUp, command->m_requestPixelDataArguments.m_viewMatrix);
command->m_updateFlags |= REQUEST_PIXEL_ARGS_HAS_CAMERA_MATRICES;
}
void b3RequestCameraImageSetProjectionMatrix(b3SharedMemoryCommandHandle commandHandle, float left, float right, float bottom, float top, float nearVal, float farVal)
{
struct SharedMemoryCommand* command = (struct SharedMemoryCommand*) commandHandle;
b3Assert(command);
b3Assert(command->m_type == CMD_REQUEST_CAMERA_IMAGE_DATA);
b3ComputeProjectionMatrix(left, right, bottom, top, nearVal, farVal, command->m_requestPixelDataArguments.m_projectionMatrix);
command->m_updateFlags |= REQUEST_PIXEL_ARGS_HAS_CAMERA_MATRICES;
}
void b3RequestCameraImageSetFOVProjectionMatrix(b3SharedMemoryCommandHandle commandHandle, float fov, float aspect, float nearVal, float farVal)
{
struct SharedMemoryCommand* command = (struct SharedMemoryCommand*) commandHandle;
b3Assert(command);
b3Assert(command->m_type == CMD_REQUEST_CAMERA_IMAGE_DATA);
b3ComputeProjectionMatrixFOV(fov, aspect, nearVal, farVal, command->m_requestPixelDataArguments.m_projectionMatrix);
command->m_updateFlags |= REQUEST_PIXEL_ARGS_HAS_CAMERA_MATRICES;
}
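b3ComputeViewMatrixFromPositions follows the gluLookAt construction and stores the result in OpenGL's column-major float[16] layout, with the translation terms -s.dot(eye), -u.dot(eye), f.dot(eye) at indices 12-14. A sketch that checks this layout through the new computeViewMatrix binding (assumes numpy is available; tolerances are loose because the helpers work in 32-bit floats):

import numpy as np
import pybullet

view = pybullet.computeViewMatrix([1, 1, 1], [0, 0, 0], [0, 0, 1])
V = np.array(view).reshape(4, 4).T #transpose the column-major storage into a row-major matrix
eye_cam = V.dot([1, 1, 1, 1])      #the eye must map to the camera-space origin
target_cam = V.dot([0, 0, 0, 1])   #the target must land on the negative Z axis
assert np.allclose(eye_cam[:3], 0, atol=1e-5)
assert abs(target_cam[0]) < 1e-5 and abs(target_cam[1]) < 1e-5 and target_cam[2] < 0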

View File

@ -95,14 +95,30 @@ int b3GetDebugItemUniqueId(b3SharedMemoryStatusHandle statusHandle);
///request an image from a simulated camera, using a software renderer.
b3SharedMemoryCommandHandle b3InitRequestCameraImage(b3PhysicsClientHandle physClient);
void b3RequestCameraImageSetCameraMatrices(b3SharedMemoryCommandHandle command, float viewMatrix[16], float projectionMatrix[16]);
void b3RequestCameraImageSetPixelResolution(b3SharedMemoryCommandHandle command, int width, int height );
void b3RequestCameraImageSetLightDirection(b3SharedMemoryCommandHandle commandHandle, const float lightDirection[3]);
void b3RequestCameraImageSelectRenderer(b3SharedMemoryCommandHandle commandHandle, int renderer);
void b3GetCameraImageData(b3PhysicsClientHandle physClient, struct b3CameraImageData* imageData);
///compute a view matrix, helper function for b3RequestCameraImageSetCameraMatrices
void b3ComputeViewMatrixFromPositions(const float cameraPosition[3], const float cameraTargetPosition[3], const float cameraUp[3], float viewMatrix[16]);
void b3ComputeViewMatrixFromYawPitchRoll(const float cameraTargetPosition[3], float distance, float yaw, float pitch, float roll, int upAxis, float viewMatrix[16]);
///compute a projection matrix, helper function for b3RequestCameraImageSetCameraMatrices
void b3ComputeProjectionMatrix(float left, float right, float bottom, float top, float nearVal, float farVal, float projectionMatrix[16]);
void b3ComputeProjectionMatrixFOV(float fov, float aspect, float nearVal, float farVal, float projectionMatrix[16]);
/* obsolete, please use b3ComputeViewProjectionMatrices */
void b3RequestCameraImageSetViewMatrix(b3SharedMemoryCommandHandle command, const float cameraPosition[3], const float cameraTargetPosition[3], const float cameraUp[3]);
/* obsolete, please use b3ComputeViewProjectionMatrices */
void b3RequestCameraImageSetViewMatrix2(b3SharedMemoryCommandHandle commandHandle, const float cameraTargetPosition[3], float distance, float yaw, float pitch, float roll, int upAxis);
/* obsolete, please use b3ComputeViewProjectionMatrices */
void b3RequestCameraImageSetProjectionMatrix(b3SharedMemoryCommandHandle command, float left, float right, float bottom, float top, float nearVal, float farVal);
/* obsolete, please use b3ComputeViewProjectionMatrices */
void b3RequestCameraImageSetFOVProjectionMatrix(b3SharedMemoryCommandHandle command, float fov, float aspect, float nearVal, float farVal);
///request contact point information
b3SharedMemoryCommandHandle b3InitRequestContactPointInformation(b3PhysicsClientHandle physClient);
void b3SetContactFilterBodyA(b3SharedMemoryCommandHandle commandHandle, int bodyUniqueIdA);
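b3ComputeProjectionMatrixFOV is the symmetric special case of b3ComputeProjectionMatrix: with top = nearVal*tan(fov/2) and right = top*aspect, both produce the same matrix. A sketch verifying this through the new Python bindings (assumes numpy; the loose tolerance accounts for 32-bit float math):

import math
import numpy as np
import pybullet

fov, aspect, near, far = 60.0, 320.0 / 240.0, 0.01, 100.0
top = near * math.tan(math.radians(fov) / 2) #half-height of the near plane
right = top * aspect                         #half-width of the near plane
fromFOV = pybullet.computeProjectionMatrixFOV(fov, aspect, near, far)
fromFrustum = pybullet.computeProjectionMatrix(-right, right, -top, top, near, far)
assert np.allclose(fromFOV, fromFrustum, atol=1e-5)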

View File

@ -1416,6 +1416,10 @@ bool PhysicsServerCommandProcessor::processCommand(const struct SharedMemoryComm
{
// printf("-------------------------------\nRendering\n");
if ((clientCmd.m_updateFlags & REQUEST_PIXEL_ARGS_SET_LIGHT_DIRECTION) != 0)
{
m_data->m_visualConverter.setLightDirection(clientCmd.m_requestPixelDataArguments.m_lightDirection[0], clientCmd.m_requestPixelDataArguments.m_lightDirection[1], clientCmd.m_requestPixelDataArguments.m_lightDirection[2]);
}
if ((clientCmd.m_updateFlags & REQUEST_PIXEL_ARGS_HAS_CAMERA_MATRICES)!=0)
{

View File

@ -138,12 +138,14 @@ struct RequestPixelDataArgs
int m_startPixelIndex;
int m_pixelWidth;
int m_pixelHeight;
float m_lightDirection[3];
};
enum EnumRequestPixelDataUpdateFlags
{
REQUEST_PIXEL_ARGS_HAS_CAMERA_MATRICES=1,
REQUEST_PIXEL_ARGS_SET_PIXEL_WIDTH_HEIGHT=4,
REQUEST_PIXEL_ARGS_SET_LIGHT_DIRECTION=8,
//don't exceed (1<<15), because this enum is shared with EnumRenderer in SharedMemoryPublic.h
};

View File

@ -73,14 +73,17 @@ struct TinyRendererVisualShapeConverterInternalData
b3AlignedObjectArray<MyTexture2> m_textures;
b3AlignedObjectArray<float> m_depthBuffer;
b3AlignedObjectArray<int> m_segmentationMaskBuffer;
btVector3 m_lightDirection;
bool m_hasLightDirection;
SimpleCamera m_camera;
TinyRendererVisualShapeConverterInternalData()
:m_upAxis(2),
m_swWidth(START_WIDTH),
m_swHeight(START_HEIGHT),
m_rgbColorBuffer(START_WIDTH,START_HEIGHT,TGAImage::RGB),
m_hasLightDirection(false)
{
m_depthBuffer.resize(m_swWidth*m_swHeight);
m_segmentationMaskBuffer.resize(m_swWidth*m_swHeight,-1);
@ -108,7 +111,11 @@ TinyRendererVisualShapeConverter::~TinyRendererVisualShapeConverter()
delete m_data;
}
void TinyRendererVisualShapeConverter::setLightDirection(float x, float y, float z)
{
m_data->m_lightDirection.setValue(x, y, z);
m_data->m_hasLightDirection = true;
}
void convertURDFToVisualShape(const UrdfVisual* visual, const char* urdfPathPrefix, const btTransform& visualTransform, btAlignedObjectArray<GLInstanceVertex>& verticesOut, btAlignedObjectArray<int>& indicesOut, btAlignedObjectArray<MyTexture2>& texturesOut, b3VisualShapeData& visualShapeOut)
@ -677,16 +684,23 @@ void TinyRendererVisualShapeConverter::render(const float viewMat[16], const flo
btVector3 lightDirWorld(-5,200,-40);
if (m_data->m_hasLightDirection)
{
lightDirWorld = m_data->m_lightDirection;
}
else
{
switch (m_data->m_upAxis)
{
case 1:
lightDirWorld = btVector3(-50.f, 100, 30);
break;
case 2:
lightDirWorld = btVector3(-50.f, 30, 100);
break;
default: {}
};
}
lightDirWorld.normalize();
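With this change the TinyRenderer only falls back to the per-up-axis defaults above when no explicit light direction was supplied. From Python the override is a single optional keyword argument (hypothetical scene setup; the view/projection matrices are omitted here, which the binding allows):

import pybullet

pybullet.connect(pybullet.DIRECT)
pybullet.loadURDF("r2d2.urdf")
img_default = pybullet.getCameraImage(320, 240)                       #default, up-axis-dependent light
img_lit = pybullet.getCameraImage(320, 240, lightDirection=[0, 1, 0]) #explicit light direction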

View File

@ -32,7 +32,8 @@ struct TinyRendererVisualShapeConverter : public LinkVisualShapesConverter
void getWidthAndHeight(int& width, int& height);
void setWidthAndHeight(int width, int height);
void setLightDirection(float x, float y, float z);
void copyCameraImageData(unsigned char* pixelsRGBA, int rgbaBufferSizeInPixels, float* depthBuffer, int depthBufferSizeInPixels,int* segmentationMaskBuffer, int segmentationMaskSizeInPixels, int startPixelIndex, int* widthPtr, int* heightPtr, int* numPixelsCopied);
void render();

View File

@ -1377,15 +1377,18 @@ static int pybullet_internalSetMatrix(PyObject* objMat, float matrix[16]) {
PyObject* seq;
seq = PySequence_Fast(objMat, "expected a sequence");
if (seq)
{
len = PySequence_Size(objMat);
if (len == 16) {
for (i = 0; i < len; i++) {
matrix[i] = pybullet_internalGetFloatFromSequence(seq, i);
}
Py_DECREF(seq);
return 1;
}
Py_DECREF(seq);
}
return 0;
}
@ -1397,20 +1400,24 @@ static int pybullet_internalSetMatrix(PyObject* objMat, float matrix[16]) {
// vector - float[3] which will be set by values from objMat
static int pybullet_internalSetVector(PyObject* objVec, float vector[3]) {
int i, len;
PyObject* seq=0;
if (objVec==NULL)
return 0;
seq = PySequence_Fast(objVec, "expected a sequence");
if (seq)
{
len = PySequence_Size(objVec);
if (len == 3) {
for (i = 0; i < len; i++) {
vector[i] = pybullet_internalGetFloatFromSequence(seq, i);
}
Py_DECREF(seq);
return 1;
}
Py_DECREF(seq);
}
return 0;
}
@ -1422,15 +1429,18 @@ static int pybullet_internalSetVectord(PyObject* obVec, double vector[3]) {
return 0;
seq = PySequence_Fast(obVec, "expected a sequence");
if (seq)
{
len = PySequence_Size(obVec);
if (len == 3) {
for (i = 0; i < len; i++) {
vector[i] = pybullet_internalGetFloatFromSequence(seq, i);
}
Py_DECREF(seq);
return 1;
}
Py_DECREF(seq);
}
return 0;
}
@ -2196,6 +2206,302 @@ static PyObject* pybullet_getContactPointData(PyObject* self, PyObject* args, Py
return Py_None;
}
/// Render an image from the current timestep of the simulation; width and height are required, the other arguments are optional
// getCameraImage(w, h, view[16], projection[16], lightDir[3])
static PyObject* pybullet_getCameraImage(PyObject* self, PyObject* args, PyObject *keywds)
{
/// request an image from a simulated camera, using a software renderer.
struct b3CameraImageData imageData;
PyObject* objViewMat = 0, *objProjMat = 0, *lightDirObj = 0;
int width, height;
int size = PySequence_Size(args);
float viewMatrix[16];
float projectionMatrix[16];
float lightDir[3];
// initialize the command
b3SharedMemoryCommandHandle command;
if (0 == sm)
{
PyErr_SetString(SpamError, "Not connected to physics server.");
return NULL;
}
command = b3InitRequestCameraImage(sm);
// set camera resolution, optionally view, projection matrix, light direction
static char *kwlist[] = { "width", "height", "viewMatrix", "projectionMatrix", "lightDirection",NULL };
if (!PyArg_ParseTupleAndKeywords(args, keywds, "ii|OOO", kwlist, &width, &height, &objViewMat, &objProjMat, &lightDirObj))
{
return NULL;
}
b3RequestCameraImageSetPixelResolution(command, width, height);
// set camera matrices only if set matrix function succeeds
if (pybullet_internalSetMatrix(objViewMat, viewMatrix) && (pybullet_internalSetMatrix(objProjMat, projectionMatrix)))
{
b3RequestCameraImageSetCameraMatrices(command, viewMatrix, projectionMatrix);
}
//set light direction only if the conversion succeeds
if (pybullet_internalSetVector(lightDirObj, lightDir))
{
b3RequestCameraImageSetLightDirection(command, lightDir);
}
if (b3CanSubmitCommand(sm))
{
b3SharedMemoryStatusHandle statusHandle;
int statusType;
// b3RequestCameraImageSelectRenderer(command,ER_BULLET_HARDWARE_OPENGL);
statusHandle = b3SubmitClientCommandAndWaitStatus(sm, command);
statusType = b3GetStatusType(statusHandle);
if (statusType == CMD_CAMERA_IMAGE_COMPLETED) {
PyObject* item2;
PyObject* pyResultList; // store 5 elements in this result: width,
// height, rgbData, depth, and segmentation mask
#ifdef PYBULLET_USE_NUMPY
PyObject* pyRGB;
PyObject* pyDep;
PyObject* pySeg;
int i, j, p;
b3GetCameraImageData(sm, &imageData);
// TODO(hellojas): error handling if image size is 0
pyResultList = PyTuple_New(5);
PyTuple_SetItem(pyResultList, 0, PyInt_FromLong(imageData.m_pixelWidth));
PyTuple_SetItem(pyResultList, 1, PyInt_FromLong(imageData.m_pixelHeight));
int bytesPerPixel = 4; // Red, Green, Blue, and Alpha each 8 bit values
npy_intp rgb_dims[3] = { imageData.m_pixelHeight, imageData.m_pixelWidth,
bytesPerPixel };
npy_intp dep_dims[2] = { imageData.m_pixelHeight, imageData.m_pixelWidth };
npy_intp seg_dims[2] = { imageData.m_pixelHeight, imageData.m_pixelWidth };
pyRGB = PyArray_SimpleNew(3, rgb_dims, NPY_UINT8);
pyDep = PyArray_SimpleNew(2, dep_dims, NPY_FLOAT32);
pySeg = PyArray_SimpleNew(2, seg_dims, NPY_INT32);
memcpy(PyArray_DATA(pyRGB), imageData.m_rgbColorData,
imageData.m_pixelHeight * imageData.m_pixelWidth * bytesPerPixel);
memcpy(PyArray_DATA(pyDep), imageData.m_depthValues,
imageData.m_pixelHeight * imageData.m_pixelWidth * sizeof(float));
memcpy(PyArray_DATA(pySeg), imageData.m_segmentationMaskValues,
imageData.m_pixelHeight * imageData.m_pixelWidth * sizeof(int));
PyTuple_SetItem(pyResultList, 2, pyRGB);
PyTuple_SetItem(pyResultList, 3, pyDep);
PyTuple_SetItem(pyResultList, 4, pySeg);
#else//PYBULLET_USE_NUMPY
PyObject* pylistRGB;
PyObject* pylistDep;
PyObject* pylistSeg;
int i, j, p;
b3GetCameraImageData(sm, &imageData);
// TODO(hellojas): error handling if image size is 0
pyResultList = PyTuple_New(5);
PyTuple_SetItem(pyResultList, 0, PyInt_FromLong(imageData.m_pixelWidth));
PyTuple_SetItem(pyResultList, 1, PyInt_FromLong(imageData.m_pixelHeight));
{
PyObject* item;
int bytesPerPixel = 4; // Red, Green, Blue, and Alpha each 8 bit values
int num =
bytesPerPixel * imageData.m_pixelWidth * imageData.m_pixelHeight;
pylistRGB = PyTuple_New(num);
pylistDep =
PyTuple_New(imageData.m_pixelWidth * imageData.m_pixelHeight);
pylistSeg =
PyTuple_New(imageData.m_pixelWidth * imageData.m_pixelHeight);
for (i = 0; i < imageData.m_pixelWidth; i++) {
for (j = 0; j < imageData.m_pixelHeight; j++) {
// TODO(hellojas): validate depth values make sense
int depIndex = i + j * imageData.m_pixelWidth;
{
item = PyFloat_FromDouble(imageData.m_depthValues[depIndex]);
PyTuple_SetItem(pylistDep, depIndex, item);
}
{
item2 =
PyLong_FromLong(imageData.m_segmentationMaskValues[depIndex]);
PyTuple_SetItem(pylistSeg, depIndex, item2);
}
for (p = 0; p < bytesPerPixel; p++) {
int pixelIndex =
bytesPerPixel * (i + j * imageData.m_pixelWidth) + p;
item = PyInt_FromLong(imageData.m_rgbColorData[pixelIndex]);
PyTuple_SetItem(pylistRGB, pixelIndex, item);
}
}
}
}
PyTuple_SetItem(pyResultList, 2, pylistRGB);
PyTuple_SetItem(pyResultList, 3, pylistDep);
PyTuple_SetItem(pyResultList, 4, pylistSeg);
#endif//PYBULLET_USE_NUMPY
return pyResultList;
}
}
Py_INCREF(Py_None);
return Py_None;
}
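getCameraImage always returns a 5-tuple; in PYBULLET_USE_NUMPY builds the pixel, depth and segmentation buffers arrive as numpy arrays rather than flat tuples. A sketch of unpacking the result, with viewMatrix and projectionMatrix from the compute helpers below:

w, h, rgb, dep, seg = pybullet.getCameraImage(320, 240, viewMatrix, projectionMatrix)
#rgb: h x w x 4 RGBA values, dep: h x w float depth values,
#seg: h x w int32 segmentation mask, -1 where no object was hit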
static PyObject* pybullet_computeViewMatrix(PyObject* self, PyObject* args, PyObject *keywds)
{
PyObject* camEyeObj = 0;
PyObject* camTargetPositionObj = 0;
PyObject* camUpVectorObj = 0;
float camEye[3];
float camTargetPosition[3];
float camUpVector[3];
// parse the camera eye position, target position and up vector
static char *kwlist[] = { "cameraEyePosition", "cameraTargetPosition", "cameraUpVector",NULL };
if (!PyArg_ParseTupleAndKeywords(args, keywds, "OOO", kwlist, &camEyeObj, &camTargetPositionObj, &camUpVectorObj))
{
return NULL;
}
if (pybullet_internalSetVector(camEyeObj, camEye) &&
pybullet_internalSetVector(camTargetPositionObj, camTargetPosition) &&
pybullet_internalSetVector(camUpVectorObj, camUpVector))
{
float viewMatrix[16];
PyObject* pyResultList=0;
int i;
b3ComputeViewMatrixFromPositions(camEye, camTargetPosition, camUpVector, viewMatrix);
pyResultList = PyTuple_New(16);
for (i = 0; i < 16; i++)
{
PyObject* item = PyFloat_FromDouble(viewMatrix[i]);
PyTuple_SetItem(pyResultList, i, item);
}
return pyResultList;
}
PyErr_SetString(SpamError, "Error in computeViewMatrix.");
return NULL;
}
///compute a view matrix, helper function for b3RequestCameraImageSetCameraMatrices
static PyObject* pybullet_computeViewMatrixFromYawPitchRoll(PyObject* self, PyObject* args, PyObject *keywds)
{
PyObject* cameraTargetPositionObj = 0;
float cameraTargetPosition[3];
float distance, yaw, pitch, roll;
int upAxisIndex;
float viewMatrix[16];
PyObject* pyResultList = 0;
int i;
// parse the camera target position, distance, yaw, pitch, roll and up axis index
static char *kwlist[] = { "cameraTargetPosition", "distance", "yaw", "pitch", "roll", "upAxisIndex" ,NULL };
if (!PyArg_ParseTupleAndKeywords(args, keywds, "Offffi", kwlist, &cameraTargetPositionObj, &distance,&yaw,&pitch,&roll, &upAxisIndex))
{
return NULL;
}
if (!pybullet_internalSetVector(cameraTargetPositionObj, cameraTargetPosition))
{
PyErr_SetString(SpamError, "Cannot convert cameraTargetPosition.");
return NULL;
}
b3ComputeViewMatrixFromYawPitchRoll(cameraTargetPosition, distance, yaw, pitch, roll, upAxisIndex, viewMatrix);
pyResultList = PyTuple_New(16);
for (i = 0; i < 16; i++)
{
PyObject* item = PyFloat_FromDouble(viewMatrix[i]);
PyTuple_SetItem(pyResultList, i, item);
}
return pyResultList;
}
///compute a projection matrix, helper function for b3RequestCameraImageSetCameraMatrices
static PyObject* pybullet_computeProjectionMatrix(PyObject* self, PyObject* args, PyObject *keywds)
{
PyObject* pyResultList = 0;
float left;
float right;
float bottom;
float top;
float nearVal;
float farVal;
float projectionMatrix[16];
int i;
// parse the frustum left/right/bottom/top/nearVal/farVal values
static char *kwlist[] = { "left", "right", "bottom", "top", "nearVal", "farVal" ,NULL };
if (!PyArg_ParseTupleAndKeywords(args, keywds, "ffffff", kwlist, &left, &right, &bottom, &top, &nearVal, &farVal))
{
return NULL;
}
b3ComputeProjectionMatrix(left, right, bottom, top, nearVal, farVal, projectionMatrix);
pyResultList = PyTuple_New(16);
for (i = 0; i < 16; i++)
{
PyObject* item = PyFloat_FromDouble(projectionMatrix[i]);
PyTuple_SetItem(pyResultList, i, item);
}
return pyResultList;
}
static PyObject* pybullet_computeProjectionMatrixFOV(PyObject* self, PyObject* args, PyObject *keywds)
{
float fov, aspect, nearVal, farVal;
PyObject* pyResultList = 0;
float projectionMatrix[16];
int i;
static char *kwlist[] = { "fov","aspect","nearVal","farVal",NULL };
if (!PyArg_ParseTupleAndKeywords(args, keywds, "ffff", kwlist, &fov, &aspect, &nearVal, &farVal))
{
return NULL;
}
b3ComputeProjectionMatrixFOV(fov, aspect, nearVal, farVal, projectionMatrix);
pyResultList = PyTuple_New(16);
for (i = 0; i < 16; i++)
{
PyObject* item = PyFloat_FromDouble(projectionMatrix[i]);
PyTuple_SetItem(pyResultList, i, item);
}
return pyResultList;
}
// Render an image from the current timestep of the simulation
//
// Examples:
@ -2219,7 +2525,7 @@ static PyObject* pybullet_getContactPointData(PyObject* self, PyObject* args, Py
// TODO(hellojas): fix image is cut off at head
// TODO(hellojas): should we add check to give minimum image resolution
// to see object based on camera position?
static PyObject* pybullet_renderImageObsolete(PyObject* self, PyObject* args) {
/// request an image from a simulated camera, using a software renderer.
struct b3CameraImageData imageData;
PyObject* objViewMat, *objProjMat;
@ -2277,7 +2583,8 @@ static PyObject* pybullet_renderImage(PyObject* self, PyObject* args) {
b3RequestCameraImageSetPixelResolution(command, width, height);
if (pybullet_internalSetVector(objCameraPos, cameraPos) &&
pybullet_internalSetVector(objTargetPos, targetPos) &&
pybullet_internalSetVector(objCameraUp, cameraUp))
{
b3RequestCameraImageSetViewMatrix(command, cameraPos, targetPos,
cameraUp);
} else {
@ -3020,15 +3327,35 @@ static PyMethodDef SpamMethods[] = {
"[x,y,z] in Cartesian coordinates, flag to select TORQUE_IN_LINK_FRAME or "
"TORQUE_IN_WORLD_FRAME coordinates"},
{"renderImage", pybullet_renderImage, METH_VARARGS,
"Render an image (given the pixel resolution width, height, camera view "
"matrix, projection matrix, near, and far values), and return the "
"8-8-8bit RGB pixel data and floating point depth values"
#ifdef PYBULLET_USE_NUMPY
" as NumPy arrays"
#endif
{"renderImage", pybullet_renderImageObsolete, METH_VARARGS,
"obsolete, please use getCameraImage and getViewProjectionMatrices instead"
},
{ "getCameraImage",(PyCFunction)pybullet_getCameraImage, METH_VARARGS| METH_KEYWORDS,
"Render an image (given the pixel resolution width, height, camera viewMatrix "
", projectionMatrix and lightDirection), and return the "
"8-8-8bit RGB pixel data and floating point depth values"
#ifdef PYBULLET_USE_NUMPY
" as NumPy arrays"
#endif
},
{ "computeViewMatrix", (PyCFunction)pybullet_computeViewMatrix, METH_VARARGS | METH_KEYWORDS,
"Compute a camera view matrix from the camera eye position, target position and up vector "
},
{ "computeViewMatrixFromYawPitchRoll",(PyCFunction)pybullet_computeViewMatrixFromYawPitchRoll, METH_VARARGS | METH_KEYWORDS,
"Compute a camera view matrix from the camera target position, distance, yaw, pitch, roll and up axis index "
},
{ "computeProjectionMatrix", (PyCFunction)pybullet_computeProjectionMatrix, METH_VARARGS | METH_KEYWORDS,
"Compute a camera projection matrix from screen left/right/bottom/top/near/far values"
},
{ "computeProjectionMatrixFOV", (PyCFunction)pybullet_computeProjectionMatrixFOV, METH_VARARGS | METH_KEYWORDS,
"Compute a camera projection matrix from fov, aspect ratio, near, far values"
},
{"getContactPoints", (PyCFunction)pybullet_getContactPointData, METH_VARARGS | METH_KEYWORDS,
"Return existing contact points after the stepSimulation command. "
"Optional arguments one or two object unique "

View File

@ -3,7 +3,7 @@ import time
#choose connection method: GUI, DIRECT, SHARED_MEMORY
pybullet.connect(pybullet.GUI)
pybullet.loadURDF("plane.urdf",0,0,-1)
#load URDF, given a relative or absolute file+path
obj = pybullet.loadURDF("r2d2.urdf")

View File

@ -5,7 +5,7 @@ import pybullet
pybullet.connect(pybullet.GUI)
pybullet.loadURDF("r2d2.urdf")
camTargetPos = [0.,0.,0.]
cameraUp = [0,0,1]
cameraPos = [1,1,1]
yaw = 40
@ -18,29 +18,28 @@ pixelWidth = 320
pixelHeight = 240
nearPlane = 0.01
farPlane = 1000
lightDirection = [0,1,0]
fov = 60
for pitch in range (0,360,10) :
	viewMatrix = pybullet.computeViewMatrixFromYawPitchRoll(camTargetPos, camDistance, yaw, pitch, roll, upAxisIndex)
	aspect = float(pixelWidth) / pixelHeight
	projectionMatrix = pybullet.computeProjectionMatrixFOV(fov, aspect, nearPlane, farPlane)
	img_arr = pybullet.getCameraImage(pixelWidth, pixelHeight, viewMatrix, projectionMatrix, lightDirection)
	w = img_arr[0] #width of the image, in pixels
	h = img_arr[1] #height of the image, in pixels
	rgb = img_arr[2] #color data RGB
	dep = img_arr[3] #depth data
	#print('width = %d height = %d' % (w,h))
	#reshape creates a numpy array
	np_img_arr = np.reshape(rgb, (h, w, 4))
	np_img_arr = np_img_arr*(1./255.)
	#show
	plt.imshow(np_img_arr, interpolation='none')
	plt.pause(0.01)
pybullet.resetSimulation()

View File

@ -1,3 +1,6 @@
#make sure to compile pybullet with PYBULLET_USE_NUMPY enabled, otherwise use testrender.py (slower but compatible without numpy)
import numpy as np
import matplotlib.pyplot as plt
import pybullet
@ -23,28 +26,30 @@ farPlane = 1000
fov = 60
main_start = time.time()
for pitch in range (0,360,10) :
	start = time.time()

	viewMatrix = pybullet.computeViewMatrixFromYawPitchRoll(camTargetPos, camDistance, yaw, pitch, roll, upAxisIndex)
	aspect = float(pixelWidth) / pixelHeight
	projectionMatrix = pybullet.computeProjectionMatrixFOV(fov, aspect, nearPlane, farPlane)
	img_arr = pybullet.getCameraImage(pixelWidth, pixelHeight, viewMatrix, projectionMatrix, [0,1,0])
	stop = time.time()
	print ("renderImage %f" % (stop - start))

	w = img_arr[0] #width of the image, in pixels
	h = img_arr[1] #height of the image, in pixels
	rgb = img_arr[2] #color data RGB
	dep = img_arr[3] #depth data
	print ('width = %d height = %d' % (w,h))

	#note that sending the data to matplotlib is really slow
	#show
	plt.imshow(rgb, interpolation='none')
	#plt.show()
	plt.pause(0.01)

main_stop = time.time()
print ("Total time %f" % (main_stop - main_start))
pybullet.resetSimulation()