Cycles: fix some update issues with camera motion blur, and do some more work
for getting object motion blur ready.
This commit is contained in:
@@ -27,39 +27,11 @@ enum ObjectTransform {
|
||||
OBJECT_DUPLI = 16
|
||||
};
|
||||
|
||||
__device_inline Transform object_fetch_transform(KernelGlobals *kg, int object, float time, enum ObjectTransform type)
|
||||
__device_inline Transform object_fetch_transform(KernelGlobals *kg, int object, enum ObjectTransform type)
|
||||
{
|
||||
Transform tfm;
|
||||
|
||||
#ifdef __OBJECT_MOTION__
|
||||
/* if we do motion blur */
|
||||
if(sd->flag & SD_OBJECT_MOTION) {
|
||||
/* fetch motion transforms */
|
||||
MotionTransform motion;
|
||||
|
||||
motion.pre.x = have_motion;
|
||||
motion.pre.y = kernel_tex_fetch(__objects, offset + 1);
|
||||
motion.pre.z = kernel_tex_fetch(__objects, offset + 2);
|
||||
motion.pre.w = kernel_tex_fetch(__objects, offset + 3);
|
||||
|
||||
motion.post.x = kernel_tex_fetch(__objects, offset + 4);
|
||||
motion.post.y = kernel_tex_fetch(__objects, offset + 5);
|
||||
motion.post.z = kernel_tex_fetch(__objects, offset + 6);
|
||||
motion.post.w = kernel_tex_fetch(__objects, offset + 7);
|
||||
|
||||
/* interpolate (todo: do only once per object) */
|
||||
transform_motion_interpolate(&tfm, &motion, time);
|
||||
|
||||
/* invert */
|
||||
if(type == OBJECT_INVERSE_TRANSFORM)
|
||||
tfm = transform_quick_inverse(tfm);
|
||||
|
||||
return tfm;
|
||||
}
|
||||
#endif
|
||||
|
||||
int offset = object*OBJECT_SIZE + (int)type;
|
||||
|
||||
Transform tfm;
|
||||
tfm.x = kernel_tex_fetch(__objects, offset + 0);
|
||||
tfm.y = kernel_tex_fetch(__objects, offset + 1);
|
||||
tfm.z = kernel_tex_fetch(__objects, offset + 2);
|
||||
@@ -68,12 +40,54 @@ __device_inline Transform object_fetch_transform(KernelGlobals *kg, int object,
|
||||
return tfm;
|
||||
}
|
||||
|
||||
#ifdef __OBJECT_MOTION__
/* Fetch the object-to-world transform for an object at a given shutter time,
 * for motion blur.
 *
 * kg:     kernel globals, used for texture fetches
 * object: object index into the __objects / __object_flag textures
 * time:   shutter time to interpolate the transform at
 * itfm:   optional output for the inverse transform (may be NULL)
 *
 * When the object is flagged with SD_OBJECT_MOTION, the pre/post motion
 * transforms are fetched from the object record (starting at the
 * OBJECT_TRANSFORM_MOTION_PRE slot) and interpolated at 'time'. Otherwise
 * the static transform is fetched and 'time' is ignored. */
__device_inline Transform object_fetch_transform_motion(KernelGlobals *kg, int object, float time, Transform *itfm)
{
	Transform tfm;

	int object_flag = kernel_tex_fetch(__object_flag, object);

	/* if we do motion blur */
	if(object_flag & SD_OBJECT_MOTION) {
		/* fetch motion transforms */
		MotionTransform motion;

		int offset = object*OBJECT_SIZE + (int)OBJECT_TRANSFORM_MOTION_PRE;

		motion.pre.x = kernel_tex_fetch(__objects, offset + 0);
		motion.pre.y = kernel_tex_fetch(__objects, offset + 1);
		motion.pre.z = kernel_tex_fetch(__objects, offset + 2);
		motion.pre.w = kernel_tex_fetch(__objects, offset + 3);

		motion.post.x = kernel_tex_fetch(__objects, offset + 4);
		motion.post.y = kernel_tex_fetch(__objects, offset + 5);
		motion.post.z = kernel_tex_fetch(__objects, offset + 6);
		motion.post.w = kernel_tex_fetch(__objects, offset + 7);

		/* interpolate between pre and post transforms at the given time */
		transform_motion_interpolate(&tfm, &motion, time);

		/* invert on request; transform_quick_inverse is used because the
		 * interpolated transform has no precomputed inverse stored */
		if(itfm)
			*itfm = transform_quick_inverse(tfm);
	}
	else {
		/* no motion: fall back to the static transform and its stored inverse */
		tfm = object_fetch_transform(kg, object, OBJECT_TRANSFORM);

		if(itfm)
			*itfm = object_fetch_transform(kg, object, OBJECT_INVERSE_TRANSFORM);
	}

	return tfm;
}
#endif
|
||||
|
||||
/* Transform a point *P from object space into world space, in place.
 *
 * With __OBJECT_MOTION__ enabled, the transform cached in ShaderData
 * (sd->ob_tfm) is used; otherwise the static object transform is fetched
 * on demand from the object record. */
__device_inline void object_position_transform(KernelGlobals *kg, ShaderData *sd, float3 *P)
{
#ifdef __OBJECT_MOTION__
	*P = transform_point(&sd->ob_tfm, *P);
#else
	Transform tfm = object_fetch_transform(kg, sd->object, OBJECT_TRANSFORM);
	*P = transform_point(&tfm, *P);
#endif
}
|
||||
@@ -83,7 +97,7 @@ __device_inline void object_inverse_position_transform(KernelGlobals *kg, Shader
|
||||
#ifdef __OBJECT_MOTION__
|
||||
*P = transform_point(&sd->ob_itfm, *P);
|
||||
#else
|
||||
Transform tfm = object_fetch_transform(kg, sd->object, TIME_INVALID, OBJECT_INVERSE_TRANSFORM);
|
||||
Transform tfm = object_fetch_transform(kg, sd->object, OBJECT_INVERSE_TRANSFORM);
|
||||
*P = transform_point(&tfm, *P);
|
||||
#endif
|
||||
}
|
||||
@@ -93,7 +107,7 @@ __device_inline void object_inverse_normal_transform(KernelGlobals *kg, ShaderDa
|
||||
#ifdef __OBJECT_MOTION__
|
||||
*N = normalize(transform_direction_transposed(&sd->ob_tfm, *N));
|
||||
#else
|
||||
Transform tfm = object_fetch_transform(kg, sd->object, TIME_INVALID, OBJECT_TRANSFORM);
|
||||
Transform tfm = object_fetch_transform(kg, sd->object, OBJECT_TRANSFORM);
|
||||
*N = normalize(transform_direction_transposed(&tfm, *N));
|
||||
#endif
|
||||
}
|
||||
@@ -103,7 +117,7 @@ __device_inline void object_normal_transform(KernelGlobals *kg, ShaderData *sd,
|
||||
#ifdef __OBJECT_MOTION__
|
||||
*N = normalize(transform_direction_transposed(&sd->ob_itfm, *N));
|
||||
#else
|
||||
Transform tfm = object_fetch_transform(kg, sd->object, TIME_INVALID, OBJECT_INVERSE_TRANSFORM);
|
||||
Transform tfm = object_fetch_transform(kg, sd->object, OBJECT_INVERSE_TRANSFORM);
|
||||
*N = normalize(transform_direction_transposed(&tfm, *N));
|
||||
#endif
|
||||
}
|
||||
@@ -113,7 +127,7 @@ __device_inline void object_dir_transform(KernelGlobals *kg, ShaderData *sd, flo
|
||||
#ifdef __OBJECT_MOTION__
|
||||
*D = transform_direction(&sd->ob_tfm, *D);
|
||||
#else
|
||||
Transform tfm = object_fetch_transform(kg, sd->object, 0.0f, OBJECT_TRANSFORM);
|
||||
Transform tfm = object_fetch_transform(kg, sd->object, OBJECT_TRANSFORM);
|
||||
*D = transform_direction(&tfm, *D);
|
||||
#endif
|
||||
}
|
||||
/* Return the object location in world space, taken from the .w components
 * of the transform rows (the translation part as used by transform_point).
 *
 * With __OBJECT_MOTION__ enabled, the transform cached in ShaderData
 * (sd->ob_tfm) is used; otherwise the static object transform is fetched
 * on demand from the object record. */
__device_inline float3 object_location(KernelGlobals *kg, ShaderData *sd)
{
#ifdef __OBJECT_MOTION__
	return make_float3(sd->ob_tfm.x.w, sd->ob_tfm.y.w, sd->ob_tfm.z.w);
#else
	Transform tfm = object_fetch_transform(kg, sd->object, OBJECT_TRANSFORM);
	return make_float3(tfm.x.w, tfm.y.w, tfm.z.w);
#endif
}
|
||||
|
Reference in New Issue
Block a user