Cycles Volume Render: add support for overlapping volume objects.
This works pretty much as you would expect: overlapping volume objects give a denser volume.

What has changed is that world volume shaders are now active everywhere; they are no longer excluded inside objects. This may not always be desirable and we need to think of better control over it. In some cases you clearly want it to happen, for example when rendering a fire in a foggy environment. In other cases, like the inside of a house, you may not want any fog, but it doesn't seem possible in general for the renderer to automatically determine what is inside or outside of the house.

This is implemented using a simple fixed-size array of shader/object ID pairs, limited to at most 15 overlapping objects. The closures from all shaders are put into a single closure array, exactly as if an add shader had been used to combine them.
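The fixed-size stack of object/shader pairs is the core of the change. Below is a minimal, standalone C sketch of that bookkeeping, using the VolumeStack, SHADER_NO_ID and VOLUME_STACK_SIZE definitions from the diff; the stack_enter/stack_exit helpers, the main() driver and the object/shader IDs are illustrative stand-ins, not the actual kernel code (the kernel does this in kernel_volume_stack_enter_exit).

/* Minimal standalone sketch (not kernel code): a fixed-size volume stack of
 * object/shader ID pairs, terminated by SHADER_NO_ID. */
#include <stdio.h>

#define SHADER_NO_ID -1
#define VOLUME_STACK_SIZE 16

typedef struct VolumeStack {
	int object;
	int shader;
} VolumeStack;

/* hypothetical helper: ray enters a volume object, push it if not already present */
static void stack_enter(VolumeStack *stack, int object, int shader)
{
	int i;
	for(i = 0; stack[i].shader != SHADER_NO_ID; i++)
		if(stack[i].object == object)
			return;              /* already inside this object */
	if(i >= VOLUME_STACK_SIZE - 1)
		return;                  /* stack full, extra overlaps are ignored */
	stack[i].object = object;
	stack[i].shader = shader;
	stack[i + 1].shader = SHADER_NO_ID;
}

/* hypothetical helper: ray leaves through a backfacing surface, pop the entry */
static void stack_exit(VolumeStack *stack, int object)
{
	for(int i = 0; stack[i].shader != SHADER_NO_ID; i++) {
		if(stack[i].object == object) {
			/* shift back the remaining entries */
			do {
				stack[i] = stack[i + 1];
				i++;
			} while(stack[i].shader != SHADER_NO_ID);
			return;
		}
	}
}

int main(void)
{
	VolumeStack stack[VOLUME_STACK_SIZE];
	stack[0].shader = SHADER_NO_ID;  /* no world volume in this example */

	stack_enter(stack, 1, 10);       /* enter a fog cube */
	stack_enter(stack, 2, 11);       /* enter a fire volume that overlaps it */

	/* every entry still on the stack gets its closures evaluated into one
	 * closure array, so the overlap renders as a denser combined volume */
	for(int i = 0; stack[i].shader != SHADER_NO_ID; i++)
		printf("active volume: object %d shader %d\n", stack[i].object, stack[i].shader);

	stack_exit(stack, 1);            /* leave the fog cube, the fire remains */
	return 0;
}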
@@ -34,7 +34,6 @@
 #include "kernel_light.h"
 #include "kernel_emission.h"
 #include "kernel_passes.h"
-#include "kernel_path_state.h"

 #ifdef __SUBSURFACE__
 #include "kernel_subsurface.h"
@@ -44,6 +43,7 @@
 #include "kernel_volume.h"
 #endif

+#include "kernel_path_state.h"
 #include "kernel_shadow.h"

 CCL_NAMESPACE_BEGIN
@@ -93,10 +93,10 @@ ccl_device void kernel_path_indirect(KernelGlobals *kg, RNG *rng, int sample, Ra

 #ifdef __VOLUME__
 		/* volume attenuation */
-		if(state.volume_shader != SHADER_NO_ID) {
+		if(state.volume_stack[0].shader != SHADER_NO_ID) {
 			Ray segment_ray = ray;
 			segment_ray.t = (hit)? isect.t: FLT_MAX;
-			throughput *= kernel_volume_get_shadow_attenuation(kg, &state, &segment_ray, state.volume_shader);
+			throughput *= kernel_volume_get_shadow_attenuation(kg, &state, &segment_ray);
 		}
 #endif

@@ -116,7 +116,7 @@ ccl_device void kernel_path_indirect(KernelGlobals *kg, RNG *rng, int sample, Ra
 		float rbsdf = path_rng_1D(kg, rng, sample, num_total_samples, rng_offset + PRNG_BSDF);
 		shader_eval_surface(kg, &sd, rbsdf, state.flag, SHADER_CONTEXT_INDIRECT);
 #ifdef __BRANCHED_PATH__
-		shader_merge_closures(kg, &sd);
+		shader_merge_closures(&sd);
 #endif

 		/* blurring of bsdf after bounces, for rays that have a small likelihood
@@ -291,7 +291,7 @@ ccl_device void kernel_path_indirect(KernelGlobals *kg, RNG *rng, int sample, Ra
 #ifdef __VOLUME__
 			/* enter/exit volume */
 			if(label & LABEL_TRANSMIT)
-				kernel_volume_enter_exit(kg, &sd, &state.volume_shader);
+				kernel_volume_stack_enter_exit(kg, &sd, state.volume_stack);
 #endif
 		}
 #ifdef __VOLUME__
@@ -308,7 +308,7 @@ ccl_device void kernel_path_indirect(KernelGlobals *kg, RNG *rng, int sample, Ra
 #endif

 			/* enter/exit volume */
-			kernel_volume_enter_exit(kg, &sd, &state.volume_shader);
+			kernel_volume_stack_enter_exit(kg, &sd, state.volume_stack);
 		}
 #endif
 		else {
@@ -411,7 +411,7 @@ ccl_device_inline bool kernel_path_integrate_lighting(KernelGlobals *kg, RNG *rn
 #ifdef __VOLUME__
 		/* enter/exit volume */
 		if(label & LABEL_TRANSMIT)
-			kernel_volume_enter_exit(kg, sd, &state->volume_shader);
+			kernel_volume_stack_enter_exit(kg, sd, state->volume_stack);
 #endif
 		return true;
 	}
@@ -429,7 +429,7 @@ ccl_device_inline bool kernel_path_integrate_lighting(KernelGlobals *kg, RNG *rn
 #endif

 		/* enter/exit volume */
-		kernel_volume_enter_exit(kg, sd, &state->volume_shader);
+		kernel_volume_stack_enter_exit(kg, sd, state->volume_stack);
 		return true;
 	}
 #endif
@@ -515,10 +515,10 @@ ccl_device float4 kernel_path_integrate(KernelGlobals *kg, RNG *rng, int sample,

 #ifdef __VOLUME__
 		/* volume attenuation */
-		if(state.volume_shader != SHADER_NO_ID) {
+		if(state.volume_stack[0].shader != SHADER_NO_ID) {
 			Ray segment_ray = ray;
 			segment_ray.t = (hit)? isect.t: FLT_MAX;
-			throughput *= kernel_volume_get_shadow_attenuation(kg, &state, &segment_ray, state.volume_shader);
+			throughput *= kernel_volume_get_shadow_attenuation(kg, &state, &segment_ray);
 		}
 #endif

@@ -769,7 +769,7 @@ ccl_device float4 kernel_path_integrate(KernelGlobals *kg, RNG *rng, int sample,
 #ifdef __VOLUME__
 			/* enter/exit volume */
 			if(label & LABEL_TRANSMIT)
-				kernel_volume_enter_exit(kg, &sd, &state.volume_shader);
+				kernel_volume_stack_enter_exit(kg, &sd, state.volume_stack);
 #endif

 		}
@@ -787,7 +787,7 @@ ccl_device float4 kernel_path_integrate(KernelGlobals *kg, RNG *rng, int sample,
 #endif

 			/* enter/exit volume */
-			kernel_volume_enter_exit(kg, &sd, &state.volume_shader);
+			kernel_volume_stack_enter_exit(kg, &sd, state.volume_stack);
 		}
 #endif
 		else {
@@ -957,7 +957,7 @@ ccl_device_noinline void kernel_branched_path_integrate_lighting(KernelGlobals *
 #ifdef __VOLUME__
 			/* enter/exit volume */
 			if(label & LABEL_TRANSMIT)
-				kernel_volume_enter_exit(kg, sd, &ps.volume_shader);
+				kernel_volume_stack_enter_exit(kg, sd, ps.volume_stack);
 #endif

 			kernel_path_indirect(kg, rng, sample*num_samples + j, bsdf_ray, buffer,
@@ -1019,10 +1019,10 @@ ccl_device float4 kernel_branched_path_integrate(KernelGlobals *kg, RNG *rng, in

 #ifdef __VOLUME__
 		/* volume attenuation */
-		if(state.volume_shader != SHADER_NO_ID) {
+		if(state.volume_stack[0].shader != SHADER_NO_ID) {
 			Ray segment_ray = ray;
 			segment_ray.t = (hit)? isect.t: FLT_MAX;
-			throughput *= kernel_volume_get_shadow_attenuation(kg, &state, &segment_ray, state.volume_shader);
+			throughput *= kernel_volume_get_shadow_attenuation(kg, &state, &segment_ray);
 		}
 #endif

@@ -1050,7 +1050,7 @@ ccl_device float4 kernel_branched_path_integrate(KernelGlobals *kg, RNG *rng, in
 		ShaderData sd;
 		shader_setup_from_ray(kg, &sd, &isect, &ray, state.bounce);
 		shader_eval_surface(kg, &sd, 0.0f, state.flag, SHADER_CONTEXT_MAIN);
-		shader_merge_closures(kg, &sd);
+		shader_merge_closures(&sd);

 		/* holdout */
 #ifdef __HOLDOUT__
@@ -1198,7 +1198,7 @@ ccl_device float4 kernel_branched_path_integrate(KernelGlobals *kg, RNG *rng, in

 #ifdef __VOLUME__
 			/* enter/exit volume */
-			kernel_volume_enter_exit(kg, &sd, &state.volume_shader);
+			kernel_volume_stack_enter_exit(kg, &sd, state.volume_stack);
 #endif
 		}

@@ -16,20 +16,6 @@

 CCL_NAMESPACE_BEGIN

-typedef struct PathState {
-	int flag;
-	int bounce;
-
-	int diffuse_bounce;
-	int glossy_bounce;
-	int transmission_bounce;
-	int transparent_bounce;
-
-#ifdef __VOLUME__
-	int volume_shader;
-#endif
-} PathState;
-
 ccl_device_inline void path_state_init(KernelGlobals *kg, PathState *state)
 {
 	state->flag = PATH_RAY_CAMERA|PATH_RAY_SINGULAR|PATH_RAY_MIS_SKIP;
@@ -40,8 +26,7 @@ ccl_device_inline void path_state_init(KernelGlobals *kg, PathState *state)
 	state->transparent_bounce = 0;

 #ifdef __VOLUME__
-	/* todo: this assumes camera is always in air, need to detect when it isn't */
-	state->volume_shader = kernel_data.background.volume_shader;
+	kernel_volume_stack_init(kg, state->volume_stack);
 #endif
 }

@@ -459,6 +459,41 @@ ccl_device_inline void shader_setup_from_volume(KernelGlobals *kg, ShaderData *s
 	sd->ray_dP = ray->dP;
 }

+/* Merging */
+
+#if defined(__BRANCHED_PATH__) || defined(__VOLUME__)
+ccl_device void shader_merge_closures(ShaderData *sd)
+{
+	/* merge identical closures, better when we sample a single closure at a time */
+	for(int i = 0; i < sd->num_closure; i++) {
+		ShaderClosure *sci = &sd->closure[i];
+
+		for(int j = i + 1; j < sd->num_closure; j++) {
+			ShaderClosure *scj = &sd->closure[j];
+
+#ifdef __OSL__
+			if(!sci->prim && !scj->prim && sci->type == scj->type && sci->data0 == scj->data0 && sci->data1 == scj->data1) {
+#else
+			if(sci->type == scj->type && sci->data0 == scj->data0 && sci->data1 == scj->data1) {
+#endif
+				sci->weight += scj->weight;
+				sci->sample_weight += scj->sample_weight;
+
+				int size = sd->num_closure - (j+1);
+				if(size > 0) {
+					for(int k = 0; k < size; k++) {
+						scj[k] = scj[k+1];
+					}
+				}
+
+				sd->num_closure--;
+				j--;
+			}
+		}
+	}
+}
+#endif
+
 /* BSDF */

 #ifdef __MULTI_CLOSURE__
@@ -851,9 +886,16 @@ ccl_device float3 shader_holdout_eval(KernelGlobals *kg, ShaderData *sd)
 ccl_device void shader_eval_surface(KernelGlobals *kg, ShaderData *sd,
 	float randb, int path_flag, ShaderContext ctx)
 {
+#ifdef __MULTI_CLOSURE__
+	sd->num_closure = 0;
+	sd->randb_closure = randb;
+#else
+	sd->closure.type = NBUILTIN_CLOSURES;
+#endif
+
 #ifdef __OSL__
 	if(kg->osl)
-		OSLShader::eval_surface(kg, sd, randb, path_flag, ctx);
+		OSLShader::eval_surface(kg, sd, path_flag, ctx);
 	else
 #endif
 	{
@@ -871,9 +913,17 @@ ccl_device void shader_eval_surface(KernelGlobals *kg, ShaderData *sd,

 ccl_device float3 shader_eval_background(KernelGlobals *kg, ShaderData *sd, int path_flag, ShaderContext ctx)
 {
+#ifdef __MULTI_CLOSURE__
+	sd->num_closure = 0;
+	sd->randb_closure = 0.0f;
+#else
+	sd->closure.type = NBUILTIN_CLOSURES;
+#endif
+
 #ifdef __OSL__
-	if (kg->osl)
+	if(kg->osl) {
 		return OSLShader::eval_background(kg, sd, path_flag, ctx);
+	}
 	else
 #endif

@@ -907,6 +957,7 @@ ccl_device float3 shader_eval_background(KernelGlobals *kg, ShaderData *sd, int

 /* Volume */

+#ifdef __VOLUME__
 ccl_device float3 shader_volume_eval_phase(KernelGlobals *kg, ShaderData *sd,
 	float3 omega_in, float3 omega_out)
 {
@@ -931,22 +982,67 @@ ccl_device float3 shader_volume_eval_phase(KernelGlobals *kg, ShaderData *sd,
 /* Volume Evaluation */

 ccl_device void shader_eval_volume(KernelGlobals *kg, ShaderData *sd,
-	float randb, int path_flag, ShaderContext ctx)
+	VolumeStack *stack, float randb, int path_flag, ShaderContext ctx)
 {
-#ifdef __SVM__
-#ifdef __OSL__
-	if (kg->osl)
-		OSLShader::eval_volume(kg, sd, randb, path_flag, ctx);
-	else
+	/* reset closures once at the start, we will be accumulating the closures
+	 * for all volumes in the stack into a single array of closures */
+#ifdef __MULTI_CLOSURE__
+	sd->num_closure = 0;
+	sd->randb_closure = randb;
+#else
+	sd->closure.type = NBUILTIN_CLOSURES;
 #endif
-		svm_eval_nodes(kg, sd, SHADER_TYPE_VOLUME, randb, path_flag);
+
+	for(int i = 0; stack[i].shader != SHADER_NO_ID; i++) {
+		/* setup shaderdata from stack. it's mostly setup already in
+		 * shader_setup_from_volume, this switching should be quick */
+		sd->object = stack[i].object;
+		sd->shader = stack[i].shader;
+
+		sd->flag &= ~(SD_SHADER_FLAGS|SD_OBJECT_FLAGS);
+		sd->flag |= kernel_tex_fetch(__shader_flag, (sd->shader & SHADER_MASK)*2);
+
+		if(sd->object != ~0) {
+			sd->flag |= kernel_tex_fetch(__object_flag, sd->object);
+
+#ifdef __OBJECT_MOTION__
+			/* todo: this is inefficient for motion blur, we should be
+			 * caching matrices instead of recomputing them each step */
+			shader_setup_object_transforms(kg, sd, sd->time);
 #endif
+		}
+
+		/* evaluate shader */
+#ifdef __SVM__
+#ifdef __OSL__
+		if(kg->osl) {
+			OSLShader::eval_volume(kg, sd, path_flag, ctx);
+		}
+		else
+#endif
+		{
+			svm_eval_nodes(kg, sd, SHADER_TYPE_VOLUME, randb, path_flag);
+		}
+#endif
+
+		/* merge closures to avoid exceeding number of closures limit */
+		if(i > 0)
+			shader_merge_closures(sd);
+	}
 }
+#endif

 /* Displacement Evaluation */

 ccl_device void shader_eval_displacement(KernelGlobals *kg, ShaderData *sd, ShaderContext ctx)
 {
+#ifdef __MULTI_CLOSURE__
+	sd->num_closure = 0;
+	sd->randb_closure = 0.0f;
+#else
+	sd->closure.type = NBUILTIN_CLOSURES;
+#endif
+
 	/* this will modify sd->P */
 #ifdef __SVM__
 #ifdef __OSL__
@@ -954,7 +1050,9 @@ ccl_device void shader_eval_displacement(KernelGlobals *kg, ShaderData *sd, Shad
 		OSLShader::eval_displacement(kg, sd, ctx);
 	else
 #endif
+	{
 		svm_eval_nodes(kg, sd, SHADER_TYPE_DISPLACEMENT, 0.0f, 0);
+	}
 #endif
 }

@@ -984,40 +1082,5 @@ ccl_device bool shader_transparent_shadow(KernelGlobals *kg, Intersection *isect
 }
 #endif

-/* Merging */
-
-#ifdef __BRANCHED_PATH__
-ccl_device void shader_merge_closures(KernelGlobals *kg, ShaderData *sd)
-{
-	/* merge identical closures, better when we sample a single closure at a time */
-	for(int i = 0; i < sd->num_closure; i++) {
-		ShaderClosure *sci = &sd->closure[i];
-
-		for(int j = i + 1; j < sd->num_closure; j++) {
-			ShaderClosure *scj = &sd->closure[j];
-
-#ifdef __OSL__
-			if(!sci->prim && !scj->prim && sci->type == scj->type && sci->data0 == scj->data0 && sci->data1 == scj->data1) {
-#else
-			if(sci->type == scj->type && sci->data0 == scj->data0 && sci->data1 == scj->data1) {
-#endif
-				sci->weight += scj->weight;
-				sci->sample_weight += scj->sample_weight;
-
-				int size = sd->num_closure - (j+1);
-				if(size > 0) {
-					for(int k = 0; k < size; k++) {
-						scj[k] = scj[k+1];
-					}
-				}
-
-				sd->num_closure--;
-				j--;
-			}
-		}
-	}
-}
-#endif
-
 CCL_NAMESPACE_END

@@ -44,9 +44,8 @@ ccl_device_inline bool shadow_blocked(KernelGlobals *kg, PathState *state, Ray *
 	float3 throughput = make_float3(1.0f, 1.0f, 1.0f);
 	float3 Pend = ray->P + ray->D*ray->t;
 	int bounce = state->transparent_bounce;
-
 #ifdef __VOLUME__
-	int volume_shader = state->volume_shader;
+	PathState ps = *state;
 #endif

 	for(;;) {
@@ -74,8 +73,8 @@ ccl_device_inline bool shadow_blocked(KernelGlobals *kg, PathState *state, Ray *

 #ifdef __VOLUME__
 			/* attenuation for last line segment towards light */
-			if(volume_shader != SHADER_NO_ID)
-				throughput *= kernel_volume_get_shadow_attenuation(kg, state, ray, volume_shader);
+			if(ps.volume_stack[0].shader != SHADER_NO_ID)
+				throughput *= kernel_volume_get_shadow_attenuation(kg, &ps, ray);
 #endif

 			*shadow *= throughput;
@@ -87,10 +86,10 @@ ccl_device_inline bool shadow_blocked(KernelGlobals *kg, PathState *state, Ray *

 #ifdef __VOLUME__
 			/* attenuation between last surface and next surface */
-			if(volume_shader != SHADER_NO_ID) {
+			if(ps.volume_stack[0].shader != SHADER_NO_ID) {
 				Ray segment_ray = *ray;
 				segment_ray.t = isect.t;
-				throughput *= kernel_volume_get_shadow_attenuation(kg, state, &segment_ray, volume_shader);
+				throughput *= kernel_volume_get_shadow_attenuation(kg, &ps, &segment_ray);
 			}
 #endif

@@ -111,10 +110,7 @@ ccl_device_inline bool shadow_blocked(KernelGlobals *kg, PathState *state, Ray *

 #ifdef __VOLUME__
 			/* exit/enter volume */
-			if(sd.flag & SD_BACKFACING)
-				volume_shader = kernel_data.background.volume_shader;
-			else
-				volume_shader = (sd.flag & SD_HAS_VOLUME)? sd.shader: SHADER_NO_ID;
+			kernel_volume_stack_enter_exit(kg, &sd, ps.volume_stack);
 #endif

 			bounce++;
@@ -122,9 +118,9 @@ ccl_device_inline bool shadow_blocked(KernelGlobals *kg, PathState *state, Ray *
 		}
 	}
 #ifdef __VOLUME__
-	else if(!result && state->volume_shader != SHADER_NO_ID) {
+	else if(!result && state->volume_stack[0].shader != SHADER_NO_ID) {
 		/* apply attenuation from current volume shader */
-		*shadow *= kernel_volume_get_shadow_attenuation(kg, state, ray, state->volume_shader);
+		*shadow *= kernel_volume_get_shadow_attenuation(kg, state, ray);
 	}
 #endif
 #endif
@@ -48,6 +48,8 @@ CCL_NAMESPACE_BEGIN

 #define SHADER_NO_ID -1

+#define VOLUME_STACK_SIZE 16
+
 /* device capabilities */
 #ifdef __KERNEL_CPU__
 #define __KERNEL_SHADING__
@@ -511,10 +513,14 @@ enum ShaderDataFlag {
 	SD_HOMOGENEOUS_VOLUME = 8192, /* has homogeneous volume */
 	SD_HAS_BSSRDF_BUMP = 16384, /* bssrdf normal uses bump */

+	SD_SHADER_FLAGS = (SD_USE_MIS|SD_HAS_TRANSPARENT_SHADOW|SD_HAS_VOLUME|SD_HAS_ONLY_VOLUME|SD_HOMOGENEOUS_VOLUME|SD_HAS_BSSRDF_BUMP),
+
 	/* object flags */
 	SD_HOLDOUT_MASK = 32768, /* holdout for camera rays */
 	SD_OBJECT_MOTION = 65536, /* has object motion blur */
-	SD_TRANSFORM_APPLIED = 131072 /* vertices have transform applied */
+	SD_TRANSFORM_APPLIED = 131072, /* vertices have transform applied */
+
+	SD_OBJECT_FLAGS = (SD_HOLDOUT_MASK|SD_OBJECT_MOTION|SD_TRANSFORM_APPLIED)
 };

 struct KernelGlobals;
@@ -599,6 +605,29 @@ typedef struct ShaderData {
 #endif
 } ShaderData;

+/* Path State */
+
+#ifdef __VOLUME__
+typedef struct VolumeStack {
+	int object;
+	int shader;
+} VolumeStack;
+#endif
+
+typedef struct PathState {
+	int flag;
+	int bounce;
+
+	int diffuse_bounce;
+	int glossy_bounce;
+	int transmission_bounce;
+	int transparent_bounce;
+
+#ifdef __VOLUME__
+	VolumeStack volume_stack[VOLUME_STACK_SIZE];
+#endif
+} PathState;
+
 /* Constant Kernel Data
  *
  * These structs are passed from CPU to various devices, and the struct layout
@@ -64,11 +64,11 @@ ccl_device float3 volume_shader_get_absorption_coefficient(ShaderData *sd)
 }

 /* evaluate shader to get extinction coefficient at P */
-ccl_device float3 volume_extinction_sample(KernelGlobals *kg, ShaderData *sd, int path_flag, ShaderContext ctx, float3 P)
+ccl_device float3 volume_extinction_sample(KernelGlobals *kg, ShaderData *sd, VolumeStack *stack, int path_flag, ShaderContext ctx, float3 P)
 {
 	sd->P = P;

-	shader_eval_volume(kg, sd, 0.0f, path_flag, ctx);
+	shader_eval_volume(kg, sd, stack, 0.0f, path_flag, ctx);

 	return volume_shader_get_extinction_coefficient(sd);
 }
@@ -82,10 +82,10 @@ ccl_device float3 volume_color_attenuation(float3 sigma, float t)

 /* get the volume attenuation over line segment defined by segment_ray, with the
  * assumption that there are surfaces blocking light between the endpoints */
-ccl_device float3 kernel_volume_get_shadow_attenuation(KernelGlobals *kg, PathState *state, Ray *segment_ray, int shader)
+ccl_device float3 kernel_volume_get_shadow_attenuation(KernelGlobals *kg, PathState *state, Ray *segment_ray)
 {
 	ShaderData sd;
-	shader_setup_from_volume(kg, &sd, segment_ray, shader, state->bounce);
+	shader_setup_from_volume(kg, &sd, segment_ray, state->volume_stack[0].shader, state->bounce);

 	/* do we have a volume shader? */
 	if(!(sd.flag & SD_HAS_VOLUME))
@@ -101,7 +101,7 @@ ccl_device float3 kernel_volume_get_shadow_attenuation(KernelGlobals *kg, PathSt
 	 * the extinction coefficient for the entire line segment */

 	/* todo: could this use sigma_t_cache? */
-	float3 sigma_t = volume_extinction_sample(kg, &sd, path_flag, ctx, segment_ray->P);
+	float3 sigma_t = volume_extinction_sample(kg, &sd, state->volume_stack, path_flag, ctx, segment_ray->P);

 	attenuation = volume_color_attenuation(sigma_t, segment_ray->t);
 	//}
@@ -111,13 +111,62 @@ ccl_device float3 kernel_volume_get_shadow_attenuation(KernelGlobals *kg, PathSt

 /* Volume Stack */

-/* todo: this assumes no overlapping volumes, needs to become a stack */
-ccl_device void kernel_volume_enter_exit(KernelGlobals *kg, ShaderData *sd, int *volume_shader)
+ccl_device void kernel_volume_stack_init(KernelGlobals *kg, VolumeStack *stack)
 {
-	if(sd->flag & SD_BACKFACING)
-		*volume_shader = kernel_data.background.volume_shader;
-	else
-		*volume_shader = (sd->flag & SD_HAS_VOLUME)? sd->shader: SHADER_NO_ID;
+	/* todo: this assumes camera is always in air, need to detect when it isn't */
+	if(kernel_data.background.volume_shader == SHADER_NO_ID) {
+		stack[0].shader = SHADER_NO_ID;
+	}
+	else {
+		stack[0].shader = kernel_data.background.volume_shader;
+		stack[0].object = ~0;
+		stack[1].shader = SHADER_NO_ID;
+	}
+}
+
+ccl_device void kernel_volume_stack_enter_exit(KernelGlobals *kg, ShaderData *sd, VolumeStack *stack)
+{
+	/* todo: we should have some way for objects to indicate if they want the
+	 * world shader to work inside them. excluding it by default is problematic
+	 * because non-volume objects can't be assumed to be closed manifolds */
+
+	if(!(sd->flag & SD_HAS_VOLUME))
+		return;
+
+	if(sd->flag & SD_BACKFACING) {
+		/* exit volume object: remove from stack */
+		for(int i = 0; stack[i].shader != SHADER_NO_ID; i++) {
+			if(stack[i].object == sd->object) {
+				/* shift back next stack entries */
+				do {
+					stack[i] = stack[i+1];
+					i++;
+				}
+				while(stack[i].shader != SHADER_NO_ID);
+
+				return;
+			}
+		}
+	}
+	else {
+		/* enter volume object: add to stack */
+		int i;
+
+		for(i = 0; stack[i].shader != SHADER_NO_ID; i++) {
+			/* already in the stack? then we have nothing to do */
+			if(stack[i].object == sd->object)
+				return;
+		}
+
+		/* if we exceed the stack limit, ignore */
+		if(i >= VOLUME_STACK_SIZE-1)
+			return;
+
+		/* add to the end of the stack */
+		stack[i].shader = sd->shader;
+		stack[i].object = sd->object;
+		stack[i+1].shader = SHADER_NO_ID;
+	}
 }

 CCL_NAMESPACE_END
@@ -299,7 +299,7 @@ static void flatten_surface_closure_tree(ShaderData *sd, int path_flag,
 	}
 }

-void OSLShader::eval_surface(KernelGlobals *kg, ShaderData *sd, float randb, int path_flag, ShaderContext ctx)
+void OSLShader::eval_surface(KernelGlobals *kg, ShaderData *sd, int path_flag, ShaderContext ctx)
 {
 	/* setup shader globals from shader data */
 	OSLThreadData *tdata = kg->osl_tdata;
@@ -315,9 +315,6 @@ void OSLShader::eval_surface(KernelGlobals *kg, ShaderData *sd, float randb, int
 	ss->execute(*octx, *(kg->osl->surface_state[shader]), *globals);

 	/* flatten closure tree */
-	sd->num_closure = 0;
-	sd->randb_closure = randb;
-
 	if (globals->Ci)
 		flatten_surface_closure_tree(sd, path_flag, globals->Ci);
 }
@@ -440,7 +437,7 @@ static void flatten_volume_closure_tree(ShaderData *sd,
 	}
 }

-void OSLShader::eval_volume(KernelGlobals *kg, ShaderData *sd, float randb, int path_flag, ShaderContext ctx)
+void OSLShader::eval_volume(KernelGlobals *kg, ShaderData *sd, int path_flag, ShaderContext ctx)
 {
 	/* setup shader globals from shader data */
 	OSLThreadData *tdata = kg->osl_tdata;
@@ -456,9 +453,6 @@ void OSLShader::eval_volume(KernelGlobals *kg, ShaderData *sd, float randb, int
 	ss->execute(*octx, *(kg->osl->volume_state[shader]), *globals);

 	/* flatten closure tree */
-	sd->num_closure = 0;
-	sd->randb_closure = randb;
-
 	if (globals->Ci)
 		flatten_volume_closure_tree(sd, globals->Ci);
 }
@@ -53,9 +53,9 @@
 	static void thread_free(KernelGlobals *kg);

 	/* eval */
-	static void eval_surface(KernelGlobals *kg, ShaderData *sd, float randb, int path_flag, ShaderContext ctx);
+	static void eval_surface(KernelGlobals *kg, ShaderData *sd, int path_flag, ShaderContext ctx);
 	static float3 eval_background(KernelGlobals *kg, ShaderData *sd, int path_flag, ShaderContext ctx);
-	static void eval_volume(KernelGlobals *kg, ShaderData *sd, float randb, int path_flag, ShaderContext ctx);
+	static void eval_volume(KernelGlobals *kg, ShaderData *sd, int path_flag, ShaderContext ctx);
 	static void eval_displacement(KernelGlobals *kg, ShaderData *sd, ShaderContext ctx);

 	/* sample & eval */
@@ -188,13 +188,6 @@ ccl_device_noinline void svm_eval_nodes(KernelGlobals *kg, ShaderData *sd, Shade
 	float closure_weight = 1.0f;
 	int offset = sd->shader & SHADER_MASK;

-#ifdef __MULTI_CLOSURE__
-	sd->num_closure = 0;
-	sd->randb_closure = randb;
-#else
-	sd->closure.type = NBUILTIN_CLOSURES;
-#endif
-
 	while(1) {
 		uint4 node = read_node(kg, &offset);
