Cycles: Subsurface Scattering

New features:

* Bump mapping now works with SSS
* Texture Blur factor for SSS (a toy sketch of its effect follows this list); see the documentation for details:
http://wiki.blender.org/index.php/Doc:2.6/Manual/Render/Cycles/Nodes/Shaders#Subsurface_Scattering
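
A toy sketch of what the Texture Blur factor does conceptually: at 0.0 the
texture stays sharp at the shading point, at 1.0 it is fully blurred along
with the scattered light. The helper below is an illustrative assumption
written for this note, not the actual kernel code.

#include <stdio.h>

/* Hypothetical helper (not Cycles kernel code): blend between the sharp
 * texture value at the shading point and the value blurred along with the
 * subsurface scattering, controlled by the Texture Blur factor. */
static float texture_blur_blend(float tex_sharp, float tex_blurred, float blur)
{
	return tex_sharp*(1.0f - blur) + tex_blurred*blur;
}

int main(void)
{
	/* blur = 0.5 mixes the two values evenly */
	printf("%f\n", texture_blur_blend(0.2f, 0.8f, 0.5f)); /* 0.500000 */
	return 0;
}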

Work in progress for feedback:

Initial implementation of the "BSSRDF Importance Sampling" paper, which uses
a different importance sampling method. It gives better quality results in
many ways, and adds both Cubic and Gaussian falloff functions (sketched
below), but it also tends to be noisier with the progressive integrator and
does not give great results with some geometry. It works quite well with the
non-progressive integrator and is often less noisy there.
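
For reference, the two new profiles behave roughly as below: Cubic falls off
as (R - r)^3 inside the scatter radius R, Gaussian as exp(-r^2/(2*v)) with
variance v. This is a minimal standalone sketch with normalization omitted
and constants picked for illustration; the kernel normalizes the real
profiles so they integrate to one.

#include <math.h>
#include <stdio.h>

/* Unnormalized radial falloff weights, for illustration only. */
static float falloff_cubic(float r, float R)
{
	float t = R - r;
	return (r < R)? t*t*t: 0.0f;
}

static float falloff_gaussian(float r, float v)
{
	return expf(-r*r/(2.0f*v));
}

int main(void)
{
	for(float r = 0.0f; r <= 1.0f; r += 0.25f)
		printf("r=%.2f cubic=%.4f gauss=%.4f\n",
		       r, falloff_cubic(r, 1.0f), falloff_gaussian(r, 0.25f));
	return 0;
}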

This code may still change a lot, so unless you're testing it, it may be best
to stick to the Compatible falloff function.

Skin test render and .blend file that take advantage of the Gaussian falloff:
http://www.pasteall.org/pic/show.php?id=57661
http://www.pasteall.org/pic/show.php?id=57662
http://www.pasteall.org/blend/23501
Author: Brecht Van Lommel
Date:   2013-08-18 14:15:57 +00:00
Commit: d43682d51b (parent a2541508ac)

36 changed files with 1386 additions and 431 deletions

@@ -100,11 +100,11 @@ __device_inline void path_state_next(KernelGlobals *kg, PathState *state, int la
 	/* diffuse/glossy/singular */
 	if(label & LABEL_DIFFUSE) {
-		state->flag |= PATH_RAY_DIFFUSE;
+		state->flag |= PATH_RAY_DIFFUSE|PATH_RAY_DIFFUSE_ANCESTOR;
 		state->flag &= ~(PATH_RAY_GLOSSY|PATH_RAY_SINGULAR|PATH_RAY_MIS_SKIP);
 	}
 	else if(label & LABEL_GLOSSY) {
-		state->flag |= PATH_RAY_GLOSSY;
+		state->flag |= PATH_RAY_GLOSSY|PATH_RAY_GLOSSY_ANCESTOR;
 		state->flag &= ~(PATH_RAY_DIFFUSE|PATH_RAY_SINGULAR|PATH_RAY_MIS_SKIP);
 	}
 	else {
@@ -117,7 +117,7 @@ __device_inline void path_state_next(KernelGlobals *kg, PathState *state, int la
 __device_inline uint path_state_ray_visibility(KernelGlobals *kg, PathState *state)
 {
-	uint flag = state->flag;
+	uint flag = state->flag & PATH_RAY_ALL_VISIBILITY;

 	/* for visibility, diffuse/glossy are for reflection only */
 	if(flag & PATH_RAY_TRANSMIT)
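
The two hunks above add ancestor bookkeeping bits to the path flag and then
mask them out of visibility lookups. A minimal standalone sketch of that
pattern; the flag values here are made up and do not match the real
PATH_RAY_* enum:

#include <stdio.h>

/* Illustrative bit layout: bookkeeping bits share the flag word with
 * visibility bits, so visibility tests must apply a mask first. */
enum {
	RAY_CAMERA           = 1 << 0,
	RAY_DIFFUSE          = 1 << 1,
	RAY_GLOSSY           = 1 << 2,
	RAY_DIFFUSE_ANCESTOR = 1 << 3, /* bookkeeping, not visibility */
	RAY_ALL_VISIBILITY   = RAY_CAMERA|RAY_DIFFUSE|RAY_GLOSSY
};

int main(void)
{
	unsigned flag = RAY_DIFFUSE|RAY_DIFFUSE_ANCESTOR;
	/* without the mask, the ancestor bit would leak into visibility */
	printf("visibility bits: 0x%x\n", flag & RAY_ALL_VISIBILITY); /* 0x2 */
	return 0;
}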
@@ -404,7 +404,15 @@ __device float4 kernel_path_progressive(KernelGlobals *kg, RNG *rng, int sample,
 			/* do bssrdf scatter step if we picked a bssrdf closure */
 			if(sc) {
 				uint lcg_state = lcg_init(*rng + rng_offset + sample*0x68bc21eb);
-				subsurface_scatter_step(kg, &sd, state.flag, sc, &lcg_state, false);
+
+				if(old_subsurface_scatter_use(&sd)) {
+					old_subsurface_scatter_step(kg, &sd, state.flag, sc, &lcg_state, false);
+				}
+				else {
+					float bssrdf_u, bssrdf_v;
+					path_rng_2D(kg, rng, sample, num_samples, rng_offset + PRNG_BSDF_U, &bssrdf_u, &bssrdf_v);
+					subsurface_scatter_step(kg, &sd, state.flag, sc, &lcg_state, bssrdf_u, bssrdf_v, false);
+				}
 			}
 		}
 #endif
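
The hunk above switches the new scatter path from pure LCG noise to a
stratified 2D sample pair from path_rng_2D. Roughly, such a (u, v) pair can
be mapped to a point on a disk around the entry point, which is where
stratification pays off; the uniform-disk mapping below is a generic sketch,
not necessarily what subsurface_scatter_step does internally:

#include <math.h>
#include <stdio.h>

#ifndef M_PI
#define M_PI 3.14159265358979323846
#endif

/* Map a 2D sample in [0,1)^2 to a point on a disk of radius R;
 * sqrt(u) keeps the area density uniform. */
static void disk_sample(float u, float v, float R, float *x, float *y)
{
	float r = R*sqrtf(u);
	float phi = 2.0f*(float)M_PI*v;
	*x = r*cosf(phi);
	*y = r*sinf(phi);
}

int main(void)
{
	float x, y;
	disk_sample(0.5f, 0.25f, 1.0f, &x, &y);
	printf("sample on disk: (%f, %f)\n", x, y);
	return 0;
}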
@@ -646,7 +654,15 @@ __device void kernel_path_indirect(KernelGlobals *kg, RNG *rng, int sample, Ray
 		/* do bssrdf scatter step if we picked a bssrdf closure */
 		if(sc) {
 			uint lcg_state = lcg_init(*rng + rng_offset + sample*0x68bc21eb);
-			subsurface_scatter_step(kg, &sd, state.flag, sc, &lcg_state, false);
+
+			if(old_subsurface_scatter_use(&sd)) {
+				old_subsurface_scatter_step(kg, &sd, state.flag, sc, &lcg_state, false);
+			}
+			else {
+				float bssrdf_u, bssrdf_v;
+				path_rng_2D(kg, rng, sample, num_total_samples, rng_offset + PRNG_BSDF_U, &bssrdf_u, &bssrdf_v);
+				subsurface_scatter_step(kg, &sd, state.flag, sc, &lcg_state, bssrdf_u, bssrdf_v, false);
+			}
 		}
 	}
 #endif
@@ -1090,17 +1106,32 @@ __device float4 kernel_path_non_progressive(KernelGlobals *kg, RNG *rng, int sam
 				uint lcg_state = lcg_init(*rng + rng_offset + sample*0x68bc21eb);
 				int num_samples = kernel_data.integrator.subsurface_samples;
 				float num_samples_inv = 1.0f/num_samples;
+				RNG bssrdf_rng = cmj_hash(*rng, i);

 				/* do subsurface scatter step with copy of shader data, this will
 				 * replace the BSSRDF with a diffuse BSDF closure */
 				for(int j = 0; j < num_samples; j++) {
-					ShaderData bssrdf_sd = sd;
-					subsurface_scatter_step(kg, &bssrdf_sd, state.flag, sc, &lcg_state, true);
+					if(old_subsurface_scatter_use(&sd)) {
+						ShaderData bssrdf_sd = sd;
+						old_subsurface_scatter_step(kg, &bssrdf_sd, state.flag, sc, &lcg_state, true);

-					/* compute lighting with the BSDF closure */
-					kernel_path_non_progressive_lighting(kg, rng, sample*num_samples + j,
-						&bssrdf_sd, throughput, num_samples_inv,
-						ray_pdf, ray_pdf, state, rng_offset, &L, buffer);
+						/* compute lighting with the BSDF closure */
+						kernel_path_non_progressive_lighting(kg, rng, sample*num_samples + j,
+							&bssrdf_sd, throughput, num_samples_inv,
+							ray_pdf, ray_pdf, state, rng_offset, &L, buffer);
+					}
+					else {
+						ShaderData bssrdf_sd[BSSRDF_MAX_HITS];
+						float bssrdf_u, bssrdf_v;
+						path_rng_2D(kg, &bssrdf_rng, sample*num_samples + j, aa_samples*num_samples, rng_offset + PRNG_BSDF_U, &bssrdf_u, &bssrdf_v);
+						int num_hits = subsurface_scatter_multi_step(kg, &sd, bssrdf_sd, state.flag, sc, &lcg_state, bssrdf_u, bssrdf_v, true);
+
+						/* compute lighting with the BSDF closure */
+						for(int hit = 0; hit < num_hits; hit++)
+							kernel_path_non_progressive_lighting(kg, rng, sample*num_samples + j,
+								&bssrdf_sd[hit], throughput, num_samples_inv,
+								ray_pdf, ray_pdf, state, rng_offset, &L, buffer);
+					}
 				}
 			}
 		}
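
In the non-progressive path, subsurface_scatter_multi_step can return several
exit hits (up to BSSRDF_MAX_HITS), each shaded with its own copy of the shader
data and accumulated. A toy standalone version of that accumulate-over-hits
pattern, with an invented hit count and weights:

#include <stdio.h>

#define MAX_HITS 4 /* stand-in for BSSRDF_MAX_HITS */

/* Fake scatter step: the real kernel finds exit points by probing the
 * surface; here we just invent a few weighted hits. */
static int scatter_multi_step(float exit_weight[MAX_HITS])
{
	int num_hits = 3;
	for(int i = 0; i < num_hits; i++)
		exit_weight[i] = 1.0f/(float)(i + 1);
	return num_hits;
}

int main(void)
{
	float weight[MAX_HITS];
	int num_hits = scatter_multi_step(weight);
	float L = 0.0f;
	for(int hit = 0; hit < num_hits; hit++)
		L += weight[hit]; /* each hit would run the lighting kernel */
	printf("accumulated over %d hits: %f\n", num_hits, L);
	return 0;
}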