/*
 * Copyright 2011-2016 Blender Foundation
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#pragma once

CCL_NAMESPACE_BEGIN
/* Reserve the next free slot from the front of the closure array of `sd`.
 *
 * `size` is only used to assert that the requested closure fits into one
 * ShaderClosure slot; `type` and `weight` are stored into the new closure.
 * Returns NULL when no slots are left. */
ccl_device ShaderClosure *closure_alloc(ShaderData *sd, int size, ClosureType type, float3 weight)
{
  kernel_assert(size <= sizeof(ShaderClosure));

  if (sd->num_closure_left == 0) {
    return NULL;
  }

  ShaderClosure *const sc = sd->closure + sd->num_closure;

  sc->type = type;
  sc->weight = weight;

  sd->num_closure += 1;
  sd->num_closure_left -= 1;

  return sc;
}
/* Reserve extra parameter space for a closure that needs more than
 * sizeof(ShaderClosure) bytes.
 *
 * Space is taken in whole ShaderClosure-sized chunks from the END of the
 * closure array, which keeps the fast forward iteration over allocated
 * closures intact (linked-list and skip-based iteration were found to be
 * slower).
 *
 * On failure the closure reserved by the immediately preceding
 * closure_alloc() call is rolled back and NULL is returned. */
ccl_device ccl_addr_space void *closure_alloc_extra(ShaderData *sd, int size)
{
  /* Number of whole ShaderClosure-sized chunks needed to hold `size` bytes. */
  const int num_chunks = ((size + sizeof(ShaderClosure) - 1) / sizeof(ShaderClosure));

  if (num_chunks <= sd->num_closure_left) {
    sd->num_closure_left -= num_chunks;
    return (ccl_addr_space void *)(sd->closure + sd->num_closure + sd->num_closure_left);
  }

  /* Not enough room: undo the closure that was just allocated. */
  sd->num_closure--;
  sd->num_closure_left++;
  return NULL;
}
/* Allocate a BSDF closure of `size` bytes with the given `weight`.
 *
 * The closure type is left as CLOSURE_NONE_ID for the caller to fill in.
 * Returns NULL when the sample weight is below the cutoff (or not finite),
 * or when the closure array is full. */
ccl_device_inline ShaderClosure *bsdf_alloc(ShaderData *sd, int size, float3 weight)
{
  kernel_assert(isfinite3_safe(weight));

  const float sample_weight = fabsf(average(weight));

  /* Deliberately a negated `>=` rather than `<`: if the average weight is
   * NaN the comparison is false either way, and this form makes the NaN
   * case take the early-out so no closure is allocated. */
  if (!(sample_weight >= CLOSURE_WEIGHT_CUTOFF)) {
    return NULL;
  }

  ShaderClosure *const sc = closure_alloc(sd, size, CLOSURE_NONE_ID, weight);
  if (sc == NULL) {
    return NULL;
  }

  sc->sample_weight = sample_weight;
  return sc;
}

#ifdef __OSL__
/* OSL variant of bsdf_alloc(): allocates the closure, copies `size` bytes
 * of OSL closure parameters from `data` into it, then restores the weight
 * members that the copy overwrote.
 *
 * Returns NULL when the sample weight is below the cutoff (or not finite),
 * or when the closure array is full. */
ccl_device_inline ShaderClosure *bsdf_alloc_osl(ShaderData *sd,
                                                int size,
                                                float3 weight,
                                                void *data)
{
  kernel_assert(isfinite3_safe(weight));

  const float sample_weight = fabsf(average(weight));

  /* Deliberately a negated `>=` rather than `<`: a NaN average fails the
   * comparison and takes the early-out, so no closure with a non-finite
   * weight is ever allocated. */
  if (!(sample_weight >= CLOSURE_WEIGHT_CUTOFF)) {
    return NULL;
  }

  ShaderClosure *const sc = closure_alloc(sd, size, CLOSURE_NONE_ID, weight);
  if (!sc) {
    return NULL;
  }

  memcpy((void *)sc, data, size);

  /* The memcpy() above clobbered the weight members; write them back. */
  sc->weight = weight;
  sc->sample_weight = sample_weight;
  return sc;
}
#endif

CCL_NAMESPACE_END