/*
 * Copyright 2011-2013 Blender Foundation
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/* Triangle Primitive
 *
 * Basic triangle with 3 vertices is used to represent mesh surfaces. For BVH
 * ray intersection we use a precomputed triangle storage to accelerate
 * intersection at the cost of more memory usage */

#pragma once
CCL_NAMESPACE_BEGIN
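
/* Data layout summary (a descriptive note, not an additional API; it restates
 * what the fetches in the helpers below do):
 *
 *   __tri_vindex[prim].x/y/z: indices of the three corners into __tri_vnormal
 *                             and the per-vertex attribute arrays.
 *   __tri_vindex[prim].w:     base offset of this triangle's three unpacked
 *                             vertices in __tri_verts (at w + 0, w + 1, w + 2).
 *
 * Barycentric convention used throughout this file:
 *
 *   P(u, v) = u * v0 + v * v1 + (1 - u - v) * v2
 *
 * i.e. u weighs vertex 0, v weighs vertex 1 and the remainder vertex 2. */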

/* Normal on triangle. */
ccl_device_inline float3 triangle_normal(KernelGlobals kg, ccl_private ShaderData *sd)
{
  /* load triangle vertices */
  const uint4 tri_vindex = kernel_tex_fetch(__tri_vindex, sd->prim);
  const float3 v0 = float4_to_float3(kernel_tex_fetch(__tri_verts, tri_vindex.w + 0));
  const float3 v1 = float4_to_float3(kernel_tex_fetch(__tri_verts, tri_vindex.w + 1));
  const float3 v2 = float4_to_float3(kernel_tex_fetch(__tri_verts, tri_vindex.w + 2));

  /* return normal */
  if (sd->object_flag & SD_OBJECT_NEGATIVE_SCALE_APPLIED) {
    return normalize(cross(v2 - v0, v1 - v0));
  }
  else {
    return normalize(cross(v1 - v0, v2 - v0));
  }
}

/* Point and normal on triangle. */
ccl_device_inline void triangle_point_normal(KernelGlobals kg,
                                             int object,
                                             int prim,
                                             float u,
                                             float v,
                                             ccl_private float3 *P,
                                             ccl_private float3 *Ng,
                                             ccl_private int *shader)
{
  /* load triangle vertices */
  const uint4 tri_vindex = kernel_tex_fetch(__tri_vindex, prim);
  float3 v0 = float4_to_float3(kernel_tex_fetch(__tri_verts, tri_vindex.w + 0));
  float3 v1 = float4_to_float3(kernel_tex_fetch(__tri_verts, tri_vindex.w + 1));
  float3 v2 = float4_to_float3(kernel_tex_fetch(__tri_verts, tri_vindex.w + 2));
  /* compute point */
  float t = 1.0f - u - v;
  *P = (u * v0 + v * v1 + t * v2);
  /* get object flags */
  int object_flag = kernel_tex_fetch(__object_flag, object);
  /* compute normal */
  if (object_flag & SD_OBJECT_NEGATIVE_SCALE_APPLIED) {
    *Ng = normalize(cross(v2 - v0, v1 - v0));
  }
  else {
    *Ng = normalize(cross(v1 - v0, v2 - v0));
  }
  /* shader */
  *shader = kernel_tex_fetch(__tri_shader, prim);
}
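
/* Illustrative caller sketch (not part of this header): reconstructing the
 * hit point from an intersection record. The `isect` field names here are
 * assumptions for the example, not an API defined in this file.
 *
 *   float3 P, Ng;
 *   int shader;
 *   triangle_point_normal(kg, isect.object, isect.prim, isect.u, isect.v,
 *                         &P, &Ng, &shader);
 */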

/* Triangle vertex locations */

ccl_device_inline void triangle_vertices(KernelGlobals kg, int prim, float3 P[3])
{
  const uint4 tri_vindex = kernel_tex_fetch(__tri_vindex, prim);
  P[0] = float4_to_float3(kernel_tex_fetch(__tri_verts, tri_vindex.w + 0));
  P[1] = float4_to_float3(kernel_tex_fetch(__tri_verts, tri_vindex.w + 1));
  P[2] = float4_to_float3(kernel_tex_fetch(__tri_verts, tri_vindex.w + 2));
}

/* Triangle vertex locations and vertex normals */

ccl_device_inline void triangle_vertices_and_normals(KernelGlobals kg,
                                                     int prim,
                                                     float3 P[3],
                                                     float3 N[3])
{
  const uint4 tri_vindex = kernel_tex_fetch(__tri_vindex, prim);
  P[0] = float4_to_float3(kernel_tex_fetch(__tri_verts, tri_vindex.w + 0));
  P[1] = float4_to_float3(kernel_tex_fetch(__tri_verts, tri_vindex.w + 1));
  P[2] = float4_to_float3(kernel_tex_fetch(__tri_verts, tri_vindex.w + 2));
  N[0] = float4_to_float3(kernel_tex_fetch(__tri_vnormal, tri_vindex.x));
  N[1] = float4_to_float3(kernel_tex_fetch(__tri_vnormal, tri_vindex.y));
  N[2] = float4_to_float3(kernel_tex_fetch(__tri_vnormal, tri_vindex.z));
}

/* Interpolate smooth vertex normal from vertices */

ccl_device_inline float3
triangle_smooth_normal(KernelGlobals kg, float3 Ng, int prim, float u, float v)
{
  /* load triangle vertices */
  const uint4 tri_vindex = kernel_tex_fetch(__tri_vindex, prim);
  float3 n0 = float4_to_float3(kernel_tex_fetch(__tri_vnormal, tri_vindex.x));
  float3 n1 = float4_to_float3(kernel_tex_fetch(__tri_vnormal, tri_vindex.y));
  float3 n2 = float4_to_float3(kernel_tex_fetch(__tri_vnormal, tri_vindex.z));

  float3 N = safe_normalize((1.0f - u - v) * n2 + u * n0 + v * n1);

  return is_zero(N) ? Ng : N;
}
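
/* Note: if the interpolated normal has zero length (e.g. vertex normals that
 * cancel out exactly), the smooth-normal helpers above and below fall back to
 * the geometric normal Ng instead of returning a degenerate direction. */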

ccl_device_inline float3 triangle_smooth_normal_unnormalized(
    KernelGlobals kg, ccl_private const ShaderData *sd, float3 Ng, int prim, float u, float v)
{
  /* load triangle vertices */
  const uint4 tri_vindex = kernel_tex_fetch(__tri_vindex, prim);
  float3 n0 = float4_to_float3(kernel_tex_fetch(__tri_vnormal, tri_vindex.x));
  float3 n1 = float4_to_float3(kernel_tex_fetch(__tri_vnormal, tri_vindex.y));
  float3 n2 = float4_to_float3(kernel_tex_fetch(__tri_vnormal, tri_vindex.z));

  /* ensure that the normals are in object space */
  if (sd->object_flag & SD_OBJECT_TRANSFORM_APPLIED) {
    object_inverse_normal_transform(kg, sd, &n0);
    object_inverse_normal_transform(kg, sd, &n1);
    object_inverse_normal_transform(kg, sd, &n2);
  }

  float3 N = (1.0f - u - v) * n2 + u * n0 + v * n1;

  return is_zero(N) ? Ng : N;
}

/* Ray differentials on triangle */

ccl_device_inline void triangle_dPdudv(KernelGlobals kg,
                                       int prim,
                                       ccl_private float3 *dPdu,
                                       ccl_private float3 *dPdv)
{
  /* fetch triangle vertex coordinates */
  const uint4 tri_vindex = kernel_tex_fetch(__tri_vindex, prim);
  const float3 p0 = float4_to_float3(kernel_tex_fetch(__tri_verts, tri_vindex.w + 0));
  const float3 p1 = float4_to_float3(kernel_tex_fetch(__tri_verts, tri_vindex.w + 1));
  const float3 p2 = float4_to_float3(kernel_tex_fetch(__tri_verts, tri_vindex.w + 2));

  /* compute derivatives of P w.r.t. uv */
  *dPdu = (p0 - p2);
  *dPdv = (p1 - p2);
}
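
/* Derivation note: with P(u, v) = u * p0 + v * p1 + (1 - u - v) * p2,
 *
 *   dP/du = p0 - p2
 *   dP/dv = p1 - p2
 *
 * which is exactly what triangle_dPdudv() returns. */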

/* Reading attributes on various triangle elements */

ccl_device float triangle_attribute_float(KernelGlobals kg,
                                          ccl_private const ShaderData *sd,
                                          const AttributeDescriptor desc,
                                          ccl_private float *dx,
                                          ccl_private float *dy)
{
  if (desc.element & (ATTR_ELEMENT_VERTEX | ATTR_ELEMENT_VERTEX_MOTION | ATTR_ELEMENT_CORNER)) {
    float f0, f1, f2;

    if (desc.element & (ATTR_ELEMENT_VERTEX | ATTR_ELEMENT_VERTEX_MOTION)) {
      const uint4 tri_vindex = kernel_tex_fetch(__tri_vindex, sd->prim);
      f0 = kernel_tex_fetch(__attributes_float, desc.offset + tri_vindex.x);
      f1 = kernel_tex_fetch(__attributes_float, desc.offset + tri_vindex.y);
      f2 = kernel_tex_fetch(__attributes_float, desc.offset + tri_vindex.z);
    }
    else {
      const int tri = desc.offset + sd->prim * 3;
      f0 = kernel_tex_fetch(__attributes_float, tri + 0);
      f1 = kernel_tex_fetch(__attributes_float, tri + 1);
      f2 = kernel_tex_fetch(__attributes_float, tri + 2);
    }

#ifdef __RAY_DIFFERENTIALS__
    if (dx)
      *dx = sd->du.dx * f0 + sd->dv.dx * f1 - (sd->du.dx + sd->dv.dx) * f2;
    if (dy)
      *dy = sd->du.dy * f0 + sd->dv.dy * f1 - (sd->du.dy + sd->dv.dy) * f2;
#endif

    return sd->u * f0 + sd->v * f1 + (1.0f - sd->u - sd->v) * f2;
  }
  else {
#ifdef __RAY_DIFFERENTIALS__
    if (dx)
      *dx = 0.0f;
    if (dy)
      *dy = 0.0f;
#endif

    if (desc.element & (ATTR_ELEMENT_FACE | ATTR_ELEMENT_OBJECT | ATTR_ELEMENT_MESH)) {
      const int offset = (desc.element == ATTR_ELEMENT_FACE) ? desc.offset + sd->prim :
                                                               desc.offset;
      return kernel_tex_fetch(__attributes_float, offset);
    }
    else {
      return 0.0f;
    }
  }
}
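
/* Derivation note for the attribute differentials above: an attribute is
 * interpolated as f(u, v) = u * f0 + v * f1 + (1 - u - v) * f2, so by the
 * chain rule
 *
 *   df/dx = du.dx * f0 + dv.dx * f1 - (du.dx + dv.dx) * f2
 *
 * since d(1 - u - v)/dx = -(du.dx + dv.dx). The same expression applies to
 * dy and to the float2/float3/float4 variants below. */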

ccl_device float2 triangle_attribute_float2(KernelGlobals kg,
                                            ccl_private const ShaderData *sd,
                                            const AttributeDescriptor desc,
                                            ccl_private float2 *dx,
                                            ccl_private float2 *dy)
{
  if (desc.element & (ATTR_ELEMENT_VERTEX | ATTR_ELEMENT_VERTEX_MOTION | ATTR_ELEMENT_CORNER)) {
    float2 f0, f1, f2;

    if (desc.element & (ATTR_ELEMENT_VERTEX | ATTR_ELEMENT_VERTEX_MOTION)) {
      const uint4 tri_vindex = kernel_tex_fetch(__tri_vindex, sd->prim);
      f0 = kernel_tex_fetch(__attributes_float2, desc.offset + tri_vindex.x);
      f1 = kernel_tex_fetch(__attributes_float2, desc.offset + tri_vindex.y);
      f2 = kernel_tex_fetch(__attributes_float2, desc.offset + tri_vindex.z);
    }
    else {
      const int tri = desc.offset + sd->prim * 3;
      f0 = kernel_tex_fetch(__attributes_float2, tri + 0);
      f1 = kernel_tex_fetch(__attributes_float2, tri + 1);
      f2 = kernel_tex_fetch(__attributes_float2, tri + 2);
    }

#ifdef __RAY_DIFFERENTIALS__
    if (dx)
      *dx = sd->du.dx * f0 + sd->dv.dx * f1 - (sd->du.dx + sd->dv.dx) * f2;
    if (dy)
      *dy = sd->du.dy * f0 + sd->dv.dy * f1 - (sd->du.dy + sd->dv.dy) * f2;
#endif

    return sd->u * f0 + sd->v * f1 + (1.0f - sd->u - sd->v) * f2;
  }
  else {
#ifdef __RAY_DIFFERENTIALS__
    if (dx)
      *dx = make_float2(0.0f, 0.0f);
    if (dy)
      *dy = make_float2(0.0f, 0.0f);
#endif

    if (desc.element & (ATTR_ELEMENT_FACE | ATTR_ELEMENT_OBJECT | ATTR_ELEMENT_MESH)) {
      const int offset = (desc.element == ATTR_ELEMENT_FACE) ? desc.offset + sd->prim :
                                                               desc.offset;
      return kernel_tex_fetch(__attributes_float2, offset);
    }
    else {
      return make_float2(0.0f, 0.0f);
    }
  }
}

ccl_device float3 triangle_attribute_float3(KernelGlobals kg,
                                            ccl_private const ShaderData *sd,
                                            const AttributeDescriptor desc,
                                            ccl_private float3 *dx,
                                            ccl_private float3 *dy)
{
  if (desc.element & (ATTR_ELEMENT_VERTEX | ATTR_ELEMENT_VERTEX_MOTION | ATTR_ELEMENT_CORNER)) {
    float3 f0, f1, f2;

    if (desc.element & (ATTR_ELEMENT_VERTEX | ATTR_ELEMENT_VERTEX_MOTION)) {
      const uint4 tri_vindex = kernel_tex_fetch(__tri_vindex, sd->prim);
      f0 = float4_to_float3(kernel_tex_fetch(__attributes_float3, desc.offset + tri_vindex.x));
      f1 = float4_to_float3(kernel_tex_fetch(__attributes_float3, desc.offset + tri_vindex.y));
      f2 = float4_to_float3(kernel_tex_fetch(__attributes_float3, desc.offset + tri_vindex.z));
    }
    else {
      const int tri = desc.offset + sd->prim * 3;
      f0 = float4_to_float3(kernel_tex_fetch(__attributes_float3, tri + 0));
      f1 = float4_to_float3(kernel_tex_fetch(__attributes_float3, tri + 1));
      f2 = float4_to_float3(kernel_tex_fetch(__attributes_float3, tri + 2));
    }

#ifdef __RAY_DIFFERENTIALS__
    if (dx)
      *dx = sd->du.dx * f0 + sd->dv.dx * f1 - (sd->du.dx + sd->dv.dx) * f2;
    if (dy)
      *dy = sd->du.dy * f0 + sd->dv.dy * f1 - (sd->du.dy + sd->dv.dy) * f2;
#endif

    return sd->u * f0 + sd->v * f1 + (1.0f - sd->u - sd->v) * f2;
  }
  else {
#ifdef __RAY_DIFFERENTIALS__
    if (dx)
      *dx = make_float3(0.0f, 0.0f, 0.0f);
    if (dy)
      *dy = make_float3(0.0f, 0.0f, 0.0f);
#endif

    if (desc.element & (ATTR_ELEMENT_FACE | ATTR_ELEMENT_OBJECT | ATTR_ELEMENT_MESH)) {
      const int offset = (desc.element == ATTR_ELEMENT_FACE) ? desc.offset + sd->prim :
                                                               desc.offset;
      return float4_to_float3(kernel_tex_fetch(__attributes_float3, offset));
    }
    else {
      return make_float3(0.0f, 0.0f, 0.0f);
    }
  }
}

ccl_device float4 triangle_attribute_float4(KernelGlobals kg,
                                            ccl_private const ShaderData *sd,
                                            const AttributeDescriptor desc,
                                            ccl_private float4 *dx,
                                            ccl_private float4 *dy)
{
  if (desc.element & (ATTR_ELEMENT_VERTEX | ATTR_ELEMENT_VERTEX_MOTION | ATTR_ELEMENT_CORNER |
                      ATTR_ELEMENT_CORNER_BYTE)) {
    float4 f0, f1, f2;

    if (desc.element & (ATTR_ELEMENT_VERTEX | ATTR_ELEMENT_VERTEX_MOTION)) {
      const uint4 tri_vindex = kernel_tex_fetch(__tri_vindex, sd->prim);
      f0 = kernel_tex_fetch(__attributes_float3, desc.offset + tri_vindex.x);
      f1 = kernel_tex_fetch(__attributes_float3, desc.offset + tri_vindex.y);
      f2 = kernel_tex_fetch(__attributes_float3, desc.offset + tri_vindex.z);
    }
    else {
      const int tri = desc.offset + sd->prim * 3;
      if (desc.element == ATTR_ELEMENT_CORNER) {
        f0 = kernel_tex_fetch(__attributes_float3, tri + 0);
        f1 = kernel_tex_fetch(__attributes_float3, tri + 1);
        f2 = kernel_tex_fetch(__attributes_float3, tri + 2);
      }
      else {
        f0 = color_srgb_to_linear_v4(
            color_uchar4_to_float4(kernel_tex_fetch(__attributes_uchar4, tri + 0)));
        f1 = color_srgb_to_linear_v4(
            color_uchar4_to_float4(kernel_tex_fetch(__attributes_uchar4, tri + 1)));
        f2 = color_srgb_to_linear_v4(
            color_uchar4_to_float4(kernel_tex_fetch(__attributes_uchar4, tri + 2)));
      }
    }

#ifdef __RAY_DIFFERENTIALS__
    if (dx)
      *dx = sd->du.dx * f0 + sd->dv.dx * f1 - (sd->du.dx + sd->dv.dx) * f2;
    if (dy)
      *dy = sd->du.dy * f0 + sd->dv.dy * f1 - (sd->du.dy + sd->dv.dy) * f2;
#endif

    return sd->u * f0 + sd->v * f1 + (1.0f - sd->u - sd->v) * f2;
  }
  else {
#ifdef __RAY_DIFFERENTIALS__
    if (dx)
      *dx = make_float4(0.0f, 0.0f, 0.0f, 0.0f);
    if (dy)
      *dy = make_float4(0.0f, 0.0f, 0.0f, 0.0f);
#endif

    if (desc.element & (ATTR_ELEMENT_FACE | ATTR_ELEMENT_OBJECT | ATTR_ELEMENT_MESH)) {
      const int offset = (desc.element == ATTR_ELEMENT_FACE) ? desc.offset + sd->prim :
                                                               desc.offset;
      return kernel_tex_fetch(__attributes_float3, offset);
    }
    else {
      return make_float4(0.0f, 0.0f, 0.0f, 0.0f);
    }
  }
}
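
/* Note: byte corner attributes (ATTR_ELEMENT_CORNER_BYTE) store colors as
 * uchar4 in sRGB; triangle_attribute_float4() decodes them to linear float4
 * before interpolation, as seen in the color_srgb_to_linear_v4() calls. */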
CCL_NAMESPACE_END