blender/intern/cycles/kernel/geom/triangle.h

/* SPDX-License-Identifier: Apache-2.0
 * Copyright 2011-2022 Blender Foundation */

/* Triangle Primitive
 *
 * A basic triangle with 3 vertices is used to represent mesh surfaces. For BVH
 * ray intersection we use precomputed triangle storage to accelerate
 * intersection at the cost of more memory usage. */

#pragma once

CCL_NAMESPACE_BEGIN
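
/* Storage layout used throughout this file: __tri_verts stores the three vertices of
 * each triangle consecutively, in BVH primitive order, and __tri_vindex.w holds the
 * triangle's global offset into that array, so vertices are fetched at
 * tri_vindex.w + 0/1/2. The x/y/z components of __tri_vindex are per-vertex indices
 * used for vertex normals (__tri_vnormal) and per-vertex attributes. */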

/* Normal on triangle. */
ccl_device_inline float3 triangle_normal(KernelGlobals kg, ccl_private ShaderData *sd)
{
  /* load triangle vertices */
  const uint4 tri_vindex = kernel_tex_fetch(__tri_vindex, sd->prim);
  const float3 v0 = kernel_tex_fetch(__tri_verts, tri_vindex.w + 0);
  const float3 v1 = kernel_tex_fetch(__tri_verts, tri_vindex.w + 1);
  const float3 v2 = kernel_tex_fetch(__tri_verts, tri_vindex.w + 2);

  /* return normal */
  if (sd->object_flag & SD_OBJECT_NEGATIVE_SCALE_APPLIED) {
    return normalize(cross(v2 - v0, v1 - v0));
  }
  else {
    return normalize(cross(v1 - v0, v2 - v0));
  }
}
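
/* Illustrative sketch, not used by the kernel: swapping the cross product operands,
 * as the negative-scale branch above does, is equivalent to negating the geometric
 * normal of a counter-clockwise wound triangle. The helper name is hypothetical and
 * exists only to spell out that equivalence. */
ccl_device_inline float3 triangle_face_normal_sketch(const float3 v0,
                                                     const float3 v1,
                                                     const float3 v2,
                                                     const bool flip_winding)
{
  /* Geometric normal for counter-clockwise winding. */
  const float3 N = cross(v1 - v0, v2 - v0);
  /* Flipping the winding (e.g. after a negative object scale) flips the normal. */
  return normalize(flip_winding ? -N : N);
}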

/* Point and normal on triangle. */
ccl_device_inline void triangle_point_normal(KernelGlobals kg,
                                             int object,
                                             int prim,
                                             float u,
                                             float v,
                                             ccl_private float3 *P,
                                             ccl_private float3 *Ng,
                                             ccl_private int *shader)
{
  /* load triangle vertices */
  const uint4 tri_vindex = kernel_tex_fetch(__tri_vindex, prim);
  float3 v0 = kernel_tex_fetch(__tri_verts, tri_vindex.w + 0);
  float3 v1 = kernel_tex_fetch(__tri_verts, tri_vindex.w + 1);
  float3 v2 = kernel_tex_fetch(__tri_verts, tri_vindex.w + 2);

  /* compute point */
  float t = 1.0f - u - v;
  *P = (u * v0 + v * v1 + t * v2);

  /* get object flags */
  int object_flag = kernel_tex_fetch(__object_flag, object);

  /* compute normal */
  if (object_flag & SD_OBJECT_NEGATIVE_SCALE_APPLIED) {
    *Ng = normalize(cross(v2 - v0, v1 - v0));
  }
  else {
    *Ng = normalize(cross(v1 - v0, v2 - v0));
  }

  /* shader */
  *shader = kernel_tex_fetch(__tri_shader, prim);
}
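
/* Illustrative sketch, not part of the kernel API: the (u, v) pair used above (and
 * throughout this file) is a barycentric parameterization in which u weights the
 * first vertex, v the second, and the remainder 1 - u - v the third. Any per-vertex
 * quantity interpolates the same way; this hypothetical helper just restates the
 * formula for a generic float3 quantity. */
ccl_device_inline float3 triangle_interpolate_sketch(
    const float3 a0, const float3 a1, const float3 a2, const float u, const float v)
{
  return u * a0 + v * a1 + (1.0f - u - v) * a2;
}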

/* Triangle vertex locations */
ccl_device_inline void triangle_vertices(KernelGlobals kg, int prim, float3 P[3])
{
  const uint4 tri_vindex = kernel_tex_fetch(__tri_vindex, prim);
  P[0] = kernel_tex_fetch(__tri_verts, tri_vindex.w + 0);
  P[1] = kernel_tex_fetch(__tri_verts, tri_vindex.w + 1);
  P[2] = kernel_tex_fetch(__tri_verts, tri_vindex.w + 2);
}

/* Triangle vertex locations and vertex normals */
ccl_device_inline void triangle_vertices_and_normals(KernelGlobals kg,
                                                     int prim,
                                                     float3 P[3],
                                                     float3 N[3])
{
  const uint4 tri_vindex = kernel_tex_fetch(__tri_vindex, prim);
  P[0] = kernel_tex_fetch(__tri_verts, tri_vindex.w + 0);
  P[1] = kernel_tex_fetch(__tri_verts, tri_vindex.w + 1);
  P[2] = kernel_tex_fetch(__tri_verts, tri_vindex.w + 2);
  N[0] = kernel_tex_fetch(__tri_vnormal, tri_vindex.x);
  N[1] = kernel_tex_fetch(__tri_vnormal, tri_vindex.y);
  N[2] = kernel_tex_fetch(__tri_vnormal, tri_vindex.z);
}

/* Interpolate smooth vertex normal from vertices */
ccl_device_inline float3
triangle_smooth_normal(KernelGlobals kg, float3 Ng, int prim, float u, float v)
{
  /* load triangle vertices */
  const uint4 tri_vindex = kernel_tex_fetch(__tri_vindex, prim);
  float3 n0 = kernel_tex_fetch(__tri_vnormal, tri_vindex.x);
  float3 n1 = kernel_tex_fetch(__tri_vnormal, tri_vindex.y);
  float3 n2 = kernel_tex_fetch(__tri_vnormal, tri_vindex.z);

  float3 N = safe_normalize((1.0f - u - v) * n2 + u * n0 + v * n1);

  return is_zero(N) ? Ng : N;
}
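
/* Note: safe_normalize() together with the is_zero() check above guards against the
 * degenerate case where the interpolated vertex normals cancel out (or are unset);
 * the geometric normal Ng is returned as a fallback in that case. */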

ccl_device_inline float3 triangle_smooth_normal_unnormalized(
    KernelGlobals kg, ccl_private const ShaderData *sd, float3 Ng, int prim, float u, float v)
{
  /* load triangle vertices */
  const uint4 tri_vindex = kernel_tex_fetch(__tri_vindex, prim);
  float3 n0 = kernel_tex_fetch(__tri_vnormal, tri_vindex.x);
  float3 n1 = kernel_tex_fetch(__tri_vnormal, tri_vindex.y);
  float3 n2 = kernel_tex_fetch(__tri_vnormal, tri_vindex.z);

  /* ensure that the normals are in object space */
  if (sd->object_flag & SD_OBJECT_TRANSFORM_APPLIED) {
    object_inverse_normal_transform(kg, sd, &n0);
    object_inverse_normal_transform(kg, sd, &n1);
    object_inverse_normal_transform(kg, sd, &n2);
  }

  float3 N = (1.0f - u - v) * n2 + u * n0 + v * n1;

  return is_zero(N) ? Ng : N;
}

/* Ray differentials on triangle */
ccl_device_inline void triangle_dPdudv(KernelGlobals kg,
                                       int prim,
                                       ccl_private float3 *dPdu,
                                       ccl_private float3 *dPdv)
{
  /* fetch triangle vertex coordinates */
  const uint4 tri_vindex = kernel_tex_fetch(__tri_vindex, prim);
  const float3 p0 = kernel_tex_fetch(__tri_verts, tri_vindex.w + 0);
  const float3 p1 = kernel_tex_fetch(__tri_verts, tri_vindex.w + 1);
  const float3 p2 = kernel_tex_fetch(__tri_verts, tri_vindex.w + 2);

  /* compute derivatives of P w.r.t. uv */
  *dPdu = (p0 - p2);
  *dPdv = (p1 - p2);
}
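
/* The derivatives follow directly from the barycentric parameterization: with
 * P(u, v) = u * p0 + v * p1 + (1 - u - v) * p2, differentiating gives
 * dP/du = p0 - p2 and dP/dv = p1 - p2, so a point on the triangle can be
 * reconstructed as P(u, v) = p2 + u * dPdu + v * dPdv. */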

/* Reading attributes on various triangle elements */
ccl_device float triangle_attribute_float(KernelGlobals kg,
                                          ccl_private const ShaderData *sd,
                                          const AttributeDescriptor desc,
                                          ccl_private float *dx,
                                          ccl_private float *dy)
{
  if (desc.element & (ATTR_ELEMENT_VERTEX | ATTR_ELEMENT_VERTEX_MOTION | ATTR_ELEMENT_CORNER)) {
    float f0, f1, f2;
    if (desc.element & (ATTR_ELEMENT_VERTEX | ATTR_ELEMENT_VERTEX_MOTION)) {
      const uint4 tri_vindex = kernel_tex_fetch(__tri_vindex, sd->prim);
      f0 = kernel_tex_fetch(__attributes_float, desc.offset + tri_vindex.x);
      f1 = kernel_tex_fetch(__attributes_float, desc.offset + tri_vindex.y);
      f2 = kernel_tex_fetch(__attributes_float, desc.offset + tri_vindex.z);
    }
    else {
      const int tri = desc.offset + sd->prim * 3;
      f0 = kernel_tex_fetch(__attributes_float, tri + 0);
      f1 = kernel_tex_fetch(__attributes_float, tri + 1);
      f2 = kernel_tex_fetch(__attributes_float, tri + 2);
    }
#ifdef __RAY_DIFFERENTIALS__
    if (dx)
      *dx = sd->du.dx * f0 + sd->dv.dx * f1 - (sd->du.dx + sd->dv.dx) * f2;
    if (dy)
      *dy = sd->du.dy * f0 + sd->dv.dy * f1 - (sd->du.dy + sd->dv.dy) * f2;
#endif
    return sd->u * f0 + sd->v * f1 + (1.0f - sd->u - sd->v) * f2;
  }
  else {
#ifdef __RAY_DIFFERENTIALS__
    if (dx)
      *dx = 0.0f;
    if (dy)
      *dy = 0.0f;
#endif
    if (desc.element & (ATTR_ELEMENT_FACE | ATTR_ELEMENT_OBJECT | ATTR_ELEMENT_MESH)) {
      const int offset = (desc.element == ATTR_ELEMENT_FACE) ? desc.offset + sd->prim :
                                                               desc.offset;
      return kernel_tex_fetch(__attributes_float, offset);
    }
    else {
      return 0.0f;
    }
  }
}
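
/* Illustrative sketch, not part of the kernel: the ray-differential expressions above
 * are the chain rule applied to the barycentric interpolation
 * f(u, v) = u * f0 + v * f1 + (1 - u - v) * f2, i.e.
 * df/dx = du/dx * (f0 - f2) + dv/dx * (f1 - f2), which expands to the form used in
 * triangle_attribute_float() and its float2/float3/float4 variants below. The helper
 * name is hypothetical. */
ccl_device_inline float triangle_attribute_dfdx_sketch(
    const float f0, const float f1, const float f2, const float dudx, const float dvdx)
{
  return dudx * (f0 - f2) + dvdx * (f1 - f2);
}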

ccl_device float2 triangle_attribute_float2(KernelGlobals kg,
                                            ccl_private const ShaderData *sd,
                                            const AttributeDescriptor desc,
                                            ccl_private float2 *dx,
                                            ccl_private float2 *dy)
{
  if (desc.element & (ATTR_ELEMENT_VERTEX | ATTR_ELEMENT_VERTEX_MOTION | ATTR_ELEMENT_CORNER)) {
    float2 f0, f1, f2;
    if (desc.element & (ATTR_ELEMENT_VERTEX | ATTR_ELEMENT_VERTEX_MOTION)) {
      const uint4 tri_vindex = kernel_tex_fetch(__tri_vindex, sd->prim);
      f0 = kernel_tex_fetch(__attributes_float2, desc.offset + tri_vindex.x);
      f1 = kernel_tex_fetch(__attributes_float2, desc.offset + tri_vindex.y);
      f2 = kernel_tex_fetch(__attributes_float2, desc.offset + tri_vindex.z);
    }
    else {
      const int tri = desc.offset + sd->prim * 3;
      f0 = kernel_tex_fetch(__attributes_float2, tri + 0);
      f1 = kernel_tex_fetch(__attributes_float2, tri + 1);
      f2 = kernel_tex_fetch(__attributes_float2, tri + 2);
    }
#ifdef __RAY_DIFFERENTIALS__
    if (dx)
      *dx = sd->du.dx * f0 + sd->dv.dx * f1 - (sd->du.dx + sd->dv.dx) * f2;
    if (dy)
      *dy = sd->du.dy * f0 + sd->dv.dy * f1 - (sd->du.dy + sd->dv.dy) * f2;
#endif
    return sd->u * f0 + sd->v * f1 + (1.0f - sd->u - sd->v) * f2;
  }
  else {
#ifdef __RAY_DIFFERENTIALS__
    if (dx)
      *dx = make_float2(0.0f, 0.0f);
    if (dy)
      *dy = make_float2(0.0f, 0.0f);
#endif
    if (desc.element & (ATTR_ELEMENT_FACE | ATTR_ELEMENT_OBJECT | ATTR_ELEMENT_MESH)) {
      const int offset = (desc.element == ATTR_ELEMENT_FACE) ? desc.offset + sd->prim :
                                                               desc.offset;
      return kernel_tex_fetch(__attributes_float2, offset);
    }
    else {
      return make_float2(0.0f, 0.0f);
    }
  }
}

ccl_device float3 triangle_attribute_float3(KernelGlobals kg,
                                            ccl_private const ShaderData *sd,
                                            const AttributeDescriptor desc,
                                            ccl_private float3 *dx,
                                            ccl_private float3 *dy)
{
  if (desc.element & (ATTR_ELEMENT_VERTEX | ATTR_ELEMENT_VERTEX_MOTION | ATTR_ELEMENT_CORNER)) {
    float3 f0, f1, f2;
    if (desc.element & (ATTR_ELEMENT_VERTEX | ATTR_ELEMENT_VERTEX_MOTION)) {
      const uint4 tri_vindex = kernel_tex_fetch(__tri_vindex, sd->prim);
      f0 = kernel_tex_fetch(__attributes_float3, desc.offset + tri_vindex.x);
      f1 = kernel_tex_fetch(__attributes_float3, desc.offset + tri_vindex.y);
      f2 = kernel_tex_fetch(__attributes_float3, desc.offset + tri_vindex.z);
    }
    else {
      const int tri = desc.offset + sd->prim * 3;
      f0 = kernel_tex_fetch(__attributes_float3, tri + 0);
      f1 = kernel_tex_fetch(__attributes_float3, tri + 1);
      f2 = kernel_tex_fetch(__attributes_float3, tri + 2);
    }
#ifdef __RAY_DIFFERENTIALS__
    if (dx)
      *dx = sd->du.dx * f0 + sd->dv.dx * f1 - (sd->du.dx + sd->dv.dx) * f2;
    if (dy)
      *dy = sd->du.dy * f0 + sd->dv.dy * f1 - (sd->du.dy + sd->dv.dy) * f2;
#endif
    return sd->u * f0 + sd->v * f1 + (1.0f - sd->u - sd->v) * f2;
  }
  else {
#ifdef __RAY_DIFFERENTIALS__
    if (dx)
      *dx = make_float3(0.0f, 0.0f, 0.0f);
    if (dy)
      *dy = make_float3(0.0f, 0.0f, 0.0f);
#endif
    if (desc.element & (ATTR_ELEMENT_FACE | ATTR_ELEMENT_OBJECT | ATTR_ELEMENT_MESH)) {
      const int offset = (desc.element == ATTR_ELEMENT_FACE) ? desc.offset + sd->prim :
                                                               desc.offset;
      return kernel_tex_fetch(__attributes_float3, offset);
    }
    else {
      return make_float3(0.0f, 0.0f, 0.0f);
    }
  }
}

ccl_device float4 triangle_attribute_float4(KernelGlobals kg,
                                            ccl_private const ShaderData *sd,
                                            const AttributeDescriptor desc,
                                            ccl_private float4 *dx,
                                            ccl_private float4 *dy)
{
  if (desc.element & (ATTR_ELEMENT_VERTEX | ATTR_ELEMENT_VERTEX_MOTION | ATTR_ELEMENT_CORNER |
                      ATTR_ELEMENT_CORNER_BYTE)) {
    float4 f0, f1, f2;
    if (desc.element & (ATTR_ELEMENT_VERTEX | ATTR_ELEMENT_VERTEX_MOTION)) {
      const uint4 tri_vindex = kernel_tex_fetch(__tri_vindex, sd->prim);
      f0 = kernel_tex_fetch(__attributes_float4, desc.offset + tri_vindex.x);
      f1 = kernel_tex_fetch(__attributes_float4, desc.offset + tri_vindex.y);
      f2 = kernel_tex_fetch(__attributes_float4, desc.offset + tri_vindex.z);
    }
    else {
      const int tri = desc.offset + sd->prim * 3;
      if (desc.element == ATTR_ELEMENT_CORNER) {
        f0 = kernel_tex_fetch(__attributes_float4, tri + 0);
        f1 = kernel_tex_fetch(__attributes_float4, tri + 1);
        f2 = kernel_tex_fetch(__attributes_float4, tri + 2);
      }
      else {
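        /* Byte corner attributes (ATTR_ELEMENT_CORNER_BYTE) store colors as sRGB
         * uchar4; convert to linear float4 when reading. */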
        f0 = color_srgb_to_linear_v4(
            color_uchar4_to_float4(kernel_tex_fetch(__attributes_uchar4, tri + 0)));
        f1 = color_srgb_to_linear_v4(
            color_uchar4_to_float4(kernel_tex_fetch(__attributes_uchar4, tri + 1)));
        f2 = color_srgb_to_linear_v4(
            color_uchar4_to_float4(kernel_tex_fetch(__attributes_uchar4, tri + 2)));
      }
    }
#ifdef __RAY_DIFFERENTIALS__
    if (dx)
      *dx = sd->du.dx * f0 + sd->dv.dx * f1 - (sd->du.dx + sd->dv.dx) * f2;
    if (dy)
      *dy = sd->du.dy * f0 + sd->dv.dy * f1 - (sd->du.dy + sd->dv.dy) * f2;
#endif
    return sd->u * f0 + sd->v * f1 + (1.0f - sd->u - sd->v) * f2;
  }
  else {
#ifdef __RAY_DIFFERENTIALS__
    if (dx)
      *dx = make_float4(0.0f, 0.0f, 0.0f, 0.0f);
    if (dy)
      *dy = make_float4(0.0f, 0.0f, 0.0f, 0.0f);
#endif
    if (desc.element & (ATTR_ELEMENT_FACE | ATTR_ELEMENT_OBJECT | ATTR_ELEMENT_MESH)) {
      const int offset = (desc.element == ATTR_ELEMENT_FACE) ? desc.offset + sd->prim :
                                                               desc.offset;
      return kernel_tex_fetch(__attributes_float4, offset);
    }
    else {
      return make_float4(0.0f, 0.0f, 0.0f, 0.0f);
    }
  }
}

CCL_NAMESPACE_END