From acc90b40bff5a15604c4d98692ff3ba32fe44603 Mon Sep 17 00:00:00 2001
From: "Sv. Lockal"
Date: Mon, 6 Jan 2014 22:05:31 +0400
Subject: [PATCH] Cycles: Minor optimization (~1%) for texture access on CPU

---
 intern/cycles/kernel/svm/svm_image.h | 12 +++++++++++-
 1 file changed, 11 insertions(+), 1 deletion(-)

diff --git a/intern/cycles/kernel/svm/svm_image.h b/intern/cycles/kernel/svm/svm_image.h
index 45a2a8b802c..06228cdbea9 100644
--- a/intern/cycles/kernel/svm/svm_image.h
+++ b/intern/cycles/kernel/svm/svm_image.h
@@ -116,6 +116,9 @@ ccl_device float4 svm_image_texture(KernelGlobals *kg, int id, float x, float y,
 #ifdef __KERNEL_CPU__
 	r = kernel_tex_image_interp(id, x, y);
+#ifdef __KERNEL_SSE2__
+	__m128 *rv = (__m128 *)&r;
+#endif
 #else
 	/* not particularly proud of this massive switch, what are the
 	 * alternatives?
@@ -234,6 +237,13 @@ ccl_device float4 svm_image_texture(KernelGlobals *kg, int id, float x, float y,
 #endif
 
 	if(use_alpha && r.w != 1.0f && r.w != 0.0f) {
+#ifdef __KERNEL_SSE2__
+		float alpha = r.w;
+		*rv = _mm_div_ps(*rv, _mm_set1_ps(alpha));
+		if(id >= TEX_NUM_FLOAT_IMAGES)
+			*rv = _mm_min_ps(*rv, _mm_set1_ps(1.0f));
+		r.w = alpha;
+#else
 		float invw = 1.0f/r.w;
 		r.x *= invw;
 		r.y *= invw;
@@ -244,12 +254,12 @@ ccl_device float4 svm_image_texture(KernelGlobals *kg, int id, float x, float y,
 			r.y = min(r.y, 1.0f);
 			r.z = min(r.z, 1.0f);
 		}
+#endif
 	}
 
 	if(srgb) {
 #ifdef __KERNEL_SSE2__
 		float alpha = r.w;
-		__m128 *rv = (__m128 *)&r;
 		*rv = color_srgb_to_scene_linear(*rv);
 		r.w = alpha;
 #else
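
For illustration only, not part of the patch: a minimal, self-contained sketch of the un-premultiply step the patch vectorizes, dividing R, G and B by alpha with one SSE2 division and optionally clamping to 1.0 for byte images. The Pixel type, function name and clamp flag are hypothetical stand-ins; the patch itself operates on Cycles' float4 in place through a reinterpreted __m128 pointer rather than explicit loads and stores.

	#include <emmintrin.h>
	#include <stdio.h>

	typedef struct { float x, y, z, w; } Pixel; /* stand-in for Cycles' float4 */

	static void unpremultiply_sse2(Pixel *r, int clamp_to_one)
	{
		if(r->w == 1.0f || r->w == 0.0f)
			return; /* same early-out as the kernel: nothing to divide */

		float alpha = r->w;
		__m128 rv = _mm_loadu_ps(&r->x);            /* load x, y, z, w */

		rv = _mm_div_ps(rv, _mm_set1_ps(alpha));    /* all four channels / alpha at once */
		if(clamp_to_one)
			rv = _mm_min_ps(rv, _mm_set1_ps(1.0f)); /* clamp, as for non-float images */

		_mm_storeu_ps(&r->x, rv);
		r->w = alpha;                               /* alpha was divided too; restore it */
	}

	int main(void)
	{
		Pixel p = {0.2f, 0.4f, 0.1f, 0.5f};
		unpremultiply_sse2(&p, 1);
		printf("%g %g %g %g\n", p.x, p.y, p.z, p.w); /* expected: 0.4 0.8 0.2 0.5 */
		return 0;
	}

The single _mm_div_ps replaces the scalar reciprocal-and-three-multiplies fallback; restoring alpha afterwards is what lets the whole 4-float vector be divided without a shuffle, which is presumably where the ~1% win comes from.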