Cycles: test code for sse 4.1 kernel and alignment for some vector types.

This is mostly work towards enabling the __KERNEL_SSE__ option to start using
SIMD operations for vector math operations. This 4.1 kernel performs about 8%
faster with that option but overall is still slower than without the option.

WITH_CYCLES_OPTIMIZED_KERNEL_SSE41 is the cmake flag for testing this kernel.

Alignment of int3, int4, float3, float4 to 16 bytes seems to give a slight 1-2%
speedup on tested systems with the current kernel already, so is enabled now.
This commit is contained in:
Martijn Berger
2013-11-22 14:16:47 +01:00
committed by Brecht Van Lommel
parent 5feb0d2bfe
commit e3a79258d1
11 changed files with 187 additions and 6 deletions

View File

@@ -17,9 +17,11 @@ if(WIN32 AND MSVC)
if(CMAKE_CL_64) if(CMAKE_CL_64)
set(CYCLES_SSE2_KERNEL_FLAGS "/fp:fast -D_CRT_SECURE_NO_WARNINGS /Gs-") set(CYCLES_SSE2_KERNEL_FLAGS "/fp:fast -D_CRT_SECURE_NO_WARNINGS /Gs-")
set(CYCLES_SSE3_KERNEL_FLAGS "/fp:fast -D_CRT_SECURE_NO_WARNINGS /Gs-") set(CYCLES_SSE3_KERNEL_FLAGS "/fp:fast -D_CRT_SECURE_NO_WARNINGS /Gs-")
set(CYCLES_SSE41_KERNEL_FLAGS "/fp:fast -D_CRT_SECURE_NO_WARNINGS /Gs-")
else() else()
set(CYCLES_SSE2_KERNEL_FLAGS "/arch:SSE2 /fp:fast -D_CRT_SECURE_NO_WARNINGS /Gs-") set(CYCLES_SSE2_KERNEL_FLAGS "/arch:SSE2 /fp:fast -D_CRT_SECURE_NO_WARNINGS /Gs-")
set(CYCLES_SSE3_KERNEL_FLAGS "/arch:SSE2 /fp:fast -D_CRT_SECURE_NO_WARNINGS /Gs-") set(CYCLES_SSE3_KERNEL_FLAGS "/arch:SSE2 /fp:fast -D_CRT_SECURE_NO_WARNINGS /Gs-")
set(CYCLES_SSE41_KERNEL_FLAGS "/arch:SSE2 /fp:fast -D_CRT_SECURE_NO_WARNINGS /Gs-")
endif() endif()
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /fp:fast -D_CRT_SECURE_NO_WARNINGS /Gs-") set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /fp:fast -D_CRT_SECURE_NO_WARNINGS /Gs-")
@@ -29,10 +31,12 @@ if(WIN32 AND MSVC)
elseif(CMAKE_COMPILER_IS_GNUCC) elseif(CMAKE_COMPILER_IS_GNUCC)
set(CYCLES_SSE2_KERNEL_FLAGS "-ffast-math -msse -msse2 -mfpmath=sse") set(CYCLES_SSE2_KERNEL_FLAGS "-ffast-math -msse -msse2 -mfpmath=sse")
set(CYCLES_SSE3_KERNEL_FLAGS "-ffast-math -msse -msse2 -msse3 -mssse3 -mfpmath=sse") set(CYCLES_SSE3_KERNEL_FLAGS "-ffast-math -msse -msse2 -msse3 -mssse3 -mfpmath=sse")
set(CYCLES_SSE41_KERNEL_FLAGS "-ffast-math -msse -msse2 -msse3 -mssse3 -msse4.1 -mfpmath=sse")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -ffast-math") set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -ffast-math")
elseif(CMAKE_CXX_COMPILER_ID MATCHES "Clang") elseif(CMAKE_CXX_COMPILER_ID MATCHES "Clang")
set(CYCLES_SSE2_KERNEL_FLAGS "-ffast-math -msse -msse2") set(CYCLES_SSE2_KERNEL_FLAGS "-ffast-math -msse -msse2")
set(CYCLES_SSE3_KERNEL_FLAGS "-ffast-math -msse -msse2 -msse3 -mssse3") set(CYCLES_SSE3_KERNEL_FLAGS "-ffast-math -msse -msse2 -msse3 -mssse3")
set(CYCLES_SSE41_KERNEL_FLAGS "-ffast-math -msse -msse2 -msse3 -mssse3 -msse4.1")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -ffast-math") set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -ffast-math")
endif() endif()

View File

@@ -37,6 +37,7 @@ sources = cycles.Glob('bvh/*.cpp') + cycles.Glob('device/*.cpp') + cycles.Glob('
sources.remove(path.join('util', 'util_view.cpp')) sources.remove(path.join('util', 'util_view.cpp'))
sources.remove(path.join('kernel', 'kernel_sse2.cpp')) sources.remove(path.join('kernel', 'kernel_sse2.cpp'))
sources.remove(path.join('kernel', 'kernel_sse3.cpp')) sources.remove(path.join('kernel', 'kernel_sse3.cpp'))
sources.remove(path.join('kernel', 'kernel_sse41.cpp'))
incs = [] incs = []
defs = [] defs = []
@@ -77,21 +78,30 @@ if env['OURPLATFORM'] in ('win32-vc', 'win32-mingw', 'linuxcross', 'win64-vc', '
if env['WITH_BF_RAYOPTIMIZATION']: if env['WITH_BF_RAYOPTIMIZATION']:
sse2_cxxflags = Split(env['CXXFLAGS']) sse2_cxxflags = Split(env['CXXFLAGS'])
sse3_cxxflags = Split(env['CXXFLAGS']) sse3_cxxflags = Split(env['CXXFLAGS'])
sse41_cxxflags = Split(env['CXXFLAGS'])
if env['OURPLATFORM'] == 'win32-vc': if env['OURPLATFORM'] == 'win32-vc':
# there is no /arch:SSE3, but intrinsics are available anyway # there is no /arch:SSE3, but intrinsics are available anyway
sse2_cxxflags.append('/arch:SSE /arch:SSE2 -D_CRT_SECURE_NO_WARNINGS /fp:fast /Ox /Gs-'.split()) sse2_cxxflags.append('/arch:SSE /arch:SSE2 -D_CRT_SECURE_NO_WARNINGS /fp:fast /Ox /Gs-'.split())
sse3_cxxflags.append('/arch:SSE /arch:SSE2 -D_CRT_SECURE_NO_WARNINGS /fp:fast /Ox /Gs-'.split()) sse3_cxxflags.append('/arch:SSE /arch:SSE2 -D_CRT_SECURE_NO_WARNINGS /fp:fast /Ox /Gs-'.split())
sse41_cxxflags.append('/arch:SSE /arch:SSE2 -D_CRT_SECURE_NO_WARNINGS /fp:fast /Ox /Gs-'.split())
elif env['OURPLATFORM'] == 'win64-vc': elif env['OURPLATFORM'] == 'win64-vc':
sse2_cxxflags.append('-D_CRT_SECURE_NO_WARNINGS /fp:fast /Ox /Gs-'.split()) sse2_cxxflags.append('-D_CRT_SECURE_NO_WARNINGS /fp:fast /Ox /Gs-'.split())
sse3_cxxflags.append('-D_CRT_SECURE_NO_WARNINGS /fp:fast /Ox /Gs-'.split()) sse3_cxxflags.append('-D_CRT_SECURE_NO_WARNINGS /fp:fast /Ox /Gs-'.split())
sse41_cxxflags.append('-D_CRT_SECURE_NO_WARNINGS /fp:fast /Ox /Gs-'.split())
else: else:
sse2_cxxflags.append('-ffast-math -msse -msse2 -mfpmath=sse'.split()) sse2_cxxflags.append('-ffast-math -msse -msse2 -mfpmath=sse'.split())
sse3_cxxflags.append('-ffast-math -msse -msse2 -msse3 -mssse3 -mfpmath=sse'.split()) sse3_cxxflags.append('-ffast-math -msse -msse2 -msse3 -mssse3 -mfpmath=sse'.split())
sse41_cxxflags.append('-ffast-math -msse -msse2 -msse3 -mssse3 -msse4.1 -mfpmath=sse'.split())
defs.append('WITH_OPTIMIZED_KERNEL') defs.append('WITH_OPTIMIZED_KERNEL')
optim_defs = defs[:] optim_defs = defs[:]
# Disabled sse4+ patches for now
#cycles_sse41 = cycles.Clone()
#sse41_sources = [path.join('kernel', 'kernel_sse41.cpp')]
#cycles_sse41.BlenderLib('bf_intern_cycles_sse41', sse41_sources, incs, optim_defs, libtype=['intern'], priority=[10], cxx_compileflags=sse41_cxxflags)
cycles_sse3 = cycles.Clone() cycles_sse3 = cycles.Clone()
sse3_sources = [path.join('kernel', 'kernel_sse3.cpp')] sse3_sources = [path.join('kernel', 'kernel_sse3.cpp')]
cycles_sse3.BlenderLib('bf_intern_cycles_sse3', sse3_sources, incs, optim_defs, libtype=['intern'], priority=[10], cxx_compileflags=sse3_cxxflags) cycles_sse3.BlenderLib('bf_intern_cycles_sse3', sse3_sources, incs, optim_defs, libtype=['intern'], priority=[10], cxx_compileflags=sse3_cxxflags)

View File

@@ -13,6 +13,10 @@ set(INC_SYS
${GLEW_INCLUDE_PATH} ${GLEW_INCLUDE_PATH}
) )
if(WITH_CYCLES_OPTIMIZED_KERNEL_SSE41)
add_definitions(-DWITH_CYCLES_OPTIMIZED_KERNEL_SSE41=1)
endif()
set(SRC set(SRC
device.cpp device.cpp
device_cpu.cpp device_cpu.cpp

View File

@@ -58,6 +58,7 @@ public:
/* do now to avoid thread issues */ /* do now to avoid thread issues */
system_cpu_support_sse2(); system_cpu_support_sse2();
system_cpu_support_sse3(); system_cpu_support_sse3();
system_cpu_support_sse41();
} }
~CPUDevice() ~CPUDevice()
@@ -164,6 +165,28 @@ public:
int end_sample = tile.start_sample + tile.num_samples; int end_sample = tile.start_sample + tile.num_samples;
#ifdef WITH_OPTIMIZED_KERNEL #ifdef WITH_OPTIMIZED_KERNEL
#ifdef WITH_CYCLES_OPTIMIZED_KERNEL_SSE41
if(system_cpu_support_sse41()) {
for(int sample = start_sample; sample < end_sample; sample++) {
if (task.get_cancel() || task_pool.canceled()) {
if(task.need_finish_queue == false)
break;
}
for(int y = tile.y; y < tile.y + tile.h; y++) {
for(int x = tile.x; x < tile.x + tile.w; x++) {
kernel_cpu_sse41_path_trace(&kg, render_buffer, rng_state,
sample, x, y, tile.offset, tile.stride);
}
}
tile.sample = sample + 1;
task.update_progress(tile);
}
}
else
#endif
if(system_cpu_support_sse3()) { if(system_cpu_support_sse3()) {
for(int sample = start_sample; sample < end_sample; sample++) { for(int sample = start_sample; sample < end_sample; sample++) {
if (task.get_cancel() || task_pool.canceled()) { if (task.get_cancel() || task_pool.canceled()) {
@@ -243,6 +266,15 @@ public:
if(task.rgba_half) { if(task.rgba_half) {
#ifdef WITH_OPTIMIZED_KERNEL #ifdef WITH_OPTIMIZED_KERNEL
#ifdef WITH_CYCLES_OPTIMIZED_KERNEL_SSE41
if(system_cpu_support_sse41()) {
for(int y = task.y; y < task.y + task.h; y++)
for(int x = task.x; x < task.x + task.w; x++)
kernel_cpu_sse41_convert_to_half_float(&kernel_globals, (uchar4*)task.rgba_half, (float*)task.buffer,
sample_scale, x, y, task.offset, task.stride);
}
else
#endif
if(system_cpu_support_sse3()) { if(system_cpu_support_sse3()) {
for(int y = task.y; y < task.y + task.h; y++) for(int y = task.y; y < task.y + task.h; y++)
for(int x = task.x; x < task.x + task.w; x++) for(int x = task.x; x < task.x + task.w; x++)
@@ -266,6 +298,14 @@ public:
} }
else { else {
#ifdef WITH_OPTIMIZED_KERNEL #ifdef WITH_OPTIMIZED_KERNEL
#ifdef WITH_CYCLES_OPTIMIZED_KERNEL_SSE41
if(system_cpu_support_sse41()) {
for(int y = task.y; y < task.y + task.h; y++)
for(int x = task.x; x < task.x + task.w; x++)
kernel_cpu_sse41_convert_to_byte(&kernel_globals, (uchar4*)task.rgba_byte, (float*)task.buffer,
sample_scale, x, y, task.offset, task.stride);
}
#endif
if(system_cpu_support_sse3()) { if(system_cpu_support_sse3()) {
for(int y = task.y; y < task.y + task.h; y++) for(int y = task.y; y < task.y + task.h; y++)
for(int x = task.x; x < task.x + task.w; x++) for(int x = task.x; x < task.x + task.w; x++)
@@ -298,6 +338,16 @@ public:
#endif #endif
#ifdef WITH_OPTIMIZED_KERNEL #ifdef WITH_OPTIMIZED_KERNEL
#ifdef WITH_CYCLES_OPTIMIZED_KERNEL_SSE41
if(system_cpu_support_sse41()) {
for(int x = task.shader_x; x < task.shader_x + task.shader_w; x++) {
kernel_cpu_sse41_shader(&kg, (uint4*)task.shader_input, (float4*)task.shader_output, task.shader_eval_type, x);
if(task_pool.canceled())
break;
}
}
#endif
if(system_cpu_support_sse3()) { if(system_cpu_support_sse3()) {
for(int x = task.shader_x; x < task.shader_x + task.shader_w; x++) { for(int x = task.shader_x; x < task.shader_x + task.shader_w; x++) {
kernel_cpu_sse3_shader(&kg, (uint4*)task.shader_input, (float4*)task.shader_output, task.shader_eval_type, x); kernel_cpu_sse3_shader(&kg, (uint4*)task.shader_input, (float4*)task.shader_output, task.shader_eval_type, x);

View File

@@ -190,13 +190,18 @@ endif()
include_directories(${INC}) include_directories(${INC})
include_directories(SYSTEM ${INC_SYS}) include_directories(SYSTEM ${INC_SYS})
add_library(cycles_kernel ${SRC} ${SRC_HEADERS} ${SRC_CLOSURE_HEADERS} ${SRC_SVM_HEADERS})
if(WITH_CYCLES_OPTIMIZED_KERNEL) if(WITH_CYCLES_OPTIMIZED_KERNEL)
set_source_files_properties(kernel_sse2.cpp PROPERTIES COMPILE_FLAGS "${CYCLES_SSE2_KERNEL_FLAGS}") set_source_files_properties(kernel_sse2.cpp PROPERTIES COMPILE_FLAGS "${CYCLES_SSE2_KERNEL_FLAGS}")
set_source_files_properties(kernel_sse3.cpp PROPERTIES COMPILE_FLAGS "${CYCLES_SSE3_KERNEL_FLAGS}") set_source_files_properties(kernel_sse3.cpp PROPERTIES COMPILE_FLAGS "${CYCLES_SSE3_KERNEL_FLAGS}")
endif() endif()
if(WITH_CYCLES_OPTIMIZED_KERNEL_SSE41)
set_source_files_properties(kernel_sse41.cpp PROPERTIES COMPILE_FLAGS "${CYCLES_SSE41_KERNEL_FLAGS}")
list(APPEND SRC kernel_sse41.cpp)
endif()
add_library(cycles_kernel ${SRC} ${SRC_HEADERS} ${SRC_CLOSURE_HEADERS} ${SRC_SVM_HEADERS})
if(WITH_CYCLES_CUDA) if(WITH_CYCLES_CUDA)
add_dependencies(cycles_kernel cycles_kernel_cuda) add_dependencies(cycles_kernel cycles_kernel_cuda)
endif() endif()

View File

@@ -61,6 +61,15 @@ void kernel_cpu_sse3_convert_to_half_float(KernelGlobals *kg, uchar4 *rgba, floa
float sample_scale, int x, int y, int offset, int stride); float sample_scale, int x, int y, int offset, int stride);
void kernel_cpu_sse3_shader(KernelGlobals *kg, uint4 *input, float4 *output, void kernel_cpu_sse3_shader(KernelGlobals *kg, uint4 *input, float4 *output,
int type, int i); int type, int i);
void kernel_cpu_sse41_path_trace(KernelGlobals *kg, float *buffer, unsigned int *rng_state,
int sample, int x, int y, int offset, int stride);
void kernel_cpu_sse41_convert_to_byte(KernelGlobals *kg, uchar4 *rgba, float *buffer,
float sample_scale, int x, int y, int offset, int stride);
void kernel_cpu_sse41_convert_to_half_float(KernelGlobals *kg, uchar4 *rgba, float *buffer,
float sample_scale, int x, int y, int offset, int stride);
void kernel_cpu_sse41_shader(KernelGlobals *kg, uint4 *input, float4 *output,
int type, int i);
#endif #endif
CCL_NAMESPACE_END CCL_NAMESPACE_END

View File

@@ -0,0 +1,76 @@
/*
* Copyright 2011-2013 Blender Foundation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
 * limitations under the License.
*/
/* Optimized CPU kernel entry points. This file is compiled with SSE 4.1
 * optimization flags and nearly all functions inlined, while kernel.cpp
 * is compiled without for other CPUs. */
#ifdef WITH_OPTIMIZED_KERNEL
/* SSE optimization disabled for now on 32 bit, see bug #36316 */
#if !(defined(__GNUC__) && (defined(i386) || defined(_M_IX86)))
#define __KERNEL_SSE2__
#define __KERNEL_SSE3__
#define __KERNEL_SSSE3__
#define __KERNEL_SSE41__
#endif
#include "kernel.h"
#include "kernel_compat_cpu.h"
#include "kernel_math.h"
#include "kernel_types.h"
#include "kernel_globals.h"
#include "kernel_film.h"
#include "kernel_path.h"
#include "kernel_displace.h"
CCL_NAMESPACE_BEGIN
/* Path Tracing */
void kernel_cpu_sse41_path_trace(KernelGlobals *kg, float *buffer, unsigned int *rng_state, int sample, int x, int y, int offset, int stride)
{
#ifdef __BRANCHED_PATH__
if(kernel_data.integrator.branched)
kernel_branched_path_trace(kg, buffer, rng_state, sample, x, y, offset, stride);
else
#endif
kernel_path_trace(kg, buffer, rng_state, sample, x, y, offset, stride);
}
/* Film */
void kernel_cpu_sse41_convert_to_byte(KernelGlobals *kg, uchar4 *rgba, float *buffer, float sample_scale, int x, int y, int offset, int stride)
{
kernel_film_convert_to_byte(kg, rgba, buffer, sample_scale, x, y, offset, stride);
}
void kernel_cpu_sse41_convert_to_half_float(KernelGlobals *kg, uchar4 *rgba, float *buffer, float sample_scale, int x, int y, int offset, int stride)
{
kernel_film_convert_to_half_float(kg, rgba, buffer, sample_scale, x, y, offset, stride);
}
/* Shader Evaluate */
void kernel_cpu_sse41_shader(KernelGlobals *kg, uint4 *input, float4 *output, int type, int i)
{
kernel_shader_evaluate(kg, input, output, (ShaderEvalType)type, i);
}
CCL_NAMESPACE_END
#endif

View File

@@ -462,7 +462,11 @@ ccl_device_inline float3 operator/=(float3& a, float f)
ccl_device_inline float dot(const float3 a, const float3 b) ccl_device_inline float dot(const float3 a, const float3 b)
{ {
#if defined(__KERNEL_SSE41__) && defined(__KERNEL_SSE__)
return _mm_cvtss_f32(_mm_dp_ps(a, b, 0x7F));
#else
return a.x*b.x + a.y*b.y + a.z*b.z; return a.x*b.x + a.y*b.y + a.z*b.z;
#endif
} }
ccl_device_inline float3 cross(const float3 a, const float3 b) ccl_device_inline float3 cross(const float3 a, const float3 b)
@@ -475,7 +479,11 @@ ccl_device_inline float3 cross(const float3 a, const float3 b)
ccl_device_inline float len(const float3 a) ccl_device_inline float len(const float3 a)
{ {
#if defined(__KERNEL_SSE41__) && defined(__KERNEL_SSE__)
return _mm_cvtss_f32(_mm_sqrt_ss(_mm_dp_ps(a.m128, a.m128, 0x7F)));
#else
return sqrtf(dot(a, a)); return sqrtf(dot(a, a));
#endif
} }
ccl_device_inline float len_squared(const float3 a) ccl_device_inline float len_squared(const float3 a)
@@ -487,7 +495,12 @@ ccl_device_inline float len_squared(const float3 a)
ccl_device_inline float3 normalize(const float3 a) ccl_device_inline float3 normalize(const float3 a)
{ {
#if defined(__KERNEL_SSE41__) && defined(__KERNEL_SSE__)
__m128 norm = _mm_sqrt_ps(_mm_dp_ps(a.m128, a.m128, 0x7F));
return _mm_div_ps(a.m128, norm);
#else
return a/len(a); return a/len(a);
#endif
} }
#endif #endif

View File

@@ -195,6 +195,11 @@ bool system_cpu_support_sse3()
return caps.sse && caps.sse2 && caps.sse3 && caps.ssse3; return caps.sse && caps.sse2 && caps.sse3 && caps.ssse3;
} }
bool system_cpu_support_sse41()
{
CPUCapabilities& caps = system_cpu_capabilities();
return caps.sse && caps.sse2 && caps.sse3 && caps.ssse3 && caps.sse41;
}
#else #else
bool system_cpu_support_sse2() bool system_cpu_support_sse2()

View File

@@ -26,6 +26,7 @@ string system_cpu_brand_string();
int system_cpu_bits(); int system_cpu_bits();
bool system_cpu_support_sse2(); bool system_cpu_support_sse2();
bool system_cpu_support_sse3(); bool system_cpu_support_sse3();
bool system_cpu_support_sse41();
CCL_NAMESPACE_END CCL_NAMESPACE_END

View File

@@ -95,6 +95,10 @@
#include <tmmintrin.h> /* SSSE 3 */ #include <tmmintrin.h> /* SSSE 3 */
#endif #endif
#ifdef __KERNEL_SSE41__
#include <smmintrin.h> /* SSE 4.1 */
#endif
#else #else
/* MinGW64 has conflicting declarations for these SSE headers in <windows.h>. /* MinGW64 has conflicting declarations for these SSE headers in <windows.h>.
@@ -199,7 +203,7 @@ struct ccl_align(16) int3 {
__forceinline operator const __m128i&(void) const { return m128; } __forceinline operator const __m128i&(void) const { return m128; }
__forceinline operator __m128i&(void) { return m128; } __forceinline operator __m128i&(void) { return m128; }
#else #else
struct int3 { struct ccl_align(16) int3 {
int x, y, z, w; int x, y, z, w;
#endif #endif
@@ -219,7 +223,7 @@ struct ccl_align(16) int4 {
__forceinline operator const __m128i&(void) const { return m128; } __forceinline operator const __m128i&(void) const { return m128; }
__forceinline operator __m128i&(void) { return m128; } __forceinline operator __m128i&(void) { return m128; }
#else #else
struct int4 { struct ccl_align(16) int4 {
int x, y, z, w; int x, y, z, w;
#endif #endif
@@ -267,7 +271,7 @@ struct ccl_align(16) float3 {
__forceinline operator const __m128&(void) const { return m128; } __forceinline operator const __m128&(void) const { return m128; }
__forceinline operator __m128&(void) { return m128; } __forceinline operator __m128&(void) { return m128; }
#else #else
struct float3 { struct ccl_align(16) float3 {
float x, y, z, w; float x, y, z, w;
#endif #endif
@@ -287,7 +291,7 @@ struct ccl_align(16) float4 {
__forceinline operator const __m128&(void) const { return m128; } __forceinline operator const __m128&(void) const { return m128; }
__forceinline operator __m128&(void) { return m128; } __forceinline operator __m128&(void) { return m128; }
#else #else
struct float4 { struct ccl_align(16) float4 {
float x, y, z, w; float x, y, z, w;
#endif #endif