style cleanup: block comments

Campbell Barton
2012-06-09 17:22:52 +00:00
parent 2f60d9b0b9
commit 0fbb6bff27
70 changed files with 551 additions and 536 deletions
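The hunks below apply two mechanical changes: continuation lines of multi-line block comments gain a leading asterisk, and code that had been disabled by wrapping it in a block comment is converted to an #if 0 / #endif pair (which, unlike /* */, can safely contain code that itself uses block comments). A minimal before/after sketch of both patterns, drawn from hunks further down:

    /* old style: continuation lines carry no leading asterisk
       give manual control over aperture radius */

    /* new style: every continuation line starts with an asterisk
     * give manual control over aperture radius */

    /* old style: disabled code kept inside a block comment */
    /*else
        sdmesh.add_face(vi[0], vi[1], vi[2]);*/

    /* new style: disabled code wrapped in an #if 0 block */
    #if 0
    else
        sdmesh.add_face(vi[0], vi[1], vi[2]);
    #endif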


@@ -141,7 +141,7 @@ static void blender_camera_from_object(BlenderCamera *bcam, BL::Object b_ob)
bcam->lens = b_camera.lens();
/* allow f/stop number to change aperture_size but still
give manual control over aperture radius */
* give manual control over aperture radius */
int aperture_type = RNA_enum_get(&ccamera, "aperture_type");
if(aperture_type == 1) {
@@ -179,8 +179,8 @@ static Transform blender_camera_matrix(const Transform& tfm, CameraType type)
if(type == CAMERA_PANORAMA) {
/* make it so environment camera needs to be pointed in the direction
of the positive x-axis to match an environment texture, this way
it is looking at the center of the texture */
* of the positive x-axis to match an environment texture, this way
* it is looking at the center of the texture */
result = tfm *
make_transform( 0.0f, -1.0f, 0.0f, 0.0f,
0.0f, 0.0f, 1.0f, 0.0f,


@@ -68,8 +68,8 @@ static void create_mesh(Scene *scene, Mesh *mesh, BL::Mesh b_mesh, const vector<
}
/* create generated coordinates. todo: we should actually get the orco
coordinates from modifiers, for now we use texspace loc/size which
is available in the api. */
* coordinates from modifiers, for now we use texspace loc/size which
* is available in the api. */
if(mesh->need_attribute(scene, ATTR_STD_GENERATED)) {
Attribute *attr = mesh->attributes.add(ATTR_STD_GENERATED);
float3 loc = get_float3(b_mesh.texspace_location());
@@ -181,8 +181,10 @@ static void create_subd_mesh(Mesh *mesh, BL::Mesh b_mesh, PointerRNA *cmesh, con
if(n == 4)
sdmesh.add_face(vi[0], vi[1], vi[2], vi[3]);
/*else
sdmesh.add_face(vi[0], vi[1], vi[2]);*/
#if 0
else
sdmesh.add_face(vi[0], vi[1], vi[2]);
#endif
}
/* finalize subd mesh */
@@ -232,7 +234,7 @@ Mesh *BlenderSync::sync_mesh(BL::Object b_ob, bool object_updated)
/* if transform was applied to mesh, need full update */
if(object_updated && mesh->transform_applied);
/* test if shaders changed, these can be object level so mesh
does not get tagged for recalc */
* does not get tagged for recalc */
else if(mesh->used_shaders != used_shaders);
else {
/* even if not tagged for recalc, we may need to sync anyway


@@ -264,7 +264,7 @@ void BlenderSync::sync_object(BL::Object b_parent, int b_index, BL::Object b_ob,
}
/* camera flag is not actually used, instead is tested
against render layer flags */
* against render layer flags */
if(object->visibility & PATH_RAY_CAMERA) {
object->visibility |= layer_flag << PATH_RAY_LAYER_SHIFT;
object->visibility &= ~PATH_RAY_CAMERA;


@@ -304,7 +304,7 @@ void BlenderSession::synchronize()
session->set_pause(BlenderSync::get_session_pause(b_scene, background));
/* copy recalc flags, outside of mutex so we can decide to do the real
synchronization at a later time to not block on running updates */
* synchronization at a later time to not block on running updates */
sync->sync_recalc();
/* try to acquire mutex. if we don't want to or can't, come back later */
@@ -334,7 +334,7 @@ void BlenderSession::synchronize()
bool BlenderSession::draw(int w, int h)
{
/* before drawing, we verify camera and viewport size changes, because
we do not get update callbacks for those, we must detect them here */
* we do not get update callbacks for those, we must detect them here */
if(session->ready_to_reset()) {
bool reset = false;
@@ -429,7 +429,7 @@ void BlenderSession::tag_redraw()
{
if(background) {
/* update stats and progress, only for background here because
in 3d view we do it in draw for thread safety reasons */
* in 3d view we do it in draw for thread safety reasons */
update_status_progress();
/* offline render, redraw if timeout passed */


@@ -641,7 +641,7 @@ static void add_nodes(BL::BlendData b_data, BL::Scene b_scene, ShaderGraph *grap
to_pair = sockets_map[b_to_sock.ptr.data];
/* either node may be NULL when the node was not exported, typically
because the node type is not supported */
* because the node type is not supported */
if(from_pair.first && to_pair.first) {
ShaderOutput *output = from_pair.first->output(from_pair.second.c_str());
ShaderInput *input = to_pair.first->input(to_pair.second.c_str());


@@ -63,7 +63,7 @@ BlenderSync::~BlenderSync()
bool BlenderSync::sync_recalc()
{
/* sync recalc flags from blender to cycles. actual update is done separate,
so we can do it later on if doing it immediate is not suitable */
* so we can do it later on if doing it immediate is not suitable */
BL::BlendData::materials_iterator b_mat;


@@ -27,7 +27,7 @@
#include "util_vector.h"
/* Hacks to hook into Blender API
todo: clean this up ... */
* todo: clean this up ... */
extern "C" {
@@ -121,7 +121,7 @@ static inline Transform get_transform(BL::Array<float, 16> array)
Transform tfm;
/* we assume both types to be just 16 floats, and transpose because blender
use column major matrix order while we use row major */
* use column major matrix order while we use row major */
memcpy(&tfm, &array, sizeof(float)*16);
tfm = transform_transpose(tfm);
@@ -164,12 +164,14 @@ static inline uint get_layer(BL::Array<int, 20> array)
return layer;
}
/*static inline float3 get_float3(PointerRNA& ptr, const char *name)
#if 0
static inline float3 get_float3(PointerRNA& ptr, const char *name)
{
float3 f;
RNA_float_get_array(&ptr, name, &f.x);
return f;
}*/
}
#endif
static inline bool get_boolean(PointerRNA& ptr, const char *name)
{


@@ -292,13 +292,13 @@ void BVH::pack_triangles()
void BVH::pack_instances(size_t nodes_size)
{
/* The BVH's for instances are built separately, but for traversal all
BVH's are stored in global arrays. This function merges them into the
top level BVH, adjusting indexes and offsets where appropriate. */
* BVH's are stored in global arrays. This function merges them into the
* top level BVH, adjusting indexes and offsets where appropriate. */
bool use_qbvh = params.use_qbvh;
size_t nsize = (use_qbvh)? BVH_QNODE_SIZE: BVH_NODE_SIZE;
/* adjust primitive index to point to the triangle in the global array, for
meshes with transform applied and already in the top level BVH */
* meshes with transform applied and already in the top level BVH */
for(size_t i = 0; i < pack.prim_index.size(); i++)
if(pack.prim_index[i] != -1)
pack.prim_index[i] += objects[pack.prim_object[i]]->mesh->tri_offset;
@@ -356,14 +356,14 @@ void BVH::pack_instances(size_t nodes_size)
Mesh *mesh = ob->mesh;
/* if mesh transform is applied, that means it's already in the top
level BVH, and we don't need to merge it in */
* level BVH, and we don't need to merge it in */
if(mesh->transform_applied) {
pack.object_node[object_offset++] = 0;
continue;
}
/* if mesh already added once, don't add it again, but used set
node offset for this object */
* node offset for this object */
map<Mesh*, int>::iterator it = mesh_map.find(mesh);
if(mesh_map.find(mesh) != mesh_map.end()) {


@@ -46,7 +46,7 @@ class Progress;
struct PackedBVH {
/* BVH nodes storage, one node is 4x int4, and contains two bounding boxes,
and child, triangle or object indexes dependening on the node type */
* and child, triangle or object indexes dependening on the node type */
array<int4> nodes;
/* object index to BVH node index mapping for instances */
array<int> object_node;
@@ -55,12 +55,12 @@ struct PackedBVH {
/* visibility visibilitys for primitives */
array<uint> prim_visibility;
/* mapping from BVH primitive index to true primitive index, as primitives
may be duplicated due to spatial splits. -1 for instances. */
* may be duplicated due to spatial splits. -1 for instances. */
array<int> prim_index;
/* mapping from BVH primitive index, to the object id of that primitive. */
array<int> prim_object;
/* quick array to lookup if a node is a leaf, not used for traversal, only
for instance BVH merging */
* for instance BVH merging */
array<int> is_leaf;
/* index of the root node. */


@@ -200,7 +200,7 @@ void BVHObjectBinning::split(BVHReference* prims, BVHObjectBinning& left_o, BVHO
}
/* object medium split if we did not make progress, can happen when all
primitives have same centroid */
* primitives have same centroid */
lgeom_bounds = BoundBox::empty;
rgeom_bounds = BoundBox::empty;
lcent_bounds = BoundBox::empty;


@@ -124,7 +124,7 @@ void Device::draw_pixels(device_memory& rgba, int y, int w, int h, int dy, int w
uint8_t *pixels = (uint8_t*)rgba.data_pointer;
/* for multi devices, this assumes the ineffecient method that we allocate
all pixels on the device even though we only render to a subset */
* all pixels on the device even though we only render to a subset */
pixels += 4*y*w;
glDrawPixels(w, h, GL_RGBA, GL_UNSIGNED_BYTE, pixels);


@@ -226,7 +226,7 @@ public:
void task_add(DeviceTask& task)
{
/* split task into smaller ones, more than number of threads for uneven
workloads where some parts of the image render slower than others */
* workloads where some parts of the image render slower than others */
list<DeviceTask> tasks;
task.split(tasks, TaskScheduler::num_threads()*10);


@@ -771,7 +771,7 @@ public:
cuda_push_context();
/* for multi devices, this assumes the ineffecient method that we allocate
all pixels on the device even though we only render to a subset */
* all pixels on the device even though we only render to a subset */
size_t offset = sizeof(uint8_t)*4*y*w;
glBindBufferARB(GL_PIXEL_UNPACK_BUFFER_ARB, pmem.cuPBO);


@@ -126,8 +126,10 @@ typedef struct RPCReceive {
if(len == data_size) {
archive_str = (data.size())? string(&data[0], data.size()): string("");
/*istringstream archive_stream(archive_str);
boost::archive::text_iarchive archive(archive_stream);*/
#if 0
istringstream archive_stream(archive_str);
boost::archive::text_iarchive archive(archive_stream);
#endif
archive_stream = new istringstream(archive_str);
archive = new boost::archive::text_iarchive(*archive_stream);


@@ -239,7 +239,7 @@ public:
}
/* we don't check CL_DEVICE_VERSION since for e.g. nvidia sm 1.3 cards this is
1.0 even if the language features are there, just limited shared memory */
* 1.0 even if the language features are there, just limited shared memory */
return true;
}
@@ -344,8 +344,8 @@ public:
bool compile_kernel(const string& kernel_path, const string& kernel_md5)
{
/* we compile kernels consisting of many files. unfortunately opencl
kernel caches do not seem to recognize changes in included files.
so we force recompile on changes by adding the md5 hash of all files */
* kernel caches do not seem to recognize changes in included files.
* so we force recompile on changes by adding the md5 hash of all files */
string source = "#include \"kernel.cl\" // " + kernel_md5 + "\n";
source = path_source_replace_includes(source, kernel_path);


@@ -271,8 +271,8 @@ __device_inline float3 path_radiance_sum(KernelGlobals *kg, PathRadiance *L)
#ifdef __PASSES__
if(L->use_light_pass) {
/* this division is a bit ugly, but means we only have to keep track of
only a single throughput further along the path, here we recover just
the indirect parth that is not influenced by any particular BSDF type */
* only a single throughput further along the path, here we recover just
* the indirect parth that is not influenced by any particular BSDF type */
L->direct_emission = safe_divide_color(L->direct_emission, L->direct_throughput);
L->direct_diffuse += L->indirect_diffuse*L->direct_emission;
L->direct_glossy += L->indirect_glossy*L->direct_emission;


@@ -34,8 +34,8 @@ CCL_NAMESPACE_BEGIN
#define TRI_NODE_SIZE 3
/* silly workaround for float extended precision that happens when compiling
without sse support on x86, it results in different results for float ops
that you would otherwise expect to compare correctly */
* without sse support on x86, it results in different results for float ops
* that you would otherwise expect to compare correctly */
#if !defined(__i386__) || defined(__SSE__)
#define NO_EXTENDED_PRECISION
#else
@@ -160,7 +160,7 @@ __device_inline void bvh_triangle_intersect(KernelGlobals *kg, Intersection *ise
if(v >= 0.0f && u + v <= 1.0f) {
#ifdef __VISIBILITY_FLAG__
/* visibility flag test. we do it here under the assumption
that most triangles are culled by node flags */
* that most triangles are culled by node flags */
if(kernel_tex_fetch(__prim_visibility, triAddr) & visibility)
#endif
{


@@ -28,13 +28,13 @@
CCL_NAMESPACE_BEGIN
/* Assertions inside the kernel only work for the CPU device, so we wrap it in
a macro which is empty for other devices */
* a macro which is empty for other devices */
#define kernel_assert(cond) assert(cond)
/* Texture types to be compatible with CUDA textures. These are really just
simple arrays and after inlining fetch hopefully revert to being a simple
pointer lookup. */
* simple arrays and after inlining fetch hopefully revert to being a simple
* pointer lookup. */
template<typename T> struct texture {
T fetch(int index)
@@ -43,7 +43,8 @@ template<typename T> struct texture {
return data[index];
}
/*__m128 fetch_m128(int index)
#if 0
__m128 fetch_m128(int index)
{
kernel_assert(index >= 0 && index < width);
return ((__m128*)data)[index];
@@ -53,7 +54,8 @@ template<typename T> struct texture {
{
kernel_assert(index >= 0 && index < width);
return ((__m128i*)data)[index];
}*/
}
#endif
float interp(float x, int size)
{


@@ -44,7 +44,7 @@ __device float3 direct_emissive_eval(KernelGlobals *kg, float rando,
ls->Ng = sd.Ng;
/* no path flag, we're evaluating this for all closures. that's weak but
we'd have to do multiple evaluations otherwise */
* we'd have to do multiple evaluations otherwise */
shader_eval_surface(kg, &sd, rando, 0);
/* evaluate emissive closure */
@@ -145,7 +145,7 @@ __device float3 indirect_emission(KernelGlobals *kg, ShaderData *sd, float t, in
if(!(path_flag & PATH_RAY_MIS_SKIP) && (sd->flag & SD_SAMPLE_AS_LIGHT)) {
/* multiple importance sampling, get triangle light pdf,
and compute weight with respect to BSDF pdf */
* and compute weight with respect to BSDF pdf */
float pdf = triangle_light_pdf(kg, sd->Ng, sd->I, t);
float mis_weight = power_heuristic(bsdf_pdf, pdf);
@@ -172,7 +172,7 @@ __device float3 indirect_background(KernelGlobals *kg, Ray *ray, int path_flag,
if(!(path_flag & PATH_RAY_MIS_SKIP) && res) {
/* multiple importance sampling, get background light pdf for ray
direction, and compute weight with respect to BSDF pdf */
* direction, and compute weight with respect to BSDF pdf */
float pdf = background_light_pdf(kg, ray->D);
float mis_weight = power_heuristic(bsdf_pdf, pdf);


@@ -29,9 +29,9 @@
CCL_NAMESPACE_BEGIN
/* On the CPU, we pass along the struct KernelGlobals to nearly everywhere in
the kernel, to access constant data. These are all stored as "textures", but
these are really just standard arrays. We can't use actually globals because
multiple renders may be running inside the same process. */
* the kernel, to access constant data. These are all stored as "textures", but
* these are really just standard arrays. We can't use actually globals because
* multiple renders may be running inside the same process. */
#ifdef __KERNEL_CPU__
@@ -45,7 +45,7 @@ typedef struct KernelGlobals {
#ifdef __OSL__
/* On the CPU, we also have the OSL globals here. Most data structures are shared
with SVM, the difference is in the shaders and object/mesh attributes. */
* with SVM, the difference is in the shaders and object/mesh attributes. */
OSLGlobals osl;
#endif
@@ -54,9 +54,9 @@ typedef struct KernelGlobals {
#endif
/* For CUDA, constant memory textures must be globals, so we can't put them
into a struct. As a result we don't actually use this struct and use actual
globals and simply pass along a NULL pointer everywhere, which we hope gets
optimized out. */
* into a struct. As a result we don't actually use this struct and use actual
* globals and simply pass along a NULL pointer everywhere, which we hope gets
* optimized out. */
#ifdef __KERNEL_CUDA__


@@ -64,8 +64,8 @@ __device float3 area_light_sample(float3 axisu, float3 axisv, float randu, float
__device float3 background_light_sample(KernelGlobals *kg, float randu, float randv, float *pdf)
{
/* for the following, the CDF values are actually a pair of floats, with the
function value as X and the actual CDF as Y. The last entry's function
value is the CDF total. */
* function value as X and the actual CDF as Y. The last entry's function
* value is the CDF total. */
int res = kernel_data.integrator.pdf_background_res;
int cdf_count = res + 1;
@@ -326,9 +326,9 @@ __device float triangle_light_pdf(KernelGlobals *kg,
__device int light_distribution_sample(KernelGlobals *kg, float randt)
{
/* this is basically std::upper_bound as used by pbrt, to find a point light or
triangle to emit from, proportional to area. a good improvement would be to
also sample proportional to power, though it's not so well defined with
OSL shaders. */
* triangle to emit from, proportional to area. a good improvement would be to
* also sample proportional to power, though it's not so well defined with
* OSL shaders. */
int first = 0;
int len = kernel_data.integrator.num_distribution + 1;


@@ -17,8 +17,8 @@
*/
/* Optimized CPU kernel entry points. This file is compiled with SSE3
optimization flags and nearly all functions inlined, while kernel.cpp
is compiled without for other CPU's. */
* optimization flags and nearly all functions inlined, while kernel.cpp
* is compiled without for other CPU's. */
#ifdef WITH_OPTIMIZED_KERNEL


@@ -59,7 +59,7 @@ __device_inline void path_state_init(PathState *state)
__device_inline void path_state_next(KernelGlobals *kg, PathState *state, int label)
{
/* ray through transparent keeps same flags from previous ray and is
not counted as a regular bounce, transparent has separate max */
* not counted as a regular bounce, transparent has separate max */
if(label & LABEL_TRANSPARENT) {
state->flag |= PATH_RAY_TRANSPARENT;
state->transparent_bounce++;
@@ -159,13 +159,13 @@ __device_inline bool shadow_blocked(KernelGlobals *kg, PathState *state, Ray *ra
#ifdef __TRANSPARENT_SHADOWS__
if(result && kernel_data.integrator.transparent_shadows) {
/* transparent shadows work in such a way to try to minimize overhead
in cases where we don't need them. after a regular shadow ray is
cast we check if the hit primitive was potentially transparent, and
only in that case start marching. this gives on extra ray cast for
the cases were we do want transparency.
also note that for this to work correct, multi close sampling must
be used, since we don't pass a random number to shader_eval_surface */
* in cases where we don't need them. after a regular shadow ray is
* cast we check if the hit primitive was potentially transparent, and
* only in that case start marching. this gives on extra ray cast for
* the cases were we do want transparency.
*
* also note that for this to work correct, multi close sampling must
* be used, since we don't pass a random number to shader_eval_surface */
if(shader_transparent_shadow(kg, &isect)) {
float3 throughput = make_float3(1.0f, 1.0f, 1.0f);
float3 Pend = ray->P + ray->D*ray->t;
@@ -266,7 +266,7 @@ __device float4 kernel_path_integrate(KernelGlobals *kg, RNG *rng, int sample, R
kernel_write_data_passes(kg, buffer, &L, &sd, sample, state.flag, throughput);
/* blurring of bsdf after bounces, for rays that have a small likelihood
of following this particular path (diffuse, rough glossy) */
* of following this particular path (diffuse, rough glossy) */
if(kernel_data.integrator.filter_glossy != FLT_MAX) {
float blur_pdf = kernel_data.integrator.filter_glossy*min_ray_pdf;
@@ -305,8 +305,8 @@ __device float4 kernel_path_integrate(KernelGlobals *kg, RNG *rng, int sample, R
#endif
/* path termination. this is a strange place to put the termination, it's
mainly due to the mixed in MIS that we use. gives too many unneeded
shader evaluations, only need emission if we are going to terminate */
* mainly due to the mixed in MIS that we use. gives too many unneeded
* shader evaluations, only need emission if we are going to terminate */
float probability = path_state_terminate_probability(kg, &state, throughput);
float terminate = path_rng(kg, rng, sample, rng_offset + PRNG_TERMINATE);


@@ -23,8 +23,8 @@ typedef uint RNG;
#ifdef __SOBOL__
/* skip initial numbers that are not as well distributed, especially the
first sequence is just 0 everywhere, which can be problematic for e.g.
path termination */
* first sequence is just 0 everywhere, which can be problematic for e.g.
* path termination */
#define SOBOL_SKIP 64
/* High Dimensional Sobol */
@@ -66,7 +66,7 @@ __device uint sobol_inverse(uint i)
}
/* multidimensional sobol with generator matrices
dimension 0 and 1 are equal to van_der_corput() and sobol() respectively */
* dimension 0 and 1 are equal to van_der_corput() and sobol() respectively */
__device uint sobol_dimension(KernelGlobals *kg, int index, int dimension)
{
uint result = 0;


@@ -208,7 +208,7 @@ __device float4 triangle_motion_vector(KernelGlobals *kg, ShaderData *sd)
motion_post = triangle_attribute_float3(kg, sd, ATTR_ELEMENT_VERTEX, offset_post, NULL, NULL);
/* object motion. note that depending on the mesh having motion vectors, this
transformation was set match the world/object space of motion_pre/post */
* transformation was set match the world/object space of motion_pre/post */
Transform tfm;
tfm = object_fetch_transform(kg, sd->object, TIME_INVALID, OBJECT_TRANSFORM_MOTION_PRE);
@@ -220,7 +220,7 @@ __device float4 triangle_motion_vector(KernelGlobals *kg, ShaderData *sd)
float3 P;
/* camera motion, for perspective/orthographic motion.pre/post will be a
world-to-raster matrix, for panorama it's world-to-camera */
* world-to-raster matrix, for panorama it's world-to-camera */
if (kernel_data.cam.type != CAMERA_PANORAMA) {
tfm = kernel_data.cam.worldtoraster;
P = transform_perspective(&tfm, sd->P);


@@ -17,7 +17,7 @@
float fresnel_dielectric(vector Incoming, normal Normal, float eta)
{
/* compute fresnel reflectance without explicitly computing
the refracted direction */
* the refracted direction */
float c = fabs(dot(Incoming, Normal));
float g = eta * eta - 1 + c * c;
float result;


@@ -65,7 +65,7 @@ void OSLRenderServices::thread_init(KernelGlobals *kernel_globals_)
bool OSLRenderServices::get_matrix(OSL::Matrix44 &result, OSL::TransformationPtr xform, float time)
{
/* this is only used for shader and object space, we don't really have
a concept of shader space, so we just use object space for both. */
* a concept of shader space, so we just use object space for both. */
if (xform) {
KernelGlobals *kg = kernel_globals;
const ShaderData *sd = (const ShaderData *)xform;
@@ -86,7 +86,7 @@ bool OSLRenderServices::get_matrix(OSL::Matrix44 &result, OSL::TransformationPtr
bool OSLRenderServices::get_inverse_matrix(OSL::Matrix44 &result, OSL::TransformationPtr xform, float time)
{
/* this is only used for shader and object space, we don't really have
a concept of shader space, so we just use object space for both. */
* a concept of shader space, so we just use object space for both. */
if (xform) {
KernelGlobals *kg = kernel_globals;
const ShaderData *sd = (const ShaderData *)xform;
@@ -305,11 +305,11 @@ void *OSLRenderServices::get_pointcloud_attr_query(ustring *attr_names,
AttrQuery &query = m_attr_queries.back();
/* make space for what we need. the only reason to use
std::vector is to skip the delete */
* std::vector is to skip the delete */
query.attr_names.resize(nattrs);
query.attr_partio_types.resize(nattrs);
/* capacity will keep the length of the smallest array passed
to the query. Just to prevent buffer overruns */
* to the query. Just to prevent buffer overruns */
query.capacity = -1;
for (int i = 0; i < nattrs; ++i) {
@@ -323,7 +323,7 @@ void *OSLRenderServices::get_pointcloud_attr_query(ustring *attr_names,
query.capacity = min(query.capacity, (int)attr_types[i].numelements());
/* convert the OSL (OIIO) type to the equivalent Partio type so
we can do a fast check at query time. */
* we can do a fast check at query time. */
if (element_type == TypeDesc::TypeFloat) {
query.attr_partio_types[i] = Partio::FLOAT;
}
@@ -359,7 +359,7 @@ int OSLRenderServices::pointcloud(ustring filename, const OSL::Vec3 &center, flo
int max_points, void *_attr_query, void **attr_outdata)
{
/* todo: this code has never been tested, and most likely does not
work. it's based on the example code in OSL */
* work. it's based on the example code in OSL */
#ifdef WITH_PARTIO
/* query Partio for this pointcloud lookup using cached attr_query */
@@ -374,7 +374,7 @@ int OSLRenderServices::pointcloud(ustring filename, const OSL::Vec3 &center, flo
Partio::ParticlesData *cloud = get_pointcloud(filename);
/* now we have to look up all the attributes in the file. we can't do this
before hand cause we never know what we are going to load. */
* before hand cause we never know what we are going to load. */
int nattrs = attr_query->attr_names.size();
Partio::ParticleAttribute *attr = (Partio::ParticleAttribute *)alloca(sizeof(Partio::ParticleAttribute) * nattrs);
@@ -414,8 +414,8 @@ int OSLRenderServices::pointcloud(ustring filename, const OSL::Vec3 &center, flo
}
else {
/* note we make a single call per attribute, we don't loop over the
points. Partio does it, so it is there that we have to care about
performance */
* points. Partio does it, so it is there that we have to care about
* performance */
cloud->data(attr[j], count, &indices[0], true, attr_outdata[j]);
}
}


@@ -76,14 +76,14 @@ private:
#ifdef WITH_PARTIO
/* OSL gets pointers to this but its definition is private.
right now it only caches the types already converted to
Partio constants. this is what get_pointcloud_attr_query
returns */
* right now it only caches the types already converted to
* Partio constants. this is what get_pointcloud_attr_query
* returns */
struct AttrQuery {
/* names of the attributes to query */
std::vector<ustring> attr_names;
/* types as (enum Partio::ParticleAttributeType) of the
attributes in the query */
* attributes in the query */
std::vector<int> attr_partio_types;
/* for sanity checks, capacity of the output arrays */
int capacity;


@@ -374,7 +374,7 @@ __device void svm_node_mix_closure(ShaderData *sd, float *stack,
{
#ifdef __MULTI_CLOSURE__
/* fetch weight from blend input, previous mix closures,
and write to stack to be used by closure nodes later */
* and write to stack to be used by closure nodes later */
uint weight_offset, in_weight_offset, weight1_offset, weight2_offset;
decode_node_uchar4(node.y, &weight_offset, &in_weight_offset, &weight1_offset, &weight2_offset);
@@ -387,8 +387,8 @@ __device void svm_node_mix_closure(ShaderData *sd, float *stack,
stack_store_float(stack, weight2_offset, in_weight*weight);
#else
/* pick a closure and make the random number uniform over 0..1 again.
closure 1 starts on the next node, for closure 2 the start is at an
offset from the current node, so we jump */
* closure 1 starts on the next node, for closure 2 the start is at an
* offset from the current node, so we jump */
uint weight_offset = node.y;
uint node_jump = node.z;
float weight = stack_load_float(stack, weight_offset);
@@ -410,8 +410,8 @@ __device void svm_node_add_closure(ShaderData *sd, float *stack, uint unused,
/* nothing to do, handled in compiler */
#else
/* pick one of the two closures with probability 0.5. sampling quality
is not going to be great, for that we'd need to evaluate the weights
of the two closures being added */
* is not going to be great, for that we'd need to evaluate the weights
* of the two closures being added */
float weight = 0.5f;
if(*randb < weight) {


@@ -92,13 +92,13 @@ __device float4 svm_image_texture(KernelGlobals *kg, int id, float x, float y)
float4 r;
/* not particularly proud of this massive switch, what are the
alternatives?
- use a single big 1D texture, and do our own lookup/filtering
- group by size and use a 3d texture, performance impact
- group into larger texture with some padding for correct lerp
also note that cuda has 128 textures limit, we use 100 now, since
we still need some for other storage */
* alternatives?
* - use a single big 1D texture, and do our own lookup/filtering
* - group by size and use a 3d texture, performance impact
* - group into larger texture with some padding for correct lerp
*
* also note that cuda has 128 textures limit, we use 100 now, since
* we still need some for other storage */
switch(id) {
case 0: r = kernel_tex_image_interp(__tex_image_000, x, y); break;


@@ -152,7 +152,7 @@ __device float voronoi_CrS(float3 p) { return 2.0f*voronoi_Cr(p) - 1.0f; }
__device float noise_basis(float3 p, NodeNoiseBasis basis)
{
/* Only Perlin enabled for now, others break CUDA compile by making kernel
too big, with compile using > 4GB, due to everything being inlined. */
* too big, with compile using > 4GB, due to everything being inlined. */
#if 0
if(basis == NODE_NOISE_PERLIN)


@@ -19,7 +19,7 @@
CCL_NAMESPACE_BEGIN
/* note: the interfaces here are just as an example, need to figure
out the right functions and parameters to use */
* out the right functions and parameters to use */
/* ISOTROPIC VOLUME CLOSURE */


@@ -35,7 +35,7 @@ class Device;
struct float4;
/* Buffer Parameters
Size of render buffer and how it fits in the full image (border render). */
* Size of render buffer and how it fits in the full image (border render). */
class BufferParams {
public:
@@ -98,8 +98,8 @@ public:
/* buffer parameters */
BufferParams params;
/* dimensions for how much of the buffer is actually ready for display.
with progressive render we can be using only a subset of the buffer.
if these are zero, it means nothing can be drawn yet */
* with progressive render we can be using only a subset of the buffer.
* if these are zero, it means nothing can be drawn yet */
int draw_width, draw_height;
/* draw alpha channel? */
bool transparent;


@@ -142,7 +142,7 @@ void Pass::add(PassType type, vector<Pass>& passes)
passes.push_back(pass);
/* order from by components, to ensure alignment so passes with size 4
come first and then passes with size 1 */
* come first and then passes with size 1 */
sort(passes.begin(), passes.end(), compare_pass_order);
if(pass.divide_type != PASS_NONE)


@@ -375,8 +375,8 @@ void ShaderGraph::break_cycles(ShaderNode *node, vector<bool>& visited, vector<b
void ShaderGraph::clean()
{
/* we do two things here: find cycles and break them, and remove unused
nodes that don't feed into the output. how cycles are broken is
undefined, they are invalid input, the important thing is to not crash */
* nodes that don't feed into the output. how cycles are broken is
* undefined, they are invalid input, the important thing is to not crash */
vector<bool> removed(nodes.size(), false);
vector<bool> visited(nodes.size(), false);
@@ -495,7 +495,7 @@ void ShaderGraph::bump_from_displacement()
copy_nodes(nodes_displace, nodes_dy);
/* mark nodes to indicate they are use for bump computation, so
that any texture coordinates are shifted by dx/dy when sampling */
* that any texture coordinates are shifted by dx/dy when sampling */
foreach(NodePair& pair, nodes_center)
pair.second->bump = SHADER_BUMP_CENTER;
foreach(NodePair& pair, nodes_dx)
@@ -516,15 +516,15 @@ void ShaderGraph::bump_from_displacement()
connect(out_dy, bump->input("SampleY"));
/* connect bump output to normal input nodes that aren't set yet. actually
this will only set the normal input to the geometry node that we created
and connected to all other normal inputs already. */
* this will only set the normal input to the geometry node that we created
* and connected to all other normal inputs already. */
foreach(ShaderNode *node, nodes)
foreach(ShaderInput *input, node->inputs)
if(!input->link && input->default_value == ShaderInput::NORMAL)
connect(bump->output("Normal"), input);
/* finally, add the copied nodes to the graph. we can't do this earlier
because we would create dependency cycles in the above loop */
* because we would create dependency cycles in the above loop */
foreach(NodePair& pair, nodes_center)
add(pair.second);
foreach(NodePair& pair, nodes_dx)


@@ -66,7 +66,7 @@ static bool is_float_image(const string& filename)
if(in->open(filename, spec)) {
/* check the main format, and channel formats;
if any take up more than one byte, we'll need a float texture slot */
* if any take up more than one byte, we'll need a float texture slot */
if(spec.format.basesize() > 1)
is_float = true;
@@ -171,8 +171,8 @@ void ImageManager::remove_image(const string& filename)
assert(images[slot]->users >= 0);
/* don't remove immediately, rather do it all together later on. one of
the reasons for this is that on shader changes we add and remove nodes
that use them, but we do not want to reload the image all the time. */
* the reasons for this is that on shader changes we add and remove nodes
* that use them, but we do not want to reload the image all the time. */
if(images[slot]->users == 0)
need_update = true;
@@ -189,8 +189,8 @@ void ImageManager::remove_image(const string& filename)
assert(float_images[slot]->users >= 0);
/* don't remove immediately, rather do it all together later on. one of
the reasons for this is that on shader changes we add and remove nodes
that use them, but we do not want to reload the image all the time. */
* the reasons for this is that on shader changes we add and remove nodes
* that use them, but we do not want to reload the image all the time. */
if(float_images[slot]->users == 0)
need_update = true;
@@ -483,7 +483,7 @@ void ImageManager::device_update(Device *device, DeviceScene *dscene, Progress&
void ImageManager::device_pack_images(Device *device, DeviceScene *dscene, Progress& progess)
{
/* for OpenCL, we pack all image textures inside a single big texture, and
will do our own interpolation in the kernel */
* will do our own interpolation in the kernel */
size_t size = 0;
for(size_t slot = 0; slot < images.size(); slot++) {


@@ -362,7 +362,7 @@ void LightManager::device_update_background(Device *device, DeviceScene *dscene,
float cdf_total = cond_cdf[i * cdf_count + res - 1].y + cond_cdf[i * cdf_count + res - 1].x / res;
/* stuff the total into the brightness value for the last entry, because
we are going to normalize the CDFs to 0.0 to 1.0 afterwards */
* we are going to normalize the CDFs to 0.0 to 1.0 afterwards */
cond_cdf[i * cdf_count + res].x = cdf_total;
if(cdf_total > 0.0f)


@@ -474,7 +474,7 @@ void MeshManager::device_update_attributes(Device *device, DeviceScene *dscene,
AttributeRequestSet& attributes = mesh_attributes[i];
/* todo: we now store std and name attributes from requests even if
they actually refer to the same mesh attributes, optimize */
* they actually refer to the same mesh attributes, optimize */
foreach(AttributeRequest& req, attributes.requests) {
Attribute *mattr = mesh->attributes.find(req);
@@ -493,7 +493,7 @@ void MeshManager::device_update_attributes(Device *device, DeviceScene *dscene,
}
/* we abuse AttributeRequest to pass on info like element and
offset, it doesn't really make sense but is convenient */
* offset, it doesn't really make sense but is convenient */
/* store element and type */
if(mattr->element == Attribute::VERTEX)
@@ -528,7 +528,7 @@ void MeshManager::device_update_attributes(Device *device, DeviceScene *dscene,
}
/* mesh vertex/triangle index is global, not per object, so we sneak
a correction for that in here */
* a correction for that in here */
if(req.element == ATTR_ELEMENT_VERTEX)
req.offset -= mesh->vert_offset;
else if(mattr->element == Attribute::FACE)


@@ -89,7 +89,7 @@ void Object::apply_transform()
Transform ntfm = transform_transpose(transform_inverse(tfm));
/* we keep normals pointing in same direction on negative scale, notify
mesh about this in it (re)calculates normals */
* mesh about this in it (re)calculates normals */
if(transform_negative_scale(tfm))
mesh->transform_negative_scaled = true;
@@ -161,7 +161,7 @@ void ObjectManager::device_update_transforms(Device *device, DeviceScene *dscene
Transform itfm = transform_inverse(tfm);
/* compute surface area. for uniform scale we can do avoid the many
transform calls and share computation for instances */
* transform calls and share computation for instances */
/* todo: correct for displacement, and move to a better place */
float uniform_scale;
float surface_area = 0.0f;
@@ -206,8 +206,8 @@ void ObjectManager::device_update_transforms(Device *device, DeviceScene *dscene
if(need_motion == Scene::MOTION_PASS) {
/* motion transformations, is world/object space depending if mesh
comes with deformed position in object space, or if we transform
the shading point in world space */
* comes with deformed position in object space, or if we transform
* the shading point in world space */
Transform mtfm_pre = ob->motion.pre;
Transform mtfm_post = ob->motion.post;


@@ -167,7 +167,7 @@ string OSLCompiler::compatible_name(const char *name)
bool OSLCompiler::node_skip_input(ShaderNode *node, ShaderInput *input)
{
/* exception for output node, only one input is actually used
depending on the current shader type */
* depending on the current shader type */
if(node->name == ustring("output")) {
if(strcmp(input->name, "Surface") == 0 && current_type != SHADER_TYPE_SURFACE)


@@ -116,8 +116,8 @@ bool Session::ready_to_reset()
void Session::reset_gpu(BufferParams& buffer_params, int samples)
{
/* block for buffer acces and reset immediately. we can't do this
in the thread, because we need to allocate an OpenGL buffer, and
that only works in the main thread */
* in the thread, because we need to allocate an OpenGL buffer, and
* that only works in the main thread */
thread_scoped_lock display_lock(display->mutex);
thread_scoped_lock buffers_lock(buffers->mutex);
@@ -140,10 +140,10 @@ bool Session::draw_gpu(BufferParams& buffer_params)
/* first check we already rendered something */
if(gpu_draw_ready) {
/* then verify the buffers have the expected size, so we don't
draw previous results in a resized window */
* draw previous results in a resized window */
if(!buffer_params.modified(display->params)) {
/* for CUDA we need to do tonemapping still, since we can
only access GL buffers from the main thread */
* only access GL buffers from the main thread */
if(gpu_need_tonemap) {
thread_scoped_lock buffers_lock(buffers->mutex);
tonemap();
@@ -185,7 +185,7 @@ void Session::run_gpu()
}
else {
/* if in interactive mode, and we are either paused or done for now,
wait for pause condition notify to wake up again */
* wait for pause condition notify to wake up again */
thread_scoped_lock pause_lock(pause_mutex);
if(pause || no_tiles) {
@@ -224,8 +224,8 @@ void Session::run_gpu()
if(!no_tiles) {
/* buffers mutex is locked entirely while rendering each
sample, and released/reacquired on each iteration to allow
reset and draw in between */
* sample, and released/reacquired on each iteration to allow
* reset and draw in between */
thread_scoped_lock buffers_lock(buffers->mutex);
/* update status and timing */
@@ -294,7 +294,7 @@ bool Session::draw_cpu(BufferParams& buffer_params)
/* first check we already rendered something */
if(display->draw_ready()) {
/* then verify the buffers have the expected size, so we don't
draw previous results in a resized window */
* draw previous results in a resized window */
if(!buffer_params.modified(display->params)) {
display->draw(device);
@@ -334,7 +334,7 @@ void Session::run_cpu()
}
else {
/* if in interactive mode, and we are either paused or done for now,
wait for pause condition notify to wake up again */
* wait for pause condition notify to wake up again */
thread_scoped_lock pause_lock(pause_mutex);
if(pause || no_tiles) {
@@ -362,8 +362,8 @@ void Session::run_cpu()
if(!no_tiles) {
/* buffers mutex is locked entirely while rendering each
sample, and released/reacquired on each iteration to allow
reset and draw in between */
* sample, and released/reacquired on each iteration to allow
* reset and draw in between */
thread_scoped_lock buffers_lock(buffers->mutex);
/* update scene */
@@ -406,7 +406,7 @@ void Session::run_cpu()
}
else if(need_tonemap) {
/* tonemap only if we do not reset, we don't we don't
want to show the result of an incomplete sample*/
* wan't to show the result of an incomplete sample*/
tonemap();
}
@@ -535,8 +535,8 @@ void Session::update_scene()
progress.set_status("Updating Scene");
/* update camera if dimensions changed for progressive render. the camera
knows nothing about progressive or cropped rendering, it just gets the
image dimensions passed in */
* knows nothing about progressive or cropped rendering, it just gets the
* image dimensions passed in */
Camera *cam = scene->camera;
int width = tile_manager.state.buffer.full_width;
int height = tile_manager.state.buffer.full_height;

View File

@@ -81,9 +81,9 @@ void Shader::tag_update(Scene *scene)
scene->light_manager->need_update = true;
/* get requested attributes. this could be optimized by pruning unused
nodes here already, but that's the job of the shader manager currently,
and may not be so great for interactive rendering where you temporarily
disconnect a node */
* nodes here already, but that's the job of the shader manager currently,
* and may not be so great for interactive rendering where you temporarily
* disconnect a node */
AttributeRequestSet prev_attributes = attributes;
attributes.clear();
@@ -91,7 +91,7 @@ void Shader::tag_update(Scene *scene)
node->attributes(&attributes);
/* compare if the attributes changed, mesh manager will check
need_update_attributes, update the relevant meshes and clear it. */
* need_update_attributes, update the relevant meshes and clear it. */
if(attributes.modified(prev_attributes)) {
need_update_attributes = true;
scene->mesh_manager->need_update = true;


@@ -53,8 +53,8 @@ public:
ShaderGraph *graph;
/* shader graph with auto bump mapping included, we compile two shaders,
with and without bump, because the displacement method is a mesh
level setting, so we need to handle both */
* with and without bump, because the displacement method is a mesh
* level setting, so we need to handle both */
ShaderGraph *graph_bump;
/* sampling */
@@ -109,7 +109,7 @@ public:
int get_shader_id(uint shader, Mesh *mesh = NULL, bool smooth = false);
/* add default shaders to scene, to use as default for things that don't
have any shader assigned explicitly */
* have any shader assigned explicitly */
static void add_default(Scene *scene);
protected:


@@ -251,11 +251,11 @@ void SVMCompiler::stack_link(ShaderInput *input, ShaderOutput *output)
void SVMCompiler::stack_clear_users(ShaderNode *node, set<ShaderNode*>& done)
{
/* optimization we should add:
find and lower user counts for outputs for which all inputs are done.
this is done before the node is compiled, under the assumption that the
node will first load all inputs from the stack and then writes its
outputs. this used to work, but was disabled because it gave trouble
with inputs getting stack positions assigned */
* find and lower user counts for outputs for which all inputs are done.
* this is done before the node is compiled, under the assumption that the
* node will first load all inputs from the stack and then writes its
* outputs. this used to work, but was disabled because it gave trouble
* with inputs getting stack positions assigned */
foreach(ShaderInput *input, node->inputs) {
ShaderOutput *output = input->link;
@@ -418,8 +418,8 @@ void SVMCompiler::generate_closure(ShaderNode *node, set<ShaderNode*>& done)
add_node(NODE_ADD_CLOSURE, 0, 0, 0);
/* generate code for closure 1
note we backup all compiler state and restore it afterwards, so one
closure choice doesn't influence the other*/
* note we backup all compiler state and restore it afterwards, so one
* closure choice doesn't influence the other*/
if(cl1in->link) {
StackBackup backup;
stack_backup(backup, done);
@@ -448,7 +448,7 @@ void SVMCompiler::generate_closure(ShaderNode *node, set<ShaderNode*>& done)
add_node(NODE_END, 0, 0, 0);
/* set jump for mix node, -1 because offset is already
incremented when this jump is added to it */
* incremented when this jump is added to it */
svm_nodes[mix_offset].z = cl2_offset - mix_offset - 1;
done.insert(node);
@@ -482,9 +482,9 @@ void SVMCompiler::generate_closure(ShaderNode *node, set<ShaderNode*>& done)
void SVMCompiler::generate_multi_closure(ShaderNode *node, set<ShaderNode*>& done, uint in_offset)
{
/* todo: the weaks point here is that unlike the single closure sampling
we will evaluate all nodes even if they are used as input for closures
that are unused. it's not clear what would be the best way to skip such
nodes at runtime, especially if they are tangled up */
* we will evaluate all nodes even if they are used as input for closures
* that are unused. it's not clear what would be the best way to skip such
* nodes at runtime, especially if they are tangled up */
if(node->name == ustring("mix_closure") || node->name == ustring("add_closure")) {
ShaderInput *fin = node->input("Fac");


@@ -82,7 +82,7 @@ static float pseudoValence(SubdVert *vert)
if(vert->is_boundary()) {
/* we treat boundary verts as being half a closed mesh. corners are
special case. n = 4 for corners and n = 2*(n-1) for boundaries. */
* special case. n = 4 for corners and n = 2*(n-1) for boundaries. */
if(valence == 2) return 4;
return (valence - 1)*2;
}
@@ -481,10 +481,11 @@ void SubdAccBuilder::computeInteriorStencil(SubdFaceRing *ring, GregoryAccStenci
}
/* this change makes the triangle boundaries smoother, but distorts the quads next to them */
/*if(ring->is_triangle() || SubdFaceRing::is_triangle(edge->pair->face))
{
#if 0
if(ring->is_triangle() || SubdFaceRing::is_triangle(edge->pair->face)) {
y *= 4.0f / 3.0f;
}*/
}
#endif
stencil->get(interior1Indices[primitiveOffset+v]) = stencil->get(edge1Indices[primitiveOffset+v]);
stencil->get(interior1Indices[primitiveOffset+v]) += x;
@@ -539,8 +540,10 @@ void SubdAccBuilder::computeInteriorStencil(SubdFaceRing *ring, GregoryAccStenci
}
/* this change makes the triangle boundaries smoother, but distorts the quads next to them. */
/*if(ring->is_triangle() || SubdFaceRing::is_triangle(edge->pair->face))
y *= 4.0f / 3.0f;*/
#if 0
if(ring->is_triangle() || SubdFaceRing::is_triangle(edge->pair->face))
y *= 4.0f / 3.0f;
#endif
stencil->get(interior2Indices[primitiveOffset+v]) = stencil->get(edge2Indices[primitiveOffset+v]);
stencil->get(interior2Indices[primitiveOffset+v]) += x;


@@ -81,9 +81,9 @@ void EdgeDice::stitch_triangles(vector<int>& outer, vector<int>& inner)
return; // XXX avoid crashes for Mu or Mv == 1, missing polygons
/* stitch together two arrays of verts with triangles. at each step,
we compare using the next verts on both sides, to find the split
direction with the smallest diagonal, and use that in order to keep
the triangle shape reasonable. */
* we compare using the next verts on both sides, to find the split
* direction with the smallest diagonal, and use that in order to keep
* the triangle shape reasonable. */
for(size_t i = 0, j = 0; i+1 < inner.size() || j+1 < outer.size();) {
int v0, v1, v2;
@@ -354,8 +354,8 @@ void TriangleDice::add_grid(SubPatch& sub, EdgeFactors& ef, int M)
// XXX normals are flipped, why?
/* grid is constructed starting from the outside edges, and adding
progressively smaller inner triangles that connected to the outer
one, until M = 1 or 2, the we fill up the last part. */
* progressively smaller inner triangles that connected to the outer
* one, until M = 1 or 2, the we fill up the last part. */
vector<int> outer_u, outer_v, outer_w;
int m;


@@ -82,8 +82,8 @@ int SubdFaceRing::vert_index(SubdVert *vertex)
void SubdFaceRing::evaluate_stencils(float3 *P, StencilMask *mask, int num)
{
/* first we sort verts by id. this way verts will always be added
in the same order to ensure the exact same float ops happen for control
points of other patches, so we get water-tight patches */
* in the same order to ensure the exact same float ops happen for control
* points of other patches, so we get water-tight patches */
int num_verts = m_verts.size();
vector<int> vmap(num_verts);
@@ -161,8 +161,8 @@ bool SubdFaceRing::is_quad(SubdFace *face)
bool SubdFaceRing::is_boundary(SubdFace *face)
{
/* note that face->is_boundary() returns a different result. That function
returns true when any of the *edges* are on the boundary. however, this
function returns true if any of the face *verts* are on the boundary. */
* returns true when any of the *edges* are on the boundary. however, this
* function returns true if any of the face *verts* are on the boundary. */
for(SubdFace::EdgeIterator it(face->edges()); !it.isDone(); it.advance()) {
SubdEdge *edge = it.current();


@@ -326,8 +326,8 @@ bool cuLibraryInit()
CUDA_LIBRARY_FIND(cuCtxGetLimit);
/* functions which changed 3.1 -> 3.2 for 64 bit stuff, the cuda library
has both the old ones for compatibility and new ones with _v2 postfix,
we load the _v2 ones here. */
* has both the old ones for compatibility and new ones with _v2 postfix,
* we load the _v2 ones here. */
CUDA_LIBRARY_FIND_V2(cuDeviceTotalMem);
CUDA_LIBRARY_FIND_V2(cuCtxCreate);
CUDA_LIBRARY_FIND_V2(cuModuleGetGlobal);


@@ -148,7 +148,7 @@ void MD5Hash::process(const uint8_t *data /*[64]*/)
/* Round 1. */
/* Let [abcd k s i] denote the operation
a = b + ((a + F(b,c,d) + X[k] + T[i]) <<< s). */
* a = b + ((a + F(b,c,d) + X[k] + T[i]) <<< s). */
#define F(x, y, z) (((x) & (y)) | (~(x) & (z)))
#define SET(a, b, c, d, k, s, Ti)\
t = a + F(b,c,d) + X[k] + Ti;\
@@ -174,7 +174,7 @@ void MD5Hash::process(const uint8_t *data /*[64]*/)
/* Round 2. */
/* Let [abcd k s i] denote the operation
a = b + ((a + G(b,c,d) + X[k] + T[i]) <<< s). */
* a = b + ((a + G(b,c,d) + X[k] + T[i]) <<< s). */
#define G(x, y, z) (((x) & (z)) | ((y) & ~(z)))
#define SET(a, b, c, d, k, s, Ti)\
t = a + G(b,c,d) + X[k] + Ti;\
@@ -200,7 +200,7 @@ void MD5Hash::process(const uint8_t *data /*[64]*/)
/* Round 3. */
/* Let [abcd k s t] denote the operation
a = b + ((a + H(b,c,d) + X[k] + T[i]) <<< s). */
* a = b + ((a + H(b,c,d) + X[k] + T[i]) <<< s). */
#define H(x, y, z) ((x) ^ (y) ^ (z))
#define SET(a, b, c, d, k, s, Ti)\
t = a + H(b,c,d) + X[k] + Ti;\
@@ -226,7 +226,7 @@ void MD5Hash::process(const uint8_t *data /*[64]*/)
/* Round 4. */
/* Let [abcd k s t] denote the operation
a = b + ((a + I(b,c,d) + X[k] + T[i]) <<< s). */
* a = b + ((a + I(b,c,d) + X[k] + T[i]) <<< s). */
#define I(x, y, z) ((y) ^ ((x) | ~(z)))
#define SET(a, b, c, d, k, s, Ti)\
t = a + I(b,c,d) + X[k] + Ti;\
@@ -251,8 +251,8 @@ void MD5Hash::process(const uint8_t *data /*[64]*/)
#undef SET
/* Then perform the following additions. (That is increment each
of the four registers by the value it had before this block
was started.) */
* of the four registers by the value it had before this block
* was started.) */
abcd[0] += a;
abcd[1] += b;
abcd[2] += c;


@@ -179,8 +179,8 @@ static bool path_read_text(const string& path, string& text)
string path_source_replace_includes(const string& source_, const string& path)
{
/* our own little c preprocessor that replaces #includes with the file
contents, to work around issue of opencl drivers not supporting
include paths with spaces in them */
* contents, to work around issue of opencl drivers not supporting
* include paths with spaces in them */
string source = source_;
const string include = "#include \"";
size_t n, pos = 0;


@@ -149,8 +149,10 @@ bool system_cpu_support_optimized()
__cpuid(result, 0);
num = result[0];
/*__cpuid(result, 0x80000000);
num_ex = result[0];*/
#if 0
__cpuid(result, 0x80000000);
num_ex = result[0];
#endif
if(num >= 1) {
__cpuid(result, 0x00000001);
@@ -167,13 +169,15 @@ bool system_cpu_support_optimized()
caps.fma3 = (result[2] & ((int)1 << 12)) != 0;
}
/*if(num_ex >= 0x80000001) {
#if 0
if(num_ex >= 0x80000001) {
__cpuid(result, 0x80000001);
caps.x64 = (result[3] & ((int)1 << 29)) != 0;
caps.sse4a = (result[2] & ((int)1 << 6)) != 0;
caps.fma4 = (result[2] & ((int)1 << 16)) != 0;
caps.xop = (result[2] & ((int)1 << 11)) != 0;
}*/
}
#endif
caps_init = true;
}


@@ -164,7 +164,7 @@ void TaskScheduler::init(int num_threads)
thread_scoped_lock lock(mutex);
/* multiple cycles instances can use this task scheduler, sharing the same
threads, so we keep track of the number of users. */
* threads, so we keep track of the number of users. */
if(users == 0) {
do_exit = false;


@@ -34,7 +34,7 @@ typedef boost::mutex::scoped_lock thread_scoped_lock;
typedef boost::condition_variable thread_condition_variable;
/* own pthread based implementation, to avoid boost version conflicts with
dynamically loaded blender plugins */
* dynamically loaded blender plugins */
class thread {
public:


@@ -139,7 +139,7 @@ Transform transform_inverse(const Transform& tfm)
if(!transform_matrix4_gj_inverse(R, M)) {
/* matrix is degenerate (e.g. 0 scale on some axis), ideally we should
never be in this situation, but try to invert it anyway with tweak */
* never be in this situation, but try to invert it anyway with tweak */
M[0][0] += 1e-8f;
M[1][1] += 1e-8f;
M[2][2] += 1e-8f;


@@ -255,7 +255,7 @@ Transform transform_inverse(const Transform& a);
__device_inline bool transform_uniform_scale(const Transform& tfm, float& scale)
{
/* the epsilon here is quite arbitrary, but this function is only used for
surface area and bump, where we except it to not be so sensitive */
* surface area and bump, where we except it to not be so sensitive */
Transform ttfm = transform_transpose(tfm);
float eps = 1e-6f;