/*
 * Copyright 2011-2013 Blender Foundation
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <limits.h>
#include <string.h>

#include "device/device.h"
#include "render/bake.h"
#include "render/buffers.h"
#include "render/camera.h"
#include "render/graph.h"
#include "render/integrator.h"
#include "render/light.h"
#include "render/mesh.h"
#include "render/object.h"
#include "render/scene.h"
#include "render/session.h"

#include "util/util_foreach.h"
#include "util/util_function.h"
#include "util/util_logging.h"
#include "util/util_math.h"
#include "util/util_opengl.h"
#include "util/util_task.h"
#include "util/util_time.h"

CCL_NAMESPACE_BEGIN

/* Note about preserve_tile_device option for tile manager:
 * progressive refine and viewport rendering require tiles to
 * always be allocated for the same device
 */
Session::Session(const SessionParams &params_)
    : params(params_),
      tile_manager(params.progressive,
                   params.samples,
                   params.tile_size,
                   params.start_resolution,
                   params.background == false || params.progressive_refine,
                   params.background,
                   params.tile_order,
                   max(params.device.multi_devices.size(), 1),
                   params.pixel_size),
      stats(),
      profiler()
{
  device_use_gl = ((params.device.type != DEVICE_CPU) && !params.background);

  TaskScheduler::init(params.threads);

  session_thread = NULL;
  scene = NULL;

  reset_time = 0.0;
  last_update_time = 0.0;

  delayed_reset.do_reset = false;
  delayed_reset.samples = 0;

  display_outdated = false;
  gpu_draw_ready = false;
  gpu_need_display_buffer_update = false;
  pause = false;

  buffers = NULL;
  display = NULL;

  /* Validate denoising parameters. */
  set_denoising(params.denoising);

  /* Create CPU/GPU devices. */
  device = Device::create(params.device, stats, profiler, params.background);

  if (!device->error_message().empty()) {
    progress.set_error(device->error_message());
    return;
  }

  /* Create buffers for interactive rendering. */
  if (!(params.background && !params.write_render_cb)) {
    buffers = new RenderBuffers(device);
    display = new DisplayBuffer(device, params.display_buffer_linear);
  }
}

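/* The destructor stops the session thread via cancel(), writes out the final
 * image through write_render_cb if such a callback was provided, and then
 * frees the buffers, display, scene and device owned by this session. */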
Session::~Session()
{
  cancel();

  if (buffers && params.write_render_cb) {
    /* Copy to display buffer and write out image if requested */
    delete display;

    display = new DisplayBuffer(device, false);
    display->reset(buffers->params);
    copy_to_display_buffer(params.samples);

    int w = display->draw_width;
    int h = display->draw_height;
    uchar4 *pixels = display->rgba_byte.copy_from_device(0, w, h);
    params.write_render_cb((uchar *)pixels, w, h, 4);
  }

  /* clean up */
  tile_manager.device_free();

  delete buffers;
  delete display;
  delete scene;
  delete device;

  TaskScheduler::exit();
}

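/* Start rendering on a background session thread. A typical caller (for
 * example the Blender integration) roughly does: construct a Session, assign
 * a Scene to session->scene, call reset() with buffer parameters and a sample
 * count, then start() and finally wait(). This is an illustrative sketch of
 * the expected call order, not an exhaustive description of the API. */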
void Session::start()
{
  if (!session_thread) {
    session_thread = new thread(function_bind(&Session::run, this));
  }
}

void Session::cancel()
{
  if (session_thread) {
    /* wait for session thread to end */
    progress.set_cancel("Exiting");

    gpu_need_display_buffer_update = false;
    gpu_need_display_buffer_update_cond.notify_all();

    {
      thread_scoped_lock pause_lock(pause_mutex);
      pause = false;
    }
    pause_cond.notify_all();

    wait();
  }
}

bool Session::ready_to_reset()
{
  double dt = time_dt() - reset_time;

  if (!display_outdated)
    return (dt > params.reset_timeout);
  else
    return (dt > params.cancel_timeout);
}

/* GPU Session */

void Session::reset_gpu(BufferParams &buffer_params, int samples)
{
  thread_scoped_lock pause_lock(pause_mutex);

  /* block for buffer access and reset immediately. we can't do this
   * in the thread, because we need to allocate an OpenGL buffer, and
   * that only works in the main thread */
  thread_scoped_lock display_lock(display_mutex);
  thread_scoped_lock buffers_lock(buffers_mutex);

  display_outdated = true;
  reset_time = time_dt();

  reset_(buffer_params, samples);

  gpu_need_display_buffer_update = false;
  gpu_need_display_buffer_update_cond.notify_all();

  pause_cond.notify_all();
}

bool Session::draw_gpu(BufferParams &buffer_params, DeviceDrawParams &draw_params)
{
  /* block for buffer access */
  thread_scoped_lock display_lock(display_mutex);

  /* first check we already rendered something */
  if (gpu_draw_ready) {
    /* then verify the buffers have the expected size, so we don't
     * draw previous results in a resized window */
    if (buffer_params.width == display->params.width &&
        buffer_params.height == display->params.height) {
      /* for CUDA we need to do tone-mapping still, since we can
       * only access GL buffers from the main thread. */
      if (gpu_need_display_buffer_update) {
        thread_scoped_lock buffers_lock(buffers_mutex);
        copy_to_display_buffer(tile_manager.state.sample);
        gpu_need_display_buffer_update = false;
        gpu_need_display_buffer_update_cond.notify_all();
      }

      display->draw(device, draw_params);

      if (display_outdated && (time_dt() - reset_time) > params.text_timeout)
        return false;

      return true;
    }
  }

  return false;
}

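/* Main render loop used when drawing goes through the GPU display path
 * (device_use_gl): it advances the tile manager, updates the scene, renders,
 * and then waits for the main thread to copy results into the display buffer. */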
void Session::run_gpu()
{
  bool tiles_written = false;

  reset_time = time_dt();
  last_update_time = time_dt();
  last_display_time = last_update_time;

  progress.set_render_start_time();

  while (!progress.get_cancel()) {
    /* advance to next tile */
    bool no_tiles = !tile_manager.next();

    DeviceKernelStatus kernel_state = DEVICE_KERNEL_UNKNOWN;
    if (no_tiles) {
      kernel_state = device->get_active_kernel_switch_state();
    }

    if (params.background) {
      /* if no work left and in background mode, we can stop immediately */
      if (no_tiles) {
        progress.set_status("Finished");
        break;
      }
    }

    else if (no_tiles && kernel_state == DEVICE_KERNEL_FEATURE_KERNEL_AVAILABLE) {
      reset_gpu(tile_manager.params, params.samples);
    }

    else {
      /* if in interactive mode, and we are either paused or done for now,
       * wait for pause condition notify to wake up again */
      thread_scoped_lock pause_lock(pause_mutex);

      if (!pause && !tile_manager.done()) {
        /* reset could have happened after no_tiles was set, before this lock.
         * in this case we shall not wait for pause condition
         */
      }
      else if (pause || no_tiles) {
        update_status_time(pause, no_tiles);

        while (1) {
          scoped_timer pause_timer;
          pause_cond.wait(pause_lock);
          if (pause) {
            progress.add_skip_time(pause_timer, params.background);
          }

          update_status_time(pause, no_tiles);
          progress.set_update();

          if (!pause)
            break;
        }
      }

      if (progress.get_cancel())
        break;
    }

    if (!no_tiles) {
      /* update scene */
      scoped_timer update_timer;
      if (update_scene()) {
        profiler.reset(scene->shaders.size(), scene->objects.size());
      }
      progress.add_skip_time(update_timer, params.background);

      if (!device->error_message().empty())
        progress.set_error(device->error_message());

      if (progress.get_cancel())
        break;

      /* buffers mutex is locked entirely while rendering each
       * sample, and released/reacquired on each iteration to allow
       * reset and draw in between */
      thread_scoped_lock buffers_lock(buffers_mutex);

      /* update status and timing */
      update_status_time();

      /* render */
      bool delayed_denoise = false;
      const bool need_denoise = render_need_denoise(delayed_denoise);
      render(need_denoise);

      device->task_wait();

      if (!device->error_message().empty())
        progress.set_cancel(device->error_message());

      /* update status and timing */
      update_status_time();

      gpu_need_display_buffer_update = !delayed_denoise;
      gpu_draw_ready = true;
      progress.set_update();

      /* wait until the display buffer is updated */
      if (!params.background) {
        while (gpu_need_display_buffer_update) {
          if (progress.get_cancel())
            break;

          gpu_need_display_buffer_update_cond.wait(buffers_lock);
        }
      }

      if (!device->error_message().empty())
        progress.set_error(device->error_message());

      tiles_written = update_progressive_refine(progress.get_cancel());

      if (progress.get_cancel())
        break;
    }
  }

  if (!tiles_written)
    update_progressive_refine(true);
}

/* CPU Session */

void Session::reset_cpu(BufferParams &buffer_params, int samples)
{
  thread_scoped_lock reset_lock(delayed_reset.mutex);
  thread_scoped_lock pause_lock(pause_mutex);

  display_outdated = true;
  reset_time = time_dt();

  delayed_reset.params = buffer_params;
  delayed_reset.samples = samples;
  delayed_reset.do_reset = true;
  device->task_cancel();

  pause_cond.notify_all();
}

bool Session::draw_cpu(BufferParams &buffer_params, DeviceDrawParams &draw_params)
{
  thread_scoped_lock display_lock(display_mutex);

  /* first check we already rendered something */
  if (display->draw_ready()) {
    /* then verify the buffers have the expected size, so we don't
     * draw previous results in a resized window */
    if (buffer_params.width == display->params.width &&
        buffer_params.height == display->params.height) {
      display->draw(device, draw_params);

      if (display_outdated && (time_dt() - reset_time) > params.text_timeout)
        return false;

      return true;
    }
  }

  return false;
}

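/* Try to move an in-flight tile from a CPU worker to tile_device, so that
 * other devices do not sit idle while the last CPU tiles finish. Expects
 * tile_lock to hold tile_mutex, as done by acquire_tile(). */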
bool Session::steal_tile(RenderTile &rtile, Device *tile_device, thread_scoped_lock &tile_lock)
{
  /* Devices that can get their tiles stolen don't steal tiles themselves.
   * Additionally, if there are no stealable tiles in flight, give up here. */
  if (tile_device->info.type == DEVICE_CPU || stealable_tiles == 0) {
    return false;
  }

  /* Wait until no other thread is trying to steal a tile. */
  while (tile_stealing_state != NOT_STEALING && stealable_tiles > 0) {
    /* Someone else is currently trying to get a tile.
     * Wait on the condition variable and try later. */
    tile_steal_cond.wait(tile_lock);
  }
  /* If another thread stole the last stealable tile in the meantime, give up. */
  if (stealable_tiles == 0) {
    return false;
  }

  /* There are stealable tiles in flight, so signal that one should be released. */
  tile_stealing_state = WAITING_FOR_TILE;

  /* Wait until a device notices the signal and releases its tile. */
  while (tile_stealing_state != GOT_TILE && stealable_tiles > 0) {
    tile_steal_cond.wait(tile_lock);
  }
  /* If the last stealable tile finished on its own, give up. */
  if (tile_stealing_state != GOT_TILE) {
    tile_stealing_state = NOT_STEALING;
    return false;
  }

  /* Successfully stole a tile, now move it to the new device. */
  rtile = stolen_tile;
  rtile.buffers->buffer.move_device(tile_device);
  rtile.buffer = rtile.buffers->buffer.device_pointer;
  rtile.stealing_state = RenderTile::NO_STEALING;
  rtile.num_samples -= (rtile.sample - rtile.start_sample);
  rtile.start_sample = rtile.sample;

  tile_stealing_state = NOT_STEALING;

  /* Poke any threads which might be waiting for NOT_STEALING above. */
  tile_steal_cond.notify_one();

  return true;
}

bool Session::get_tile_stolen()
{
  /* If tile_stealing_state is WAITING_FOR_TILE, atomically set it to RELEASING_TILE
   * and return true. */
  TileStealingState expected = WAITING_FOR_TILE;
  return tile_stealing_state.compare_exchange_weak(expected, RELEASING_TILE);
}

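/* Hand out the next tile (path trace, bake or denoise work) to tile_device and
 * fill rtile with its buffers and sample state. Returns false once no more
 * work is available for this device. */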
bool Session::acquire_tile(RenderTile &rtile, Device *tile_device, uint tile_types)
{
  if (progress.get_cancel()) {
    if (params.progressive_refine == false) {
      /* for progressive refine current sample should be finished for all tiles */
      return false;
    }
  }

  thread_scoped_lock tile_lock(tile_mutex);

  /* get next tile from manager */
  Tile *tile;
  int device_num = device->device_number(tile_device);

  while (!tile_manager.next_tile(tile, device_num, tile_types)) {
    /* Can only steal tiles on devices that support rendering
     * This is because denoising tiles cannot be stolen (see below)
     */
    if ((tile_types & (RenderTile::PATH_TRACE | RenderTile::BAKE)) &&
        steal_tile(rtile, tile_device, tile_lock)) {
      return true;
    }

    /* Wait for denoising tiles to become available */
    if ((tile_types & RenderTile::DENOISE) && !progress.get_cancel() && tile_manager.has_tiles()) {
      denoising_cond.wait(tile_lock);
      continue;
    }

    return false;
  }

  /* fill render tile */
  rtile.x = tile_manager.state.buffer.full_x + tile->x;
  rtile.y = tile_manager.state.buffer.full_y + tile->y;
  rtile.w = tile->w;
  rtile.h = tile->h;
  rtile.start_sample = tile_manager.state.sample;
  rtile.num_samples = tile_manager.state.num_samples;
  rtile.resolution = tile_manager.state.resolution_divider;
  rtile.tile_index = tile->index;
  rtile.stealing_state = RenderTile::NO_STEALING;

  if (tile->state == Tile::DENOISE) {
    rtile.task = RenderTile::DENOISE;
  }
  else {
    if (tile_device->info.type == DEVICE_CPU) {
      stealable_tiles++;
      rtile.stealing_state = RenderTile::CAN_BE_STOLEN;
    }

    if (read_bake_tile_cb) {
      rtile.task = RenderTile::BAKE;
    }
    else {
      rtile.task = RenderTile::PATH_TRACE;
    }
  }

  tile_lock.unlock();

  /* in case of a permanent buffer, return it, otherwise we will allocate
   * a new temporary buffer */
  if (buffers) {
    tile_manager.state.buffer.get_offset_stride(rtile.offset, rtile.stride);

    rtile.buffer = buffers->buffer.device_pointer;
    rtile.buffers = buffers;

    device->map_tile(tile_device, rtile);

    /* Reset copy state, since buffer contents change after the tile was acquired */
    buffers->map_neighbor_copied = false;

    /* This hack ensures that the copy in 'MultiDevice::map_neighbor_tiles' accounts
     * for the buffer resolution divider. */
    buffers->buffer.data_width = (buffers->params.width * buffers->params.get_passes_size()) /
                                 tile_manager.state.resolution_divider;
    buffers->buffer.data_height = buffers->params.height / tile_manager.state.resolution_divider;

    return true;
  }

  if (tile->buffers == NULL) {
    /* fill buffer parameters */
    BufferParams buffer_params = tile_manager.params;
    buffer_params.full_x = rtile.x;
    buffer_params.full_y = rtile.y;
    buffer_params.width = rtile.w;
    buffer_params.height = rtile.h;

    /* allocate buffers */
    tile->buffers = new RenderBuffers(tile_device);
    tile->buffers->reset(buffer_params);
  }
  else if (tile->buffers->buffer.device != tile_device) {
    /* Move buffer to current tile device again in case it was stolen before.
     * Not needed for denoising since that already handles mapping of tiles and
     * neighbors to its own device. */
    if (rtile.task != RenderTile::DENOISE) {
      tile->buffers->buffer.move_device(tile_device);
    }
  }

  tile->buffers->map_neighbor_copied = false;

  tile->buffers->params.get_offset_stride(rtile.offset, rtile.stride);

  rtile.buffer = tile->buffers->buffer.device_pointer;
  rtile.buffers = tile->buffers;
  rtile.sample = tile_manager.state.sample;

  if (read_bake_tile_cb) {
    /* This will read any passes needed as input for baking. */
    if (tile_manager.state.sample == tile_manager.range_start_sample) {
      {
        thread_scoped_lock tile_lock(tile_mutex);
        read_bake_tile_cb(rtile);
      }
      rtile.buffers->buffer.copy_to_device();
    }
  }
  else {
    /* This will tag tile as IN PROGRESS in blender-side render pipeline,
     * which is needed to highlight currently rendering tile before first
     * sample was processed for it. */
    update_tile_sample(rtile);
  }

  return true;
}

void Session::update_tile_sample(RenderTile &rtile)
{
  thread_scoped_lock tile_lock(tile_mutex);

  if (update_render_tile_cb) {
    if (params.progressive_refine == false) {
      /* todo: optimize this by making it thread safe and removing lock */

      update_render_tile_cb(rtile, true);
    }
  }

  update_status_time();
}

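/* Called by a device once it is done with a tile for the current sample range:
 * either hands the tile over to a stealing device, writes/updates its pixels
 * through the callbacks, or marks it finished in the tile manager. */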
void Session::release_tile(RenderTile &rtile, const bool need_denoise)
{
  thread_scoped_lock tile_lock(tile_mutex);

  if (rtile.stealing_state != RenderTile::NO_STEALING) {
    stealable_tiles--;
    if (rtile.stealing_state == RenderTile::WAS_STOLEN) {
      /* If the tile is being stolen, don't release it here - the new device will pick up where
       * the old one left off. */

      assert(tile_stealing_state == RELEASING_TILE);
      assert(rtile.sample < rtile.start_sample + rtile.num_samples);

      tile_stealing_state = GOT_TILE;
      stolen_tile = rtile;
      tile_steal_cond.notify_all();
      return;
    }
    else if (stealable_tiles == 0) {
      /* If this was the last stealable tile, wake up any threads still waiting for one. */
      tile_steal_cond.notify_all();
    }
  }

  progress.add_finished_tile(rtile.task == RenderTile::DENOISE);

  bool delete_tile;

  if (tile_manager.finish_tile(rtile.tile_index, need_denoise, delete_tile)) {
    /* Finished tile pixels write. */
    if (write_render_tile_cb && params.progressive_refine == false) {
      write_render_tile_cb(rtile);
    }

    if (delete_tile) {
      delete rtile.buffers;
      tile_manager.state.tiles[rtile.tile_index].buffers = NULL;
    }
  }
  else {
    /* In progress tile pixels update. */
    if (update_render_tile_cb && params.progressive_refine == false) {
      update_render_tile_cb(rtile, false);
    }
  }

  update_status_time();

  /* Notify denoising thread that a tile was finished. */
  denoising_cond.notify_all();
}

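/* Gather the 3x3 neighborhood of a tile (or the bounds of the whole image when
 * tiles are not denoised individually) so the denoiser can read overlapping
 * pixels, and map the corresponding buffers on tile_device. */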
void Session::map_neighbor_tiles(RenderTileNeighbors &neighbors, Device *tile_device)
{
  thread_scoped_lock tile_lock(tile_mutex);

  const int4 image_region = make_int4(
      tile_manager.state.buffer.full_x,
      tile_manager.state.buffer.full_y,
      tile_manager.state.buffer.full_x + tile_manager.state.buffer.width,
      tile_manager.state.buffer.full_y + tile_manager.state.buffer.height);

  RenderTile &center_tile = neighbors.tiles[RenderTileNeighbors::CENTER];

  if (!tile_manager.schedule_denoising) {
    /* Fix up tile slices with overlap. */
    if (tile_manager.slice_overlap != 0) {
      int y = max(center_tile.y - tile_manager.slice_overlap, image_region.y);
      center_tile.h = min(center_tile.y + center_tile.h + tile_manager.slice_overlap,
                          image_region.w) -
                      y;
      center_tile.y = y;
    }

    /* Tiles are not being denoised individually, which means the entire image is processed. */
    neighbors.set_bounds_from_center();
  }
  else {
    int center_idx = center_tile.tile_index;
    assert(tile_manager.state.tiles[center_idx].state == Tile::DENOISE);

    for (int dy = -1, i = 0; dy <= 1; dy++) {
      for (int dx = -1; dx <= 1; dx++, i++) {
        RenderTile &rtile = neighbors.tiles[i];
        int nindex = tile_manager.get_neighbor_index(center_idx, i);
        if (nindex >= 0) {
          Tile *tile = &tile_manager.state.tiles[nindex];

          rtile.x = image_region.x + tile->x;
          rtile.y = image_region.y + tile->y;
          rtile.w = tile->w;
          rtile.h = tile->h;

          if (buffers) {
            tile_manager.state.buffer.get_offset_stride(rtile.offset, rtile.stride);

            rtile.buffer = buffers->buffer.device_pointer;
            rtile.buffers = buffers;
          }
          else {
            assert(tile->buffers);
            tile->buffers->params.get_offset_stride(rtile.offset, rtile.stride);

            rtile.buffer = tile->buffers->buffer.device_pointer;
            rtile.buffers = tile->buffers;
          }
        }
        else {
          int px = center_tile.x + dx * params.tile_size.x;
          int py = center_tile.y + dy * params.tile_size.y;

          rtile.x = clamp(px, image_region.x, image_region.z);
          rtile.y = clamp(py, image_region.y, image_region.w);
          rtile.w = rtile.h = 0;

          rtile.buffer = (device_ptr)NULL;
          rtile.buffers = NULL;
        }
      }
    }
  }

  assert(center_tile.buffers);
  device->map_neighbor_tiles(tile_device, neighbors);

  /* The denoised result is written back to the original tile. */
  neighbors.target = center_tile;
}

void Session::unmap_neighbor_tiles(RenderTileNeighbors &neighbors, Device *tile_device)
{
  thread_scoped_lock tile_lock(tile_mutex);
  device->unmap_neighbor_tiles(tile_device, neighbors);
}

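/* Main render loop used when results are copied to the display buffer on the
 * session thread itself (no GPU drawing). Resets requested from the main
 * thread arrive through delayed_reset and are applied here between samples. */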
void Session::run_cpu()
{
  bool tiles_written = false;

  last_update_time = time_dt();
  last_display_time = last_update_time;

  {
    /* reset once to start */
    thread_scoped_lock reset_lock(delayed_reset.mutex);
    thread_scoped_lock buffers_lock(buffers_mutex);
    thread_scoped_lock display_lock(display_mutex);

    reset_(delayed_reset.params, delayed_reset.samples);
    delayed_reset.do_reset = false;
  }

  while (!progress.get_cancel()) {
    /* advance to next tile */
    bool no_tiles = !tile_manager.next();
    bool need_copy_to_display_buffer = false;

    DeviceKernelStatus kernel_state = DEVICE_KERNEL_UNKNOWN;
    if (no_tiles) {
      kernel_state = device->get_active_kernel_switch_state();
    }

    if (params.background) {
      /* if no work left and in background mode, we can stop immediately */
      if (no_tiles) {
        progress.set_status("Finished");
        break;
      }
    }

    else if (no_tiles && kernel_state == DEVICE_KERNEL_FEATURE_KERNEL_AVAILABLE) {
      reset_cpu(tile_manager.params, params.samples);
    }

    else {
      /* if in interactive mode, and we are either paused or done for now,
       * wait for pause condition notify to wake up again */
      thread_scoped_lock pause_lock(pause_mutex);

      if (!pause && delayed_reset.do_reset) {
        /* reset once to start */
        thread_scoped_lock reset_lock(delayed_reset.mutex);
        thread_scoped_lock buffers_lock(buffers_mutex);
        thread_scoped_lock display_lock(display_mutex);

        reset_(delayed_reset.params, delayed_reset.samples);
        delayed_reset.do_reset = false;
      }
      else if (pause || no_tiles) {
        update_status_time(pause, no_tiles);

        while (1) {
          scoped_timer pause_timer;
          pause_cond.wait(pause_lock);
          if (pause) {
            progress.add_skip_time(pause_timer, params.background);
          }

          update_status_time(pause, no_tiles);
          progress.set_update();

          if (!pause)
            break;
        }
      }

      if (progress.get_cancel())
        break;
    }

    if (!no_tiles) {
      /* update scene */
      scoped_timer update_timer;
      if (update_scene()) {
        profiler.reset(scene->shaders.size(), scene->objects.size());
      }
      progress.add_skip_time(update_timer, params.background);

      if (!device->error_message().empty())
        progress.set_error(device->error_message());

      if (progress.get_cancel())
        break;

      /* buffers mutex is locked entirely while rendering each
       * sample, and released/reacquired on each iteration to allow
       * reset and draw in between */
      thread_scoped_lock buffers_lock(buffers_mutex);

      /* update status and timing */
      update_status_time();

      /* render */
      bool delayed_denoise = false;
      const bool need_denoise = render_need_denoise(delayed_denoise);
      render(need_denoise);

      /* update status and timing */
      update_status_time();

      if (!params.background)
        need_copy_to_display_buffer = !delayed_denoise;

      if (!device->error_message().empty())
        progress.set_error(device->error_message());
    }

    device->task_wait();

    {
      thread_scoped_lock reset_lock(delayed_reset.mutex);
      thread_scoped_lock buffers_lock(buffers_mutex);
      thread_scoped_lock display_lock(display_mutex);

      if (delayed_reset.do_reset) {
        /* reset rendering if request from main thread */
        delayed_reset.do_reset = false;
        reset_(delayed_reset.params, delayed_reset.samples);
      }
      else if (need_copy_to_display_buffer) {
        /* Only copy to display_buffer if we do not reset, we don't
         * want to show the result of an incomplete sample */
        copy_to_display_buffer(tile_manager.state.sample);
      }

      if (!device->error_message().empty())
        progress.set_error(device->error_message());

      tiles_written = update_progressive_refine(progress.get_cancel());
    }

    progress.set_update();
  }

  if (!tiles_written)
    update_progressive_refine(true);
}

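/* Entry point of the session thread: starts the profiler if requested and
 * dispatches to run_gpu() or run_cpu() depending on how results are displayed. */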
void Session::run()
{
  if (params.use_profiling && (params.device.type == DEVICE_CPU)) {
    profiler.start();
  }

  /* session thread loop */
  progress.set_status("Waiting for render to start");

  /* run */
  if (!progress.get_cancel()) {
    /* reset number of rendered samples */
    progress.reset_sample();

    if (device_use_gl)
      run_gpu();
    else
      run_cpu();
  }

  profiler.stop();

  /* progress update */
  if (progress.get_cancel())
    progress.set_status(progress.get_cancel_message());
  else
    progress.set_update();
}

bool Session::draw(BufferParams &buffer_params, DeviceDrawParams &draw_params)
{
  if (device_use_gl)
    return draw_gpu(buffer_params, draw_params);
  else
    return draw_cpu(buffer_params, draw_params);
}

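/* Internal reset shared by reset_gpu() and reset_cpu(); callers are expected
 * to hold the relevant buffer/display locks before calling this. */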
void Session::reset_(BufferParams &buffer_params, int samples)
{
  if (buffers && buffer_params.modified(tile_manager.params)) {
    gpu_draw_ready = false;
    buffers->reset(buffer_params);
    if (display) {
      display->reset(buffer_params);
    }
  }

  tile_manager.reset(buffer_params, samples);
  stealable_tiles = 0;
  tile_stealing_state = NOT_STEALING;
  progress.reset_sample();

  bool show_progress = params.background || tile_manager.get_num_effective_samples() != INT_MAX;
  progress.set_total_pixel_samples(show_progress ? tile_manager.state.total_pixel_samples : 0);

  if (!params.background)
    progress.set_start_time();
  progress.set_render_start_time();
}

void Session::reset(BufferParams &buffer_params, int samples)
{
  if (device_use_gl)
    reset_gpu(buffer_params, samples);
  else
    reset_cpu(buffer_params, samples);
}

void Session::set_samples(int samples)
{
  if (samples != params.samples) {
    params.samples = samples;
    tile_manager.set_samples(samples);

    pause_cond.notify_all();
  }
}

void Session::set_pause(bool pause_)
{
  bool notify = false;

  {
    thread_scoped_lock pause_lock(pause_mutex);

    if (pause != pause_) {
      pause = pause_;
      notify = true;
    }
  }

  if (session_thread) {
    if (notify) {
      pause_cond.notify_all();
    }
  }
  else if (pause_) {
    update_status_time(pause_);
  }
}

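/* Change the denoising parameters for this session. Falls back to no denoising
 * when the selected denoiser type is not supported by the compute device, and
 * configures the tile manager overlap and scheduling accordingly. */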
void Session::set_denoising(const DenoiseParams &denoising)
{
  bool need_denoise = denoising.need_denoising_task();

  /* Lock buffers so no denoising operation is triggered while the settings are changed here. */
  thread_scoped_lock buffers_lock(buffers_mutex);
  params.denoising = denoising;

  if (!(params.device.denoisers & denoising.type)) {
    if (need_denoise) {
      progress.set_error("Denoiser type not supported by compute device");
    }

    params.denoising.use = false;
    need_denoise = false;
  }

  // TODO(pmours): Query the required overlap value for denoising from the device?
  tile_manager.slice_overlap = need_denoise && !params.background ? 64 : 0;

  /* Schedule per tile denoising for final renders if we are either denoising or
   * need prefiltered passes for the native denoiser. */
  tile_manager.schedule_denoising = need_denoise && !buffers;
}

void Session::set_denoising_start_sample(int sample)
{
  if (sample != params.denoising.start_sample) {
    params.denoising.start_sample = sample;

    pause_cond.notify_all();
  }
}

void Session::wait()
{
  if (session_thread) {
    session_thread->join();
    delete session_thread;
  }

  session_thread = NULL;
}

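/* Synchronize scene data with the device before rendering a sample. Returns
 * true when something was updated, in which case the caller resets profiling. */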
bool Session::update_scene()
{
  thread_scoped_lock scene_lock(scene->mutex);

  /* update camera if dimensions changed for progressive render. the camera
   * knows nothing about progressive or cropped rendering, it just gets the
   * image dimensions passed in */
  Camera *cam = scene->camera;
  int width = tile_manager.state.buffer.full_width;
  int height = tile_manager.state.buffer.full_height;
  int resolution = tile_manager.state.resolution_divider;

  cam->set_screen_size_and_resolution(width, height, resolution);

  /* number of samples is needed by multi jittered
   * sampling pattern and by baking */
  Integrator *integrator = scene->integrator;
  BakeManager *bake_manager = scene->bake_manager;

  if (integrator->get_sampling_pattern() != SAMPLING_PATTERN_SOBOL || bake_manager->get_baking()) {
    integrator->set_aa_samples(tile_manager.num_samples);
  }

  bool kernel_switch_needed = false;
  if (scene->update(progress, kernel_switch_needed)) {
    if (kernel_switch_needed) {
      reset(tile_manager.params, params.samples);
    }
    return true;
  }
  return false;
}

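/* Build the status and substatus strings reported through Progress.
 *
 * For tiled (non-progressive) renders this shows tile counts and, where the
 * device can report it, sample counts; for progressive renders it shows the
 * current sample. Pause and completion override the normal status text.
 */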
void Session::update_status_time(bool show_pause, bool show_done)
{
  int progressive_sample = tile_manager.state.sample;
  int num_samples = tile_manager.get_num_effective_samples();

  int tile = progress.get_rendered_tiles();
  int num_tiles = tile_manager.state.num_tiles;

  /* update status */
  string status, substatus;

  if (!params.progressive) {
    const bool is_cpu = params.device.type == DEVICE_CPU;
    const bool rendering_finished = (tile == num_tiles);
    const bool is_last_tile = (tile + 1) == num_tiles;

    substatus = string_printf("Rendered %d/%d Tiles", tile, num_tiles);

    if (!rendering_finished && (device->show_samples() || (is_cpu && is_last_tile))) {
      /* Some devices automatically support showing the sample number:
       * - CUDADevice
       * - OpenCLDevice when using the megakernel (the split kernel renders multiple
       *   samples at the same time, so the current sample isn't really defined)
       * - CPUDevice when using one thread
       * For these devices, the current sample is always shown.
       *
       * The other option is when the last tile is currently being rendered by the CPU.
       */
      substatus += string_printf(", Sample %d/%d", progress.get_current_sample(), num_samples);
    }
    if (params.denoising.use && params.denoising.type != DENOISER_OPENIMAGEDENOISE) {
      substatus += string_printf(", Denoised %d tiles", progress.get_denoised_tiles());
    }
    else if (params.denoising.store_passes && params.denoising.type == DENOISER_NLM) {
      substatus += string_printf(", Prefiltered %d tiles", progress.get_denoised_tiles());
    }
  }
  else if (tile_manager.num_samples == Integrator::MAX_SAMPLES)
    substatus = string_printf("Path Tracing Sample %d", progressive_sample + 1);
  else
    substatus = string_printf("Path Tracing Sample %d/%d", progressive_sample + 1, num_samples);

  if (show_pause) {
    status = "Rendering Paused";
  }
  else if (show_done) {
    status = "Rendering Done";
    progress.set_end_time(); /* Save end time so that further calls to get_time are accurate. */
  }
  else {
    status = substatus;
    substatus.clear();
  }

  progress.set_status(status, substatus);
}

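/* Decide whether denoising should run after the current set of samples.
 *
 * `delayed` is set when denoising is wanted but postponed to keep the
 * viewport responsive; the caller can retry on a later iteration.
 */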
bool Session::render_need_denoise(bool &delayed)
{
  delayed = false;

  /* Not supported yet for baking. */
  if (read_bake_tile_cb) {
    return false;
  }

  /* Denoising enabled? */
  if (!params.denoising.need_denoising_task()) {
    return false;
  }

  if (params.background) {
    /* Background render, only denoise when rendering the last sample. */
    return tile_manager.done();
  }

  /* Viewport render. */

  /* It can happen that denoising was already enabled, but the scene still needs an update. */
  if (scene->film->is_modified() || !scene->film->get_denoising_data_offset()) {
    return false;
  }

  /* Immediately denoise when we reach the start sample or last sample. */
  const int num_samples_finished = tile_manager.state.sample + 1;
  if (num_samples_finished == params.denoising.start_sample ||
      num_samples_finished == params.samples) {
    return true;
  }

  /* Do not denoise until the sample at which denoising should start is reached. */
  if (num_samples_finished < params.denoising.start_sample) {
    return false;
  }

  /* Avoid excessive denoising in viewport after reaching a certain amount of samples. */
  delayed = (tile_manager.state.sample >= 20 &&
             (time_dt() - last_display_time) < params.progressive_update_timeout);
  return !delayed;
}

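/* Enqueue the path tracing work for the current tile state as a device task.
 *
 * When denoising is needed it either runs per tile during rendering, or, when
 * a single render buffer is kept, as a separate DENOISE_BUFFER task over the
 * whole buffer once rendering has finished.
 */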
void Session::render(bool need_denoise)
{
  if (buffers && tile_manager.state.sample == tile_manager.range_start_sample) {
    /* Clear buffers. */
    buffers->zero();
  }

  if (tile_manager.state.buffer.width == 0 || tile_manager.state.buffer.height == 0) {
    return; /* Avoid empty launches. */
  }

  /* Add path trace task. */
  DeviceTask task(DeviceTask::RENDER);

  task.acquire_tile = function_bind(&Session::acquire_tile, this, _2, _1, _3);
  task.release_tile = function_bind(&Session::release_tile, this, _1, need_denoise);
  task.map_neighbor_tiles = function_bind(&Session::map_neighbor_tiles, this, _1, _2);
  task.unmap_neighbor_tiles = function_bind(&Session::unmap_neighbor_tiles, this, _1, _2);
  task.get_cancel = function_bind(&Progress::get_cancel, &this->progress);
  task.update_tile_sample = function_bind(&Session::update_tile_sample, this, _1);
  task.update_progress_sample = function_bind(&Progress::add_samples, &this->progress, _1, _2);
  task.get_tile_stolen = function_bind(&Session::get_tile_stolen, this);
  task.need_finish_queue = params.progressive_refine;
  task.integrator_branched = scene->integrator->get_method() == Integrator::BRANCHED_PATH;

  task.adaptive_sampling.use = (scene->integrator->get_sampling_pattern() ==
                                SAMPLING_PATTERN_PMJ) &&
                               scene->dscene.data.film.pass_adaptive_aux_buffer;
  task.adaptive_sampling.min_samples = scene->dscene.data.integrator.adaptive_min_samples;
  task.adaptive_sampling.adaptive_step = scene->dscene.data.integrator.adaptive_step;

  /* Acquire render tiles by default. */
  task.tile_types = RenderTile::PATH_TRACE;

  if (need_denoise) {
    task.denoising = params.denoising;

    task.pass_stride = scene->film->get_pass_stride();
    task.target_pass_stride = task.pass_stride;
    task.pass_denoising_data = scene->film->get_denoising_data_offset();
    task.pass_denoising_clean = scene->film->get_denoising_clean_offset();

    task.denoising_from_render = true;

    if (tile_manager.schedule_denoising) {
      /* Acquire denoising tiles during rendering. */
      task.tile_types |= RenderTile::DENOISE;
    }
    else {
      assert(buffers);

      /* Schedule rendering and wait for it to finish. */
      device->task_add(task);
      device->task_wait();

      /* Then run denoising on the whole image at once. */
      task.type = DeviceTask::DENOISE_BUFFER;
      task.x = tile_manager.state.buffer.full_x;
      task.y = tile_manager.state.buffer.full_y;
      task.w = tile_manager.state.buffer.width;
      task.h = tile_manager.state.buffer.height;
      task.buffer = buffers->buffer.device_pointer;
      task.sample = tile_manager.state.sample;
      task.num_samples = tile_manager.state.num_samples;
      tile_manager.state.buffer.get_offset_stride(task.offset, task.stride);
      task.buffers = buffers;
    }
  }

  device->task_add(task);
}

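/* Run film conversion on the render buffer and hand the result to the
 * display buffer, resizing the display to the current tile buffer size. */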
void Session::copy_to_display_buffer(int sample)
{
  /* add film conversion task */
  DeviceTask task(DeviceTask::FILM_CONVERT);

  task.x = tile_manager.state.buffer.full_x;
  task.y = tile_manager.state.buffer.full_y;
  task.w = tile_manager.state.buffer.width;
  task.h = tile_manager.state.buffer.height;
  task.rgba_byte = display->rgba_byte.device_pointer;
  task.rgba_half = display->rgba_half.device_pointer;
  task.buffer = buffers->buffer.device_pointer;
  task.sample = sample;
  tile_manager.state.buffer.get_offset_stride(task.offset, task.stride);

  if (task.w > 0 && task.h > 0) {
    device->task_add(task);
    device->task_wait();

    /* set display to new size */
    display->draw_set(task.w, task.h);

    last_display_time = time_dt();
  }

  display_outdated = false;
}

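/* Send progressive refine updates through the tile callbacks, rate limited
 * by progressive_update_timeout. Returns true when tiles were written rather
 * than just updated. */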
bool Session::update_progressive_refine(bool cancel)
{
  int sample = tile_manager.state.sample + 1;
  bool write = sample == tile_manager.num_samples || cancel;

  double current_time = time_dt();

  if (current_time - last_update_time < params.progressive_update_timeout) {
    /* If last sample was processed, we need to write buffers anyway. */
    if (!write && sample != 1)
      return false;
  }

  if (params.progressive_refine) {
    foreach (Tile &tile, tile_manager.state.tiles) {
      if (!tile.buffers) {
        continue;
      }

      RenderTile rtile;
      rtile.x = tile_manager.state.buffer.full_x + tile.x;
      rtile.y = tile_manager.state.buffer.full_y + tile.y;
      rtile.w = tile.w;
      rtile.h = tile.h;
      rtile.sample = sample;
      rtile.buffers = tile.buffers;

      if (write) {
        if (write_render_tile_cb)
          write_render_tile_cb(rtile);
      }
      else {
        if (update_render_tile_cb)
          update_render_tile_cb(rtile, true);
      }
    }
  }

  last_update_time = current_time;

  return write;
}

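/* Free device memory owned by the scene and the tile manager. */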
void Session::device_free()
{
  scene->device_free();

  tile_manager.device_free();

  /* used from background render only, so no need to
   * re-create render/display buffers here
   */
}

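/* Gather render statistics from the scene, including per-function profiling
 * data when rendering on the CPU with profiling enabled. */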
void Session::collect_statistics(RenderStats *render_stats)
{
  scene->collect_statistics(render_stats);
  if (params.use_profiling && (params.device.type == DEVICE_CPU)) {
    render_stats->collect_profiling(scene, profiler);
  }
}

CCL_NAMESPACE_END