frame range restrictions for dependencies, so slaves only receive point cache files for the frames that they need to render (supports step > 1, of course)
This commit is contained in:
@@ -7,6 +7,110 @@ import netrender.slave as slave
|
||||
import netrender.master as master
|
||||
from netrender.utils import *
|
||||
|
||||
|
||||
def clientSendJob(conn, scene, anim = False, chunks = 5):
    """Build a RenderJob for *scene* and submit it to the master over *conn*.

    When *anim* is true every frame in the scene's frame range is queued,
    otherwise only the current frame.  Dependencies (the .blend file,
    linked libraries and point cache files) are registered on the job;
    point cache files carry the frame range they are valid for, so slaves
    only download the caches needed for their frames (supports step > 1).
    Returns the job id assigned by the master.
    """
    netsettings = scene.network_render
    job = netrender.model.RenderJob()

    if anim:
        for f in range(scene.start_frame, scene.end_frame + 1):
            job.addFrame(f)
    else:
        job.addFrame(scene.current_frame)

    # the .blend file itself is the first dependency (no frame range)
    filename = bpy.data.filename
    job.addFile(filename)

    job_name = netsettings.job_name
    path, name = os.path.split(filename)
    if job_name == "[default]":
        job_name = name

    # linked libraries are dependencies too; expand "//"-relative paths
    for lib in bpy.data.libraries:
        lib_path = lib.filename

        if lib_path.startswith("//"):
            lib_path = path + os.sep + lib_path[2:]

        job.addFile(lib_path)

    root, ext = os.path.splitext(name)
    cache_path = path + os.sep + "blendcache_" + root + os.sep # need an API call for that

    print("cache:", cache_path)

    if os.path.exists(cache_path):
        # group cache files by cache id: {cache_id: [(frame, file name), ...]}
        caches = {}
        # raw string: "\." is a regex escape, not a Python string escape
        pattern = re.compile(r"([a-zA-Z0-9]+)_([0-9]+)_[0-9]+\.bphys")
        for cache_file in sorted(os.listdir(cache_path)):
            match = pattern.match(cache_file)

            if match:
                cache_id = match.group(1)
                cache_frame = int(match.group(2))
                caches.setdefault(cache_id, []).append((cache_frame, cache_file))

        # each cache file is valid from just after the previous cached frame
        # up to just before the next cached frame (handles frame step > 1)
        for cache in caches.values():
            cache.sort()

            if len(cache) == 1:
                cache_frame, cache_file = cache[0]
                job.addFile(cache_path + cache_file, cache_frame, cache_frame)
            else:
                for i in range(len(cache)):
                    current_frame, current_file = cache[i]
                    next_item = cache[i + 1] if i + 1 < len(cache) else None
                    previous_item = cache[i - 1] if i > 0 else None

                    if not next_item and not previous_item:
                        job.addFile(cache_path + current_file, current_frame, current_frame)
                    elif next_item and not previous_item:
                        job.addFile(cache_path + current_file, current_frame, next_item[0] - 1)
                    elif not next_item and previous_item:
                        job.addFile(cache_path + current_file, previous_item[0] + 1, current_frame)
                    else:
                        job.addFile(cache_path + current_file, previous_item[0] + 1, next_item[0] - 1)

    print(job.files)

    job.name = job_name

    for slave in scene.network_render.slaves_blacklist:
        job.blacklist.append(slave.id)

    job.chunks = netsettings.chunks
    job.priority = netsettings.priority

    # try to send the job description first
    conn.request("POST", "job", repr(job.serialize()))
    response = conn.getresponse()

    job_id = response.getheader("job-id")

    # ACCEPTED (but not processed) means the master still needs the files
    if response.status == http.client.ACCEPTED:
        # BUG FIX: job.files entries are (path, start, end) tuples since
        # addFile() gained a frame range -- unpack, don't open() the tuple
        for filepath, start, end in job.files:
            with open(filepath, "rb") as f:
                conn.request("PUT", "file", f, headers={"job-id": job_id, "job-file": filepath})
            response = conn.getresponse()

    # server will reply with NOT_FOUND until all files are found

    return job_id
|
||||
|
||||
def clientRequestResult(conn, scene, job_id):
    """Ask the master for the rendered result of the scene's current frame."""
    request_headers = {"job-id": job_id, "job-frame": str(scene.current_frame)}
    conn.request("GET", "render", headers=request_headers)
|
||||
|
||||
class NetworkRenderEngine(bpy.types.RenderEngine):
|
||||
__idname__ = 'NET_RENDER'
|
||||
__label__ = "Network Render"
|
||||
|
||||
@@ -10,8 +10,10 @@ JOB_PAUSED = 1 # paused by user
|
||||
JOB_QUEUED = 2 # ready to be dispatched
|
||||
|
||||
class MRenderFile:
|
||||
def __init__(self, filepath):
|
||||
def __init__(self, filepath, start, end):
|
||||
self.filepath = filepath
|
||||
self.start = start
|
||||
self.end = end
|
||||
self.found = False
|
||||
|
||||
def test(self):
|
||||
@@ -55,7 +57,7 @@ class MRenderJob(netrender.model.RenderJob):
|
||||
|
||||
# special server properties
|
||||
self.save_path = ""
|
||||
self.files_map = {path: MRenderFile(path) for path in files}
|
||||
self.files_map = {path: MRenderFile(path, start, end) for path, start, end in files}
|
||||
self.status = JOB_WAITING
|
||||
|
||||
def save(self):
|
||||
|
||||
@@ -62,6 +62,9 @@ class RenderJob:
|
||||
self.blacklist = []
|
||||
self.last_dispatched = 0.0
|
||||
|
||||
def addFile(self, file_path, start=-1, end=-1):
    """Register a dependency file with an optional validity frame range.

    A start/end of -1 means the file is needed for every frame.
    """
    entry = (file_path, start, end)
    self.files.append(entry)
|
||||
|
||||
def addFrame(self, frame_number):
    """Append a new RenderFrame for *frame_number* to this job."""
    self.frames.append(RenderFrame(frame_number))
|
||||
@@ -98,10 +101,12 @@ class RenderJob:
|
||||
return None
|
||||
|
||||
def serialize(self, frames = None):
|
||||
min_frame = min((f.number for f in frames)) if frames else -1
|
||||
max_frame = max((f.number for f in frames)) if frames else -1
|
||||
return {
|
||||
"id": self.id,
|
||||
"name": self.name,
|
||||
"files": self.files,
|
||||
"files": [f for f in self.files if f[1] == -1 or not frames or (f[1] <= min_frame <= f[2] or f[1] <= max_frame <= f[2])],
|
||||
"frames": [f.serialize() for f in self.frames if not frames or f in frames],
|
||||
"chunks": self.chunks,
|
||||
"priority": self.priority,
|
||||
|
||||
@@ -3,6 +3,7 @@ import sys, os
|
||||
import http, http.client, http.server, urllib
|
||||
|
||||
from netrender.utils import *
|
||||
import netrender.client as client
|
||||
import netrender.model
|
||||
|
||||
class RENDER_OT_netclientsend(bpy.types.Operator):
|
||||
@@ -27,7 +28,7 @@ class RENDER_OT_netclientsend(bpy.types.Operator):
|
||||
|
||||
if conn:
|
||||
# Sending file
|
||||
scene.network_render.job_id = clientSendJob(conn, scene, True)
|
||||
scene.network_render.job_id = client.clientSendJob(conn, scene, True)
|
||||
|
||||
return ('FINISHED',)
|
||||
|
||||
|
||||
@@ -83,7 +83,7 @@ def render_slave(engine, scene):
|
||||
if not os.path.exists(JOB_PREFIX):
|
||||
os.mkdir(JOB_PREFIX)
|
||||
|
||||
job_path = job.files[0]
|
||||
job_path = job.files[0][0] # data in files have format (path, start, end)
|
||||
main_path, main_file = os.path.split(job_path)
|
||||
|
||||
job_full_path = testFile(conn, JOB_PREFIX, job_path)
|
||||
@@ -91,7 +91,8 @@ def render_slave(engine, scene):
|
||||
print("File:", main_file, "and %i other files" % (len(job.files) - 1,))
|
||||
engine.update_stats("", "Render File", main_file, "for job", job.id)
|
||||
|
||||
for file_path in job.files[1:]:
|
||||
for file_path, start, end in job.files[1:]:
|
||||
print("\t", file_path)
|
||||
testFile(conn, JOB_PREFIX, file_path, main_path)
|
||||
|
||||
frame_args = []
|
||||
|
||||
@@ -6,7 +6,7 @@ import subprocess, shutil, time, hashlib
|
||||
|
||||
import netrender.model
|
||||
|
||||
VERSION = b"0.3"
|
||||
VERSION = b"0.5"
|
||||
|
||||
QUEUED = 0
|
||||
DISPATCHED = 1
|
||||
@@ -41,79 +41,6 @@ def clientVerifyVersion(conn):
|
||||
|
||||
return True
|
||||
|
||||
def clientSendJob(conn, scene, anim = False, chunks = 5):
    """Assemble a RenderJob for *scene*, POST it to the master on *conn*,
    upload the dependency files if requested, and return the job id.

    With *anim* true the whole scene frame range is queued, otherwise just
    the current frame.  The .blend file, linked libraries and any point
    cache files are listed as job dependencies.
    """
    netsettings = scene.network_render
    job = netrender.model.RenderJob()

    # frames to render
    frame_numbers = range(scene.start_frame, scene.end_frame + 1) if anim else [scene.current_frame]
    for frame_number in frame_numbers:
        job.addFrame(frame_number)

    # the .blend file itself is the first dependency
    blend_file = bpy.data.filename
    job.files.append(blend_file)

    base_dir, base_name = os.path.split(blend_file)
    job_name = netsettings.job_name
    if job_name == "[default]":
        job_name = base_name

    # linked libraries; expand blender-relative ("//") paths
    for library in bpy.data.libraries:
        library_path = library.filename

        if library_path.startswith("//"):
            library_path = base_dir + os.sep + library_path[2:]

        job.files.append(library_path)

    root, ext = os.path.splitext(base_name)
    cache_path = base_dir + os.sep + "blendcache_" + root + os.sep # need an API call for that

    print("cache:", cache_path)

    # point cache files, if a cache directory exists next to the .blend
    if os.path.exists(cache_path):
        pattern = re.compile("[a-zA-Z0-9]+_([0-9]+)_[0-9]+\.bphys")
        for cache_name in sorted(os.listdir(cache_path)):
            match = pattern.match(cache_name)

            if match:
                print("Frame:", int(match.groups()[0]), cache_name)
                job.files.append(cache_path + cache_name)

    job.name = job_name

    for blacklisted in scene.network_render.slaves_blacklist:
        job.blacklist.append(blacklisted.id)

    job.chunks = netsettings.chunks
    job.priority = netsettings.priority

    # send the job description first
    conn.request("POST", "job", repr(job.serialize()))
    response = conn.getresponse()

    job_id = response.getheader("job-id")

    # ACCEPTED (but not processed) means the master still wants the files
    if response.status == http.client.ACCEPTED:
        for filepath in job.files:
            f = open(filepath, "rb")
            conn.request("PUT", "file", f, headers={"job-id": job_id, "job-file": filepath})
            f.close()
            response = conn.getresponse()

    # the server replies NOT_FOUND until every file has been received
    return job_id
|
||||
|
||||
def clientRequestResult(conn, scene, job_id):
    """Issue a GET asking the master for *job_id*'s result at the current frame."""
    frame = str(scene.current_frame)
    conn.request("GET", "render", headers={"job-id": job_id, "job-frame": frame})
|
||||
|
||||
|
||||
def prefixPath(prefix_directory, file_path, prefix_path):
|
||||
if os.path.isabs(file_path):
|
||||
# if an absolute path, make sure path exists, if it doesn't, use relative local path
|
||||
|
||||
Reference in New Issue
Block a user