diff --git a/stammer.py b/stammer.py
index abc8d76..d2bc359 100755
--- a/stammer.py
+++ b/stammer.py
@@ -107,7 +107,7 @@ def tesselate_composite(match_row, basis_coefficients, i):
     used_coeffs = [(j, coefficient) for j, coefficient in enumerate(basis_coefficients) if coefficient != 0]
     for k, coeff in used_coeffs:
         frame_num = min(match_row[k], video_handler.framecount - 1)
-        tiles.append(Image.open(video_handler.get_frame(frame_num+1)))
+        tiles.append(Image.open(video_handler.get_frame(frame_num)))
         hot_bits,_ = fraction_bits.as_array(coeff)
         bits.append(hot_bits)
     tesselation = image_tiling.Tiling(height=tiles[0].height,width=tiles[0].width)
@@ -133,7 +133,7 @@ def tesselate_composite(match_row, basis_coefficients, i):
     best_matches = matcher.get_best_matches()
 
     if type(matcher) in (BasicAudioMatcher, UniqueAudioMatcher):
-        for video_frame_i in range(int(len(best_matches) * audio_frame_length / video_frame_length)):
+        for video_frame_i in range(video_handler.best_match_count):
             elapsed_time = video_frame_i * video_frame_length
             audio_frame_i = int(elapsed_time / audio_frame_length)
             time_past_start_of_audio_frame = elapsed_time - (audio_frame_i * audio_frame_length)
@@ -141,11 +141,11 @@ def tesselate_composite(match_row, basis_coefficients, i):
             elapsed_time_in_carrier = match_num * audio_frame_length + time_past_start_of_audio_frame
             carrier_video_frame = int(elapsed_time_in_carrier / video_frame_length)
             carrier_video_frame = min(carrier_video_frame, int(video_handler.framecount - 1))
-            video_handler.write_frame(video_frame_i,video_handler.get_frame(carrier_video_frame+1))
+            video_handler.write_frame(video_frame_i,video_handler.get_frame(carrier_video_frame))
     elif type(matcher) == CombinedFrameAudioMatcher:
         basis_coefficients = matcher.get_basis_coefficients()
-        for video_frame_i in range(int(len(best_matches) * audio_frame_length / video_frame_length)):
+        for video_frame_i in range(video_handler.best_match_count):
             elapsed_time = video_frame_i * video_frame_length
             audio_frame_i = int(elapsed_time / audio_frame_length)
             time_past_start_of_audio_frame = elapsed_time - (audio_frame_i * audio_frame_length)
@@ -259,8 +259,6 @@ def process(carrier_path, modulator_path, output_path, custom_frame_length, matc
             handler.set_min_cached_frames(min_cached_frames)
         elif video_mode == "disk":
             handler = VideoHandlerDisk(carrier_path,output_path,TEMP_DIR,matcher,carrier_framecount,video_frame_length,color_mode)
-            outframes_dir = TEMP_DIR / 'outframes'
-            outframes_dir.mkdir()
 
         build_output_video(handler, matcher)
     else:
diff --git a/video_out.py b/video_out.py
index d5bac9f..5524a6b 100644
--- a/video_out.py
+++ b/video_out.py
@@ -27,8 +27,7 @@ def __init__(self, carrier_path: Path, output_path: Path, temp_dir: Path, matche
         self.carrier_path = carrier_path
         self.output_path = output_path
         self.temp_dir = temp_dir
-        self.frames_dir = self.temp_dir / 'frames'
-        self.outframes_dir = self.temp_dir / 'outframes'
+        self.frames_dir = self.temp_dir / 'frames'
         self.framecount = int(framecount)
         self.frame_length = frame_length
 
@@ -36,10 +35,21 @@ def __init__(self, carrier_path: Path, output_path: Path, temp_dir: Path, matche
         self.color_mode = color_mode
 
         self.frames_written = 0
+        self.out_proc = self.create_output_proc()
     
     def get_frame(self,idx):
-        assert(idx < self.framecount)
-
+        try:
+            assert(idx < self.framecount)
+        except AssertionError:
+            print("ERROR:")
+            print(f"STAMMER just tried to use carrier frame {idx}")
+            print(f"but carrier only has {self.framecount} frames.")
+            print()
+            print("This is a critical known issue with how carrier frames are handled.")
+            print("Please report STAMMER's output at this link:\nhttps://github.com/ArdenButterfield/stammer/issues/62")
+            print("\nQuitting.")
+            quit()
+
     def write_frame(self):
         self.frames_written += 1
         self.print_progress()
@@ -61,54 +71,65 @@ def progress_strings_separated(self):
     def print_progress(self):
         print(self.progress_strings_separated(),end=' \r')
 
-def get_output_cmd(handler: VideoHandler,input):
-    cmd = [
-        'ffmpeg',
-        '-v', 'quiet',
-        '-y',
-        '-framerate', str(1.0/handler.frame_length),
-        '!inputs!',
-        '-c:a', 'aac',
-        '-c:v', 'libx264',
-        '-crf', '20',
-        '-pix_fmt', 'yuv420p',
-        '-shortest',
-        str(handler.output_path)
-    ]
-
-    def replace(value, list):
-        idx = cmd.index(value)
-        cmd.pop(idx)
-        for i, x in enumerate(list): cmd.insert(idx+i,x)
-
-    replace('!inputs!',input)
+    def get_output_cmd(self,input = None):
+        if input == None:
+            input = [
+                '-f', 'image2pipe', '-i', 'pipe:',
+                '-i', str(self.temp_dir / 'out.wav')
+            ]
+        cmd = [
+            'ffmpeg',
+            '-v', 'quiet',
+            '-y',
+            '-framerate', str(1.0/self.frame_length),
+            '!inputs!',
+            '-c:a', 'aac',
+            '-c:v', 'libx264',
+            '-crf', '24',
+            '-pix_fmt', 'yuv420p',
+            '-shortest',
+            str(self.output_path)
+        ]
+
+        def replace(value, list):
+            idx = cmd.index(value)
+            cmd.pop(idx)
+            for i, x in enumerate(list): cmd.insert(idx+i,x)
+
+        replace('!inputs!',input)
+
+        return cmd
 
-    return cmd
+    def create_output_proc(self):
+        call = self.get_output_cmd()
+
+        return subprocess.Popen(
+            call,
+            stdin=subprocess.PIPE,
+            stdout=subprocess.DEVNULL
+        )
+
 class VideoHandlerDisk(VideoHandler):
+    def __init__(self, *args):
+        super().__init__(*args)
+
     def get_frame(self,idx):
         super().get_frame(idx)
-        f = open(self.frames_dir / f"frame{idx:06d}.png", 'rb')
-        return f
+
+        # Video frame filenames start at 1, not 0
+        idx += 1
+        return open(self.frames_dir / f"frame{idx:06d}.png", 'rb')
 
     def write_frame(self,idx,frame: io.BytesIO):
         super().write_frame()
         frame.seek(0)
-        f = open(self.outframes_dir / f"frame{idx:06d}.png", 'wb')
-        f.write(frame.read())
-        f.close()
+        self.out_proc.stdin.write(frame.read())
 
     def complete(self):
         super().complete()
-        call = get_output_cmd(
-            self,
-            input=[
-                '-stats',
-                '-i', str(self.outframes_dir / 'frame%06d.png'),
-                '-i', str(self.temp_dir / 'out.wav')
-            ]
-        )
-        subprocess.run(call,check=True)
+
+        self.out_proc.communicate()
 
 PNG_MAGIC = int("89504e47",16).to_bytes(4,byteorder='big')
 
@@ -123,8 +144,6 @@ def __init__(self, *args):
         self.frame_length_max = self.frame_length / max(self.frame_length,self.matcher.frame_length)
         self.frames_backtrack = 0
         self.frames_lookahead = int(max(1.0/self.frame_length_max,2))
-
-        self.out_proc = self.__create_output_proc()
 
     def set_min_cached_frames(self,mcf):
         # if a decayed frame is about to be used, we fetch the frame + this amount of frames around it
@@ -217,20 +236,6 @@ def get_progress_strings(self):
             strs.append(f"{self.framecount-self.cache.decayed_items}/{self.framecount} cached frames")
         return strs
 
-    def __create_output_proc(self):
-        call = get_output_cmd(
-            self,
-            [
-                '-f', 'image2pipe', '-i', 'pipe:',
-                '-i', str(self.temp_dir / 'out.wav')
-            ]
-        )
-
-        return subprocess.Popen(
-            call,
-            stdin=subprocess.PIPE,
-            stdout=subprocess.DEVNULL
-        )
 
     def complete(self):
         super().complete()
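Note on the encoding change above: rather than writing every output frame into an `outframes` directory and running ffmpeg over the image sequence at the end, `VideoHandlerDisk` now keeps a single ffmpeg process open for the whole run and streams each PNG to its stdin through the `image2pipe` demuxer. A minimal sketch of that pattern, assuming an ffmpeg binary on PATH; the function name and arguments below are illustrative, not part of STAMMER:

```python
import subprocess
from pathlib import Path

def encode_from_pngs(frame_paths, audio_path, output_path, fps=30.0):
    # One long-running ffmpeg process; PNG frames arrive on stdin via image2pipe.
    cmd = [
        'ffmpeg', '-y',
        '-framerate', str(fps),
        '-f', 'image2pipe', '-i', 'pipe:',   # video input: stdin
        '-i', str(audio_path),                # audio input: a separate file
        '-c:a', 'aac',
        '-c:v', 'libx264',
        '-pix_fmt', 'yuv420p',
        '-shortest',
        str(output_path),
    ]
    proc = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.DEVNULL)
    for p in frame_paths:
        proc.stdin.write(Path(p).read_bytes())  # each write is one complete PNG
    proc.stdin.close()
    proc.wait()
```

This works because each PNG is self-delimiting, so the image2pipe demuxer can split the concatenated stream back into frames; that is also why `write_frame()` in the diff can simply forward `frame.read()` to `out_proc.stdin`.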