From ec8cd66ebbfc0b1642fa0e6fd3c272eed3bab2a1 Mon Sep 17 00:00:00 2001
From: RGM
Date: Tue, 13 Sep 2022 15:34:56 -0300
Subject: [PATCH 01/34] Fix first line also on preview

---
 app/NtscApp.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/app/NtscApp.py b/app/NtscApp.py
index 730a198..3063309 100755
--- a/app/NtscApp.py
+++ b/app/NtscApp.py
@@ -565,7 +565,7 @@ def render_video(self):
         self.thread.start()

     def nt_process(self, frame) -> ndarray:
-        _ = self.nt.composite_layer(frame, frame, field=2, fieldno=2)
+        _ = self.nt.composite_layer(frame, frame, field=0, fieldno=1)
         ntsc_out_image = cv2.convertScaleAbs(_)
         ntsc_out_image[1:-1:2] = ntsc_out_image[0:-2:2] / 2 + ntsc_out_image[2::2] / 2
         return ntsc_out_image

From 7dffa5d25f6d18ce9956a487f0e3584e38840814 Mon Sep 17 00:00:00 2001
From: RGM
Date: Tue, 13 Sep 2022 15:44:54 -0300
Subject: [PATCH 02/34] Comment out audio filtering function (for now)

---
 app/NtscApp.py | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)

diff --git a/app/NtscApp.py b/app/NtscApp.py
index 730a198..7306df8 100755
--- a/app/NtscApp.py
+++ b/app/NtscApp.py
@@ -240,14 +240,14 @@ def lossless_exporting(self):
         except AttributeError:
             pass

-    def audio_filtering(self):
-        state = self.ProcessAudioCheckBox.isChecked()
-        self.ProcessAudio = state
-        try:
-            self.videoRenderer.process_audio = state
-            logger.debug(f"Process audio: {str(state)}")
-        except AttributeError:
-            pass
+    #def audio_filtering(self):
+    #    state = self.ProcessAudioCheckBox.isChecked()
+    #    self.ProcessAudio = state
+    #    try:
+    #        self.videoRenderer.process_audio = state
+    #        logger.debug(f"Process audio: {str(state)}")
+    #    except AttributeError:
+    #        pass

     @QtCore.pyqtSlot(int)
     def update_seed(self, seed):

From d91dd4d5379cd088febdf8f6255b813045e71816 Mon Sep 17 00:00:00 2001
From: RGM
Date: Tue, 13 Sep 2022 15:53:55 -0300
Subject: [PATCH 03/34] Comment out the audio_filtering call

---
 app/NtscApp.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/app/NtscApp.py b/app/NtscApp.py
index 139ed50..30880b4 100755
--- a/app/NtscApp.py
+++ b/app/NtscApp.py
@@ -559,7 +559,7 @@ def render_video(self):
         self.setup_renderer()
         self.toggle_main_effect()
         self.lossless_exporting()
-        self.audio_filtering()
+        #self.audio_filtering()
         self.progressBar.setValue(1)
         self.videoRenderer.render_data = render_data
         self.thread.start()

From 96aad7fbfa2f32f5d011ff23ebac86c81777c88c Mon Sep 17 00:00:00 2001
From: RGM
Date: Tue, 25 Oct 2022 18:17:50 -0300
Subject: [PATCH 04/34] First add to feature

---
 app/NtscApp.py | 71 ++++++++++++++++++++++++++++++++++----------------
 1 file changed, 48 insertions(+), 23 deletions(-)

diff --git a/app/NtscApp.py b/app/NtscApp.py
index 30880b4..167ec1e 100755
--- a/app/NtscApp.py
+++ b/app/NtscApp.py
@@ -21,6 +21,7 @@ class NtscApp(QtWidgets.QMainWindow, mainWindow.Ui_MainWindow):
     def __init__(self):
         self.current_frame: numpy.ndarray = False
+        self.next_frame: numpy.ndarray = False
         self.preview: numpy.ndarray = False
         self.scale_pixmap = False
         self.input_video = {}
@@ -43,6 +44,8 @@ def __init__(self):
         self.setupUi(self)  # This is needed to initialize our UI design
         self.strings = {
             "_composite_preemphasis": self.tr("Composite preemphasis"),
+            "_subcarrier_amplitude": self.tr("Subcarrier amplitude"),
+            "_subcarrier_amplitude_back": self.tr("Subcarrier turnback amplitude"),
             "_vhs_out_sharpen": self.tr("VHS out sharpen"),
             "_vhs_edge_wave": self.tr("Edge wave"),
             "_output_vhs_tape_speed": self.tr("VHS tape speed"),
@@ -74,6 +77,8 @@ def __init__(self):
             "_black_line_cut": self.tr("Cut 2% 
black line"), } self.add_slider("_composite_preemphasis", 0, 10, float) + self.add_slider("_subcarrier_amplitude", 0, 16384, pro=True) + self.add_slider("_subcarrier_amplitude_back", 0, 16384, pro=True) self.add_slider("_vhs_out_sharpen", 1, 5) self.add_slider("_vhs_edge_wave", 0, 10) # self.add_slider("_output_vhs_tape_speed", 0, 10) @@ -89,7 +94,7 @@ def __init__(self): self.add_slider("_video_chroma_loss", 0, 800) self.add_slider("_video_noise", 0, 4200) self.add_slider("_video_scanline_phase_shift", 0, 270, pro=True) - self.add_slider("_video_scanline_phase_shift_offset", 0, 3, pro=True) + #self.add_slider("_video_scanline_phase_shift_offset", 0, 3, pro=True) self.add_slider("_head_switching_speed", 0, 100) @@ -107,7 +112,7 @@ def __init__(self): self.add_checkbox("_black_line_cut", (1, 2), pro=False) self.renderHeightBox.valueChanged.connect( - lambda: self.set_current_frame(self.current_frame) + lambda: self.set_current_frame(self.current_frame,self.next_frame) ) self.openFile.clicked.connect(self.open_file) self.renderVideoButton.clicked.connect(self.render_video) @@ -397,26 +402,39 @@ def get_current_video_frame(self): return None frame_no = self.videoTrackSlider.value() self.input_video["cap"].set(1, frame_no) - ret, frame = self.input_video["cap"].read() - return frame + ret, frame1 = self.input_video["cap"].read() - def set_current_frame(self, frame): - current_frame_valid = isinstance(frame, ndarray) + if(frame_no == self.input_video["frames_count"]): + frame2 = frame1 + else: + self.input_video["cap"].set(1, frame_no+1) + frame2 = self.input_video["cap"].read() + self.input_video["cap"].set(1, frame_no) + + return frame1, frame2 + + def set_current_frame(self, frame1, frame2): + current_frame_valid = isinstance(frame1, ndarray) + preview_h = self.renderHeightBox.value() if not current_frame_valid or preview_h < 10: self.update_status("Trying to set invalid current frame") return None - self.current_frame = frame + self.current_frame = frame1 + self.next_frame = frame2 + try: crop_wh = resize_to_height(self.orig_wh, preview_h) - self.preview = cv2.resize(frame, crop_wh) + self.preview1 = cv2.resize(frame1, crop_wh) + self.preview2 = cv2.resize(frame2, crop_wh) except ZeroDivisionError: self.update_status("ZeroDivisionError :DDDDDD") pass if self.preview.shape[1] % 4 != 0: - self.preview = trim_to_4width(self.preview) + self.preview1 = trim_to_4width(self.preview1) + self.preview2 = trim_to_4width(self.preview2) self.nt_update_preview() @@ -466,7 +484,7 @@ def set_image_mode(self): self.livePreviewCheckbox.hide() self.renderVideoButton.hide() - def set_render_heigth(self, height): + def set_render_height(self, height): if height > 600: self.renderHeightBox.setValue(600) self.update_status( @@ -478,9 +496,9 @@ def open_image(self, img: numpy.ndarray): height, width, channels = img.shape self.orig_wh = width, height - self.set_render_heigth(height) + self.set_render_height(height) - self.set_current_frame(img) + self.set_current_frame(img,img) def nt_get_config(self): values = {} @@ -516,12 +534,12 @@ def open_video(self, path: Path): } logger.debug(f"selfinput: {self.input_video}") self.orig_wh = (int(self.input_video["width"]), int(self.input_video["height"])) - self.set_render_heigth(self.input_video["height"]) - self.set_current_frame(self.get_current_video_frame()) + self.set_render_height(self.input_video["height"]) + self.set_current_frame(self.get_current_video_frame()[0],self.get_current_video_frame()[1]) self.videoTrackSlider.setMinimum(1) 
self.videoTrackSlider.setMaximum(self.input_video["frames_count"]) self.videoTrackSlider.valueChanged.connect( - lambda: self.set_current_frame(self.get_current_video_frame()) + lambda: self.set_current_frame(self.get_current_video_frame()[0],self.get_current_video_frame()[1]) ) self.progressBar.setMaximum(self.input_video["frames_count"]) @@ -534,7 +552,7 @@ def render_image(self): image = cv2.resize(self.current_frame, crop_wh) if image.shape[1] % 4 != 0: image = trim_to_4width(image) - image = self.nt_process(image) + image = self.nt_process(image,image) is_success, im_buf_arr = cv2.imencode(".png", image) if not is_success: self.update_status("Error while saving (!is_success)") @@ -564,10 +582,17 @@ def render_video(self): self.videoRenderer.render_data = render_data self.thread.start() - def nt_process(self, frame) -> ndarray: - _ = self.nt.composite_layer(frame, frame, field=0, fieldno=1) - ntsc_out_image = cv2.convertScaleAbs(_) - ntsc_out_image[1:-1:2] = ntsc_out_image[0:-2:2] / 2 + ntsc_out_image[2::2] / 2 + def nt_process(self, frame1: ndarray, frame2: ndarray) -> ndarray: + moire_pos = 0 + + f1 = self.nt.composite_layer(frame1, frame1, field=0, fieldno=2, moirepos=moire_pos) + f2 = self.nt.composite_layer(frame2, frame2, field=2, fieldno=2, moirepos=moire_pos) + f1_out = cv2.convertScaleAbs(f1) + f2_out = cv2.convertScaleAbs(f2) + + ntsc_out_image = f1_out + #ntsc_out_image[1:-1:2] = ntsc_out_image[0:-2:2] / 2 + ntsc_out_image[2::2] / 2 + ntsc_out_image[1::2,:] = f2_out[::2,:] return ntsc_out_image def nt_update_preview(self): @@ -577,14 +602,14 @@ def nt_update_preview(self): return None if not self.mainEffect: - self.render_preview(self.preview) + self.render_preview(self.preview1) return None - ntsc_out_image = self.nt_process(self.preview) + ntsc_out_image = self.nt_process(self.preview1,self.preview2) if self.compareMode: ntsc_out_image = numpy.concatenate( - (self.preview[:self.preview.shape[0] // 2], ntsc_out_image[ntsc_out_image.shape[0] // 2:]) + (self.preview1[:self.preview.shape[0] // 2], ntsc_out_image[ntsc_out_image.shape[0] // 2:]) ) self.render_preview(ntsc_out_image) From 1b6771623f40318898982249aba18e6db9f9b53a Mon Sep 17 00:00:00 2001 From: RGM Date: Tue, 25 Oct 2022 18:31:36 -0300 Subject: [PATCH 05/34] Add some thing --- app/NtscApp.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/app/NtscApp.py b/app/NtscApp.py index 167ec1e..1249654 100755 --- a/app/NtscApp.py +++ b/app/NtscApp.py @@ -432,8 +432,9 @@ def set_current_frame(self, frame1, frame2): self.update_status("ZeroDivisionError :DDDDDD") pass - if self.preview.shape[1] % 4 != 0: + if self.preview1.shape[1] % 4 != 0: self.preview1 = trim_to_4width(self.preview1) + if self.preview2.shape[1] % 4 != 0: self.preview2 = trim_to_4width(self.preview2) self.nt_update_preview() From a3f1e823c25f883ad3e17768e573dce738b72255 Mon Sep 17 00:00:00 2001 From: RGM Date: Fri, 28 Oct 2022 19:45:21 -0300 Subject: [PATCH 06/34] Composite layer fix --- app/ntsc.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/app/ntsc.py b/app/ntsc.py index 78901da..dd7d6c0 100755 --- a/app/ntsc.py +++ b/app/ntsc.py @@ -644,9 +644,11 @@ def emulate_vhs(self, yiq: numpy.ndarray, field: int, fieldno: int): self.chroma_into_luma(yiq, field, fieldno, self._subcarrier_amplitude) self.chroma_from_luma(yiq, field, fieldno, self._subcarrier_amplitude) - def composite_layer(self, dst: numpy.ndarray, src: numpy.ndarray, field: int, fieldno: int): + def composite_layer(self, dst: numpy.ndarray, src: 
numpy.ndarray, field: int, fieldno: int, moirepos: int): assert dst.shape == src.shape, "dst and src images must be of same shape" + self._video_scanline_phase_shift_offset = moirepos + if self._black_line_cut: cut_black_line_border(src) From 00e44fc9d985278fee19a2360d9d14a77a80f9d6 Mon Sep 17 00:00:00 2001 From: RGM Date: Fri, 28 Oct 2022 19:47:09 -0300 Subject: [PATCH 07/34] New fix --- app/NtscApp.py | 6 ++---- app/ntsc.py | 4 +--- 2 files changed, 3 insertions(+), 7 deletions(-) diff --git a/app/NtscApp.py b/app/NtscApp.py index 1249654..c19562c 100755 --- a/app/NtscApp.py +++ b/app/NtscApp.py @@ -584,10 +584,8 @@ def render_video(self): self.thread.start() def nt_process(self, frame1: ndarray, frame2: ndarray) -> ndarray: - moire_pos = 0 - - f1 = self.nt.composite_layer(frame1, frame1, field=0, fieldno=2, moirepos=moire_pos) - f2 = self.nt.composite_layer(frame2, frame2, field=2, fieldno=2, moirepos=moire_pos) + f1 = self.nt.composite_layer(frame1, frame1, field=0, fieldno=2) + f2 = self.nt.composite_layer(frame2, frame2, field=2, fieldno=2) f1_out = cv2.convertScaleAbs(f1) f2_out = cv2.convertScaleAbs(f2) diff --git a/app/ntsc.py b/app/ntsc.py index dd7d6c0..78901da 100755 --- a/app/ntsc.py +++ b/app/ntsc.py @@ -644,11 +644,9 @@ def emulate_vhs(self, yiq: numpy.ndarray, field: int, fieldno: int): self.chroma_into_luma(yiq, field, fieldno, self._subcarrier_amplitude) self.chroma_from_luma(yiq, field, fieldno, self._subcarrier_amplitude) - def composite_layer(self, dst: numpy.ndarray, src: numpy.ndarray, field: int, fieldno: int, moirepos: int): + def composite_layer(self, dst: numpy.ndarray, src: numpy.ndarray, field: int, fieldno: int): assert dst.shape == src.shape, "dst and src images must be of same shape" - self._video_scanline_phase_shift_offset = moirepos - if self._black_line_cut: cut_black_line_border(src) From a9d403a69dd95335cb11aef4a69fa441da373fcb Mon Sep 17 00:00:00 2001 From: RGM Date: Fri, 28 Oct 2022 19:50:26 -0300 Subject: [PATCH 08/34] Another fix --- app/NtscApp.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/app/NtscApp.py b/app/NtscApp.py index c19562c..834f0fd 100755 --- a/app/NtscApp.py +++ b/app/NtscApp.py @@ -94,7 +94,7 @@ def __init__(self): self.add_slider("_video_chroma_loss", 0, 800) self.add_slider("_video_noise", 0, 4200) self.add_slider("_video_scanline_phase_shift", 0, 270, pro=True) - #self.add_slider("_video_scanline_phase_shift_offset", 0, 3, pro=True) + self.add_slider("_video_scanline_phase_shift_offset", 0, 3, pro=True) self.add_slider("_head_switching_speed", 0, 100) From e32a5609b55e57c1e517d2d7532ce0e367eaa669 Mon Sep 17 00:00:00 2001 From: RGM Date: Sat, 10 Dec 2022 13:54:25 -0300 Subject: [PATCH 09/34] Finished! 
- Version 1 --- app/NtscApp.py | 56 ++++++++++++++++++++++++++++++------------- app/Renderer.py | 63 +++++++++++++++++++++++++++++++++++++------------ 2 files changed, 87 insertions(+), 32 deletions(-) diff --git a/app/NtscApp.py b/app/NtscApp.py index 834f0fd..a91d9d8 100755 --- a/app/NtscApp.py +++ b/app/NtscApp.py @@ -396,26 +396,45 @@ def add_slider(self, param_name, min_val, max_val, slider_value_type: Union[int, self.nt_controls[param_name] = slider self.slidersLayout.addWidget(slider_frame) + def set_frames(self, cap, frame_no, interlaced: bool): + cap.set(1, frame_no) + ret1, frame1 = cap.read() + + if(interlaced): + if(frame_no == self.input_video["frames_count"]): + frame2 = frame1 + else: + cap.set(1, frame_no+1) + ret2, frame2 = cap.read() + cap.set(1, frame_no) + else: + frame2 = frame1 + + return frame1, frame2 + def get_current_video_frame(self): preview_h = self.renderHeightBox.value() if not self.input_video or preview_h < 10: return None frame_no = self.videoTrackSlider.value() - self.input_video["cap"].set(1, frame_no) - ret, frame1 = self.input_video["cap"].read() - - if(frame_no == self.input_video["frames_count"]): - frame2 = frame1 - else: - self.input_video["cap"].set(1, frame_no+1) - frame2 = self.input_video["cap"].read() - self.input_video["cap"].set(1, frame_no) + #self.input_video["cap"].set(1, frame_no) + #ret1, frame1 = self.input_video["cap"].read() + + #if(frame_no == self.input_video["frames_count"]): + # frame2 = frame1 + #else: + # self.input_video["cap"].set(1, frame_no+1) + # ret2, frame2 = self.input_video["cap"].read() + # self.input_video["cap"].set(1, frame_no) - return frame1, frame2 + #return frame1, frame2 + + return self.set_frames(cap=self.input_video["cap"],frame_no=frame_no,interlaced=True) def set_current_frame(self, frame1, frame2): - current_frame_valid = isinstance(frame1, ndarray) - + #current_frame_valid = isinstance(frame1, ndarray) + current_frame_valid = True + preview_h = self.renderHeightBox.value() if not current_frame_valid or preview_h < 10: self.update_status("Trying to set invalid current frame") @@ -539,10 +558,12 @@ def open_video(self, path: Path): self.set_current_frame(self.get_current_video_frame()[0],self.get_current_video_frame()[1]) self.videoTrackSlider.setMinimum(1) self.videoTrackSlider.setMaximum(self.input_video["frames_count"]) + self.videoTrackSlider.setSingleStep(2) + print(str(self.videoTrackSlider.singleStep())) self.videoTrackSlider.valueChanged.connect( lambda: self.set_current_frame(self.get_current_video_frame()[0],self.get_current_video_frame()[1]) ) - self.progressBar.setMaximum(self.input_video["frames_count"]) + self.progressBar.setMaximum(round(self.input_video["frames_count"] / 2)) def render_image(self): target_file = pick_save_file(self, title='Save frame as', suffix='.png') @@ -585,13 +606,14 @@ def render_video(self): def nt_process(self, frame1: ndarray, frame2: ndarray) -> ndarray: f1 = self.nt.composite_layer(frame1, frame1, field=0, fieldno=2) - f2 = self.nt.composite_layer(frame2, frame2, field=2, fieldno=2) + f2_in = cv2.warpAffine(frame2, numpy.float32([[1, 0, 0], [0, 1, 1]]), (frame2.shape[1], frame2.shape[0]+2)) + f2 = self.nt.composite_layer(f2_in, f2_in, field=2, fieldno=2) f1_out = cv2.convertScaleAbs(f1) f2_out = cv2.convertScaleAbs(f2) ntsc_out_image = f1_out - #ntsc_out_image[1:-1:2] = ntsc_out_image[0:-2:2] / 2 + ntsc_out_image[2::2] / 2 - ntsc_out_image[1::2,:] = f2_out[::2,:] + #ntsc_out_image[1:-1:2] = ntsc_out_image[0:-2:2] / 2 + f2_out[2::2] / 2 + ntsc_out_image[1::2] = 
f2_out[2::2] return ntsc_out_image def nt_update_preview(self): @@ -608,7 +630,7 @@ def nt_update_preview(self): if self.compareMode: ntsc_out_image = numpy.concatenate( - (self.preview1[:self.preview.shape[0] // 2], ntsc_out_image[ntsc_out_image.shape[0] // 2:]) + (self.preview1[:self.preview1.shape[0] // 2], ntsc_out_image[ntsc_out_image.shape[0] // 2:]) ) self.render_preview(ntsc_out_image) diff --git a/app/Renderer.py b/app/Renderer.py index 4b9ab16..d253554 100755 --- a/app/Renderer.py +++ b/app/Renderer.py @@ -9,6 +9,7 @@ from app.logs import logger from app.funcs import resize_to_height, trim_to_4width, expand_to_4width +import numpy class Renderer(QtCore.QObject): running = False @@ -64,7 +65,7 @@ def run(self): open_result = video.open( filename=str(tmp_output.resolve()), fourcc=fourcc_choice, - fps=self.render_data["input_video"]["orig_fps"], + fps=(self.render_data["input_video"]["orig_fps"] / 2), frameSize=container_wh, ) logger.debug(f'Output video open result: {open_result}') @@ -75,37 +76,64 @@ def run(self): #logger.debug(f'Process audio: {self.process_audio}') frame_index = 0 + og_frame_index = 0 self.renderStateChanged.emit(True) - cap = FileVideoStream( - path=str(self.render_data["input_video"]["path"]), - queue_size=322 - ).start() + #cap = FileVideoStream( + # path=str(self.render_data["input_video"]["path"]), + # queue_size=322 + #) + cap = self.render_data["input_video"]["cap"] - while cap.more(): + print(int(self.render_data["input_video"]["frames_count"] / 2)) + + while (frame_index <= self.render_data["input_video"]["frames_count"]): if self.pause: self.sendStatus.emit(f"{status_string} [P]") time.sleep(0.3) continue - frame_index += 1 - frame = cap.read() - if frame is None or not self.running: + #frame_index += 1 + #frame = cap.read() + + cap.set(1, frame_index) + ret1, frame1 = cap.read() + + if(frame_index == self.render_data["input_video"]["frames_count"]): + frame2 = frame1 + else: + cap.set(1, frame_index+1) + ret2, frame2 = cap.read() + cap.set(1, frame_index) + + if frame1 is None or not self.running: self.sendStatus.emit(f'Render stopped. 
ret(debug):') break self.increment_progress.emit() if orig_wh != render_wh: - frame = cv2.resize(frame, render_wh) + frame1 = cv2.resize(frame1, render_wh) + frame2 = cv2.resize(frame2, render_wh) # crash workaround if render_wh[0] % 4 != 0: - frame = expand_to_4width(frame) + frame1 = expand_to_4width(frame1) + frame2 = expand_to_4width(frame2) if self.mainEffect: - frame = self.render_data["nt"].composite_layer(frame, frame, field=0, fieldno=1) - frame = cv2.convertScaleAbs(frame) - frame[1:-1:2] = frame[0:-2:2] / 2 + frame[2::2] / 2 + #frame = self.render_data["nt"].composite_layer(frame, frame, field=0, fieldno=1) + #frame = cv2.convertScaleAbs(frame) + #frame[1:-1:2] = frame[0:-2:2] / 2 + frame[2::2] / 2 + + f1 = self.render_data["nt"].composite_layer(frame1, frame1, field=0, fieldno=2) + f2_in = cv2.warpAffine(frame2, numpy.float32([[1, 0, 0], [0, 1, 1]]), (frame2.shape[1], frame2.shape[0]+2)) + f2 = self.render_data["nt"].composite_layer(f2_in, f2_in, field=2, fieldno=2) + f1_out = cv2.convertScaleAbs(f1) + f2_out = cv2.convertScaleAbs(f2) + + frame = f1_out + #ntsc_out_image[1:-1:2] = ntsc_out_image[0:-2:2] / 2 + ntsc_out_image[2::2] / 2 + frame[1::2,:] = f2_out[2::2,:] frame = frame[:, 0:render_wh[0]] @@ -116,10 +144,15 @@ def run(self): if upscale_2x: frame = cv2.resize(frame, dsize=container_wh, interpolation=cv2.INTER_NEAREST) - status_string = f'[CV2] Render progress: {frame_index}/{self.render_data["input_video"]["frames_count"]}' + status_string = f'[CV2] Render progress: {og_frame_index}/{round(self.render_data["input_video"]["frames_count"] / 2)}' self.sendStatus.emit(status_string) video.write(frame) + frame_index += 2 + og_frame_index += 1 + if((frame_index > self.render_data["input_video"]["frames_count"]) or (frame_index+2 > self.render_data["input_video"]["frames_count"])): + frame_index = self.render_data["input_video"]["frames_count"] + video.release() orig_path = str(self.render_data["input_video"]["path"].resolve()) From abadb6c5872044d5cb5a6196e540d1973eb4de7b Mon Sep 17 00:00:00 2001 From: RGM Date: Mon, 12 Dec 2022 17:47:49 -0300 Subject: [PATCH 10/34] Interlacing - First Fix --- app/NtscApp.py | 13 ++++++++++--- app/Renderer.py | 14 +++++--------- 2 files changed, 15 insertions(+), 12 deletions(-) diff --git a/app/NtscApp.py b/app/NtscApp.py index a91d9d8..b1e75cf 100755 --- a/app/NtscApp.py +++ b/app/NtscApp.py @@ -401,7 +401,7 @@ def set_frames(self, cap, frame_no, interlaced: bool): ret1, frame1 = cap.read() if(interlaced): - if(frame_no == self.input_video["frames_count"]): + if((frame_no == self.input_video["frames_count"]-2) or (frame_no+1 == self.input_video["frames_count"]-2)): frame2 = frame1 else: cap.set(1, frame_no+1) @@ -417,6 +417,7 @@ def get_current_video_frame(self): if not self.input_video or preview_h < 10: return None frame_no = self.videoTrackSlider.value() + #self.input_video["cap"].set(1, frame_no) #ret1, frame1 = self.input_video["cap"].read() @@ -552,18 +553,24 @@ def open_video(self, path: Path): "path": path, "suffix": path.suffix.lower(), } + + if(self.input_video["frames_count"] % 2 == 1): + new_frame_count = self.input_video["frames_count"]-1 + else: + new_frame_count = self.input_video["frames_count"] + logger.debug(f"selfinput: {self.input_video}") self.orig_wh = (int(self.input_video["width"]), int(self.input_video["height"])) self.set_render_height(self.input_video["height"]) self.set_current_frame(self.get_current_video_frame()[0],self.get_current_video_frame()[1]) self.videoTrackSlider.setMinimum(1) - 
self.videoTrackSlider.setMaximum(self.input_video["frames_count"]) + self.videoTrackSlider.setMaximum(new_frame_count) self.videoTrackSlider.setSingleStep(2) print(str(self.videoTrackSlider.singleStep())) self.videoTrackSlider.valueChanged.connect( lambda: self.set_current_frame(self.get_current_video_frame()[0],self.get_current_video_frame()[1]) ) - self.progressBar.setMaximum(round(self.input_video["frames_count"] / 2)) + self.progressBar.setMaximum(self.input_video["frames_count"] // 2) def render_image(self): target_file = pick_save_file(self, title='Save frame as', suffix='.png') diff --git a/app/Renderer.py b/app/Renderer.py index d253554..86e3d90 100755 --- a/app/Renderer.py +++ b/app/Renderer.py @@ -84,22 +84,17 @@ def run(self): #) cap = self.render_data["input_video"]["cap"] - print(int(self.render_data["input_video"]["frames_count"] / 2)) - - while (frame_index <= self.render_data["input_video"]["frames_count"]): + while (frame_index <= self.render_data["input_video"]["frames_count"]-2): if self.pause: self.sendStatus.emit(f"{status_string} [P]") time.sleep(0.3) continue - #frame_index += 1 - #frame = cap.read() - cap.set(1, frame_index) ret1, frame1 = cap.read() - if(frame_index == self.render_data["input_video"]["frames_count"]): + if((frame_index == self.render_data["input_video"]["frames_count"]-2) or (frame_index+1 == self.render_data["input_video"]["frames_count"]-2)): frame2 = frame1 else: cap.set(1, frame_index+1) @@ -144,13 +139,14 @@ def run(self): if upscale_2x: frame = cv2.resize(frame, dsize=container_wh, interpolation=cv2.INTER_NEAREST) - status_string = f'[CV2] Render progress: {og_frame_index}/{round(self.render_data["input_video"]["frames_count"] / 2)}' + status_string = f'[CV2] Render progress: {og_frame_index}/{self.render_data["input_video"]["frames_count"] // 2}' self.sendStatus.emit(status_string) video.write(frame) frame_index += 2 og_frame_index += 1 - if((frame_index > self.render_data["input_video"]["frames_count"]) or (frame_index+2 > self.render_data["input_video"]["frames_count"])): + + if((frame_index > self.render_data["input_video"]["frames_count"]) or (frame_index+1 > self.render_data["input_video"]["frames_count"])): frame_index = self.render_data["input_video"]["frames_count"] video.release() From d6372298143126b0e630a69bc359d9ea67739a9c Mon Sep 17 00:00:00 2001 From: RGM Date: Mon, 12 Dec 2022 17:50:12 -0300 Subject: [PATCH 11/34] Revert subcarrier thing --- app/NtscApp.py | 4 ---- 1 file changed, 4 deletions(-) diff --git a/app/NtscApp.py b/app/NtscApp.py index b1e75cf..54f0bab 100755 --- a/app/NtscApp.py +++ b/app/NtscApp.py @@ -44,8 +44,6 @@ def __init__(self): self.setupUi(self) # Это нужно для инициализации нашего дизайна self.strings = { "_composite_preemphasis": self.tr("Composite preemphasis"), - "_subcarrier_amplitude": self.tr("Subcarrier amplitude"), - "_subcarrier_amplitude_back": self.tr("Subcarrier turnback amplitude"), "_vhs_out_sharpen": self.tr("VHS out sharpen"), "_vhs_edge_wave": self.tr("Edge wave"), "_output_vhs_tape_speed": self.tr("VHS tape speed"), @@ -77,8 +75,6 @@ def __init__(self): "_black_line_cut": self.tr("Cut 2% black line"), } self.add_slider("_composite_preemphasis", 0, 10, float) - self.add_slider("_subcarrier_amplitude", 0, 16384, pro=True) - self.add_slider("_subcarrier_amplitude_back", 0, 16384, pro=True) self.add_slider("_vhs_out_sharpen", 1, 5) self.add_slider("_vhs_edge_wave", 0, 10) # self.add_slider("_output_vhs_tape_speed", 0, 10) From ebbbf384ff8024cacb921ac8383ba2a8ff67aa0d Mon Sep 17 00:00:00 2001 
From: RGM
Date: Mon, 12 Dec 2022 18:05:51 -0300
Subject: [PATCH 12/34] Fix interlacing for non-double frame counts

---
 app/Renderer.py | 7 ++++++-
 1 file changed, 6 insertions(+), 1 deletion(-)

diff --git a/app/Renderer.py b/app/Renderer.py
index 86e3d90..cea72fe 100755
--- a/app/Renderer.py
+++ b/app/Renderer.py
@@ -83,8 +83,13 @@ def run(self):
         #    queue_size=322
         #)
         cap = self.render_data["input_video"]["cap"]
+
+        if(self.input_video["frames_count"] % 2 == 1):
+            new_frame_count = self.render_data["input_video"]["frames_count"]-3
+        else:
+            new_frame_count = self.render_data["input_video"]["frames_count"]-2

-        while (frame_index <= self.render_data["input_video"]["frames_count"]-2):
+        while (frame_index <= new_frame_count):

             if self.pause:
                 self.sendStatus.emit(f"{status_string} [P]")

From 4f479c18ff5e8cd7577a27d9c1696397ba5d2d5d Mon Sep 17 00:00:00 2001
From: RGM
Date: Mon, 12 Dec 2022 18:06:28 -0300
Subject: [PATCH 13/34] Fix part 2 - interlacing

---
 app/NtscApp.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/app/NtscApp.py b/app/NtscApp.py
index 54f0bab..d3f4d2f 100755
--- a/app/NtscApp.py
+++ b/app/NtscApp.py
@@ -551,7 +551,7 @@ def open_video(self, path: Path):
         }

         if(self.input_video["frames_count"] % 2 == 1):
-            new_frame_count = self.input_video["frames_count"]-1
+            new_frame_count = self.input_video["frames_count"]-3
         else:
             new_frame_count = self.input_video["frames_count"]

From 6c95dd26b8c5b2afafb8f7cff24f64684a1e96e3 Mon Sep 17 00:00:00 2001
From: RGM
Date: Mon, 12 Dec 2022 18:10:28 -0300
Subject: [PATCH 14/34] Fix part 3 - interlacing

---
 app/Renderer.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/app/Renderer.py b/app/Renderer.py
index cea72fe..db4f7ae 100755
--- a/app/Renderer.py
+++ b/app/Renderer.py
@@ -84,7 +84,7 @@ def run(self):
         #)
         cap = self.render_data["input_video"]["cap"]

-        if(self.input_video["frames_count"] % 2 == 1):
+        if(self.render_data["input_video"]["frames_count"] % 2 == 1):
             new_frame_count = self.render_data["input_video"]["frames_count"]-3
         else:
             new_frame_count = self.render_data["input_video"]["frames_count"]-2

From 3ee03480fde8c89b9edf5b7a9f582e6bdf7aae01 Mon Sep 17 00:00:00 2001
From: Pavel Khorikov
Date: Mon, 2 Jan 2023 13:52:45 +0700
Subject: [PATCH 15/34] Refactor to be more abstract

---
 app/InterlacedRenderer.py | 10 +++
 app/NtscApp.py | 83 ++++++++++++-------
 app/Renderer.py | 168 ++++++++++++++++++++++++++------------
 3 files changed, 182 insertions(+), 79 deletions(-)
 create mode 100755 app/InterlacedRenderer.py

diff --git a/app/InterlacedRenderer.py b/app/InterlacedRenderer.py
new file mode 100755
index 0000000..497a0ef
--- /dev/null
+++ b/app/InterlacedRenderer.py
@@ -0,0 +1,10 @@
+from app.Renderer import DefaultRenderer
+from app.ntsc import Ntsc
+
+
+class InterlacedRenderer(DefaultRenderer):
+    @staticmethod
+    def apply_main_effect(nt: Ntsc, frame1, frame2=None):
+        raise NotImplementedError()
+        # TODO: RGM
+        return frame

diff --git a/app/NtscApp.py b/app/NtscApp.py
index 730a198..cf40bcd 100755
--- a/app/NtscApp.py
+++ b/app/NtscApp.py
@@ -9,9 +9,10 @@ from PyQt5.QtWidgets import QSlider, QHBoxLayout, QLabel, QCheckBox, QInputDialog, QPushButton
 from numpy import ndarray

+from app.InterlacedRenderer import InterlacedRenderer
 from app.config_dialog import ConfigDialog
 from app.logs import logger
-from app.Renderer import Renderer
+from app.Renderer import DefaultRenderer
 from app.funcs import resize_to_height, pick_save_file, trim_to_4width
 from app.ntsc import random_ntsc, Ntsc
 from ui import mainWindow
@@ -20,8 +21,9 @@ class NtscApp(QtWidgets.QMainWindow, mainWindow.Ui_MainWindow):

     def __init__(self):
+        self.videoRenderer: DefaultRenderer = None
         self.current_frame: numpy.ndarray = False
-        self.preview: numpy.ndarray = False
+        self.next_frame: numpy.ndarray = False
         self.scale_pixmap = False
         self.input_video = {}
         self.templates = {}
@@ -107,7 +109,7 @@ def __init__(self):
         self.add_checkbox("_black_line_cut", (1, 2), pro=False)

         self.renderHeightBox.valueChanged.connect(
-            lambda: self.set_current_frame(self.current_frame)
+            lambda: self.set_current_frames(*self.get_current_video_frames())
         )
         self.openFile.clicked.connect(self.open_file)
         self.renderVideoButton.clicked.connect(self.render_video)
@@ -161,6 +163,13 @@ def add_builtin_templates(self):
             button.clicked.connect(set_values)
             self.templatesLayout.addWidget(button)

+    def get_render_class(self):
+        is_interlaced = False  # Get state from UI choice
+        if is_interlaced:
+            return InterlacedRenderer
+        else:
+            return DefaultRenderer
+
     def setup_renderer(self):
         try:
             self.update_status("Terminating prev renderer")
@@ -174,7 +183,8 @@ def setup_renderer(self):
         # create a thread
         self.thread = QtCore.QThread()
         # create an object that will run the code in another thread
-        self.videoRenderer = Renderer()
+        RendererClass = self.get_render_class()
+        self.videoRenderer = RendererClass()
         # move the object to the other thread
         self.videoRenderer.moveToThread(self.thread)
         # and then connect all the signals and slots
@@ -241,10 +251,11 @@ def lossless_exporting(self):
             pass

     def audio_filtering(self):
-        state = self.ProcessAudioCheckBox.isChecked()
+        # state = self.ProcessAudioCheckBox.isChecked()
+        state = False
         self.ProcessAudio = state
         try:
-            self.videoRenderer.process_audio = state
+            self.videoRenderer.audio_process = state
             logger.debug(f"Process audio: {str(state)}")
         except AttributeError:
             pass
@@ -391,32 +402,46 @@ def add_slider(self, param_name, min_val, max_val, slider_value_type: Union[int,
         self.nt_controls[param_name] = slider
         self.slidersLayout.addWidget(slider_frame)

-    def get_current_video_frame(self):
+    def get_current_video_frames(self):
         preview_h = self.renderHeightBox.value()
         if not self.input_video or preview_h < 10:
-            return None
+            return None, None
         frame_no = self.videoTrackSlider.value()
         self.input_video["cap"].set(1, frame_no)
-        ret, frame = self.input_video["cap"].read()
-        return frame
+        ret, frame1 = self.input_video["cap"].read()

-    def set_current_frame(self, frame):
-        current_frame_valid = isinstance(frame, ndarray)
-        preview_h = self.renderHeightBox.value()
-        if not current_frame_valid or preview_h < 10:
-            self.update_status("Trying to set invalid current frame")
-            return None
+        # Read next frame
+        ret, frame2 = self.input_video["cap"].read()
+        if not ret:
+            frame2 = frame1
+
+        return frame1, frame2

-        self.current_frame = frame
+    def resize_to_preview_frame(self, frame):
+        preview_h = self.renderHeightBox.value()
         try:
             crop_wh = resize_to_height(self.orig_wh, preview_h)
-            self.preview = cv2.resize(frame, crop_wh)
+            frame = cv2.resize(frame, crop_wh)
         except ZeroDivisionError:
             self.update_status("ZeroDivisionError :DDDDDD")
-            pass

-        if self.preview.shape[1] % 4 != 0:
-            self.preview = trim_to_4width(self.preview)
+        if frame.shape[1] % 4 != 0:
+            frame = trim_to_4width(frame)
+
+        return frame
+
+    def set_current_frames(self, frame1: ndarray, frame2=None):
+        if frame2 is None:
+            frame2 = frame1.copy()
+
+        current_frame_valid = isinstance(frame1, ndarray)
+        preview_h = self.renderHeightBox.value()
+        if not current_frame_valid or preview_h < 10:
+
self.update_status("Trying to set invalid current frame") + return None + + self.current_frame = self.resize_to_preview_frame(frame1) + self.next_frame = self.resize_to_preview_frame(frame2) self.nt_update_preview() @@ -475,12 +500,13 @@ def set_render_heigth(self, height): self.renderHeightBox.setValue(height // 120 * 120) def open_image(self, img: numpy.ndarray): + self.setup_renderer() height, width, channels = img.shape self.orig_wh = width, height self.set_render_heigth(height) - self.set_current_frame(img) + self.set_current_frames(img) def nt_get_config(self): values = {} @@ -502,6 +528,7 @@ def nt_set_config(self, values: List[Dict[str, Union[int, float]]]): self.sync_nt_to_sliders() def open_video(self, path: Path): + self.setup_renderer() logger.debug(f"file: {path}") cap = cv2.VideoCapture(str(path.resolve())) logger.debug(f"cap: {cap} isOpened: {cap.isOpened()}") @@ -517,11 +544,11 @@ def open_video(self, path: Path): logger.debug(f"selfinput: {self.input_video}") self.orig_wh = (int(self.input_video["width"]), int(self.input_video["height"])) self.set_render_heigth(self.input_video["height"]) - self.set_current_frame(self.get_current_video_frame()) + self.set_current_frames(*self.get_current_video_frames()) self.videoTrackSlider.setMinimum(1) self.videoTrackSlider.setMaximum(self.input_video["frames_count"]) self.videoTrackSlider.valueChanged.connect( - lambda: self.set_current_frame(self.get_current_video_frame()) + lambda: self.set_current_frames(*self.get_current_video_frames()) ) self.progressBar.setMaximum(self.input_video["frames_count"]) @@ -534,7 +561,7 @@ def render_image(self): image = cv2.resize(self.current_frame, crop_wh) if image.shape[1] % 4 != 0: image = trim_to_4width(image) - image = self.nt_process(image) + image = self.videoRenderer.apply_main_effect(self.nt, frame1=image) is_success, im_buf_arr = cv2.imencode(".png", image) if not is_success: self.update_status("Error while saving (!is_success)") @@ -577,14 +604,14 @@ def nt_update_preview(self): return None if not self.mainEffect: - self.render_preview(self.preview) + self.render_preview(self.current_frame) return None - ntsc_out_image = self.nt_process(self.preview) + ntsc_out_image = self.videoRenderer.apply_main_effect(self.nt, self.current_frame, self.next_frame) if self.compareMode: ntsc_out_image = numpy.concatenate( - (self.preview[:self.preview.shape[0] // 2], ntsc_out_image[ntsc_out_image.shape[0] // 2:]) + (self.current_frame[:self.current_frame.shape[0] // 2], ntsc_out_image[ntsc_out_image.shape[0] // 2:]) ) self.render_preview(ntsc_out_image) diff --git a/app/Renderer.py b/app/Renderer.py index 4b9ab16..829d33b 100755 --- a/app/Renderer.py +++ b/app/Renderer.py @@ -1,5 +1,7 @@ +import abc import time import os +from typing import Tuple, TypedDict import cv2 from PyQt5 import QtCore @@ -8,9 +10,31 @@ from app.logs import logger from app.funcs import resize_to_height, trim_to_4width, expand_to_4width +from app.ntsc import Ntsc -class Renderer(QtCore.QObject): +class Config(TypedDict): + orig_wh: Tuple[int, int] + render_wh: Tuple[int, int] + container_wh: Tuple[int, int] + upscale_2x: bool + lossless: bool + + audio_process: bool + audio_sat_beforevol: float + audio_lowpass: int + audio_noise_volume: float + + +class AbstractRenderer(QtCore.QObject): + + @staticmethod + @abc.abstractmethod + def apply_main_effect(nt: Ntsc, frame1, frame2=None): + raise NotImplementedError() + + +class DefaultRenderer(AbstractRenderer): running = False mainEffect = True pause = False @@ -21,38 +45,99 @@ class 
Renderer(QtCore.QObject): sendStatus = QtCore.pyqtSignal(str) increment_progress = QtCore.pyqtSignal() render_data = {} + current_frame_index = 0 + cap = None + + @staticmethod + def apply_main_effect(nt: Ntsc, frame1, frame2=None): + if frame2 is None: + frame2 = frame1 + + frame = nt.composite_layer(frame1, frame2, field=0, fieldno=1) + frame = cv2.convertScaleAbs(frame) + frame[1:-1:2] = frame[0:-2:2] / 2 + frame[2::2] / 2 + return frame + + def produce_frame(self): + frame = self.cap.read() + if frame is None or not self.running: + self.sendStatus.emit(f'Render stopped. ret(debug):') + return False + + orig_wh = self.config.get("orig_wh") + render_wh = self.config.get("render_wh") + upscale_2x = self.config.get("upscale_2x") + + self.increment_progress.emit() + if orig_wh != render_wh: + frame = cv2.resize(frame, render_wh) + + # crash workaround + if render_wh[0] % 4 != 0: + frame = expand_to_4width(frame) + + if self.mainEffect: + frame = self.apply_main_effect( + nt=self.render_data.get("nt"), + frame1=frame, + ) - lossless = False - - process_audio = False - audio_sat_beforevol = 4.5 - audio_lowpass = 10896 - audio_noise_volume = 0.03 + frame = frame[:, 0:render_wh[0]] - def run(self): - self.running = True + if self.current_frame_index % 10 == 0 or self.liveView: + self.frameMoved.emit(self.current_frame_index) + self.newFrame.emit(frame) - suffix = '.mkv' - - tmp_output = self.render_data['target_file'].parent / f'tmp_{self.render_data["target_file"].stem}{suffix}' + if upscale_2x: + container_wh = self.config.get("container_wh") + frame = cv2.resize(frame, dsize=container_wh, interpolation=cv2.INTER_NEAREST) - upscale_2x = self.render_data["upscale_2x"] + return frame - orig_wh = (self.render_data["input_video"]["width"], self.render_data["input_video"]["height"]) + def set_up(self): + orig_wh = ( + self.render_data["input_video"]["width"], + self.render_data["input_video"]["height"] + ) render_wh = resize_to_height(orig_wh, self.render_data["input_heigth"]) container_wh = render_wh + + upscale_2x = self.render_data["upscale_2x"] if upscale_2x: container_wh = ( render_wh[0] * 2, render_wh[1] * 2, ) + self.config = Config( + upscale_2x=upscale_2x, + container_wh=container_wh, + render_wh=render_wh, + orig_wh=orig_wh, + + lossless=False, + + audio_process=False, + audio_sat_beforevol=4.5, + audio_lowpass=10896, + audio_noise_volume=0.03, + ) + + + def run(self): + self.set_up() + self.running = True + + suffix = '.mkv' + + tmp_output = self.render_data['target_file'].parent / f'tmp_{self.render_data["target_file"].stem}{suffix}' + fourccs = [ cv2.VideoWriter_fourcc(*'mp4v'), # doesn't work on mac os cv2.VideoWriter_fourcc(*'H264') ] - if(self.lossless): + if self.config.get("lossless"): fourcc_choice = cv2.VideoWriter_fourcc(*'FFV1') else: fourcc_choice = fourccs.pop(0) @@ -65,7 +150,7 @@ def run(self): filename=str(tmp_output.resolve()), fourcc=fourcc_choice, fps=self.render_data["input_video"]["orig_fps"], - frameSize=container_wh, + frameSize=self.config.get("container_wh"), ) logger.debug(f'Output video open result: {open_result}') @@ -74,49 +159,30 @@ def run(self): logger.debug(f'Output video: {str(self.render_data["target_file"].resolve())}') #logger.debug(f'Process audio: {self.process_audio}') - frame_index = 0 + self.current_frame_index = 0 self.renderStateChanged.emit(True) - cap = FileVideoStream( + self.cap = FileVideoStream( path=str(self.render_data["input_video"]["path"]), queue_size=322 ).start() - while cap.more(): - + while self.cap.more(): if self.pause: 
self.sendStatus.emit(f"{status_string} [P]") time.sleep(0.3) continue - frame_index += 1 - frame = cap.read() - if frame is None or not self.running: - self.sendStatus.emit(f'Render stopped. ret(debug):') - break - - self.increment_progress.emit() - if orig_wh != render_wh: - frame = cv2.resize(frame, render_wh) - - # crash workaround - if render_wh[0] % 4 != 0: - frame = expand_to_4width(frame) - - if self.mainEffect: - frame = self.render_data["nt"].composite_layer(frame, frame, field=0, fieldno=1) - frame = cv2.convertScaleAbs(frame) - frame[1:-1:2] = frame[0:-2:2] / 2 + frame[2::2] / 2 - - frame = frame[:, 0:render_wh[0]] + self.current_frame_index += 1 + frame = self.produce_frame() - if frame_index % 10 == 0 or self.liveView: - self.frameMoved.emit(frame_index) - self.newFrame.emit(frame) - - if upscale_2x: - frame = cv2.resize(frame, dsize=container_wh, interpolation=cv2.INTER_NEAREST) + status_string = '[CV2] Render progress: {current_frame_index}/{total}'.format( + current_frame_index=self.current_frame_index, + total=self.render_data["input_video"]["frames_count"], + ) + if frame is False: + logger.info(f"Video end or render error {status_string}") + break - status_string = f'[CV2] Render progress: {frame_index}/{self.render_data["input_video"]["frames_count"]}' self.sendStatus.emit(status_string) video.write(frame) @@ -135,7 +201,7 @@ def run(self): final_audio = orig.audio - if(self.process_audio == True): + if(self.audio_process == True): self.sendStatus.emit(f'[FFMPEG] Preparing audio filtering') #tmp_audio = self.render_data['target_file'].parent / f'tmp_audio_{self.render_data["target_file"].stem}.wav' @@ -179,12 +245,12 @@ def run(self): temp_video_stream = ffmpeg.input(str(tmp_output.resolve())) # render_streams.append(temp_video_stream.video) - if self.process_audio: + if self.audio_process: acodec = 'flac' if target_suffix == '.mkv' else 'copy' ff_command = ffmpeg.output(temp_video_stream.video, final_audio, result_path, shortest=None, vcodec='copy', acodec=acodec) else: ff_command = ffmpeg.output(temp_video_stream.video, final_audio, result_path, shortest=None, vcodec='copy', acodec='copy') - + logger.debug(ff_command) logger.debug(' '.join(ff_command.compile())) try: @@ -199,7 +265,7 @@ def run(self): self.sendStatus.emit('[FFMPEG] Audio copy done') tmp_output.unlink() - if self.process_audio: + if self.audio_process: if os.path.exists(tmp_audio): os.remove(tmp_audio) From 8b402d574066a9f0da3b8658c22738f04740c383 Mon Sep 17 00:00:00 2001 From: Pavel Khorikov Date: Mon, 2 Jan 2023 14:41:59 +0700 Subject: [PATCH 16/34] Add read 2 ahead frame buffer --- app/Renderer.py | 43 ++++++++++++++++++++++++++++++++++--------- 1 file changed, 34 insertions(+), 9 deletions(-) diff --git a/app/Renderer.py b/app/Renderer.py index 829d33b..3cd26f0 100755 --- a/app/Renderer.py +++ b/app/Renderer.py @@ -1,12 +1,14 @@ import abc import time import os +from collections import defaultdict from typing import Tuple, TypedDict import cv2 from PyQt5 import QtCore import ffmpeg from imutils.video import FileVideoStream +from numpy import ndarray from app.logs import logger from app.funcs import resize_to_height, trim_to_4width, expand_to_4width @@ -47,6 +49,7 @@ class DefaultRenderer(AbstractRenderer): render_data = {} current_frame_index = 0 cap = None + buffer: dict[int, ndarray] = defaultdict(lambda: None) @staticmethod def apply_main_effect(nt: Ntsc, frame1, frame2=None): @@ -58,17 +61,25 @@ def apply_main_effect(nt: Ntsc, frame1, frame2=None): frame[1:-1:2] = frame[0:-2:2] / 2 + 
frame[2::2] / 2 return frame - def produce_frame(self): - frame = self.cap.read() - if frame is None or not self.running: - self.sendStatus.emit(f'Render stopped. ret(debug):') - return False + def update_buffer(self): + buf = self.buffer + current_index = self.current_frame_index + + if buf[current_index] is None: + current_frame = self.cap.read() + else: + current_frame = buf[current_index] + next_frame = self.cap.read() + if current_index > 0: + del buf[current_index-1] + buf[current_index] = current_frame + buf[current_index+1] = next_frame + + def prepare_frame(self, frame): orig_wh = self.config.get("orig_wh") render_wh = self.config.get("render_wh") - upscale_2x = self.config.get("upscale_2x") - self.increment_progress.emit() if orig_wh != render_wh: frame = cv2.resize(frame, render_wh) @@ -76,10 +87,23 @@ def produce_frame(self): if render_wh[0] % 4 != 0: frame = expand_to_4width(frame) + return frame + + def produce_frame(self): + frame = self.buffer[self.current_frame_index] + if frame is None or not self.running: + self.sendStatus.emit(f'Render stopped. ret(debug):') + return False + + render_wh = self.config.get("render_wh") + upscale_2x = self.config.get("upscale_2x") + + self.increment_progress.emit() + if self.mainEffect: frame = self.apply_main_effect( nt=self.render_data.get("nt"), - frame1=frame, + frame1=self.prepare_frame(frame), ) frame = frame[:, 0:render_wh[0]] @@ -159,7 +183,7 @@ def run(self): logger.debug(f'Output video: {str(self.render_data["target_file"].resolve())}') #logger.debug(f'Process audio: {self.process_audio}') - self.current_frame_index = 0 + self.current_frame_index = -1 self.renderStateChanged.emit(True) self.cap = FileVideoStream( path=str(self.render_data["input_video"]["path"]), @@ -173,6 +197,7 @@ def run(self): continue self.current_frame_index += 1 + self.update_buffer() frame = self.produce_frame() status_string = '[CV2] Render progress: {current_frame_index}/{total}'.format( From 688fdba677a767499d7b683d982bff2006149def Mon Sep 17 00:00:00 2001 From: Pavel Khorikov Date: Mon, 2 Jan 2023 14:55:23 +0700 Subject: [PATCH 17/34] Add next frame context setting --- app/Renderer.py | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/app/Renderer.py b/app/Renderer.py index 3cd26f0..dbfff76 100755 --- a/app/Renderer.py +++ b/app/Renderer.py @@ -22,6 +22,8 @@ class Config(TypedDict): upscale_2x: bool lossless: bool + next_frame_context: bool + audio_process: bool audio_sat_beforevol: float audio_lowpass: int @@ -100,10 +102,17 @@ def produce_frame(self): self.increment_progress.emit() + frame1 = self.prepare_frame(frame) + if self.config.get('next_frame_context'): + frame2 = self.prepare_frame(self.buffer[self.current_frame_index+1]) + else: + frame2 = None + if self.mainEffect: frame = self.apply_main_effect( nt=self.render_data.get("nt"), - frame1=self.prepare_frame(frame), + frame1=frame1, + frame2=frame2, ) frame = frame[:, 0:render_wh[0]] @@ -140,6 +149,7 @@ def set_up(self): orig_wh=orig_wh, lossless=False, + next_frame_context=True, audio_process=False, audio_sat_beforevol=4.5, From 3d95451de69a9fad61bff6a7751c12590b49e395 Mon Sep 17 00:00:00 2001 From: JargeZ Date: Mon, 2 Jan 2023 15:30:18 +0700 Subject: [PATCH 18/34] Revert "Fix part 3 - interlacing" This reverts commit 6c95dd26b8c5b2afafb8f7cff24f64684a1e96e3. 
--- app/Renderer.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/app/Renderer.py b/app/Renderer.py index db4f7ae..cea72fe 100755 --- a/app/Renderer.py +++ b/app/Renderer.py @@ -84,7 +84,7 @@ def run(self): #) cap = self.render_data["input_video"]["cap"] - if(self.render_data["input_video"]["frames_count"] % 2 == 1): + if(self.input_video["frames_count"] % 2 == 1): new_frame_count = self.render_data["input_video"]["frames_count"]-3 else: new_frame_count = self.render_data["input_video"]["frames_count"]-2 From 4aaa55718c4161a377bb21c0fabe62e1c53dc820 Mon Sep 17 00:00:00 2001 From: JargeZ Date: Mon, 2 Jan 2023 15:30:18 +0700 Subject: [PATCH 19/34] Revert "Fix part 2 - interlacing" This reverts commit 4f479c18ff5e8cd7577a27d9c1696397ba5d2d5d. --- app/NtscApp.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/app/NtscApp.py b/app/NtscApp.py index d3f4d2f..54f0bab 100755 --- a/app/NtscApp.py +++ b/app/NtscApp.py @@ -551,7 +551,7 @@ def open_video(self, path: Path): } if(self.input_video["frames_count"] % 2 == 1): - new_frame_count = self.input_video["frames_count"]-3 + new_frame_count = self.input_video["frames_count"]-1 else: new_frame_count = self.input_video["frames_count"] From 1fc4c1de8589ed6f75531c69485ece409c2e8029 Mon Sep 17 00:00:00 2001 From: JargeZ Date: Mon, 2 Jan 2023 15:30:18 +0700 Subject: [PATCH 20/34] Revert "Fix interlacing for non-double frame counts" This reverts commit ebbbf384ff8024cacb921ac8383ba2a8ff67aa0d. --- app/Renderer.py | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/app/Renderer.py b/app/Renderer.py index cea72fe..86e3d90 100755 --- a/app/Renderer.py +++ b/app/Renderer.py @@ -83,13 +83,8 @@ def run(self): # queue_size=322 #) cap = self.render_data["input_video"]["cap"] - - if(self.input_video["frames_count"] % 2 == 1): - new_frame_count = self.render_data["input_video"]["frames_count"]-3 - else: - new_frame_count = self.render_data["input_video"]["frames_count"]-2 - while (frame_index <= new_frame_count): + while (frame_index <= self.render_data["input_video"]["frames_count"]-2): if self.pause: self.sendStatus.emit(f"{status_string} [P]") From b6c0f022c95879714b4c481f7c3c992e75af6d01 Mon Sep 17 00:00:00 2001 From: JargeZ Date: Mon, 2 Jan 2023 15:30:18 +0700 Subject: [PATCH 21/34] Revert "Revert subcarrier thing" This reverts commit d6372298143126b0e630a69bc359d9ea67739a9c. 
--- app/NtscApp.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/app/NtscApp.py b/app/NtscApp.py index 54f0bab..b1e75cf 100755 --- a/app/NtscApp.py +++ b/app/NtscApp.py @@ -44,6 +44,8 @@ def __init__(self): self.setupUi(self) # Это нужно для инициализации нашего дизайна self.strings = { "_composite_preemphasis": self.tr("Composite preemphasis"), + "_subcarrier_amplitude": self.tr("Subcarrier amplitude"), + "_subcarrier_amplitude_back": self.tr("Subcarrier turnback amplitude"), "_vhs_out_sharpen": self.tr("VHS out sharpen"), "_vhs_edge_wave": self.tr("Edge wave"), "_output_vhs_tape_speed": self.tr("VHS tape speed"), @@ -75,6 +77,8 @@ def __init__(self): "_black_line_cut": self.tr("Cut 2% black line"), } self.add_slider("_composite_preemphasis", 0, 10, float) + self.add_slider("_subcarrier_amplitude", 0, 16384, pro=True) + self.add_slider("_subcarrier_amplitude_back", 0, 16384, pro=True) self.add_slider("_vhs_out_sharpen", 1, 5) self.add_slider("_vhs_edge_wave", 0, 10) # self.add_slider("_output_vhs_tape_speed", 0, 10) From cb3b8693aa3abb949394c631ad0871e7560301f5 Mon Sep 17 00:00:00 2001 From: JargeZ Date: Mon, 2 Jan 2023 15:30:19 +0700 Subject: [PATCH 22/34] Revert "Interlacing - First Fix" This reverts commit abadb6c5872044d5cb5a6196e540d1973eb4de7b. --- app/NtscApp.py | 13 +++---------- app/Renderer.py | 14 +++++++++----- 2 files changed, 12 insertions(+), 15 deletions(-) diff --git a/app/NtscApp.py b/app/NtscApp.py index b1e75cf..a91d9d8 100755 --- a/app/NtscApp.py +++ b/app/NtscApp.py @@ -401,7 +401,7 @@ def set_frames(self, cap, frame_no, interlaced: bool): ret1, frame1 = cap.read() if(interlaced): - if((frame_no == self.input_video["frames_count"]-2) or (frame_no+1 == self.input_video["frames_count"]-2)): + if(frame_no == self.input_video["frames_count"]): frame2 = frame1 else: cap.set(1, frame_no+1) @@ -417,7 +417,6 @@ def get_current_video_frame(self): if not self.input_video or preview_h < 10: return None frame_no = self.videoTrackSlider.value() - #self.input_video["cap"].set(1, frame_no) #ret1, frame1 = self.input_video["cap"].read() @@ -553,24 +552,18 @@ def open_video(self, path: Path): "path": path, "suffix": path.suffix.lower(), } - - if(self.input_video["frames_count"] % 2 == 1): - new_frame_count = self.input_video["frames_count"]-1 - else: - new_frame_count = self.input_video["frames_count"] - logger.debug(f"selfinput: {self.input_video}") self.orig_wh = (int(self.input_video["width"]), int(self.input_video["height"])) self.set_render_height(self.input_video["height"]) self.set_current_frame(self.get_current_video_frame()[0],self.get_current_video_frame()[1]) self.videoTrackSlider.setMinimum(1) - self.videoTrackSlider.setMaximum(new_frame_count) + self.videoTrackSlider.setMaximum(self.input_video["frames_count"]) self.videoTrackSlider.setSingleStep(2) print(str(self.videoTrackSlider.singleStep())) self.videoTrackSlider.valueChanged.connect( lambda: self.set_current_frame(self.get_current_video_frame()[0],self.get_current_video_frame()[1]) ) - self.progressBar.setMaximum(self.input_video["frames_count"] // 2) + self.progressBar.setMaximum(round(self.input_video["frames_count"] / 2)) def render_image(self): target_file = pick_save_file(self, title='Save frame as', suffix='.png') diff --git a/app/Renderer.py b/app/Renderer.py index 86e3d90..d253554 100755 --- a/app/Renderer.py +++ b/app/Renderer.py @@ -84,17 +84,22 @@ def run(self): #) cap = self.render_data["input_video"]["cap"] - while (frame_index <= self.render_data["input_video"]["frames_count"]-2): + 
print(int(self.render_data["input_video"]["frames_count"] / 2)) + + while (frame_index <= self.render_data["input_video"]["frames_count"]): if self.pause: self.sendStatus.emit(f"{status_string} [P]") time.sleep(0.3) continue + #frame_index += 1 + #frame = cap.read() + cap.set(1, frame_index) ret1, frame1 = cap.read() - if((frame_index == self.render_data["input_video"]["frames_count"]-2) or (frame_index+1 == self.render_data["input_video"]["frames_count"]-2)): + if(frame_index == self.render_data["input_video"]["frames_count"]): frame2 = frame1 else: cap.set(1, frame_index+1) @@ -139,14 +144,13 @@ def run(self): if upscale_2x: frame = cv2.resize(frame, dsize=container_wh, interpolation=cv2.INTER_NEAREST) - status_string = f'[CV2] Render progress: {og_frame_index}/{self.render_data["input_video"]["frames_count"] // 2}' + status_string = f'[CV2] Render progress: {og_frame_index}/{round(self.render_data["input_video"]["frames_count"] / 2)}' self.sendStatus.emit(status_string) video.write(frame) frame_index += 2 og_frame_index += 1 - - if((frame_index > self.render_data["input_video"]["frames_count"]) or (frame_index+1 > self.render_data["input_video"]["frames_count"])): + if((frame_index > self.render_data["input_video"]["frames_count"]) or (frame_index+2 > self.render_data["input_video"]["frames_count"])): frame_index = self.render_data["input_video"]["frames_count"] video.release() From 2086cd0c222ab5e9adc996d44945bc9b6bbe1225 Mon Sep 17 00:00:00 2001 From: JargeZ Date: Mon, 2 Jan 2023 15:30:19 +0700 Subject: [PATCH 23/34] Revert "Finished! - Version 1" This reverts commit e32a5609b55e57c1e517d2d7532ce0e367eaa669. --- app/NtscApp.py | 56 +++++++++++++------------------------------ app/Renderer.py | 63 ++++++++++++------------------------------------- 2 files changed, 32 insertions(+), 87 deletions(-) diff --git a/app/NtscApp.py b/app/NtscApp.py index a91d9d8..834f0fd 100755 --- a/app/NtscApp.py +++ b/app/NtscApp.py @@ -396,45 +396,26 @@ def add_slider(self, param_name, min_val, max_val, slider_value_type: Union[int, self.nt_controls[param_name] = slider self.slidersLayout.addWidget(slider_frame) - def set_frames(self, cap, frame_no, interlaced: bool): - cap.set(1, frame_no) - ret1, frame1 = cap.read() - - if(interlaced): - if(frame_no == self.input_video["frames_count"]): - frame2 = frame1 - else: - cap.set(1, frame_no+1) - ret2, frame2 = cap.read() - cap.set(1, frame_no) - else: - frame2 = frame1 - - return frame1, frame2 - def get_current_video_frame(self): preview_h = self.renderHeightBox.value() if not self.input_video or preview_h < 10: return None frame_no = self.videoTrackSlider.value() - #self.input_video["cap"].set(1, frame_no) - #ret1, frame1 = self.input_video["cap"].read() - - #if(frame_no == self.input_video["frames_count"]): - # frame2 = frame1 - #else: - # self.input_video["cap"].set(1, frame_no+1) - # ret2, frame2 = self.input_video["cap"].read() - # self.input_video["cap"].set(1, frame_no) - - #return frame1, frame2 + self.input_video["cap"].set(1, frame_no) + ret, frame1 = self.input_video["cap"].read() - return self.set_frames(cap=self.input_video["cap"],frame_no=frame_no,interlaced=True) + if(frame_no == self.input_video["frames_count"]): + frame2 = frame1 + else: + self.input_video["cap"].set(1, frame_no+1) + frame2 = self.input_video["cap"].read() + self.input_video["cap"].set(1, frame_no) + + return frame1, frame2 def set_current_frame(self, frame1, frame2): - #current_frame_valid = isinstance(frame1, ndarray) - current_frame_valid = True - + current_frame_valid = 
isinstance(frame1, ndarray) + preview_h = self.renderHeightBox.value() if not current_frame_valid or preview_h < 10: self.update_status("Trying to set invalid current frame") @@ -558,12 +539,10 @@ def open_video(self, path: Path): self.set_current_frame(self.get_current_video_frame()[0],self.get_current_video_frame()[1]) self.videoTrackSlider.setMinimum(1) self.videoTrackSlider.setMaximum(self.input_video["frames_count"]) - self.videoTrackSlider.setSingleStep(2) - print(str(self.videoTrackSlider.singleStep())) self.videoTrackSlider.valueChanged.connect( lambda: self.set_current_frame(self.get_current_video_frame()[0],self.get_current_video_frame()[1]) ) - self.progressBar.setMaximum(round(self.input_video["frames_count"] / 2)) + self.progressBar.setMaximum(self.input_video["frames_count"]) def render_image(self): target_file = pick_save_file(self, title='Save frame as', suffix='.png') @@ -606,14 +585,13 @@ def render_video(self): def nt_process(self, frame1: ndarray, frame2: ndarray) -> ndarray: f1 = self.nt.composite_layer(frame1, frame1, field=0, fieldno=2) - f2_in = cv2.warpAffine(frame2, numpy.float32([[1, 0, 0], [0, 1, 1]]), (frame2.shape[1], frame2.shape[0]+2)) - f2 = self.nt.composite_layer(f2_in, f2_in, field=2, fieldno=2) + f2 = self.nt.composite_layer(frame2, frame2, field=2, fieldno=2) f1_out = cv2.convertScaleAbs(f1) f2_out = cv2.convertScaleAbs(f2) ntsc_out_image = f1_out - #ntsc_out_image[1:-1:2] = ntsc_out_image[0:-2:2] / 2 + f2_out[2::2] / 2 - ntsc_out_image[1::2] = f2_out[2::2] + #ntsc_out_image[1:-1:2] = ntsc_out_image[0:-2:2] / 2 + ntsc_out_image[2::2] / 2 + ntsc_out_image[1::2,:] = f2_out[::2,:] return ntsc_out_image def nt_update_preview(self): @@ -630,7 +608,7 @@ def nt_update_preview(self): if self.compareMode: ntsc_out_image = numpy.concatenate( - (self.preview1[:self.preview1.shape[0] // 2], ntsc_out_image[ntsc_out_image.shape[0] // 2:]) + (self.preview1[:self.preview.shape[0] // 2], ntsc_out_image[ntsc_out_image.shape[0] // 2:]) ) self.render_preview(ntsc_out_image) diff --git a/app/Renderer.py b/app/Renderer.py index d253554..4b9ab16 100755 --- a/app/Renderer.py +++ b/app/Renderer.py @@ -9,7 +9,6 @@ from app.logs import logger from app.funcs import resize_to_height, trim_to_4width, expand_to_4width -import numpy class Renderer(QtCore.QObject): running = False @@ -65,7 +64,7 @@ def run(self): open_result = video.open( filename=str(tmp_output.resolve()), fourcc=fourcc_choice, - fps=(self.render_data["input_video"]["orig_fps"] / 2), + fps=self.render_data["input_video"]["orig_fps"], frameSize=container_wh, ) logger.debug(f'Output video open result: {open_result}') @@ -76,64 +75,37 @@ def run(self): #logger.debug(f'Process audio: {self.process_audio}') frame_index = 0 - og_frame_index = 0 self.renderStateChanged.emit(True) - #cap = FileVideoStream( - # path=str(self.render_data["input_video"]["path"]), - # queue_size=322 - #) - cap = self.render_data["input_video"]["cap"] + cap = FileVideoStream( + path=str(self.render_data["input_video"]["path"]), + queue_size=322 + ).start() - print(int(self.render_data["input_video"]["frames_count"] / 2)) - - while (frame_index <= self.render_data["input_video"]["frames_count"]): + while cap.more(): if self.pause: self.sendStatus.emit(f"{status_string} [P]") time.sleep(0.3) continue - #frame_index += 1 - #frame = cap.read() - - cap.set(1, frame_index) - ret1, frame1 = cap.read() - - if(frame_index == self.render_data["input_video"]["frames_count"]): - frame2 = frame1 - else: - cap.set(1, frame_index+1) - ret2, frame2 = cap.read() 
- cap.set(1, frame_index) - - if frame1 is None or not self.running: + frame_index += 1 + frame = cap.read() + if frame is None or not self.running: self.sendStatus.emit(f'Render stopped. ret(debug):') break self.increment_progress.emit() if orig_wh != render_wh: - frame1 = cv2.resize(frame1, render_wh) - frame2 = cv2.resize(frame2, render_wh) + frame = cv2.resize(frame, render_wh) # crash workaround if render_wh[0] % 4 != 0: - frame1 = expand_to_4width(frame1) - frame2 = expand_to_4width(frame2) + frame = expand_to_4width(frame) if self.mainEffect: - #frame = self.render_data["nt"].composite_layer(frame, frame, field=0, fieldno=1) - #frame = cv2.convertScaleAbs(frame) - #frame[1:-1:2] = frame[0:-2:2] / 2 + frame[2::2] / 2 - - f1 = self.render_data["nt"].composite_layer(frame1, frame1, field=0, fieldno=2) - f2_in = cv2.warpAffine(frame2, numpy.float32([[1, 0, 0], [0, 1, 1]]), (frame2.shape[1], frame2.shape[0]+2)) - f2 = self.render_data["nt"].composite_layer(f2_in, f2_in, field=2, fieldno=2) - f1_out = cv2.convertScaleAbs(f1) - f2_out = cv2.convertScaleAbs(f2) - - frame = f1_out - #ntsc_out_image[1:-1:2] = ntsc_out_image[0:-2:2] / 2 + ntsc_out_image[2::2] / 2 - frame[1::2,:] = f2_out[2::2,:] + frame = self.render_data["nt"].composite_layer(frame, frame, field=0, fieldno=1) + frame = cv2.convertScaleAbs(frame) + frame[1:-1:2] = frame[0:-2:2] / 2 + frame[2::2] / 2 frame = frame[:, 0:render_wh[0]] @@ -144,15 +116,10 @@ def run(self): if upscale_2x: frame = cv2.resize(frame, dsize=container_wh, interpolation=cv2.INTER_NEAREST) - status_string = f'[CV2] Render progress: {og_frame_index}/{round(self.render_data["input_video"]["frames_count"] / 2)}' + status_string = f'[CV2] Render progress: {frame_index}/{self.render_data["input_video"]["frames_count"]}' self.sendStatus.emit(status_string) video.write(frame) - frame_index += 2 - og_frame_index += 1 - if((frame_index > self.render_data["input_video"]["frames_count"]) or (frame_index+2 > self.render_data["input_video"]["frames_count"])): - frame_index = self.render_data["input_video"]["frames_count"] - video.release() orig_path = str(self.render_data["input_video"]["path"].resolve()) From 61567f33a92527ce7c3151f5eb73f5548f41d402 Mon Sep 17 00:00:00 2001 From: JargeZ Date: Mon, 2 Jan 2023 15:30:20 +0700 Subject: [PATCH 24/34] Revert "Another fix" This reverts commit a9d403a69dd95335cb11aef4a69fa441da373fcb. --- app/NtscApp.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/app/NtscApp.py b/app/NtscApp.py index 834f0fd..c19562c 100755 --- a/app/NtscApp.py +++ b/app/NtscApp.py @@ -94,7 +94,7 @@ def __init__(self): self.add_slider("_video_chroma_loss", 0, 800) self.add_slider("_video_noise", 0, 4200) self.add_slider("_video_scanline_phase_shift", 0, 270, pro=True) - self.add_slider("_video_scanline_phase_shift_offset", 0, 3, pro=True) + #self.add_slider("_video_scanline_phase_shift_offset", 0, 3, pro=True) self.add_slider("_head_switching_speed", 0, 100) From 1a6d8918bb7de875280835016f1376106d1c3f6f Mon Sep 17 00:00:00 2001 From: JargeZ Date: Mon, 2 Jan 2023 15:30:20 +0700 Subject: [PATCH 25/34] Revert "New fix" This reverts commit 00e44fc9d985278fee19a2360d9d14a77a80f9d6. 
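Note: the render loop restored by this revert drops the per-pair cap.set() seeking and goes back to imutils' threaded FileVideoStream, which decodes frames on a worker thread into a bounded queue so the composite pass never stalls on disk reads. A minimal sketch of that consumer pattern, assuming imutils is installed; the input path and process_frame() are hypothetical stand-ins:

    from imutils.video import FileVideoStream

    fvs = FileVideoStream("input.mp4", queue_size=322).start()
    frame_index = 0
    while fvs.more():
        frame = fvs.read()   # next pre-decoded frame off the queue
        if frame is None:    # decoder thread ran out of frames
            break
        frame_index += 1
        # process_frame(frame)  # hypothetical stand-in for the NTSC pass
    fvs.stop()

Seeking with cap.set() before every read forces the decoder to re-enter the stream, so sequential reads are markedly cheaper when each frame is visited once.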
--- app/NtscApp.py | 6 ++++-- app/ntsc.py | 4 +++- 2 files changed, 7 insertions(+), 3 deletions(-) diff --git a/app/NtscApp.py b/app/NtscApp.py index c19562c..1249654 100755 --- a/app/NtscApp.py +++ b/app/NtscApp.py @@ -584,8 +584,10 @@ def render_video(self): self.thread.start() def nt_process(self, frame1: ndarray, frame2: ndarray) -> ndarray: - f1 = self.nt.composite_layer(frame1, frame1, field=0, fieldno=2) - f2 = self.nt.composite_layer(frame2, frame2, field=2, fieldno=2) + moire_pos = 0 + + f1 = self.nt.composite_layer(frame1, frame1, field=0, fieldno=2, moirepos=moire_pos) + f2 = self.nt.composite_layer(frame2, frame2, field=2, fieldno=2, moirepos=moire_pos) f1_out = cv2.convertScaleAbs(f1) f2_out = cv2.convertScaleAbs(f2) diff --git a/app/ntsc.py b/app/ntsc.py index 78901da..dd7d6c0 100755 --- a/app/ntsc.py +++ b/app/ntsc.py @@ -644,9 +644,11 @@ def emulate_vhs(self, yiq: numpy.ndarray, field: int, fieldno: int): self.chroma_into_luma(yiq, field, fieldno, self._subcarrier_amplitude) self.chroma_from_luma(yiq, field, fieldno, self._subcarrier_amplitude) - def composite_layer(self, dst: numpy.ndarray, src: numpy.ndarray, field: int, fieldno: int): + def composite_layer(self, dst: numpy.ndarray, src: numpy.ndarray, field: int, fieldno: int, moirepos: int): assert dst.shape == src.shape, "dst and src images must be of same shape" + self._video_scanline_phase_shift_offset = moirepos + if self._black_line_cut: cut_black_line_border(src) From 9af5fd8215a1dda1d63b0697d976ff2ec6462791 Mon Sep 17 00:00:00 2001 From: JargeZ Date: Mon, 2 Jan 2023 15:30:21 +0700 Subject: [PATCH 26/34] Revert "Composite layer fix" This reverts commit a3f1e823c25f883ad3e17768e573dce738b72255. --- app/ntsc.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/app/ntsc.py b/app/ntsc.py index dd7d6c0..78901da 100755 --- a/app/ntsc.py +++ b/app/ntsc.py @@ -644,11 +644,9 @@ def emulate_vhs(self, yiq: numpy.ndarray, field: int, fieldno: int): self.chroma_into_luma(yiq, field, fieldno, self._subcarrier_amplitude) self.chroma_from_luma(yiq, field, fieldno, self._subcarrier_amplitude) - def composite_layer(self, dst: numpy.ndarray, src: numpy.ndarray, field: int, fieldno: int, moirepos: int): + def composite_layer(self, dst: numpy.ndarray, src: numpy.ndarray, field: int, fieldno: int): assert dst.shape == src.shape, "dst and src images must be of same shape" - self._video_scanline_phase_shift_offset = moirepos - if self._black_line_cut: cut_black_line_border(src) From c11fe411f8e0e82266fa733eca0423e4059b6f69 Mon Sep 17 00:00:00 2001 From: JargeZ Date: Mon, 2 Jan 2023 15:30:21 +0700 Subject: [PATCH 27/34] Revert "Add some thing" This reverts commit 1b6771623f40318898982249aba18e6db9f9b53a. --- app/NtscApp.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/app/NtscApp.py b/app/NtscApp.py index 1249654..167ec1e 100755 --- a/app/NtscApp.py +++ b/app/NtscApp.py @@ -432,9 +432,8 @@ def set_current_frame(self, frame1, frame2): self.update_status("ZeroDivisionError :DDDDDD") pass - if self.preview1.shape[1] % 4 != 0: + if self.preview.shape[1] % 4 != 0: self.preview1 = trim_to_4width(self.preview1) - if self.preview2.shape[1] % 4 != 0: self.preview2 = trim_to_4width(self.preview2) self.nt_update_preview() From 182b9934e45c382dba38dc4a99c13a5fb7aae210 Mon Sep 17 00:00:00 2001 From: JargeZ Date: Mon, 2 Jan 2023 15:30:22 +0700 Subject: [PATCH 28/34] Revert "First add to feature" This reverts commit 96aad7fbfa2f32f5d011ff23ebac86c81777c88c. 
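Note: reverted or not, this hunk shows the heart of the interlacing feature: two consecutive frames are rendered as opposite fields (field=0 and field=2) of a single NTSC frame and then woven together by scanline parity. Stripped of the app plumbing, the weave is plain NumPy row slicing; a sketch with a hypothetical weave_fields helper, assuming both renders are full-height and field 0 owns the even rows:

    import numpy as np

    def weave_fields(f1_out: np.ndarray, f2_out: np.ndarray) -> np.ndarray:
        # Even scanlines from the field-0 render, odd scanlines from
        # the field-1 render of the following frame.
        out = f1_out.copy()
        out[1::2, :] = f2_out[1::2, :]
        return out

The in-tree variants copy f2_out[::2] or f2_out[2::2] into the odd rows instead, because composite_layer writes the processed field onto particular source rows, so source parity and destination parity need not match.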
--- app/NtscApp.py | 71 ++++++++++++++++---------------------------------- 1 file changed, 23 insertions(+), 48 deletions(-) diff --git a/app/NtscApp.py b/app/NtscApp.py index 167ec1e..30880b4 100755 --- a/app/NtscApp.py +++ b/app/NtscApp.py @@ -21,7 +21,6 @@ class NtscApp(QtWidgets.QMainWindow, mainWindow.Ui_MainWindow): def __init__(self): self.current_frame: numpy.ndarray = False - self.next_frame: numpy.ndarray = False self.preview: numpy.ndarray = False self.scale_pixmap = False self.input_video = {} @@ -44,8 +43,6 @@ def __init__(self): self.setupUi(self) # Это нужно для инициализации нашего дизайна self.strings = { "_composite_preemphasis": self.tr("Composite preemphasis"), - "_subcarrier_amplitude": self.tr("Subcarrier amplitude"), - "_subcarrier_amplitude_back": self.tr("Subcarrier turnback amplitude"), "_vhs_out_sharpen": self.tr("VHS out sharpen"), "_vhs_edge_wave": self.tr("Edge wave"), "_output_vhs_tape_speed": self.tr("VHS tape speed"), @@ -77,8 +74,6 @@ def __init__(self): "_black_line_cut": self.tr("Cut 2% black line"), } self.add_slider("_composite_preemphasis", 0, 10, float) - self.add_slider("_subcarrier_amplitude", 0, 16384, pro=True) - self.add_slider("_subcarrier_amplitude_back", 0, 16384, pro=True) self.add_slider("_vhs_out_sharpen", 1, 5) self.add_slider("_vhs_edge_wave", 0, 10) # self.add_slider("_output_vhs_tape_speed", 0, 10) @@ -94,7 +89,7 @@ def __init__(self): self.add_slider("_video_chroma_loss", 0, 800) self.add_slider("_video_noise", 0, 4200) self.add_slider("_video_scanline_phase_shift", 0, 270, pro=True) - #self.add_slider("_video_scanline_phase_shift_offset", 0, 3, pro=True) + self.add_slider("_video_scanline_phase_shift_offset", 0, 3, pro=True) self.add_slider("_head_switching_speed", 0, 100) @@ -112,7 +107,7 @@ def __init__(self): self.add_checkbox("_black_line_cut", (1, 2), pro=False) self.renderHeightBox.valueChanged.connect( - lambda: self.set_current_frame(self.current_frame,self.next_frame) + lambda: self.set_current_frame(self.current_frame) ) self.openFile.clicked.connect(self.open_file) self.renderVideoButton.clicked.connect(self.render_video) @@ -402,39 +397,26 @@ def get_current_video_frame(self): return None frame_no = self.videoTrackSlider.value() self.input_video["cap"].set(1, frame_no) - ret, frame1 = self.input_video["cap"].read() + ret, frame = self.input_video["cap"].read() + return frame - if(frame_no == self.input_video["frames_count"]): - frame2 = frame1 - else: - self.input_video["cap"].set(1, frame_no+1) - frame2 = self.input_video["cap"].read() - self.input_video["cap"].set(1, frame_no) - - return frame1, frame2 - - def set_current_frame(self, frame1, frame2): - current_frame_valid = isinstance(frame1, ndarray) - + def set_current_frame(self, frame): + current_frame_valid = isinstance(frame, ndarray) preview_h = self.renderHeightBox.value() if not current_frame_valid or preview_h < 10: self.update_status("Trying to set invalid current frame") return None - self.current_frame = frame1 - self.next_frame = frame2 - + self.current_frame = frame try: crop_wh = resize_to_height(self.orig_wh, preview_h) - self.preview1 = cv2.resize(frame1, crop_wh) - self.preview2 = cv2.resize(frame2, crop_wh) + self.preview = cv2.resize(frame, crop_wh) except ZeroDivisionError: self.update_status("ZeroDivisionError :DDDDDD") pass if self.preview.shape[1] % 4 != 0: - self.preview1 = trim_to_4width(self.preview1) - self.preview2 = trim_to_4width(self.preview2) + self.preview = trim_to_4width(self.preview) self.nt_update_preview() @@ -484,7 +466,7 @@ def 
set_image_mode(self): self.livePreviewCheckbox.hide() self.renderVideoButton.hide() - def set_render_height(self, height): + def set_render_heigth(self, height): if height > 600: self.renderHeightBox.setValue(600) self.update_status( @@ -496,9 +478,9 @@ def open_image(self, img: numpy.ndarray): height, width, channels = img.shape self.orig_wh = width, height - self.set_render_height(height) + self.set_render_heigth(height) - self.set_current_frame(img,img) + self.set_current_frame(img) def nt_get_config(self): values = {} @@ -534,12 +516,12 @@ def open_video(self, path: Path): } logger.debug(f"selfinput: {self.input_video}") self.orig_wh = (int(self.input_video["width"]), int(self.input_video["height"])) - self.set_render_height(self.input_video["height"]) - self.set_current_frame(self.get_current_video_frame()[0],self.get_current_video_frame()[1]) + self.set_render_heigth(self.input_video["height"]) + self.set_current_frame(self.get_current_video_frame()) self.videoTrackSlider.setMinimum(1) self.videoTrackSlider.setMaximum(self.input_video["frames_count"]) self.videoTrackSlider.valueChanged.connect( - lambda: self.set_current_frame(self.get_current_video_frame()[0],self.get_current_video_frame()[1]) + lambda: self.set_current_frame(self.get_current_video_frame()) ) self.progressBar.setMaximum(self.input_video["frames_count"]) @@ -552,7 +534,7 @@ def render_image(self): image = cv2.resize(self.current_frame, crop_wh) if image.shape[1] % 4 != 0: image = trim_to_4width(image) - image = self.nt_process(image,image) + image = self.nt_process(image) is_success, im_buf_arr = cv2.imencode(".png", image) if not is_success: self.update_status("Error while saving (!is_success)") @@ -582,17 +564,10 @@ def render_video(self): self.videoRenderer.render_data = render_data self.thread.start() - def nt_process(self, frame1: ndarray, frame2: ndarray) -> ndarray: - moire_pos = 0 - - f1 = self.nt.composite_layer(frame1, frame1, field=0, fieldno=2, moirepos=moire_pos) - f2 = self.nt.composite_layer(frame2, frame2, field=2, fieldno=2, moirepos=moire_pos) - f1_out = cv2.convertScaleAbs(f1) - f2_out = cv2.convertScaleAbs(f2) - - ntsc_out_image = f1_out - #ntsc_out_image[1:-1:2] = ntsc_out_image[0:-2:2] / 2 + ntsc_out_image[2::2] / 2 - ntsc_out_image[1::2,:] = f2_out[::2,:] + def nt_process(self, frame) -> ndarray: + _ = self.nt.composite_layer(frame, frame, field=0, fieldno=1) + ntsc_out_image = cv2.convertScaleAbs(_) + ntsc_out_image[1:-1:2] = ntsc_out_image[0:-2:2] / 2 + ntsc_out_image[2::2] / 2 return ntsc_out_image def nt_update_preview(self): @@ -602,14 +577,14 @@ def nt_update_preview(self): return None if not self.mainEffect: - self.render_preview(self.preview1) + self.render_preview(self.preview) return None - ntsc_out_image = self.nt_process(self.preview1,self.preview2) + ntsc_out_image = self.nt_process(self.preview) if self.compareMode: ntsc_out_image = numpy.concatenate( - (self.preview1[:self.preview.shape[0] // 2], ntsc_out_image[ntsc_out_image.shape[0] // 2:]) + (self.preview[:self.preview.shape[0] // 2], ntsc_out_image[ntsc_out_image.shape[0] // 2:]) ) self.render_preview(ntsc_out_image) From 8c2ed4979c2668d36eba14e8b2eb93e30de2edcb Mon Sep 17 00:00:00 2001 From: JargeZ Date: Mon, 2 Jan 2023 15:30:22 +0700 Subject: [PATCH 29/34] Revert "Another comment" This reverts commit d91dd4d5379cd088febdf8f6255b813045e71816. 
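Note: with the two-frame path fully reverted, nt_process is back to the single-frame approximation: render one field, then synthesize the missing scanlines by averaging each interior odd row from the even rows around it. A self-contained sketch of that fill step, with a hypothetical fake_missing_field helper:

    import numpy as np

    def fake_missing_field(img: np.ndarray) -> np.ndarray:
        # Each interior odd scanline becomes the mean of the even rows
        # above and below it. Dividing each operand by 2 first keeps
        # the intermediate under 256, so uint8 input cannot wrap.
        out = img.copy()
        out[1:-1:2] = out[0:-2:2] / 2 + out[2::2] / 2
        return out

The division promotes the uint8 rows to float64, and the slice assignment truncates the result back into the uint8 array, which is exactly what the restored one-liner relies on.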
--- app/NtscApp.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/app/NtscApp.py b/app/NtscApp.py index 30880b4..139ed50 100755 --- a/app/NtscApp.py +++ b/app/NtscApp.py @@ -559,7 +559,7 @@ def render_video(self): self.setup_renderer() self.toggle_main_effect() self.lossless_exporting() - #self.audio_filtering() + self.audio_filtering() self.progressBar.setValue(1) self.videoRenderer.render_data = render_data self.thread.start() From 5e11678d5c56b6f4a9f5413efb5d7c01197142c0 Mon Sep 17 00:00:00 2001 From: JargeZ Date: Mon, 2 Jan 2023 20:24:03 +0700 Subject: [PATCH 30/34] fix corners --- app/Renderer.py | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/app/Renderer.py b/app/Renderer.py index dbfff76..17a6020 100755 --- a/app/Renderer.py +++ b/app/Renderer.py @@ -83,7 +83,11 @@ def prepare_frame(self, frame): render_wh = self.config.get("render_wh") if orig_wh != render_wh: - frame = cv2.resize(frame, render_wh) + try: + frame = cv2.resize(frame, render_wh) + except Exception as e: + logger.exception(e) + raise e # crash workaround if render_wh[0] % 4 != 0: @@ -104,7 +108,11 @@ def produce_frame(self): frame1 = self.prepare_frame(frame) if self.config.get('next_frame_context'): - frame2 = self.prepare_frame(self.buffer[self.current_frame_index+1]) + fr = self.buffer[self.current_frame_index + 1] + if fr is not None: + frame2 = self.prepare_frame(fr) + else: + frame2 = None else: frame2 = None @@ -114,6 +122,8 @@ def produce_frame(self): frame1=frame1, frame2=frame2, ) + else: + frame = frame1 frame = frame[:, 0:render_wh[0]] From f7887c3874c18b41badac2e0c75f5341c59e148f Mon Sep 17 00:00:00 2001 From: JargeZ Date: Mon, 2 Jan 2023 20:28:01 +0700 Subject: [PATCH 31/34] move ui-setter to helper func --- app/NtscApp.py | 7 +------ app/funcs.py | 9 +++++++++ 2 files changed, 10 insertions(+), 6 deletions(-) diff --git a/app/NtscApp.py b/app/NtscApp.py index a45ef88..1792c3d 100755 --- a/app/NtscApp.py +++ b/app/NtscApp.py @@ -301,12 +301,7 @@ def sync_nt_to_sliders(self): if isinstance(element, QSlider) and isinstance(value, float): value = int(value) - element.blockSignals(True) - if isinstance(value, bool): - element.setChecked(value) - elif isinstance(value, (int, float)): - element.setValue(value) - element.blockSignals(False) + set_ui_element(element, value) related_label = element.parent().findChild(QLabel, parameter_name) if related_label: diff --git a/app/funcs.py b/app/funcs.py index a85ce86..29172c8 100755 --- a/app/funcs.py +++ b/app/funcs.py @@ -51,3 +51,12 @@ def expand_to_4width(img: numpy.ndarray) -> numpy.ndarray: height, width, channels = img.shape logger.debug(f"┗FIX to wh: {width}x{height} w%4={width % 4}") return img + + +def set_ui_element(element, value): + element.blockSignals(True) + if isinstance(value, bool): + element.setChecked(value) + elif isinstance(value, (int, float)): + element.setValue(value) + element.blockSignals(False) From 901f375cbddd79eddafa946a58deba352537702c Mon Sep 17 00:00:00 2001 From: JargeZ Date: Mon, 2 Jan 2023 20:35:38 +0700 Subject: [PATCH 32/34] Fix last frame preview --- app/NtscApp.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/app/NtscApp.py b/app/NtscApp.py index 1792c3d..c510127 100755 --- a/app/NtscApp.py +++ b/app/NtscApp.py @@ -426,8 +426,6 @@ def resize_to_preview_frame(self, frame): return frame def set_current_frames(self, frame1: ndarray, frame2=None): - if frame2 is None: - frame2 = frame1.copy() current_frame_valid = isinstance(frame1, ndarray) preview_h = 
self.renderHeightBox.value() @@ -435,6 +433,9 @@ def set_current_frames(self, frame1: ndarray, frame2=None): self.update_status("Trying to set invalid current frame") return None + if frame2 is None: + frame2 = frame1.copy() + self.current_frame = self.resize_to_preview_frame(frame1) self.next_frame = self.resize_to_preview_frame(frame2) From cee9781f06539c356e5b15594ce424cf2c26d1bf Mon Sep 17 00:00:00 2001 From: JargeZ Date: Mon, 2 Jan 2023 20:36:16 +0700 Subject: [PATCH 33/34] Implement InterlacedRenderer --- app/InterlacedRenderer.py | 33 ++++++++++++++++++++--- app/NtscApp.py | 55 ++++++++++++++++++++++++++++----------- 2 files changed, 70 insertions(+), 18 deletions(-) mode change 100755 => 100644 app/InterlacedRenderer.py diff --git a/app/InterlacedRenderer.py b/app/InterlacedRenderer.py old mode 100755 new mode 100644 index 497a0ef..2310846 --- a/app/InterlacedRenderer.py +++ b/app/InterlacedRenderer.py @@ -1,3 +1,4 @@ +import cv2 from app.Renderer import DefaultRenderer from app.ntsc import Ntsc @@ -5,6 +6,32 @@ class InterlacedRenderer(DefaultRenderer): @staticmethod def apply_main_effect(nt: Ntsc, frame1, frame2=None): - raise NotImplementedError() - # TODO: RGM - return frame + if frame2 is None: + frame2 = frame1 + + frame1 = nt.composite_layer(frame1, frame1, field=0, fieldno=0) + frame1 = cv2.convertScaleAbs(frame1) + frame2 = nt.composite_layer(frame2, frame2, field=2, fieldno=1) + frame2 = cv2.convertScaleAbs(frame2) + + # import numpy as np + # debug1 = np.concatenate((frame1.copy(), frame2), axis=1) + # debug2 = np.concatenate((frame1[0:-2:2], frame2[2::2]), axis=1) + # frame1[1:-1:2] = frame1[0:-2:2] / 2 + frame2[2::2] / 2 + # debug3 = np.concatenate( + # ( + # frame1, + # frame1 + # ), axis=1) + # + # debug = cv2.vconcat((debug1, debug2, debug3)) + # return debug + # TODO: Ensure, that we combine + # N N+1 RESULT + # [A, A, A] [b, b, b] [A, A, A] + # [A, A, A] [b, b, b] [b, b, b] + # [A, A, A] [b, b, b] [A, A, A] + # for now im not sure in field and fieldno behaviour + + frame1[1:-1:2] = frame1[0:-2:2] / 2 + frame2[2::2] / 2 + return frame1 diff --git a/app/NtscApp.py b/app/NtscApp.py index c510127..a56273f 100755 --- a/app/NtscApp.py +++ b/app/NtscApp.py @@ -13,14 +13,17 @@ from app.config_dialog import ConfigDialog from app.logs import logger from app.Renderer import DefaultRenderer -from app.funcs import resize_to_height, pick_save_file, trim_to_4width +from app.funcs import resize_to_height, pick_save_file, set_ui_element, trim_to_4width from app.ntsc import random_ntsc, Ntsc from ui import mainWindow from ui.DoubleSlider import DoubleSlider + + class NtscApp(QtWidgets.QMainWindow, mainWindow.Ui_MainWindow): def __init__(self): + self.RendererClass = DefaultRenderer self.videoRenderer: DefaultRenderer = None self.current_frame: numpy.ndarray = False self.next_frame: numpy.ndarray = False @@ -74,6 +77,7 @@ def __init__(self): "_vhs_svideo_out": self.tr("VHS svideo out"), "_output_ntsc": self.tr("NTSC output"), "_black_line_cut": self.tr("Cut 2% black line"), + "interlaced": self.tr("Interlaced"), } self.add_slider("_composite_preemphasis", 0, 10, float) self.add_slider("_vhs_out_sharpen", 1, 5) @@ -107,9 +111,10 @@ def __init__(self): self.add_checkbox("_vhs_svideo_out", (5, 2), pro=True) self.add_checkbox("_output_ntsc", (6, 1), pro=True) self.add_checkbox("_black_line_cut", (1, 2), pro=False) + self.add_checkbox("interlaced", (1, 2), pro=False) self.renderHeightBox.valueChanged.connect( - lambda: self.set_current_frames(*self.get_current_video_frames()) + lambda: 
self.set_current_frames(self.current_frame, self.next_frame) ) self.openFile.clicked.connect(self.open_file) self.renderVideoButton.clicked.connect(self.render_video) @@ -163,12 +168,17 @@ def add_builtin_templates(self): button.clicked.connect(set_values) self.templatesLayout.addWidget(button) - def get_render_class(self): - is_interlaced = False # Get state from UI choice + def update_render_class(self): + is_interlaced = self.nt_controls["interlaced"].isChecked() if is_interlaced: - return InterlacedRenderer + logger.debug("Use InterlacedRenderer") + Cls = InterlacedRenderer else: - return DefaultRenderer + logger.debug("Use DefaultRenderer") + Cls = DefaultRenderer + + self.RendererClass = Cls + self.nt_update_preview() def setup_renderer(self): try: @@ -183,8 +193,7 @@ def setup_renderer(self): # создадим поток self.thread = QtCore.QThread() # создадим объект для выполнения кода в другом потоке - RendererClass = self.get_render_class() - self.videoRenderer = RendererClass() + self.videoRenderer = self.RendererClass() # перенесём объект в другой поток self.videoRenderer.moveToThread(self.thread) # после чего подключим все сигналы и слоты @@ -293,7 +302,10 @@ def set_render_state(self, is_render_active): def sync_nt_to_sliders(self): for parameter_name, element in self.nt_controls.items(): - value = getattr(self.nt, parameter_name) + if parameter_name.startswith("_"): + value = getattr(self.nt, parameter_name) + else: + continue # This is necessary because some parameters that have a real float type, but in the interface, # the slide is simplified to int. However, when setting the initial parameters that occur here, @@ -321,6 +333,9 @@ def value_changed_slot(self): elif isinstance(element, QCheckBox): value = element.isChecked() + if parameter_name == "interlaced": + self.update_render_class() + logger.debug(f"Set {parameter_name} to {value}") setattr(self.nt, parameter_name, value) self.nt_update_preview() @@ -400,6 +415,7 @@ def add_slider(self, param_name, min_val, max_val, slider_value_type: Union[int, def get_current_video_frames(self): preview_h = self.renderHeightBox.value() if not self.input_video or preview_h < 10: + logger.debug(f"{self.input_video=} {preview_h=}") return None, None frame_no = self.videoTrackSlider.value() self.input_video["cap"].set(1, frame_no) @@ -493,16 +509,21 @@ def set_render_heigth(self, height): self.update_status( self.tr('The image resolution is large. 
For the best effect, the output height is set to 600')) else: - self.renderHeightBox.setValue(height // 120 * 120) + h = height // 120 * 120 + if h < 10: + self.renderHeightBox.setValue(120) + else: + self.renderHeightBox.setValue(h) + def open_image(self, img: numpy.ndarray): - self.setup_renderer() + self.update_render_class() height, width, channels = img.shape self.orig_wh = width, height self.set_render_heigth(height) - self.set_current_frames(img) + self.set_current_frames(img, None) def nt_get_config(self): values = {} @@ -519,12 +540,16 @@ def nt_get_config(self): def nt_set_config(self, values: List[Dict[str, Union[int, float]]]): for parameter_name, value in values.items(): - setattr(self.nt, parameter_name, value) + if parameter_name.startswith("_"): + setattr(self.nt, parameter_name, value) + else: + element = self.nt_controls[parameter_name] + set_ui_element(element, value) self.sync_nt_to_sliders() def open_video(self, path: Path): - self.setup_renderer() + self.update_render_class() logger.debug(f"file: {path}") cap = cv2.VideoCapture(str(path.resolve())) logger.debug(f"cap: {cap} isOpened: {cap.isOpened()}") @@ -603,7 +628,7 @@ def nt_update_preview(self): self.render_preview(self.current_frame) return None - ntsc_out_image = self.videoRenderer.apply_main_effect(self.nt, self.current_frame, self.next_frame) + ntsc_out_image = self.RendererClass.apply_main_effect(self.nt, self.current_frame, self.next_frame) if self.compareMode: ntsc_out_image = numpy.concatenate( From 57b9b161be59a74b9097b899e3d244fd7fa729d1 Mon Sep 17 00:00:00 2001 From: RGM <40833244+rgm89git@users.noreply.github.com> Date: Thu, 23 Feb 2023 08:42:17 -0300 Subject: [PATCH 34/34] Using copyMakeBorder instead of warpAffine --- app/InterlacedRenderer.py | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/app/InterlacedRenderer.py b/app/InterlacedRenderer.py index 2310846..7fd5814 100644 --- a/app/InterlacedRenderer.py +++ b/app/InterlacedRenderer.py @@ -11,8 +11,14 @@ def apply_main_effect(nt: Ntsc, frame1, frame2=None): frame1 = nt.composite_layer(frame1, frame1, field=0, fieldno=0) frame1 = cv2.convertScaleAbs(frame1) - frame2 = nt.composite_layer(frame2, frame2, field=2, fieldno=1) + #frame2 = nt.composite_layer(frame2, frame2, field=2, fieldno=1) + #frame2 = cv2.convertScaleAbs(frame2) + + frame2 = cv2.copyMakeBorder(frame2,1,0,0,0,cv2.BORDER_CONSTANT) + frame2 = nt.composite_layer(frame2, frame2, field=2, fieldno=2) frame2 = cv2.convertScaleAbs(frame2) + frame = frame1 + frame[1::2,:] = frame2[2::2,:] # import numpy as np # debug1 = np.concatenate((frame1.copy(), frame2), axis=1) @@ -33,5 +39,5 @@ def apply_main_effect(nt: Ntsc, frame1, frame2=None): # [A, A, A] [b, b, b] [A, A, A] # for now im not sure in field and fieldno behaviour - frame1[1:-1:2] = frame1[0:-2:2] / 2 + frame2[2::2] / 2 - return frame1 + # frame1[1:-1:2] = frame1[0:-2:2] / 2 + frame2[2::2] / 2 + return frame
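Note: this last patch replaces the one-row downward shift of frame2: instead of a warpAffine translation it pads a constant row on top with copyMakeBorder. For a purely integer shift the two produce identical pixels, but padding prepends a row without running a geometric resampling pass over the whole frame. A minimal comparison, assuming a BGR uint8 frame:

    import cv2
    import numpy as np

    frame = np.random.randint(0, 256, (480, 640, 3), dtype=np.uint8)

    # warpAffine route: translate down one row onto a canvas one row taller.
    m = np.float32([[1, 0, 0], [0, 1, 1]])
    by_warp = cv2.warpAffine(frame, m, (frame.shape[1], frame.shape[0] + 1))

    # copyMakeBorder route: prepend one constant (black) row, no resampling.
    by_pad = cv2.copyMakeBorder(frame, 1, 0, 0, 0, cv2.BORDER_CONSTANT)

    assert by_warp.shape == by_pad.shape
    assert (by_warp == by_pad).all()

After the pad the frame is one row taller and its odd source rows sit on even indices, so composite_layer renders it as field=2 and frame2[2::2] supplies the odd scanlines of the woven output via frame[1::2, :].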