diff --git a/.gitignore b/.gitignore
index a1e5691..3d795b9 100644
--- a/.gitignore
+++ b/.gitignore
@@ -4,7 +4,7 @@
 *.pdb
 *.bak
 **/*.bkp.*
-
+pipfile
 
 # Byte-compiled / optimized / DLL files
 __pycache__/
diff --git a/dev_src/_fs_utils.py b/dev_src/_fs_utils.py
index 48e0200..37f5082 100644
--- a/dev_src/_fs_utils.py
+++ b/dev_src/_fs_utils.py
@@ -199,7 +199,7 @@ def _get_tree_size(path, limit=None, must_read=False):
 	return total
 
 
-def _get_tree_path_n_size(path, limit=-1, must_read=False, path_type="full"):
+def _get_tree_path_n_size(path, limit=-1, must_read=False, path_type="full", add_dirs=False):
 	"""
 	returns a list of files[size, path] in a directory and its subdirectories.
 	[ [`path`, size], ... ]
@@ -212,18 +212,20 @@
 	total = 0
 	start_path = path
 
-	for entry in walk_dir(path):
-		try:
-			size = entry.stat(follow_symlinks=False).st_size
-		except OSError:
-			continue
-		total += size
+	for entry in walk_dir(path, yield_dir=add_dirs):
+		size = 0
+		if not entry.is_dir():
+			try:
+				size = entry.stat(follow_symlinks=False).st_size
+			except OSError:
+				continue
+			total += size
 
-		if limit>0 and total>limit:
-			raise LimitExceed
+			if limit>0 and total>limit:
+				raise LimitExceed
 
-		if must_read and not check_access(entry.path):
-			continue
+			if must_read and not check_access(entry.path):
+				continue
 
 		if path_type == "full":
@@ -376,7 +378,8 @@ def dir_navigator(path):
 
 	for i in range(1, len(dirs)-1):
 		dir = dirs[i]
-		urls.append(urls[i-1] + urllib.parse.quote(dir, errors='surrogatepass' )+ '/' if not dir.endswith('/') else "")
+		# urls.append(urls[i-1] + urllib.parse.quote(dir, errors='surrogatepass' )+ '/' if not dir.endswith('/') else "")
+		urls.append(urls[i-1] + dir+ '/' if not dir.endswith('/') else "")
 		names.append(dir)
 
 	for i in range(len(names)):
diff --git a/dev_src/_page_templates.py b/dev_src/_page_templates.py
index 68b6221..1c6f53f 100644
--- a/dev_src/_page_templates.py
+++ b/dev_src/_page_templates.py
@@ -79,6 +79,9 @@ def admin_page_script():
 def error_page_script():
 	return get_template("script_error_page.js")
 
+def zip_page_script():
+	return get_template("script_zip_page.js")
+
diff --git a/dev_src/_zipfly_manager.py b/dev_src/_zipfly_manager.py
index b0c14d6..2595fc1 100644
--- a/dev_src/_zipfly_manager.py
+++ b/dev_src/_zipfly_manager.py
@@ -97,6 +97,21 @@ def generator(self):
 				# arcname will be default path
 				path[self.arcname] = path[self.filesystem]
 
+
+			if os.path.isdir(path[self.filesystem]):
+				print(path[self.filesystem])
+				if os.listdir(path[self.filesystem]):
+					continue # not empty
+				print("empty")
+				# Write empty directory:
+				z_info = zipfile.ZipInfo(path[self.arcname] + '/')
+				z_info.compress_type = zipfile.ZIP_STORED
+
+
+				zf.writestr(z_info, b'')
+
+				yield stream.get(), self.ezs
+				continue
 			z_info = zipfile.ZipInfo.from_file(
 				path[self.filesystem],
 				path[self.arcname],
@@ -137,15 +152,15 @@ def __call__(self, *key):
 
 
 class FixSizeOrderedDict(OrderedDict, Callable_dict):
-	def __init__(self, *args, max=0, **kwargs):
-		self._max = max
-		super().__init__(*args, **kwargs)
+	def __init__(self, *args, max=0, **kwargs):
+		self._max = max
+		super().__init__(*args, **kwargs)
 
-	def __setitem__(self, key, value):
-		OrderedDict.__setitem__(self, key, value)
-		if self._max > 0:
-			if len(self) > self._max:
-				self.popitem(False)
+	def __setitem__(self, key, value):
+		OrderedDict.__setitem__(self, key, value)
+		if self._max > 0:
+			if len(self) > self._max:
+				self.popitem(False)
 
 class ZIP_Manager:
 	def __init__(self, zip_allowed, size_limit=-1) -> None:
@@ -193,7 +208,7 @@ def get_id(self, path, size=None):
 		source_m_time = get_dir_m_time(path)
 		if size is None:
 			try:
-				fs = _get_tree_path_n_size(path, must_read=True, limit=self.size_limit, path_type="both")
+				fs = _get_tree_path_n_size(path, must_read=True, limit=self.size_limit, path_type="both", add_dirs=True)
 			except LimitExceed as e:
 				self.calculating.pop(path) # make sure to remove calculating flag (MAJOR BUG)
 				raise e
@@ -255,7 +270,7 @@ def err(msg):
 
 		if not self.calculation_cache(zid):
 			try:
-				fs = _get_tree_path_n_size(path, must_read=True, path_type="both", limit=self.size_limit)
+				fs = _get_tree_path_n_size(path, must_read=True, path_type="both", limit=self.size_limit, add_dirs=True)
 			except LimitExceed as e:
 				return err("DIRECTORY SIZE LIMIT EXCEED")
 			source_size = sum(i[1] for i in fs)
@@ -322,3 +337,22 @@ def err(msg):
 
 	def archive_thread(self, path, zid, size=None):
 		return threading.Thread(target=self.archive, args=(path, zid, size))
+
+
+if __name__ == "__main__":
+	paths = [
+		{
+			'fs': 'test(hahah)'
+		},
+	]
+
+	zfly = ZipFly(paths = paths)
+
+	generator = zfly.generator()
+	print (generator)
+	#
+
+
+	with open("large.zip", "wb") as f:
+		for i in generator:
+			f.write(i[0])
\ No newline at end of file
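Note on the generator() change above: zipfile has no direct "add empty folder" API, so the new branch writes a zero-byte entry whose arcname ends in "/", which extractors treat as a directory. A minimal standalone sketch of that trick (file and folder names here are illustrative, not from the codebase):

import zipfile

with zipfile.ZipFile("demo.zip", "w") as zf:
	# An entry whose name ends in "/" and whose body is empty
	# is read back as a directory by extractors.
	z_info = zipfile.ZipInfo("empty_folder/")
	z_info.compress_type = zipfile.ZIP_STORED  # nothing to compress
	zf.writestr(z_info, b"")

with zipfile.ZipFile("demo.zip") as zf:
	print(zf.namelist())                         # ['empty_folder/']
	print(zf.getinfo("empty_folder/").is_dir())  # True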
diff --git a/dev_src/clone.py b/dev_src/clone.py
index 74fe62f..d3d78f9 100644
--- a/dev_src/clone.py
+++ b/dev_src/clone.py
@@ -34,6 +34,7 @@ def get_list_dir(path):
 
 def check_exist(url, path, check_method):
 	# print(check_method)
+	print("CHECK: ", url)
 
 	try:
 		header = session.head(url).headers
@@ -56,7 +57,7 @@ def check_exist(url, path, check_method):
 	if not os.path.isfile(path):
 		return False
 
-	if check_method =="date":
+	if check_method.startswith("date"):
 		tt = os.path.getmtime(path)
 		# tt = fs.st_mtime
@@ -67,7 +68,10 @@ def check_exist(url, path, check_method):
 		# remove microseconds, like in If-Modified-Since
 		local_last_modify = local_last_modify.replace(microsecond=0)
 
-		if local_last_modify == original_modify:
+		if local_last_modify == original_modify and check_method == "date":
+			return True
+
+		if local_last_modify >= original_modify and check_method == "date+":
 			return True
 
 		# print("LOCAL: ", path, "==", local_last_modify)
@@ -133,7 +137,7 @@ def dl(url, path, overwrite, check_method):
 
 from concurrent.futures import ThreadPoolExecutor, as_completed
 
-executor = ThreadPoolExecutor(8)
+executor = ThreadPoolExecutor(6)
 
 futures = []
@@ -144,6 +148,9 @@ def clone(url, path = "./", overwrite = False, check_exist = "date", delete_extr
 	overwrite: overwrite existing files reguardless of checking existance (False)
 	check_exist: check if file exists by "date" or "size" or None to ignore totally (date)
 	"""
+	if url[-1] != "/":
+		url += "/"
+
 	Q = Queue()
 
 	def get_json(url):
@@ -162,12 +169,13 @@ def run_Q(url, path = "./", overwrite = False, check_exist = "date", delete_extr
 
 	if path[-1] != "/":
 		path += "/"
+
+	os.makedirs(path, exist_ok=True) # make sure the directory exists even if it's empty
 
 	json = get_json(url)
 	if not json:
 		return
 
-	os.makedirs(path, exist_ok=True)
 
 	remote_list = []
@@ -208,7 +216,13 @@ def run_Q(url, path = "./", overwrite = False, check_exist = "date", delete_extr
 
 if __name__ == "__main__":
-	clone("SOURCE_DIR", "DESTINATION_DIR", False, "date", True)
+	clone(
+		url="http://192.168.0.108:6969/7%2C%2CVP%20%20424-425/",
+		path="./",
+		overwrite=False,
+		check_exist="date+",
+		delete_extras=False
+	)
 
 	for future in as_completed(futures):
 		bool(future.result())
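For context on the clone.py change above: the "date"/"date+" modes compare the local file mtime against the server's Last-Modified header — "date" demands an exact match, while the new "date+" also accepts a newer local copy. A self-contained sketch of that comparison (function and variable names are illustrative, not the module's own):

import os
from datetime import datetime, timezone
from email.utils import parsedate_to_datetime

def is_up_to_date(local_path, last_modified_header, method="date"):
	# Parse e.g. "Wed, 21 Oct 2015 07:28:00 GMT" into an aware datetime.
	remote = parsedate_to_datetime(last_modified_header)
	local = datetime.fromtimestamp(os.path.getmtime(local_path), tz=timezone.utc)
	local = local.replace(microsecond=0)  # Last-Modified only has second precision
	if method == "date":
		return local == remote
	if method == "date+":
		return local >= remote  # a local copy of the same age or newer is fine
	return False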
diff --git a/dev_src/html_page.html b/dev_src/html_page.html
index 44161eb..0046c9a 100644
--- a/dev_src/html_page.html
+++ b/dev_src/html_page.html
@@ -157,6 +157,12 @@
+<div id="zip-page">
+	<h2>ZIPPING FOLDER</h2>
+	<h3 id="zip-prog">Progress</h3>
+	<h3 id="zip-perc"></h3>
+</div>
+
 Admin Page
@@ -209,6 +215,8 @@
 This page requires JS enabled
 
+<script src="?zip_page_script"></script>
+
diff --git a/dev_src/html_zip_page.html b/dev_src/html_zip_page.html
deleted file mode 100644
index 93675a0..0000000
--- a/dev_src/html_zip_page.html
+++ /dev/null
@@ -1,62 +0,0 @@
-ZIPPING FOLDER
-Progress
-[... rest of the 62-line zip page; markup lost in extraction, only visible text survived ...]
-pyroBox UI v4 - I ❤️ emoji!
\ No newline at end of file
diff --git a/dev_src/local_server_pyrobox.py b/dev_src/local_server_pyrobox.py
index f8c2dee..62c23f7 100644
--- a/dev_src/local_server_pyrobox.py
+++ b/dev_src/local_server_pyrobox.py
@@ -551,6 +551,52 @@ def get_size_n_count(self: SH, *args, **kwargs):
 		"count": count}, cookie=cookie)
 
 
+@SH.on_req('HEAD', hasQ=("zip_id", "czip"))
+def get_zip_id(self: SH, *args, **kwargs):
+	"""Return ZIP ID status"""
+	user, cookie = Authorize_user(self)
+
+	if not user: # guest or not will be handled in Authentication
+		return self.send_text(pt.login_page(), HTTPStatus.UNAUTHORIZED, cookie=cookie)
+
+	if not user.ZIP:
+		return self.send_error(HTTPStatus.UNAUTHORIZED, "You are not authorized to perform this action", cookie=cookie)
+
+	if not (user.DOWNLOAD and user.VIEW):
+		return self.send_error(HTTPStatus.UNAUTHORIZED, "You are not authorized to perform this action", cookie=cookie)
+
+	if CoreConfig.disabled_func["zip"]:
+		return self.return_txt("ERROR: ZIP FEATURE IS UNAVAILABLE !", HTTPStatus.INTERNAL_SERVER_ERROR, cookie=cookie)
+
+
+	path = kwargs.get('path', '')
+	os_path = self.translate_path(path)
+	spathsplit = kwargs.get('spathsplit', '')
+	filename = spathsplit[-2] + ".zip"
+
+	zid = None
+	status = False
+	message = ''
+
+	try:
+		zid = zip_manager.get_id(os_path)
+		status = True
+
+	except LimitExceed:
+		message = 'Directory size limit exceed'
+
+	except Exception:
+		self.log_error(traceback.format_exc())
+		message = 'Failed to create zip'
+
+	return self.send_json({
+		"status": status,
+		"message": message,
+		"zid": zid,
+		"filename": filename
+	}, cookie=cookie)
+
+
 @SH.on_req('HEAD', hasQ="czip")
 def create_zip(self: SH, *args, **kwargs):
 	"""Create ZIP task and return ID
@@ -562,14 +608,17 @@ def create_zip(self: SH, *args, **kwargs):
 	if not user: # guest or not will be handled in Authentication
 		return self.send_text(pt.login_page(), HTTPStatus.UNAUTHORIZED, cookie=cookie)
 
+	if not user.ZIP:
+		return self.send_error(HTTPStatus.UNAUTHORIZED, "You are not authorized to perform this action", cookie=cookie)
+
+	if not (user.DOWNLOAD and user.VIEW):
+		return self.send_error(HTTPStatus.UNAUTHORIZED, "You are not authorized to perform this action", cookie=cookie)
+
+	if CoreConfig.disabled_func["zip"]:
+		return self.return_txt("ERROR: ZIP FEATURE IS UNAVAILABLE !", HTTPStatus.INTERNAL_SERVER_ERROR, cookie=cookie)
 
-	path = kwargs.get('path', '')
 	url_path = kwargs.get('url_path', '')
-	spathsplit = kwargs.get('spathsplit', '')
 
-	if CoreConfig.disabled_func["zip"] or (not user.ZIP):
-		return self.return_txt("ERROR: ZIP FEATURE IS UNAVAILABLE !", HTTPStatus.INTERNAL_SERVER_ERROR, cookie=cookie)
 
 	# dir_size = get_dir_size(path, limit=6*1024*1024*1024)
@@ -578,27 +627,16 @@ def create_zip(self: SH, *args, **kwargs):
 	# 	return self.return_txt(HTTPStatus.OK, msg)
 
 	displaypath = self.get_displaypath(url_path)
-	filename = spathsplit[-2] + ".zip"
 
 	title = "Creating ZIP"
 
-	head = pt.directory_explorer_header().safe_substitute(PY_PAGE_TITLE=title,
+	data = pt.directory_explorer_header().safe_substitute(PY_PAGE_TITLE=title,
 										PY_PUBLIC_URL=CoreConfig.address(),
 										PY_DIR_TREE_NO_JS=dir_navigator(displaypath))
+
+	return self.return_txt(data, cookie=cookie)
 
-	try:
-		zid = zip_manager.get_id(path)
-
-		tail = pt.zip_script().safe_substitute(PY_ZIP_ID = zid,
-											PY_ZIP_NAME = filename)
-		return self.return_txt(f"{head} {tail}", cookie=cookie)
-	except LimitExceed:
-		tail = "Directory size is too large, please contact the host"
-		return self.return_txt(f"{head} {tail}", HTTPStatus.SERVICE_UNAVAILABLE, cookie=cookie)
-	except Exception:
-		self.log_error(traceback.format_exc())
-		return self.return_txt("ERROR", cookie=cookie)
 
 
 @SH.on_req('HEAD', hasQ="zip")
 def get_zip(self: SH, *args, **kwargs):
@@ -609,9 +647,19 @@ def get_zip(self: SH, *args, **kwargs):
 	if not user: # guest or not will be handled in Authentication
 		return self.send_text(pt.login_page(), HTTPStatus.UNAUTHORIZED, cookie=cookie)
 
+	if not user.ZIP:
+		return self.send_error(HTTPStatus.UNAUTHORIZED, "You are not authorized to perform this action", cookie=cookie)
+
+	if not (user.DOWNLOAD and user.VIEW):
+		return self.send_error(HTTPStatus.UNAUTHORIZED, "You are not authorized to perform this action", cookie=cookie)
+
+	if CoreConfig.disabled_func["zip"]:
+		return self.return_txt("ERROR: ZIP FEATURE IS UNAVAILABLE !", HTTPStatus.INTERNAL_SERVER_ERROR, cookie=cookie)
+
 	path = kwargs.get('path', '')
+	os_path = self.translate_path(path)
 	spathsplit = kwargs.get('spathsplit', '')
 
 	query = self.query
@@ -623,7 +671,7 @@ def reply(status, msg=""):
 			"message": msg
 		}, cookie=cookie)
 
-	if not os.path.isdir(path):
+	if not os.path.isdir(os_path):
 		msg = "Folder not found. Failed to create zip"
 		self.log_error(msg)
 		return reply("ERROR", msg)
@@ -638,7 +686,7 @@ def reply(status, msg=""):
 		return reply("CALCULATING")
 
 	if not zip_manager.zip_id_status(id):
-		t = zip_manager.archive_thread(path, id)
+		t = zip_manager.archive_thread(os_path, id)
 		t.start()
 
 		return reply("SUCCESS", "ARCHIVING")
@@ -646,9 +694,9 @@ def reply(status, msg=""):
 	if zip_manager.zip_id_status[id] == "DONE":
 		if query("download"):
-			path = zip_manager.zip_ids[id]
+			zip_path = zip_manager.zip_ids[id]
 
-			return self.return_file(path, filename, True, cookie=cookie)
+			return self.return_file(zip_path, filename, True, cookie=cookie)
 
 		if query("progress"):
@@ -828,6 +876,11 @@ def send_error_page_script(self: SH, *args, **kwargs):
 	"""Send error page script"""
 	return self.send_script(pt.error_page_script())
 
+@SH.on_req('HEAD', hasQ="zip_page_script")
+def send_zip_page_script(self: SH, *args, **kwargs):
+	"""Send zip page script"""
+	return self.send_script(pt.zip_page_script())
+
 
 
 @SH.on_req('HEAD', hasQ="login")
diff --git a/dev_src/pyroDB.py b/dev_src/pyroDB.py
index a814794..bc998ec 100644
--- a/dev_src/pyroDB.py
+++ b/dev_src/pyroDB.py
@@ -428,17 +428,17 @@ def deldb(self):
 
 
 # DUMMY CLASS FOR TYPING
-# class PickleTable:
-# 	pass
+class PickleTable(dict):
+	pass
 
-# class _PickleTCell:
-# 	pass
+class _PickleTCell:
+	pass
 
-# class _PickleTRow:
-# 	pass
+class _PickleTRow(dict):
+	pass
 
-# class _PickleTColumn:
-# 	pass
+class _PickleTColumn(list):
+	pass
 
 
@@ -550,7 +550,7 @@ def column(self, name) -> list:
 		'''Return a copy list of all values in column'''
 		self.rescan()
 		return self._pk.db[name].copy()
-
+
 	def get_column(self, name) -> list:
 		'''Return the list pointer to the column (unsafe)'''
 		return self._pk.db[name]
@@ -601,6 +601,9 @@ def add(name):
 
 			self._pk.db[name].extend([None] * tsize)
 
+		if isinstance(names[0], Iterable) and not isinstance(names[0], str) and not isinstance(names[0], bytes) and len(names) == 1:
+			names = names[0]
+
 		for name in names:
 			add(name)
 
@@ -737,7 +740,7 @@ def check(item, is_in):
 
 				yield ret(col=col, row=r)
 
-	def find_1st(self, kw, column=None , row=None, full_match=False, return_obj=True) -> Union["_PickleTCell", None]:
+	def find_1st(self, kw, column=None , row=None, full_match=False, return_obj=True) -> Union[_PickleTCell, None]:
 		"""
 		search a keyword in a cell/row/column/entire sheet
 		and return the 1st matched cell object
 		"""
@@ -837,9 +840,26 @@ def del_row_id(self, row_id:int, AD=True):
 		"""
 		self.del_row(self.ids.index(row_id), AD=AD)
 
+	def clear(self, AD=True):
+		"""
+		Delete all rows
+		AD: auto dump
+		"""
+		self.rescan()
+
+		for c in self.column_names:
+			self._pk.db[c].clear()
 
-	def _add_row(self, row:Union[dict, "_PickleTRow"], position:int="last") -> "_PickleTRow":
+		self.ids.clear()
+		self.height = 0
+
+		if AD:
+			self.auto_dump()
+
+
+
+	def _add_row(self, row:Union[dict, _PickleTRow], position:int="last") -> _PickleTRow:
 		"""
 		# row: row must be a dict or _PickleTRow containing column names and values
 		"""
@@ -871,7 +891,7 @@ def _add_row(self, row:Union[dict, "_PickleTRow"], position:int="last") -> "_Pic
 
 		return self.row_obj_by_id(row_id)
 
-	def add_row(self, row:Union[dict, "_PickleTRow"], position="last", AD=True) -> "_PickleTRow":
+	def add_row(self, row:Union[dict, _PickleTRow], position="last", AD=True) -> _PickleTRow:
 		"""
 		@ locked
 		* row: row must be a dict|TableRow containing column names and values
@@ -885,7 +905,7 @@ def add_row(self, row:Union[dict, "_PickleTRow"], position="last", AD=True) -> "
 
 		return row_obj
 
-	def insert_row(self, row:Union[dict, "_PickleTRow"], position:int, AD=True) -> "_PickleTRow":
+	def insert_row(self, row:Union[dict, _PickleTRow], position:int, AD=True) -> _PickleTRow:
 		"""
 		@ locked
 		* row: row must be a dict|_PickleTRow containing column names and values
@@ -896,7 +916,7 @@ def insert_row(self, row:Union[dict, "_PickleTRow"], position:int, AD=True) -> "
 
 
-	def add_row_as_list(self, row:list, position:int="last", AD=True) -> "_PickleTRow":
+	def add_row_as_list(self, row:list, position:int="last", AD=True) -> _PickleTRow:
 		"""
 		@ locked
 		* row: row must be a list containing values
@@ -925,10 +945,11 @@ def dump(self):
 	def auto_dump(self):
 		self._pk._autodumpdb()
 
-	def to_csv(self, filename):
+	def to_csv(self, filename, write_header=True):
 		with open(filename, "w", newline='', encoding='utf8') as f:
 			writer = csv.writer(f)
-			writer.writerow(self.column_names) # header
+			if write_header:
+				writer.writerow(self.column_names) # header
 			for row in self.rows():
 				writer.writerow([row[k] for k in self.column_names])
 
@@ -998,7 +1019,7 @@ def set(self, value, AD=True):
 
 	def row(self):
 		self.source_check()
-		return self.source.row_by_id(self.id)
+		return self.source.ids.index(self.id)
 
 	def row_obj(self):
 		"""
@@ -1237,95 +1258,95 @@ def del_column(self):
+if __name__ == "__main__":
 
-
-import string
-def Lower_string(length): # define the function and pass the length as argument
-	# Print the string in Lowercase
-	result = ''.join((random.choice(string.ascii_lowercase) for x in range(length))) # run loop until the define length
-	return result
+	import string
+	def Lower_string(length): # define the function and pass the length as argument
+		# Print the string in Lowercase
+		result = ''.join((random.choice(string.ascii_lowercase) for x in range(length))) # run loop until the define length
+		return result
 
-def test():
-	st = time.perf_counter()
-	tb = PickleTable("__test.pdb")
-	tt = time.perf_counter()
-	print(f"load time: {tt-st}s")
+	def test():
+		st = time.perf_counter()
+		tb = PickleTable("__test.pdb")
+		tt = time.perf_counter()
+		print(f"load time: {tt-st}s")
 
-	print("Existing table height:", tb.height)
+		print("Existing table height:", tb.height)
 
-	tb.add_column("x", exist_ok=1, AD=False) # no dumps
-	tb.add_column("Ysz", exist_ok=1, AD=False ) # no dumps
-	tb.add_column("Y", exist_ok=1, AD=False) # no dumps
+		tb.add_column("x", exist_ok=1, AD=False) # no dumps
+		tb.add_column("Ysz", exist_ok=1, AD=False ) # no dumps
+		tb.add_column("Y", exist_ok=1, AD=False) # no dumps
 
-	print("adding")
-	for n in range(int(100)):
-		tb._add_row({"x":n, "Y":'🍎'})
+		print("adding")
+		for n in range(int(100)):
+			tb._add_row({"x":n, "Y":'🍎'})
 
-		#print(n)
+			#print(n)
 
-	tb.add_column("m", exist_ok=1, AD=False) # no dumps
+		tb.add_column("m", exist_ok=1, AD=False) # no dumps
 
-	print(tb)
-	dt = time.perf_counter()
-	tb.dump()
-	tt = time.perf_counter()
-	print(f"dump time: {tt-dt}s")
+		print(tb)
+		dt = time.perf_counter()
+		tb.dump()
+		tt = time.perf_counter()
+		print(f"dump time: {tt-dt}s")
 
-	dt = time.perf_counter()
-	# col = tb.column_obj("x")
-	# for i in range(10,20,2):
-	# 	col.remove(i)
+		dt = time.perf_counter()
+		# col = tb.column_obj("x")
+		# for i in range(10,20,2):
+		# 	col.remove(i)
 
-	tb.find_1st(20, column="x").set(1000)
-	tt = time.perf_counter()
-	print(f"remove time: {tt-dt}s")
+		tb.find_1st(20, column="x").set(1000)
+		tt = time.perf_counter()
+		print(f"remove time: {tt-dt}s")
 
-	print(tb)
-	#print("Total cells", tb.height * len(tb.column_names))
+		print(tb)
+		#print("Total cells", tb.height * len(tb.column_names))
 
-	et = time.perf_counter()
-	print(f"Load and dump test in {et - st}s\n")
+		et = time.perf_counter()
+		print(f"Load and dump test in {et - st}s\n")
 
-	print("="*50)
+		print("="*50)
 
-	print("\n Assign random string in first 1,000 rows test")
-	print("="*50)
-	st = time.perf_counter()
+		print("\n Assign random string in first 1,000 rows test")
+		print("="*50)
+		st = time.perf_counter()
 
-	for row_ in tb.rows_obj(0, 100):
-		row_.update({"m": Lower_string(100)}, AD=False)
+		for row_ in tb.rows_obj(0, 100):
+			row_.update({"m": Lower_string(100)}, AD=False)
 
-	et = time.perf_counter()
+		et = time.perf_counter()
 
-	print(f"Assigned test in {et - st}s")
-	# print(tb)
-	dt = time.perf_counter()
-	tb.dump()
-	tt = time.perf_counter()
-	print(f"dump time: {tt-dt}s")
+		print(f"Assigned test in {et - st}s")
+		# print(tb)
+		dt = time.perf_counter()
+		tb.dump()
+		tt = time.perf_counter()
+		print(f"dump time: {tt-dt}s")
 
-	print("="*50)
+		print("="*50)
 
-	print("\n\n Search test")
-	st = time.perf_counter()
+		print("\n\n Search test")
+		st = time.perf_counter()
 
-	for cell in tb.search_iter(kw="abc", column="m"):
-		print(cell)
+		for cell in tb.search_iter(kw="abc", column="m"):
+			print(cell)
 
-	et = time.perf_counter()
+		et = time.perf_counter()
 
-	print(f"Search 'abc' test in {et - st}s")
+		print(f"Search 'abc' test in {et - st}s")
 
-	for row in tb:
-		print(row)
+		for row in tb:
+			print(row)
 
-	tb.to_csv("test.csv")
+		tb.to_csv("test.csv")
 
-if __name__ == "__main__":
+# if __name__ == "__main__":
 	for i in range(1):
 		try:
 			os.remove("__test.pdb")
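A short usage sketch of the two PickleTable API changes above, clear() and the new write_header flag on to_csv() (file names are illustrative; PickleTable is the class defined in this module):

from pyroDB import PickleTable

tb = PickleTable("demo.pdb")
tb.add_column("x", exist_ok=1, AD=False)
tb.add_column("y", exist_ok=1, AD=False)

for n in range(5):
	tb.add_row({"x": n, "y": n * n})

tb.to_csv("with_header.csv")                    # header row written (default)
tb.to_csv("no_header.csv", write_header=False)  # data rows only

tb.clear()        # drop every row, keep the columns
print(tb.height)  # 0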
diff --git a/dev_src/script_page_handler.js b/dev_src/script_page_handler.js
index da0d75e..1dbf5ad 100644
--- a/dev_src/script_page_handler.js
+++ b/dev_src/script_page_handler.js
@@ -90,6 +90,8 @@ class Page{
 			this.handler = video_page;
 		} else if (type == "admin") {
 			this.handler = admin_page;
+		} else if (type == "zip") {
+			this.handler = zip_page;
 		}
 
 		if (this.handler){
@@ -153,7 +155,8 @@ class Page{
 		for (let i = 1; i < dirs.length - 1; i++) {
 			const dir = dirs[i];
-			urls.push(urls[i - 1] + encodeURIComponent(dir).replace(/'/g, "%27").replace(/"/g, "%22") + (dir.endsWith('/') ? '' : '/'));
+			// urls.push(urls[i - 1] + encodeURIComponent(dir).replace(/'/g, "%27").replace(/"/g, "%22") + (dir.endsWith('/') ? '' : '/'));
+			urls.push(urls[i - 1] + dir + '/');
 			names.push(decodeURIComponent(dir));
 		}
diff --git a/dev_src/script_video_player.js b/dev_src/script_video_player.js
index ff39693..e3cb435 100644
--- a/dev_src/script_video_player.js
+++ b/dev_src/script_video_player.js
@@ -58,7 +58,7 @@ class Video_Page {
 
 		this.player_title.innerText = title
-		this.player_warning.innerText = warning
+		this.player_warning.innerHTML = warning
 		this.video_dl_url.href = video
 
 		page.set_title(title)
@@ -98,7 +98,7 @@ class Video_Page {
 		this.player_source.src = ""
 		this.player_source.type = ""
 		this.player_title.innerText = ""
-		this.player_warning.innerText = ""
+		this.player_warning.innerHTML = ""
 		this.video_dl_url.href = ""
 	}
diff --git a/dev_src/script_zip_page.js b/dev_src/script_zip_page.js
new file mode 100644
index 0000000..4c56086
--- /dev/null
+++ b/dev_src/script_zip_page.js
@@ -0,0 +1,127 @@
+
+
+// const id = "${PY_ZIP_ID}";
+// const filename = "${PY_ZIP_NAME}";
+
+
+
+class Zip_Page {
+	constructor() {
+		this.type = "zip"
+
+		this.my_part = document.getElementById("zip-page")
+
+		this.message = document.getElementById("zip-prog")
+		this.percentage = document.getElementById("zip-perc")
+	}
+
+	async initialize() {
+		page.hide_actions_button(); // Hide actions button, not needed here
+
+		this.dl_now = false
+		this.check_prog = true
+
+		this.prog_timer = null
+
+		var url = tools.add_query_here("zip_id")
+
+		var data = await fetch(url)
+			.then(data => {return data.json()})
+			.catch(err => {console.error(err)})
+
+		// {
+		// 	"status": status,
+		// 	"message": message,
+		// 	"zid": zid,
+		// 	"filename": filename
+		// }
+
+		var status = data.status
+		var message = data.message
+		this.zid = data.zid
+		this.filename = data.filename
+
+		const that = this
+
+		if (status) {
+			this.prog_timer = setInterval(function() {
+				that.ping(window.location.pathname + "?zip&zid=" + that.zid + "&progress")}, 500)
+		} else {
+			this.message.innerHTML = "Error";
+			this.percentage.innerText = message;
+		}
+
+
+	}
+
+	hide() {
+		this.my_part.classList.remove("active");
+	}
+
+	show() {
+		this.my_part.classList.add("active");
+	}
+
+	clear() {
+		this.message.innerHTML = ""
+		this.percentage.innerText = ""
+		this.dl_now = false
+		this.check_prog = true
+		this.zid = null
+		this.filename = null
+		if(this.prog_timer){
+			clearTimeout(this.prog_timer)
+			this.prog_timer = null
+		}
+	}
+
+
+	ping(url) {
+		const that = this
+		var xhttp = new XMLHttpRequest();
+		xhttp.onreadystatechange = function() {
+			if (that.dl_now) {
+				return
+			}
+			if (this.readyState == 4 && this.status == 200) {
+				// Typical action to be performed when the document is ready:
+				//document.getElementById("demo").innerHTML = xhttp.responseText;
+				// json of the response
+				var resp = safeJSONParse(this.response, ["status", "message"], 5000);
+				// console.log(resp)
+
+				if (resp.status=="SUCCESS"){
+					that.check_prog = true;
+				} else if (resp.status=="DONE"){
+					that.message.innerHTML = "Downloading";
+					that.percentage.innerText = "";
+					that.dl_now = true;
+					clearTimeout(that.prog_timer)
+					that.run_dl()
+				} else if (resp.status=="ERROR"){
+					that.message.innerHTML = "Error";
+					that.percentage.innerText = resp.message;
+					clearTimeout(that.prog_timer)
+				} else if (resp.status=="PROGRESS"){
+					that.percentage.innerText = resp.message + "%";
+				} else {
+					that.percentage.innerText = resp.status + ": " + resp.message;
+					if(that.prog_timer){
+						clearTimeout(that.prog_timer)
+						that.prog_timer = null
+					}
+				}
+			}
+		};
+		xhttp.open("GET", url, true);
+		xhttp.send();
+	}
+
+
+	run_dl() {
+		tools.download(window.location.pathname + "?zip&zid=" + this.zid + "&download", this.filename, true)
+	}
+
+}
+
+var zip_page = new Zip_Page();
\ No newline at end of file
diff --git a/dev_src/te-st/videos/Candle fake.mkv b/dev_src/te-st/videos/Candle fake.mkv
new file mode 100644
index 0000000..fd6b4b4
Binary files /dev/null and b/dev_src/te-st/videos/Candle fake.mkv differ
diff --git a/src/pyroDB.py b/src/pyroDB.py
index f12446e..0425ca4 100644
--- a/src/pyroDB.py
+++ b/src/pyroDB.py
@@ -428,17 +428,17 @@ def deldb(self):
 
 
 # DUMMY CLASS FOR TYPING
-# class PickleTable:
-# 	pass
+class PickleTable(dict):
+	pass
 
-# class _PickleTCell:
-# 	pass
+class _PickleTCell:
+	pass
 
-# class _PickleTRow:
-# 	pass
+class _PickleTRow(dict):
+	pass
 
-# class _PickleTColumn:
-# 	pass
+class _PickleTColumn(list):
+	pass
 
 
@@ -550,7 +550,7 @@ def column(self, name) -> list:
 		'''Return a copy list of all values in column'''
 		self.rescan()
 		return self._pk.db[name].copy()
-
+
 	def get_column(self, name) -> list:
 		'''Return the list pointer to the column (unsafe)'''
 		return self._pk.db[name]
@@ -737,7 +737,7 @@ def check(item, is_in):
 
 				yield ret(col=col, row=r)
 
-	def find_1st(self, kw, column=None , row=None, full_match=False, return_obj=True) -> Union["_PickleTCell", None]:
+	def find_1st(self, kw, column=None , row=None, full_match=False, return_obj=True) -> Union[_PickleTCell, None]:
 		"""
 		search a keyword in a cell/row/column/entire sheet
 		and return the 1st matched cell object
 		"""
@@ -837,9 +837,26 @@ def del_row_id(self, row_id:int, AD=True):
 		"""
 		self.del_row(self.ids.index(row_id), AD=AD)
 
+	def clear(self, AD=True):
+		"""
+		Delete all rows
+
+		AD: auto dump
+		"""
+		self.rescan()
+
+		for c in self.column_names:
+			self._pk.db[c].clear()
+
+		self.ids.clear()
+		self.height = 0
+
+		if AD:
+			self.auto_dump()
 
-	def _add_row(self, row:Union[dict, "_PickleTRow"], position:int="last") -> "_PickleTRow":
+
+	def _add_row(self, row:Union[dict, _PickleTRow], position:int="last") -> _PickleTRow:
 		"""
 		# row: row must be a dict or _PickleTRow containing column names and values
 		"""
@@ -871,7 +888,7 @@ def _add_row(self, row:Union[dict, "_PickleTRow"], position:int="last") -> "_Pic
 
 		return self.row_obj_by_id(row_id)
 
-	def add_row(self, row:Union[dict, "_PickleTRow"], position="last", AD=True) -> "_PickleTRow":
+	def add_row(self, row:Union[dict, _PickleTRow], position="last", AD=True) -> _PickleTRow:
 		"""
 		@ locked
 		* row: row must be a dict|TableRow containing column names and values
@@ -885,7 +902,7 @@ def add_row(self, row:Union[dict, "_PickleTRow"], position="last", AD=True) -> "
 
 		return row_obj
 
-	def insert_row(self, row:Union[dict, "_PickleTRow"], position:int, AD=True) -> "_PickleTRow":
+	def insert_row(self, row:Union[dict, _PickleTRow], position:int, AD=True) -> _PickleTRow:
 		"""
 		@ locked
 		* row: row must be a dict|_PickleTRow containing column names and values
@@ -896,7 +913,7 @@ def insert_row(self, row:Union[dict, "_PickleTRow"], position:int, AD=True) -> "
 
 
-	def add_row_as_list(self, row:list, position:int="last", AD=True) -> "_PickleTRow":
+	def add_row_as_list(self, row:list, position:int="last", AD=True) -> _PickleTRow:
 		"""
 		@ locked
 		* row: row must be a list containing values
@@ -998,7 +1015,7 @@ def set(self, value, AD=True):
 
 	def row(self):
 		self.source_check()
-		return self.source.row_by_id(self.id)
+		return self.source.ids.index(self.id)
 
 	def row_obj(self):
 		"""
@@ -1237,95 +1254,95 @@ def del_column(self):
+if __name__ == "__main__":
 
-
-import string
-def Lower_string(length): # define the function and pass the length as argument
-	# Print the string in Lowercase
-	result = ''.join((random.choice(string.ascii_lowercase) for x in range(length))) # run loop until the define length
-	return result
+	import string
+	def Lower_string(length): # define the function and pass the length as argument
+		# Print the string in Lowercase
+		result = ''.join((random.choice(string.ascii_lowercase) for x in range(length))) # run loop until the define length
+		return result
 
-def test():
-	st = time.perf_counter()
-	tb = PickleTable("__test.pdb")
-	tt = time.perf_counter()
-	print(f"load time: {tt-st}s")
+	def test():
+		st = time.perf_counter()
+		tb = PickleTable("__test.pdb")
+		tt = time.perf_counter()
+		print(f"load time: {tt-st}s")
 
-	print("Existing table height:", tb.height)
+		print("Existing table height:", tb.height)
 
-	tb.add_column("x", exist_ok=1, AD=False) # no dumps
-	tb.add_column("Ysz", exist_ok=1, AD=False ) # no dumps
-	tb.add_column("Y", exist_ok=1, AD=False) # no dumps
+		tb.add_column("x", exist_ok=1, AD=False) # no dumps
+		tb.add_column("Ysz", exist_ok=1, AD=False ) # no dumps
+		tb.add_column("Y", exist_ok=1, AD=False) # no dumps
 
-	print("adding")
-	for n in range(int(100)):
-		tb._add_row({"x":n, "Y":'🍎'})
+		print("adding")
+		for n in range(int(100)):
+			tb._add_row({"x":n, "Y":'🍎'})
 
-		#print(n)
+			#print(n)
 
-	tb.add_column("m", exist_ok=1, AD=False) # no dumps
+		tb.add_column("m", exist_ok=1, AD=False) # no dumps
 
-	print(tb)
-	dt = time.perf_counter()
-	tb.dump()
-	tt = time.perf_counter()
-	print(f"dump time: {tt-dt}s")
+		print(tb)
+		dt = time.perf_counter()
+		tb.dump()
+		tt = time.perf_counter()
+		print(f"dump time: {tt-dt}s")
 
-	dt = time.perf_counter()
-	# col = tb.column_obj("x")
-	# for i in range(10,20,2):
-	# 	col.remove(i)
+		dt = time.perf_counter()
+		# col = tb.column_obj("x")
+		# for i in range(10,20,2):
+		# 	col.remove(i)
 
-	tb.find_1st(20, column="x").set(1000)
-	tt = time.perf_counter()
-	print(f"remove time: {tt-dt}s")
+		tb.find_1st(20, column="x").set(1000)
+		tt = time.perf_counter()
+		print(f"remove time: {tt-dt}s")
 
-	print(tb)
-	#print("Total cells", tb.height * len(tb.column_names))
+		print(tb)
+		#print("Total cells", tb.height * len(tb.column_names))
 
-	et = time.perf_counter()
-	print(f"Load and dump test in {et - st}s\n")
+		et = time.perf_counter()
+		print(f"Load and dump test in {et - st}s\n")
 
-	print("="*50)
+		print("="*50)
 
-	print("\n Assign random string in first 1,000 rows test")
-	print("="*50)
-	st = time.perf_counter()
+		print("\n Assign random string in first 1,000 rows test")
+		print("="*50)
+		st = time.perf_counter()
 
-	for row_ in tb.rows_obj(0, 100):
-		row_.update({"m": Lower_string(100)}, AD=False)
+		for row_ in tb.rows_obj(0, 100):
+			row_.update({"m": Lower_string(100)}, AD=False)
 
-	et = time.perf_counter()
+		et = time.perf_counter()
 
-	print(f"Assigned test in {et - st}s")
-	# print(tb)
-	dt = time.perf_counter()
-	tb.dump()
-	tt = time.perf_counter()
-	print(f"dump time: {tt-dt}s")
+		print(f"Assigned test in {et - st}s")
+		# print(tb)
+		dt = time.perf_counter()
+		tb.dump()
+		tt = time.perf_counter()
+		print(f"dump time: {tt-dt}s")
 
-	print("="*50)
+		print("="*50)
 
-	print("\n\n Search test")
-	st = time.perf_counter()
+		print("\n\n Search test")
+		st = time.perf_counter()
 
-	for cell in tb.search_iter(kw="abc", column="m"):
-		print(cell)
+		for cell in tb.search_iter(kw="abc", column="m"):
+			print(cell)
 
-	et = time.perf_counter()
+		et = time.perf_counter()
 
-	print(f"Search 'abc' test in {et - st}s")
+		print(f"Search 'abc' test in {et - st}s")
 
-	for row in tb:
-		print(row)
+		for row in tb:
+			print(row)
 
-	tb.to_csv("test.csv")
+		tb.to_csv("test.csv")
 
-if __name__ == "__main__":
+# if __name__ == "__main__":
 	for i in range(1):
 		try:
 			os.remove("__test.pdb")
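One behavioral change worth noting in both copies of pyroDB: _PickleTCell.row() now returns the cell's current positional index via ids.index(self.id) instead of calling row_by_id(), so the result tracks deletions of earlier rows. A toy illustration of the id-vs-position distinction (plain lists, not the module's API):

# Row ids are stable; positions shift as rows are deleted.
ids = [10, 11, 12, 13]       # per-row ids, in table order
target_id = 12

print(ids.index(target_id))  # 2 -> current position of that row

del ids[0]                   # delete a row above it
print(ids.index(target_id))  # 1 -> position shifted, id unchanged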