comparison sat_frontends/jp/common.py @ 4037:524856bd7b19

massive refactoring to switch from camelCase to snake_case: historically, Libervia (formerly SàT) used camelCase, as PEP 8 allows for code predating the style guide, in order to match Twisted's coding style. However, snake_case is more readable and it is better to follow PEP 8 best practices, so the decision was made to move to full snake_case. Because Libervia has a huge codebase, the transition had left an ugly mix of camelCase and snake_case. To fix that, this patch renames every function and method (including the bridge) that does not come from Twisted or Wokkel to pure snake_case. This is a massive change and may introduce some bugs.
author Goffi <goffi@goffi.org>
date Sat, 08 Apr 2023 13:54:42 +0200
parents 742e466fa000
children 4b842c1fb686
comparison: 4036:c4464d7ae97b (old, lines marked "-") → 4037:524856bd7b19 (new, lines marked "+")
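The rename is purely mechanical: every project-defined camelCase identifier (including bridge methods) becomes snake_case, while names inherited from Twisted or Wokkel keep their casing. As a rough illustration of the convention applied throughout the diff below, and not the tooling actually used for this patch, such a conversion can be sketched with a small regex:

```python
import re

# Hypothetical helper, only to illustrate the camelCase -> snake_case convention
# used in this patch; it is not part of the Libervia code base.
_CAMEL_RE = re.compile(r"(?<=[a-z0-9])([A-Z])")

def camel_to_snake(name: str) -> str:
    """Convert a camelCase identifier to snake_case."""
    return _CAMEL_RE.sub(lambda m: "_" + m.group(1), name).lower()

for old in ("ansiRemove", "getTmpDir", "secureUnlink", "readDictValues"):
    print(f"{old} -> {camel_to_snake(old)}")
# ansiRemove -> ansi_remove
# getTmpDir -> get_tmp_dir
# secureUnlink -> secure_unlink
# readDictValues -> read_dict_values
```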
   63      return time.strftime(fmt, time.localtime(float(timestamp)))
   64
   65
   66  def ansi_ljust(s, width):
   67      """ljust method handling ANSI escape codes"""
-  68      cleaned = regex.ansiRemove(s)
+  68      cleaned = regex.ansi_remove(s)
   69      return s + " " * (width - len(cleaned))
   70
   71
   72  def ansi_center(s, width):
   73      """center method handling ANSI escape codes"""
-  74      cleaned = regex.ansiRemove(s)
+  74      cleaned = regex.ansi_remove(s)
   75      diff = width - len(cleaned)
   76      half = diff // 2
   77      return half * " " + s + (half + diff % 2) * " "
   78
   79
   80  def ansi_rjust(s, width):
   81      """rjust method handling ANSI escape codes"""
-  82      cleaned = regex.ansiRemove(s)
+  82      cleaned = regex.ansi_remove(s)
   83      return " " * (width - len(cleaned)) + s
   84
   85
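These three helpers exist because len() counts ANSI escape sequences as characters, so str.ljust() and friends would mis-pad colored strings. A self-contained sketch of the idea, with a simple SGR regex standing in for regex.ansi_remove() (whose real implementation may differ):

```python
import re

# Stand-in pattern for regex.ansi_remove(); matches SGR color/style sequences only.
ANSI_SGR = re.compile(r"\x1b\[[0-9;]*m")

def visible_len(s: str) -> int:
    """Length of the string as displayed, ignoring ANSI escape codes."""
    return len(ANSI_SGR.sub("", s))

colored = "\x1b[31merror\x1b[0m"   # displays as 5 characters
print(len(colored))                # 14: the raw length includes the escape codes
print(visible_len(colored))        # 5
# ANSI-aware left-justify, as ansi_ljust() does above:
print(colored + " " * (10 - visible_len(colored)) + "|")
```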
-  86  def getTmpDir(sat_conf, cat_dir, sub_dir=None):
+  86  def get_tmp_dir(sat_conf, cat_dir, sub_dir=None):
   87      """Return directory used to store temporary files
   88
   89      @param sat_conf(ConfigParser.ConfigParser): instance opened on sat configuration
   90      @param cat_dir(str): directory of the category (e.g. "blog")
   91      @param sub_dir(str): sub directory where data needs to be put
   92          profile can be used here, or a special directory name
-  93          sub_dir will be escaped to be usable in path (use regex.pathUnescape to find
+  93          sub_dir will be escaped to be usable in path (use regex.path_unescape to find
   94          initial str)
   95      @return (Path): path to the dir
   96      """
-  97      local_dir = config.getConfig(sat_conf, "", "local_dir", Exception)
+  97      local_dir = config.config_get(sat_conf, "", "local_dir", Exception)
   98      path_elts = [local_dir, cat_dir]
   99      if sub_dir is not None:
- 100          path_elts.append(regex.pathEscape(sub_dir))
+ 100          path_elts.append(regex.path_escape(sub_dir))
  101      return Path(*path_elts)
  102
  103
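get_tmp_dir() simply joins the configured local_dir, a category and an escaped sub-directory. A usage sketch, assuming local_dir comes from sat.conf and using percent-encoding as a stand-in for regex.path_escape() (the real helper may escape differently):

```python
from pathlib import Path
from urllib.parse import quote

def path_escape(sub_dir: str) -> str:
    # Stand-in for regex.path_escape(): make an arbitrary string (e.g. a profile
    # name) safe to use as a single path segment.
    return quote(sub_dir, safe="")

local_dir = "/tmp/libervia"  # would normally be read from sat.conf "local_dir"
print(Path(local_dir, "blog", path_escape("louise@example.org")))
# /tmp/libervia/blog/louise%40example.org
```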
  104  def parse_args(host, cmd_line, **format_kw):
  105      """Parse command arguments
139 """ 139 """
140 self.host = host 140 self.host = host
141 self.cat_dir = cat_dir 141 self.cat_dir = cat_dir
142 self.use_metadata = use_metadata 142 self.use_metadata = use_metadata
143 143
144 def secureUnlink(self, path): 144 def secure_unlink(self, path):
145 """Unlink given path after keeping it for a while 145 """Unlink given path after keeping it for a while
146 146
147 This method is used to prevent accidental deletion of a draft 147 This method is used to prevent accidental deletion of a draft
148 If there are more file in SECURE_UNLINK_DIR than SECURE_UNLINK_MAX, 148 If there are more file in SECURE_UNLINK_DIR than SECURE_UNLINK_MAX,
149 older file are deleted 149 older file are deleted
150 @param path(Path, str): file to unlink 150 @param path(Path, str): file to unlink
151 """ 151 """
152 path = Path(path).resolve() 152 path = Path(path).resolve()
153 if not path.is_file: 153 if not path.is_file:
154 raise OSError("path must link to a regular file") 154 raise OSError("path must link to a regular file")
155 if path.parent != getTmpDir(self.sat_conf, self.cat_dir): 155 if path.parent != get_tmp_dir(self.sat_conf, self.cat_dir):
156 self.disp( 156 self.disp(
157 f"File {path} is not in SàT temporary hierarchy, we do not remove " f"it", 157 f"File {path} is not in SàT temporary hierarchy, we do not remove " f"it",
158 2, 158 2,
159 ) 159 )
160 return 160 return
161 # we have 2 files per draft with use_metadata, so we double max 161 # we have 2 files per draft with use_metadata, so we double max
162 unlink_max = SECURE_UNLINK_MAX * 2 if self.use_metadata else SECURE_UNLINK_MAX 162 unlink_max = SECURE_UNLINK_MAX * 2 if self.use_metadata else SECURE_UNLINK_MAX
163 backup_dir = getTmpDir(self.sat_conf, self.cat_dir, SECURE_UNLINK_DIR) 163 backup_dir = get_tmp_dir(self.sat_conf, self.cat_dir, SECURE_UNLINK_DIR)
164 if not os.path.exists(backup_dir): 164 if not os.path.exists(backup_dir):
165 os.makedirs(backup_dir) 165 os.makedirs(backup_dir)
166 filename = os.path.basename(path) 166 filename = os.path.basename(path)
167 backup_path = os.path.join(backup_dir, filename) 167 backup_path = os.path.join(backup_dir, filename)
168 # we move file to backup dir 168 # we move file to backup dir
177 backup_files.sort(key=lambda path: os.stat(path).st_mtime) 177 backup_files.sort(key=lambda path: os.stat(path).st_mtime)
178 for path in backup_files[: len(backup_files) - unlink_max]: 178 for path in backup_files[: len(backup_files) - unlink_max]:
179 self.host.disp("Purging backup file {}".format(path), 2) 179 self.host.disp("Purging backup file {}".format(path), 2)
180 os.unlink(path) 180 os.unlink(path)
181 181
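secure_unlink() never deletes a draft outright: the file is moved into a backup directory and only the oldest backups beyond a fixed cap are purged. A self-contained sketch of that move-then-purge pattern (hypothetical cap and naming, not the Libervia implementation):

```python
import shutil
import time
from pathlib import Path

def secure_unlink(path: Path, backup_dir: Path, keep: int = 20) -> None:
    """Move ``path`` into ``backup_dir`` instead of deleting it, then purge the
    oldest backups so that at most ``keep`` files remain."""
    backup_dir.mkdir(parents=True, exist_ok=True)
    # prefix with a timestamp so successive drafts with the same name don't collide
    shutil.move(str(path), str(backup_dir / f"{int(time.time())}_{path.name}"))
    backups = sorted(backup_dir.iterdir(), key=lambda p: p.stat().st_mtime)
    for old in backups[: max(0, len(backups) - keep)]:
        old.unlink()
```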
- 182      async def runEditor(
+ 182      async def run_editor(
  183          self,
  184          editor_args_opt,
  185          content_file_path,
  186          content_file_obj,
  187          meta_file_path=None,
  [...]
  208          content_file_obj.seek(0)
  209          tmp_ori_hash = hashlib.sha1(content_file_obj.read()).digest()
  210          content_file_obj.close()
  211
  212          # we prepare arguments
- 213          editor = config.getConfig(self.sat_conf, C.CONFIG_SECTION, "editor") or os.getenv(
+ 213          editor = config.config_get(self.sat_conf, C.CONFIG_SECTION, "editor") or os.getenv(
  214              "EDITOR", "vi"
  215          )
  216          try:
  217              # are there custom arguments in sat.conf?
- 218              editor_args = config.getConfig(
+ 218              editor_args = config.config_get(
  219                  self.sat_conf, C.CONFIG_SECTION, editor_args_opt, Exception
  220              )
  221          except (NoOptionError, NoSectionError):
  222              # no, we check if we know the editor and have special arguments
  223              if self.use_metadata:
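Two things are worth noting in this hunk: the draft is hashed before the editor is launched (tmp_ori_hash) so that an unmodified file can be detected afterwards, and the editor is resolved from sat.conf with $EDITOR/vi as fallback. A minimal standalone sketch of that pattern, leaving the configuration lookup aside:

```python
import hashlib
import os
import subprocess

def edit_and_check(path: str) -> bool:
    """Open ``path`` in the user's editor and report whether it was modified."""
    with open(path, "rb") as f:
        before = hashlib.sha1(f.read()).digest()
    editor = os.getenv("EDITOR", "vi")          # sat.conf "editor" would take precedence
    subprocess.run([editor, path], check=True)  # blocks until the editor exits
    with open(path, "rb") as f:
        after = hashlib.sha1(f.read()).digest()
    return before != after
```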
  289              )
  290              self.host.quit()
  291
  292          if len(content) == 0:
  293              self.disp("Content is empty, cancelling the edition")
- 294              if content_file_path.parent != getTmpDir(self.sat_conf, self.cat_dir):
+ 294              if content_file_path.parent != get_tmp_dir(self.sat_conf, self.cat_dir):
  295                  self.disp(
  296                      "Files are not in SàT temporary hierarchy, we do not remove them",
  297                      2,
  298                  )
  299              self.host.quit()
  [...]
  333                  f"kept at {content_file_path}: {e}",
  334                  error=True,
  335              )
  336              self.host.quit(1)
  337
- 338          self.secureUnlink(content_file_path)
+ 338          self.secure_unlink(content_file_path)
  339          if self.use_metadata:
- 340              self.secureUnlink(meta_file_path)
+ 340              self.secure_unlink(meta_file_path)
  341
  342      async def publish(self, content):
  343          # if metadata is needed, publish will be called with it as last argument
  344          raise NotImplementedError
  345
- 346      def getTmpFile(self):
+ 346      def get_tmp_file(self):
  347          """Create a temporary file
  348
  349          @return (tuple(file, Path)): opened (w+b) file object and file path
  350          """
- 351          suff = "." + self.getTmpSuff()
+ 351          suff = "." + self.get_tmp_suff()
  352          cat_dir_str = self.cat_dir
- 353          tmp_dir = getTmpDir(self.sat_conf, self.cat_dir, self.profile)
+ 353          tmp_dir = get_tmp_dir(self.sat_conf, self.cat_dir, self.profile)
  354          if not tmp_dir.exists():
  355              try:
  356                  tmp_dir.mkdir(parents=True)
  357              except OSError as e:
  358                  self.disp(
  [...]
  370              return os.fdopen(fd, "w+b"), Path(path)
  371          except OSError as e:
  372              self.disp(f"Can't create temporary file: {e}", error=True)
  373              self.host.quit(1)
  374
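The middle of get_tmp_file() is elided by this hunk; as a generic illustration of creating such a draft file with the standard library (not a reconstruction of the missing lines), tempfile.mkstemp() gives exactly the (fd, path) pair that the visible os.fdopen(fd, "w+b") call expects:

```python
import os
import tempfile
from pathlib import Path

def make_draft_file(tmp_dir: Path, prefix: str = "blog_", suffix: str = ".xml"):
    """Create a draft file in ``tmp_dir`` and return (opened file object, path)."""
    tmp_dir.mkdir(parents=True, exist_ok=True)
    fd, path = tempfile.mkstemp(suffix=suffix, prefix=prefix, dir=str(tmp_dir))
    return os.fdopen(fd, "w+b"), Path(path)
```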
- 375      def getCurrentFile(self, profile):
+ 375      def get_current_file(self, profile):
  376          """Get most recently edited file
  377
  378          @param profile(unicode): profile linked to the draft
  379          @return(Path): full path of current file
  380          """
  381          # we guess the item currently edited by choosing
  382          # the most recent file corresponding to the temp file pattern
  383          # in tmp_dir, excluding metadata files
- 384          tmp_dir = getTmpDir(self.sat_conf, self.cat_dir, profile)
+ 384          tmp_dir = get_tmp_dir(self.sat_conf, self.cat_dir, profile)
  385          available = [
  386              p
  387              for p in tmp_dir.glob(f"{self.cat_dir}_*")
  388              if not p.match(f"*{METADATA_SUFF}")
  389          ]
  [...]
  393                  error=True,
  394              )
  395              self.host.quit(1)
  396          return max(available, key=lambda p: p.stat().st_mtime)
  397
- 398      async def getItemData(self, service, node, item):
+ 398      async def get_item_data(self, service, node, item):
  399          """return formatted content, metadata (or not if use_metadata is False), and item id"""
  400          raise NotImplementedError
  401
- 402      def getTmpSuff(self):
+ 402      def get_tmp_suff(self):
  403          """return suffix used for content file"""
  404          return "xml"
  405
- 406      async def getItemPath(self):
+ 406      async def get_item_path(self):
  407          """Retrieve item path (i.e. service and node) from item argument
  408
  409          This method is obviously only useful when editing PubSub-based features
  410          """
  411          service = self.args.service
  [...]
  413          item = self.args.item
  414          last_item = self.args.last_item
  415
  416          if self.args.current:
  417              # user wants to continue current draft
- 418              content_file_path = self.getCurrentFile(self.profile)
+ 418              content_file_path = self.get_current_file(self.profile)
  419              self.disp("Continuing edition of current draft", 2)
  420              content_file_obj = content_file_path.open("r+b")
  421              # we seek to the end of the file in case an item already exists;
  422              # this will write the content of the existing item at the end of the draft.
  423              # This way no data should be lost.
  [...]
  428              content_file_obj = content_file_path.open("r+b")
  429              # we seek to the end for the same reason as above
  430              content_file_obj.seek(0, os.SEEK_END)
  431          else:
  432              # we need a temporary file
- 433              content_file_obj, content_file_path = self.getTmpFile()
+ 433              content_file_obj, content_file_path = self.get_tmp_file()
  434
  435          if item or last_item:
  436              self.disp("Editing requested published item", 2)
  437              try:
  438                  if self.use_metadata:
- 439                      content, metadata, item = await self.getItemData(service, node, item)
+ 439                      content, metadata, item = await self.get_item_data(service, node, item)
  440                  else:
- 441                      content, item = await self.getItemData(service, node, item)
+ 441                      content, item = await self.get_item_data(service, node, item)
  442              except Exception as e:
  443                  # FIXME: ugly, but we have no good way to check errors in bridge
  444                  if "item-not-found" in str(e):
  445                      # item doesn't exist, we create a new one with the requested id
  446                      metadata = None
  [...]
  527                          col_value = filter_(value, row_cls(*row_data_list))
  528                      except TypeError:
  529                          col_value = filter_(value)
  530                      # we count the size without ANSI codes as they would change the
  531                      # length of the string when it's mostly style/color changes.
- 532                      col_size = len(regex.ansiRemove(col_value))
+ 532                      col_size = len(regex.ansi_remove(col_value))
  533                  else:
  534                      col_value = str(value)
  535                      col_size = len(col_value)
  536                  new_row.append(col_value)
  537                  if size is None:
  [...]
  556          if self._buffer is None:
  557              raise exceptions.InternalError("buffer must be used to get a string")
  558          return "\n".join(self._buffer)
  559
  560      @staticmethod
- 561      def readDictValues(data, keys, defaults=None):
+ 561      def read_dict_values(data, keys, defaults=None):
  562          if defaults is None:
  563              defaults = {}
  564          for key in keys:
  565              try:
  566                  yield data[key]
  [...]
  570                      yield default
  571                  else:
  572                      raise e
  573
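The KeyError branch of read_dict_values() is elided above; the visible yield default / raise e lines suggest a defaults fallback along these lines (a self-contained sketch of that behaviour, not the elided code itself):

```python
def read_dict_values(data, keys, defaults=None):
    # Sketch: yield the value for each key, falling back to ``defaults`` and
    # re-raising KeyError only when no default is available.
    if defaults is None:
        defaults = {}
    for key in keys:
        try:
            yield data[key]
        except KeyError as e:
            default = defaults.get(key)
            if default is not None:
                yield default
            else:
                raise e

row = {"jid": "louise@example.org"}
print(list(read_dict_values(row, ("jid", "status"), defaults={"status": "unknown"})))
# ['louise@example.org', 'unknown']
```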
  574      @classmethod
- 575      def fromListDict(
+ 575      def from_list_dict(
  576          cls, host, data, keys=None, headers=None, filters=None, defaults=None
  577      ):
  578          """Create a table from a list of dictionaries
  579
  580          each dictionary is a row of the table, keys being column names.
  [...]
  598              headers = keys
  599          if filters is None:
  600              filters = {}
  601          filters = [filters.get(k) for k in keys]
  602          return cls(
- 603              host, (cls.readDictValues(d, keys, defaults) for d in data), headers, filters
+ 603              host, (cls.read_dict_values(d, keys, defaults) for d in data), headers, filters
  604          )
  605
  606      def _headers(self, head_sep, headers, sizes, alignment="left", style=None):
  607          """Render headers
  608
  [...]
  677              None to use self.host.disp
  678          """
  679          if not self.sizes:
  680              # the table is empty
  681              return
- 682          col_sep_size = len(regex.ansiRemove(col_sep))
+ 682          col_sep_size = len(regex.ansi_remove(col_sep))
  683
  684          # if we have columns to hide, we remove them from headers and sizes
  685          if not hide_cols:
  686              headers = self.headers
  687              sizes = self.sizes
  [...]
  782          return
  783
  784      host = command.host
  785
  786      try:
- 787          uris_data = await host.bridge.URIFind(path, [key])
+ 787          uris_data = await host.bridge.uri_find(path, [key])
  788      except Exception as e:
  789          host.disp(f"can't find {key} URI: {e}", error=True)
  790          host.quit(C.EXIT_BRIDGE_ERRBACK)
  791
  792      try:
  [...]
  822          if values is None:
  823              values = []
  824          values.extend(json.loads(new_values_json))
  825          setattr(args, dest, values)
  826
- 827      parsed_uri = xmpp_uri.parseXMPPUri(uri)
+ 827      parsed_uri = xmpp_uri.parse_xmpp_uri(uri)
  828      try:
  829          args.service = parsed_uri["path"]
  830          args.node = parsed_uri["node"]
  831      except KeyError:
  832          host.disp(_("Invalid URI found: {uri}").format(uri=uri), error=True)
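parse_xmpp_uri() returns a mapping with at least a "path" key (the service JID) and, for pubsub URIs, a "node" key, which is all the code above relies on. A minimal stand-in parser showing that shape (the real xmpp_uri parser handles many more query fields):

```python
def parse_xmpp_uri(uri: str) -> dict:
    """Minimal stand-in for xmpp_uri.parse_xmpp_uri(): extract the JID ("path")
    and the pubsub "node", which is all the code above needs."""
    if not uri.startswith("xmpp:"):
        raise ValueError(f"not an XMPP URI: {uri}")
    path, _, query = uri[len("xmpp:"):].partition("?")
    result = {"path": path}
    for pair in query.split(";"):
        key, sep, value = pair.partition("=")
        if sep and key == "node":
            result["node"] = value
    return result

print(parse_xmpp_uri("xmpp:pubsub.example.org?;node=urn:xmpp:microblog:0"))
# {'path': 'pubsub.example.org', 'node': 'urn:xmpp:microblog:0'}
```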