diff -r 6ac3667706be doc/developers.txt --- a/doc/developers.txt Tue Apr 26 17:22:33 2022 -0400 +++ b/doc/developers.txt Thu Apr 28 16:39:56 2022 +0200 @@ -290,6 +290,22 @@ fine) % fine" /> +Detectors and extensions +^^^^^^^^^^^^^^^^^^^^^^^^ + +The correct ``i18n`` object is automatically injected into the hyperdb. +In a detector you can access the i18n object and perform translations like +this:: + + def statusfail(db, cl, nodeid, newvalues): + _ = db.i18n.gettext + raise ValueError(_("this does not work")) + + def init(db): + # fire before changes are made + db.status.audit('create', statusfail) + + Extracting Translatable Messages ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -314,7 +330,8 @@ roundup-gettext -extracting translatable messages from tracker's html templates. +extracting translatable messages from the tracker's html templates and +detectors / extensions (assuming `polib`_ is installed). This utility creates message template file ``messages.pot`` in ``locale`` subdirectory of the tracker home directory. Translated messages may be put in *locale*.po files (where *locale* is selected @@ -408,6 +425,7 @@ .. _po filetype plugin: https://vim.sourceforge.io/scripts/script.php?script_id=695 .. _PO utilities: https://github.com/pinard/po-utils +.. _polib: https://polib.readthedocs.io .. _poEdit: https://poedit.net/ .. _Roundup Source: .. _Roundup source distribution: diff -r 6ac3667706be roundup/admin.py --- a/roundup/admin.py Tue Apr 26 17:22:33 2022 -0400 +++ b/roundup/admin.py Thu Apr 28 16:39:56 2022 +0200 @@ -30,7 +30,7 @@ import roundup.instance from roundup.configuration import (CoreConfig, NoConfigError, ParsingOptionError, UserConfig) -from roundup.i18n import _ +from roundup.i18n import _, get_translation from roundup.exceptions import UsageError from roundup.anypy.my_input import my_input from roundup.anypy.strings import repr_export @@ -1719,6 +1719,10 @@ # only open the database once! if not self.db: self.db = tracker.open(self.name) + # don't use tracker.config["TRACKER_LANGUAGE"] here as the cli operator + # likely wants i18n as set in the environment; + # this is needed to fetch the locales from the tracker's home dir. + self.db.i18n = get_translation (tracker_home = tracker.tracker_home) self.db.tx_Source = 'cli' diff -r 6ac3667706be roundup/backends/back_anydbm.py --- a/roundup/backends/back_anydbm.py Tue Apr 26 17:22:33 2022 -0400 +++ b/roundup/backends/back_anydbm.py Thu Apr 28 16:39:56 2022 +0200 @@ -90,6 +90,7 @@ disabled. """ FileStorage.__init__(self, config.UMASK) + roundupdb.Database.__init__(self) self.config, self.journaltag = config, journaltag self.dir = config.DATABASE self.classes = {} diff -r 6ac3667706be roundup/backends/rdbms_common.py --- a/roundup/backends/rdbms_common.py Tue Apr 26 17:22:33 2022 -0400 +++ b/roundup/backends/rdbms_common.py Thu Apr 28 16:39:56 2022 +0200 @@ -170,6 +170,7 @@ """ Open the database and load the schema from it.
""" FileStorage.__init__(self, config.UMASK) + roundupdb.Database.__init__(self) self.config, self.journaltag = config, journaltag self.dir = config.DATABASE self.classes = {} diff -r 6ac3667706be roundup/cgi/PageTemplates/TALES.py --- a/roundup/cgi/PageTemplates/TALES.py Tue Apr 26 17:22:33 2022 -0400 +++ b/roundup/cgi/PageTemplates/TALES.py Thu Apr 28 16:39:56 2022 +0200 @@ -257,16 +257,6 @@ def setPosition(self, position): self.position = position - def translate(self, domain, msgid, mapping=None, - context=None, target_language=None, default=None): - if context is None: - context = self.contexts.get('here') - return getGlobalTranslationService().translate( - domain, msgid, mapping=mapping, - context=context, - default=default, - target_language=target_language) - class TALESTracebackSupplement: """Implementation of ITracebackSupplement""" def __init__(self, context, expression): diff -r 6ac3667706be roundup/cgi/client.py --- a/roundup/cgi/client.py Tue Apr 26 17:22:33 2022 -0400 +++ b/roundup/cgi/client.py Thu Apr 28 16:39:56 2022 +0200 @@ -387,7 +387,11 @@ self.instance = instance self.request = request self.env = env - self.setTranslator(translator) + if translator is not None : + self.setTranslator(translator) + else : + self.setTranslator(TranslationService.NullTranslationService()) + self.mailer = Mailer(instance.config) # If True the form contents wins over the database contents when # rendering html properties. This is set when an error occurs so @@ -540,11 +544,16 @@ # Set the charset and language, since other parts of # Roundup may depend upon that. self.determine_charset() - self.determine_language() + if self.instance.config["WEB_TRANSLATE_XMLRPC"] : + self.determine_language() # Open the database as the correct user. try: self.determine_user() self.db.tx_Source = "xmlrpc" + if hasattr (self, "translator") : + self.db.i18n = self.translator + else : + self.setTranslator(self.db.i18n) except LoginError as msg: output = xmlrpc_.client.dumps( xmlrpc_.client.Fault(401, "%s" % msg), @@ -594,12 +603,17 @@ def handle_rest(self): # Set the charset and language self.determine_charset() - self.determine_language() + if self.instance.config["WEB_TRANSLATE_REST"] : + self.determine_language() # Open the database as the correct user. # TODO: add everything to RestfulDispatcher try: self.determine_user() self.db.tx_Source = "rest" + if hasattr (self, "translator") : + self.db.i18n = self.translator + else : + self.setTranslator(self.db.i18n) except LoginError as err: self.response_code = http_.client.UNAUTHORIZED output = s2b("Invalid Login - %s"%str(err)) @@ -622,7 +636,7 @@ # Call csrf with xmlrpc checks enabled. # It will return True if everything is ok, # raises exception on check failure. - csrf_ok = self.handle_csrf(xmlrpc=True) + csrf_ok = self.handle_csrf(xmlrpc=True) except (Unauthorised, UsageError) as msg: # report exception back to server exc_type, exc_value, exc_tb = sys.exc_info() @@ -699,7 +713,6 @@ self._error_message = [] try: self.determine_charset() - self.determine_language() try: # make sure we're identified (even anonymously) @@ -708,6 +721,9 @@ # figure out the context and desired content template self.determine_context() + self.determine_language() + self.db.i18n = self.translator + # if we've made it this far the context is to a bit of # Roundup's real web interface (not a file being served up) # so do the Anonymous Web Acess check now @@ -764,6 +780,9 @@ # exception or a NotModified exception. 
Those # exceptions will be handled by the outermost set of # exception handlers. + self.determine_language() + self.db.i18n = self.translator + self.serve_file(designator) except SendStaticFile as file: self.serve_static_file(str(file)) @@ -985,11 +1004,18 @@ else: language = "" + if not language : + # default to tracker language + language = self.instance.config["TRACKER_LANGUAGE"] + + # this maybe is not correct, as get_translation could not + # find desired locale and switch back to "en" but we set + # self.language to the desired language ! self.language = language - if language: - self.setTranslator(TranslationService.get_translation( - language, - tracker_home=self.instance.config["TRACKER_HOME"])) + + self.setTranslator(TranslationService.get_translation( + language, + tracker_home=self.instance.config["TRACKER_HOME"])) def authenticate_bearer_token(self, challenge): ''' authenticate the bearer token. Refactored from determine_user() diff -r 6ac3667706be roundup/cgi/engine_zopetal.py --- a/roundup/cgi/engine_zopetal.py Tue Apr 26 17:22:33 2022 -0400 +++ b/roundup/cgi/engine_zopetal.py Thu Apr 28 16:39:56 2022 +0200 @@ -8,13 +8,11 @@ import os import os.path -from roundup.cgi.templating import StringIO, context, translationService, TALLoaderBase +from roundup.cgi.templating import StringIO, context, TALLoaderBase from roundup.cgi.PageTemplates import PageTemplate, GlobalTranslationService from roundup.cgi.PageTemplates.Expressions import getEngine from roundup.cgi.TAL import TALInterpreter -GlobalTranslationService.setGlobalTranslationService(translationService) - class Loader(TALLoaderBase): templates = {} diff -r 6ac3667706be roundup/cgi/form_parser.py --- a/roundup/cgi/form_parser.py Tue Apr 26 17:22:33 2022 -0400 +++ b/roundup/cgi/form_parser.py Thu Apr 28 16:39:56 2022 +0200 @@ -1,7 +1,7 @@ import re, mimetypes from roundup import hyperdb, date, password -from roundup.cgi import templating +from roundup.cgi import templating, TranslationService from roundup.cgi.exceptions import FormError @@ -38,7 +38,7 @@ self._ = self.gettext = client.gettext self.ngettext = client.ngettext except AttributeError: - _translator = templating.translationService + _translator = TranslationService.get_translation() self._ = self.gettext = _translator.gettext self.ngettext = _translator.ngettext diff -r 6ac3667706be roundup/cgi/templating.py --- a/roundup/cgi/templating.py Tue Apr 26 17:22:33 2022 -0400 +++ b/roundup/cgi/templating.py Thu Apr 28 16:39:56 2022 +0200 @@ -197,13 +197,7 @@ markdown = _import_markdown2() or _import_markdown() or _import_mistune() # bring in the templating support -from roundup.cgi import TranslationService, ZTUtils - -### i18n services -# this global translation service is not thread-safe. -# it is left here for backward compatibility -# until all Web UI translations are done via client.translator object -translationService = TranslationService.get_translation() +from roundup.cgi import ZTUtils def anti_csrf_nonce(client, lifetime=None): ''' Create a nonce for defending against CSRF attack. 
diff -r 6ac3667706be roundup/cgi/wsgi_handler.py --- a/roundup/cgi/wsgi_handler.py Tue Apr 26 17:22:33 2022 -0400 +++ b/roundup/cgi/wsgi_handler.py Thu Apr 28 16:39:56 2022 +0200 @@ -84,7 +84,7 @@ tracker_home=home) else: self.translator = None - self.preload() + self.tracker = roundup.instance.open(self.home, not self.debug) def __call__(self, environ, start_response): """Initialize with `apache.Request` object""" @@ -116,25 +116,14 @@ else: form = BinaryFieldStorage(fp=environ['wsgi.input'], environ=environ) - with self.get_tracker() as tracker: - client = tracker.Client(tracker, request, environ, form, - self.translator) - try: - client.main() - except roundup.cgi.client.NotFound: - request.start_response([('Content-Type', 'text/html')], 404) - request.wfile.write(s2b('Not found: %s' % - html_escape(client.path))) + client = self.tracker.Client(self.tracker, request, environ, form, + self.translator) + try: + client.main() + except roundup.cgi.client.NotFound: + request.start_response([('Content-Type', 'text/html')], 404) + request.wfile.write(s2b('Not found: %s' % + html_escape(client.path))) # all body data has been written using wfile return [] - - def preload(self): - """ Trigger pre-loading of imports and templates """ - with self.get_tracker(): - pass - - @contextmanager - def get_tracker(self): - # get a new instance for each request - yield roundup.instance.open(self.home, not self.debug) diff -r 6ac3667706be roundup/configuration.py --- a/roundup/configuration.py Tue Apr 26 17:22:33 2022 -0400 +++ b/roundup/configuration.py Thu Apr 28 16:39:56 2022 +0200 @@ -1080,12 +1080,20 @@ after the roundup web url configured in the 'tracker' section. If this variable is set to 'no', the xmlrpc path has no special meaning and will yield an error message."""), + (BooleanOption, 'translate_xmlrpc', 'no', + """Whether to enable i18n for the xmlrpc endpoint. Enable it if +you want translation based on the browser's language (if enabled), the +tracker's language (if set) or the environment."""), (BooleanOption, 'enable_rest', "yes", """Whether to enable the REST API in the roundup web interface. By default the REST endpoint is the string 'rest' plus any additional REST-API parameters after the roundup web url configured in the tracker section. If this variable is set to 'no', the rest path has no special meaning and will yield an error message."""), + (BooleanOption, 'translate_rest', 'no', + """Whether to enable i18n for the rest endpoint. Enable it if +you want translation based on the browser's language (if enabled), the +tracker's language (if set) or the environment."""), (IntegerNumberGeqZeroOption, 'api_calls_per_interval', "0", "Limit API calls per api_interval_in_sec seconds to\n" "this number.\n" diff -r 6ac3667706be roundup/mailgw.py --- a/roundup/mailgw.py Tue Apr 26 17:22:33 2022 -0400 +++ b/roundup/mailgw.py Thu Apr 28 16:39:56 2022 +0200 @@ -1603,6 +1603,12 @@ # get database handle for handling one email self.db = self.instance.open('admin') + language = self.instance.config["MAILGW_LANGUAGE"] or self.instance.config["TRACKER_LANGUAGE"] + self.db.i18n = i18n.get_language (language) + + global _ + _ = self.db.i18n.gettext + self.db.tx_Source = "email" try: diff -r 6ac3667706be roundup/pygettext.py --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/roundup/pygettext.py Thu Apr 28 16:39:56 2022 +0200 @@ -0,0 +1,654 @@ +#!
/usr/bin/env python +# -*- coding: iso-8859-1 -*- +# Originally written by Barry Warsaw +# +# Minimally patched to make it even more xgettext compatible +# by Peter Funk +# +# 2002-11-22 J�rgen Hermann +# Added checks that _() only contains string literals, and +# command line args are resolved to module lists, i.e. you +# can now pass a filename, a module or package name, or a +# directory (including globbing chars, important for Win32). +# Made docstring fit in 80 chars wide displays using pydoc. +# + +# for selftesting +try: + import fintl + _ = fintl.gettext +except ImportError: + _ = lambda s: s + +__doc__ = _("""pygettext -- Python equivalent of xgettext(1) + +Many systems (Solaris, Linux, Gnu) provide extensive tools that ease the +internationalization of C programs. Most of these tools are independent of +the programming language and can be used from within Python programs. +Martin von Loewis' work[1] helps considerably in this regard. + +There's one problem though; xgettext is the program that scans source code +looking for message strings, but it groks only C (or C++). Python +introduces a few wrinkles, such as dual quoting characters, triple quoted +strings, and raw strings. xgettext understands none of this. + +Enter pygettext, which uses Python's standard tokenize module to scan +Python source code, generating .pot files identical to what GNU xgettext[2] +generates for C and C++ code. From there, the standard GNU tools can be +used. + +A word about marking Python strings as candidates for translation. GNU +xgettext recognizes the following keywords: gettext, dgettext, dcgettext, +and gettext_noop. But those can be a lot of text to include all over your +code. C and C++ have a trick: they use the C preprocessor. Most +internationalized C source includes a #define for gettext() to _() so that +what has to be written in the source is much less. Thus these are both +translatable strings: + + gettext("Translatable String") + _("Translatable String") + +Python of course has no preprocessor so this doesn't work so well. Thus, +pygettext searches only for _() by default, but see the -k/--keyword flag +below for how to augment this. + + [1] http://www.python.org/workshops/1997-10/proceedings/loewis.html + [2] http://www.gnu.org/software/gettext/gettext.html + +NOTE: pygettext attempts to be option and feature compatible with GNU +xgettext where ever possible. However some options are still missing or are +not fully implemented. Also, xgettext's use of command line switches with +option arguments is broken, and in these cases, pygettext just defines +additional switches. + +Usage: pygettext [options] inputfile ... + +Options: + + -a + --extract-all + Extract all strings. + + -d name + --default-domain=name + Rename the default output file from messages.pot to name.pot. + + -E + --escape + Replace non-ASCII characters with octal escape sequences. + + -D + --docstrings + Extract module, class, method, and function docstrings. These do + not need to be wrapped in _() markers, and in fact cannot be for + Python to consider them docstrings. (See also the -X option). + + -h + --help + Print this help message and exit. + + -k word + --keyword=word + Keywords to look for in addition to the default set, which are: + %(DEFAULTKEYWORDS)s + + You can have multiple -k flags on the command line. + + -K + --no-default-keywords + Disable the default set of keywords (see above). Any keywords + explicitly added with the -k/--keyword option are still recognized. 
+ + --no-location + Do not write filename/lineno location comments. + + -n + --add-location + Write filename/lineno location comments indicating where each + extracted string is found in the source. These lines appear before + each msgid. The style of comments is controlled by the -S/--style + option. This is the default. + + -o filename + --output=filename + Rename the default output file from messages.pot to filename. If + filename is `-' then the output is sent to standard out. + + -p dir + --output-dir=dir + Output files will be placed in directory dir. + + -S stylename + --style stylename + Specify which style to use for location comments. Two styles are + supported: + + Solaris # File: filename, line: line-number + GNU #: filename:line + + The style name is case insensitive. GNU style is the default. + + -v + --verbose + Print the names of the files being processed. + + -V + --version + Print the version of pygettext and exit. + + -w columns + --width=columns + Set width of output to columns. + + -x filename + --exclude-file=filename + Specify a file that contains a list of strings that are not be + extracted from the input files. Each string to be excluded must + appear on a line by itself in the file. + + -X filename + --no-docstrings=filename + Specify a file that contains a list of files (one per line) that + should not have their docstrings extracted. This is only useful in + conjunction with the -D option above. + +If `inputfile' is -, standard input is read. +""") + +import os +import imp +import sys +import glob +import time +import getopt +import token +import tokenize +import operator + +__version__ = '1.5' + +default_keywords = ['_'] +DEFAULTKEYWORDS = ', '.join(default_keywords) + +EMPTYSTRING = '' + +# The normal pot-file header. msgmerge and Emacs's po-mode work better if it's +# there. +pot_header = _('''\ +# SOME DESCRIPTIVE TITLE. +# Copyright (C) YEAR ORGANIZATION +# FIRST AUTHOR , YEAR. +# +msgid "" +msgstr "" +"Project-Id-Version: PACKAGE VERSION\\n" +"POT-Creation-Date: %(time)s\\n" +"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\\n" +"Last-Translator: FULL NAME \\n" +"Language-Team: LANGUAGE \\n" +"MIME-Version: 1.0\\n" +"Content-Type: text/plain; charset=CHARSET\\n" +"Content-Transfer-Encoding: ENCODING\\n" +"Generated-By: pygettext.py %(version)s\\n" + +''') + +def usage(code, msg=''): + print >> sys.stderr, __doc__ % globals() + if msg: + print >> sys.stderr, msg + sys.exit(code) + + +escapes = [] + +def make_escapes(pass_iso8859): + global escapes + escapes = [chr(i) for i in range(256)] + if pass_iso8859: + # Allow iso-8859 characters to pass through so that e.g. 'msgid + # "H�he"' would result not result in 'msgid "H\366he"'. Otherwise we + # escape any character outside the 32..126 range. + mod = 128 + else: + mod = 256 + for i in range(mod): + if not(32 <= i <= 126): + escapes[i] = "\\%03o" % i + escapes[ord('\\')] = '\\\\' + escapes[ord('\t')] = '\\t' + escapes[ord('\r')] = '\\r' + escapes[ord('\n')] = '\\n' + escapes[ord('\"')] = '\\"' + + +def escape(s): + global escapes + s = list(s) + for i in range(len(s)): + s[i] = escapes[ord(s[i])] + return EMPTYSTRING.join(s) + + +def safe_eval(s): + # unwrap quotes, safely + return eval(s, {'__builtins__':{}}, {}) + + +def normalize(s): + # This converts the various Python string types into a format that is + # appropriate for .po files, namely much closer to C style. 
+ lines = s.split('\n') + if len(lines) == 1: + s = '"' + escape(s) + '"' + else: + if not lines[-1]: + del lines[-1] + lines[-1] = lines[-1] + '\n' + for i in range(len(lines)): + lines[i] = escape(lines[i]) + lineterm = '\\n"\n"' + s = '""\n"' + lineterm.join(lines) + '"' + return s + +def containsAny(str, set): + """Check whether 'str' contains ANY of the chars in 'set'""" + return 1 in [c in str for c in set] + + +def _get_modpkg_path(dotted_name, pathlist=None): + """Get the filesystem path for a module or a package. + + Return the file system path to a file for a module, and to a directory for + a package. Return None if the name is not found, or is a builtin or + extension module. + """ + # split off top-most name + parts = dotted_name.split('.', 1) + + if len(parts) > 1: + # we have a dotted path, import top-level package + try: + file, pathname, description = imp.find_module(parts[0], pathlist) + if file: file.close() + except ImportError: + return None + + # check if it's indeed a package + if description[2] == imp.PKG_DIRECTORY: + # recursively handle the remaining name parts + pathname = _get_modpkg_path(parts[1], [pathname]) + else: + pathname = None + else: + # plain name + try: + file, pathname, description = imp.find_module( + dotted_name, pathlist) + if file: + file.close() + if description[2] not in [imp.PY_SOURCE, imp.PKG_DIRECTORY]: + pathname = None + except ImportError: + pathname = None + + return pathname + + +def getFilesForName(name): + """Get a list of module files for a filename, a module or package name, + or a directory. + """ + if not os.path.exists(name): + # check for glob chars + if containsAny(name, "*?[]"): + files = glob.glob(name) + list = [] + for file in files: + list.extend(getFilesForName(file)) + return list + + # try to find module or package + name = _get_modpkg_path(name) + if not name: + return [] + + if os.path.isdir(name): + # find all python files in directory + list = [] + # get extension for python source files + if '_py_ext' not in globals(): + global _py_ext + _py_ext = [triple[0] for triple in imp.get_suffixes() + if triple[2] == imp.PY_SOURCE][0] + for root, dirs, files in os.walk(name): + # don't recurse into CVS directories + if 'CVS' in dirs: + dirs.remove('CVS') + # add all *.py files to list + list.extend( + [os.path.join(root, file) for file in files + if os.path.splitext(file)[1] == _py_ext] + ) + return list + elif os.path.exists(name): + # a single file + return [name] + + return [] + +class TokenEater: + def __init__(self, options): + self.__options = options + self.__messages = {} + self.__state = self.__waiting + self.__data = [] + self.__lineno = -1 + self.__freshmodule = 1 + self.__curfile = None + + def __call__(self, ttype, tstring, stup, etup, line): + # dispatch +## import token +## print >> sys.stderr, 'ttype:', token.tok_name[ttype], \ +## 'tstring:', tstring + self.__state(ttype, tstring, stup[0]) + + def __waiting(self, ttype, tstring, lineno): + opts = self.__options + # Do docstring extractions, if enabled + if opts.docstrings and not opts.nodocstrings.get(self.__curfile): + # module docstring? + if self.__freshmodule: + if ttype == tokenize.STRING: + self.__addentry(safe_eval(tstring), lineno, isdocstring=1) + self.__freshmodule = 0 + elif ttype not in (tokenize.COMMENT, tokenize.NL): + self.__freshmodule = 0 + return + # class docstring? 
+ if ttype == tokenize.NAME and tstring in ('class', 'def'): + self.__state = self.__suiteseen + return + if ttype == tokenize.NAME and tstring in opts.keywords: + self.__state = self.__keywordseen + + def __suiteseen(self, ttype, tstring, lineno): + # ignore anything until we see the colon + if ttype == tokenize.OP and tstring == ':': + self.__state = self.__suitedocstring + + def __suitedocstring(self, ttype, tstring, lineno): + # ignore any intervening noise + if ttype == tokenize.STRING: + self.__addentry(safe_eval(tstring), lineno, isdocstring=1) + self.__state = self.__waiting + elif ttype not in (tokenize.NEWLINE, tokenize.INDENT, + tokenize.COMMENT): + # there was no class docstring + self.__state = self.__waiting + + def __keywordseen(self, ttype, tstring, lineno): + if ttype == tokenize.OP and tstring == '(': + self.__data = [] + self.__lineno = lineno + self.__state = self.__openseen + else: + self.__state = self.__waiting + + def __openseen(self, ttype, tstring, lineno): + if ttype == tokenize.OP and tstring == ')': + # We've seen the last of the translatable strings. Record the + # line number of the first line of the strings and update the list + # of messages seen. Reset state for the next batch. If there + # were no strings inside _(), then just ignore this entry. + if self.__data: + self.__addentry(EMPTYSTRING.join(self.__data)) + self.__state = self.__waiting + elif ttype == tokenize.STRING: + self.__data.append(safe_eval(tstring)) + elif ttype not in [tokenize.COMMENT, token.INDENT, token.DEDENT, + token.NEWLINE, tokenize.NL]: + # warn if we see anything else than STRING or whitespace + print >> sys.stderr, _( + '*** %(file)s:%(lineno)s: Seen unexpected token "%(token)s"' + ) % { + 'token': tstring, + 'file': self.__curfile, + 'lineno': self.__lineno + } + self.__state = self.__waiting + + def __addentry(self, msg, lineno=None, isdocstring=0): + if lineno is None: + lineno = self.__lineno + if not msg in self.__options.toexclude: + entry = (self.__curfile, lineno) + self.__messages.setdefault(msg, {})[entry] = isdocstring + + def set_filename(self, filename): + self.__curfile = filename + self.__freshmodule = 1 + + def write(self, fp): + options = self.__options + timestamp = time.strftime('%Y-%m-%d %H:%M+%Z') + # The time stamp in the header doesn't have the same format as that + # generated by xgettext... + print >> fp, pot_header % {'time': timestamp, 'version': __version__} + # Sort the entries. First sort each particular entry's keys, then + # sort all the entries by their first item. + reverse = {} + for k, v in self.__messages.items(): + keys = v.keys() + keys.sort() + reverse.setdefault(tuple(keys), []).append((k, v)) + rkeys = reverse.keys() + rkeys.sort() + for rkey in rkeys: + rentries = reverse[rkey] + rentries.sort() + for k, v in rentries: + isdocstring = 0 + # If the entry was gleaned out of a docstring, then add a + # comment stating so. This is to aid translators who may wish + # to skip translating some unimportant docstrings. + if reduce(operator.__add__, v.values()): + isdocstring = 1 + # k is the message string, v is a dictionary-set of (filename, + # lineno) tuples. We want to sort the entries in v first by + # file name and then by line number. 
+ v = v.keys() + v.sort() + if not options.writelocations: + pass + # location comments are different b/w Solaris and GNU: + elif options.locationstyle == options.SOLARIS: + for filename, lineno in v: + d = {'filename': filename, 'lineno': lineno} + print >>fp, _( + '# File: %(filename)s, line: %(lineno)d') % d + elif options.locationstyle == options.GNU: + # fit as many locations on one line, as long as the + # resulting line length doesn't exceed 'options.width' + locline = '#:' + for filename, lineno in v: + d = {'filename': filename, 'lineno': lineno} + s = _(' %(filename)s:%(lineno)d') % d + if len(locline) + len(s) <= options.width: + locline = locline + s + else: + print >> fp, locline + locline = "#:" + s + if len(locline) > 2: + print >> fp, locline + if isdocstring: + print >> fp, '#, docstring' + print >> fp, 'msgid', normalize(k) + print >> fp, 'msgstr ""\n' + + +def main(): + global default_keywords + try: + opts, args = getopt.getopt( + sys.argv[1:], + 'ad:DEhk:Kno:p:S:Vvw:x:X:', + ['extract-all', 'default-domain=', 'escape', 'help', + 'keyword=', 'no-default-keywords', + 'add-location', 'no-location', 'output=', 'output-dir=', + 'style=', 'verbose', 'version', 'width=', 'exclude-file=', + 'docstrings', 'no-docstrings', + ]) + except getopt.error, msg: + usage(1, msg) + + # for holding option values + class Options: + # constants + GNU = 1 + SOLARIS = 2 + # defaults + extractall = 0 # FIXME: currently this option has no effect at all. + escape = 0 + keywords = [] + outpath = '' + outfile = 'messages.pot' + writelocations = 1 + locationstyle = GNU + verbose = 0 + width = 78 + excludefilename = '' + docstrings = 0 + nodocstrings = {} + + options = Options() + locations = {'gnu' : options.GNU, + 'solaris' : options.SOLARIS, + } + + # parse options + for opt, arg in opts: + if opt in ('-h', '--help'): + usage(0) + elif opt in ('-a', '--extract-all'): + options.extractall = 1 + elif opt in ('-d', '--default-domain'): + options.outfile = arg + '.pot' + elif opt in ('-E', '--escape'): + options.escape = 1 + elif opt in ('-D', '--docstrings'): + options.docstrings = 1 + elif opt in ('-k', '--keyword'): + options.keywords.append(arg) + elif opt in ('-K', '--no-default-keywords'): + default_keywords = [] + elif opt in ('-n', '--add-location'): + options.writelocations = 1 + elif opt in ('--no-location',): + options.writelocations = 0 + elif opt in ('-S', '--style'): + options.locationstyle = locations.get(arg.lower()) + if options.locationstyle is None: + usage(1, _('Invalid value for --style: %s') % arg) + elif opt in ('-o', '--output'): + options.outfile = arg + elif opt in ('-p', '--output-dir'): + options.outpath = arg + elif opt in ('-v', '--verbose'): + options.verbose = 1 + elif opt in ('-V', '--version'): + print _('pygettext.py (xgettext for Python) %s') % __version__ + sys.exit(0) + elif opt in ('-w', '--width'): + try: + options.width = int(arg) + except ValueError: + usage(1, _('--width argument must be an integer: %s') % arg) + elif opt in ('-x', '--exclude-file'): + options.excludefilename = arg + elif opt in ('-X', '--no-docstrings'): + fp = open(arg) + try: + while 1: + line = fp.readline() + if not line: + break + options.nodocstrings[line[:-1]] = 1 + finally: + fp.close() + + # calculate escapes + make_escapes(not options.escape) + + # calculate all keywords + options.keywords.extend(default_keywords) + + # initialize list of strings to exclude + if options.excludefilename: + try: + fp = open(options.excludefilename) + options.toexclude = fp.readlines() + fp.close() + 
except IOError: + print >> sys.stderr, _( + "Can't read --exclude-file: %s") % options.excludefilename + sys.exit(1) + else: + options.toexclude = [] + + # resolve args to module lists + expanded = [] + for arg in args: + if arg == '-': + expanded.append(arg) + else: + expanded.extend(getFilesForName(arg)) + args = expanded + + # slurp through all the files + eater = TokenEater(options) + for filename in args: + if filename == '-': + if options.verbose: + print _('Reading standard input') + fp = sys.stdin + closep = 0 + else: + if options.verbose: + print _('Working on %s') % filename + fp = open(filename) + closep = 1 + try: + eater.set_filename(filename) + try: + tokenize.tokenize(fp.readline, eater) + except tokenize.TokenError, e: + print >> sys.stderr, '%s: %s, line %d, column %d' % ( + e[0], filename, e[1][0], e[1][1]) + finally: + if closep: + fp.close() + + # write the output + if options.outfile == '-': + fp = sys.stdout + closep = 0 + else: + if options.outpath: + options.outfile = os.path.join(options.outpath, options.outfile) + fp = open(options.outfile, 'w') + closep = 1 + try: + eater.write(fp) + finally: + if closep: + fp.close() + +if __name__ == '__main__': + main() + # some more test strings + _(u'a unicode string') + # this one creates a warning + _('*** Seen unexpected token "%(token)s"') % {'token': 'test'} + _('more' 'than' 'one' 'string') diff -r 6ac3667706be roundup/roundupdb.py --- a/roundup/roundupdb.py Tue Apr 26 17:22:33 2022 -0400 +++ b/roundup/roundupdb.py Thu Apr 28 16:39:56 2022 +0200 @@ -36,6 +36,7 @@ from roundup.hyperdb import iter_roles from roundup.mailer import Mailer, MessageSendError, nice_sender_header +from roundup.i18n import RoundupNullTranslations from roundup.anypy.strings import b2s, s2u import roundup.anypy.random_ as random_ @@ -54,6 +55,9 @@ # (eg. the current user edits their username) journal_uid = None + def __init__(self): + self.i18n = RoundupNullTranslations() + def getuid(self): """Return the id of the "user" node associated with the user that owns this connection to the hyperdatabase.""" diff -r 6ac3667706be roundup/scripts/roundup_gettext.py --- a/roundup/scripts/roundup_gettext.py Tue Apr 26 17:22:33 2022 -0400 +++ b/roundup/scripts/roundup_gettext.py Thu Apr 28 16:39:56 2022 +0200 @@ -2,7 +2,7 @@ # # Copyright 2004 Richard Jones (richard@mechanicalcat.net) -"""Extract translatable strings from tracker templates""" +"""Extract translatable strings from tracker templates and detectors/extensions""" from __future__ import print_function import os @@ -23,6 +23,30 @@ from roundup.i18n import _ from roundup.cgi.TAL import talgettext +from roundup.pygettext import make_escapes, TokenEater, tokenize +import polib, tempfile + +# from pygettext's main(): +class Options: + # constants + GNU = 1 + SOLARIS = 2 + # defaults + extractall = 0 # FIXME: currently this option has no effect at all. + escape = 0 + keywords = ["_", "gettext", "ngettext", "ugettext"] + outpath = '' + outfile = '' + writelocations = 1 + locationstyle = GNU + verbose = 0 + width = 78 + excludefilename = '' + docstrings = 0 + nodocstrings = {} + toexclude = [] # TODO we should exclude all strings already found in some template + +tokeneater_options = Options() # name of message template file. # i don't think this will ever need to be changed, but still... 
@@ -62,6 +86,45 @@ # run talgettext.main() + # we now have everything from the templates in the TEMPLATE_FILE; + # next, search the *.py files in home/detectors and home/extensions for + # tokeneater_options.keywords + # this is partly assembled from pygettext's main() + make_escapes(not tokeneater_options.escape) + + pyfiles = [] + for source in ["detectors", "extensions"] : + for root, dirs, files in os.walk (os.path.join ("..", source)) : + pyfiles.extend ([os.path.join (root, f) for f in files if f.endswith (".py")]) + + eater = TokenEater (tokeneater_options) + + for filename in pyfiles : + eater.set_filename (filename) + with open (filename, "r") as f: + try : + tokenize.tokenize(f.readline, eater) + except tokenize.TokenError as e: + print('%s: %s, line %d, column %d' % ( + e.args[0], filename, e.args[1][0], e.args[1][1]), file=sys.stderr) + + with tempfile.NamedTemporaryFile("w") as tf : + eater.write (tf) + tf.seek (0) + p1 = polib.pofile(TEMPLATE_FILE) + p2 = polib.pofile(tf.name) + + p2_msg_ids = set([e.msgid for e in p2]) + for e in p1 : + if e.msgid in p2_msg_ids : + p2_e = p2.find (e.msgid) + e.occurrences.extend (p2_e.occurrences) + p2_msg_ids.remove (e.msgid) + + for msgid in p2_msg_ids : + p1.append (p2.find (msgid)) + + p1.save () if __name__ == "__main__": run()
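The ``db.i18n`` object injected by this change exposes the standard ``gettext`` translation API, so plural forms should be available in detectors and reactors through ``ngettext`` just like singular messages are through ``gettext``. A minimal reactor sketch; the ``issue`` class, ``nosy`` property and the ``remind_nosy`` name are illustrative only and assume a classic-schema tracker::

    def remind_nosy(db, cl, nodeid, oldvalues):
        ngettext = db.i18n.ngettext

        nosy = cl.get(nodeid, 'nosy')
        # ngettext picks the plural form appropriate for the active locale
        msg = ngettext('%(count)d user is on the nosy list',
                       '%(count)d users are on the nosy list',
                       len(nosy)) % {'count': len(nosy)}
        # ... mail or log the translated message here ...

    def init(db):
        # fire after an issue has been changed
        db.issue.react('set', remind_nosy)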
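Outside the web interface, a standalone script can wire up the translator on its database handle the same way ``roundup-admin`` does in this change: the language comes from the environment while the message catalogs are looked up under the tracker's ``locale`` directory. A sketch, assuming a tracker home of ``/path/to/tracker`` (placeholder)::

    import roundup.instance
    from roundup.i18n import get_translation

    tracker = roundup.instance.open('/path/to/tracker')  # placeholder path
    db = tracker.open('admin')

    # as in admin.py above: language is taken from the environment,
    # catalogs are loaded from the tracker's locale/ directory
    db.i18n = get_translation(tracker_home=tracker.tracker_home)

    _ = db.i18n.gettext
    print(_("this does not work"))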