From 21d7110cb098d81ffcca5b7fe69659ff96fac490 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Micha=C5=82=20Janiszewski?= Date: Tue, 30 Oct 2018 22:14:45 +0100 Subject: [PATCH 01/31] Compare to `None` using identity `is` operator This is a trivial change that replaces `==` operator with `is` operator, following PEP 8 guideline: > Comparisons to singletons like None should always be done with is or is not, never the equality operators. https://legacy.python.org/dev/peps/pep-0008/#programming-recommendations (cherry picked from commit 9d333bc95a555bee88d5f09f3ff6777cc5c313cb) --- data/tools/scoutDefault.py | 10 +++++----- data/tools/unit_tree/animations.py | 2 +- data/tools/wesnoth/campaignserver_client.py | 2 +- data/tools/wesnoth/libgithub.py | 6 +++--- data/tools/wesnoth/wescamp.py | 16 ++++++++-------- data/tools/wesnoth/wmldata.py | 6 +++--- data/tools/wesnoth/wmlparser.py | 6 +++--- data/tools/wesnoth/wmlparser3.py | 2 +- utils/gdb/register_wesnoth_pretty_printers.py | 2 +- utils/simulate_lobby_traffic.py | 2 +- utils/wiki_grabber.py | 4 ++-- 11 files changed, 29 insertions(+), 29 deletions(-) diff --git a/data/tools/scoutDefault.py b/data/tools/scoutDefault.py index f655a1e1551..fb87597b192 100755 --- a/data/tools/scoutDefault.py +++ b/data/tools/scoutDefault.py @@ -36,7 +36,7 @@ AI_SCOUTS_TEXT = "\n\t[ai]%s\n\t[/ai]" % SCOUTS_TEXT.replace('\n','\n\t') def applySearch(text, RE, groupId): data = RE.search(text, 0) - if data != None: + if data is not None: return data.group(groupId) else: return "" @@ -65,7 +65,7 @@ class wikiAi: self.updated_description = "" def addAiData(self, aiContent): - if aiContent != None: + if aiContent is not None: self.start = applySearch(aiContent, AI_START, 'text') self.scouts = applySearch(aiContent, AI_SCOUTS, 'text') self.full_description = aiContent @@ -89,9 +89,9 @@ class wikiSide: self.scouts_setting = False def addAiData(self, sideContent): - if sideContent != None: + if sideContent is not None: aiDetail = 
ai_block.search(sideContent, 0) - while aiDetail != None: + while aiDetail is not None: if applySearch(aiDetail.group(), AI_TIME, 'text') == "" and applySearch(aiDetail.group(), AI_TURNS, 'text') == "": self.ai.append(wikiAi()) self.ai[self.getCurrentAiNumber()].addAiData(aiDetail.group()) @@ -145,7 +145,7 @@ class wikiScenario: def parseScenario (self, scenarioContent): self.addScenarioData(scenarioContent) sideDetail = side_block.search(scenarioContent, 0) - while sideDetail != None: + while sideDetail is not None: self.addSideData(sideDetail.group()) self.addAiData(sideDetail.group()) searchStart = sideDetail.end() diff --git a/data/tools/unit_tree/animations.py b/data/tools/unit_tree/animations.py index 1fcb5c7d104..64ceb7dcdfb 100644 --- a/data/tools/unit_tree/animations.py +++ b/data/tools/unit_tree/animations.py @@ -64,7 +64,7 @@ def write_table_row(out, unit, color, name = None): if abil.get_all(tag = "teleport"): needed["teleport"] = True - if name == None: name = unit.id + if name is None: name = unit.id out.write("%s" % (color and "c1" or "c2", name)) diff --git a/data/tools/wesnoth/campaignserver_client.py b/data/tools/wesnoth/campaignserver_client.py index 497913eecbc..032503c1fed 100755 --- a/data/tools/wesnoth/campaignserver_client.py +++ b/data/tools/wesnoth/campaignserver_client.py @@ -57,7 +57,7 @@ class CampaignClient: self.verbose = False self.quiet = quiet - if address != None: + if address is not None: self.canceled = False self.error = False s = address.split(":") diff --git a/data/tools/wesnoth/libgithub.py b/data/tools/wesnoth/libgithub.py index 4942721b958..b7394796127 100644 --- a/data/tools/wesnoth/libgithub.py +++ b/data/tools/wesnoth/libgithub.py @@ -519,7 +519,7 @@ class GitHub(object): return json_parsed def _github_have_authorization(self): - return self.authorization != None + return self.authorization is not None def _github_authorization(self): if self.authorization: return self.authorization @@ -541,7 +541,7 @@ class 
GitHub(object): p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True, cwd=cwd) out = "" err = "" - while(p.poll() == None): + while(p.poll() is None): out += p.stdout.read() err += p.stderr.read() @@ -615,6 +615,6 @@ def get_build_system(possible_dirs=[]): Returns: The Addon object of the build-system """ global _g - if _g == None: + if _g is None: _g = _gen(possible_dirs) return next(_g) diff --git a/data/tools/wesnoth/wescamp.py b/data/tools/wesnoth/wescamp.py index 5eb999db4bd..37481473d24 100755 --- a/data/tools/wesnoth/wescamp.py +++ b/data/tools/wesnoth/wescamp.py @@ -449,13 +449,13 @@ if __name__ == "__main__": logging.getLogger().addHandler(handler) server = "localhost" - if(args.server != None): + if(args.server is not None): server = args.server - if args.port != None: + if args.port is not None: server += ":" + args.port campaignd_configured = True - elif args.branch != None: + elif args.branch is not None: for port, version in libwml.CampaignClient.portmap: if version.startswith(args.branch): server += ":" + port @@ -464,7 +464,7 @@ if __name__ == "__main__": target = None tmp = tempdir() - if(args.temp_dir != None): + if(args.temp_dir is not None): if(args.upload_all): logging.error("TEMP-DIR not allowed for UPLOAD-ALL.") sys.exit(2) @@ -520,10 +520,10 @@ if __name__ == "__main__": print(k) # Upload an addon to wescamp. 
- elif(args.upload != None): + elif(args.upload is not None): assert_campaignd(campaignd_configured) assert_wescamp(wescamp_configured) - if(wescamp == None): + if(wescamp is None): logging.error("No wescamp checkout specified") sys.exit(2) @@ -546,7 +546,7 @@ if __name__ == "__main__": elif(args.upload_all): assert_campaignd(campaignd_configured) assert_wescamp(wescamp_configured) - if(wescamp == None): + if(wescamp is None): logging.error("No wescamp checkout specified.") sys.exit(2) @@ -581,7 +581,7 @@ if __name__ == "__main__": elif(args.checkout or args.checkout_readonly): assert_wescamp(wescamp_configured) - if(wescamp == None): + if(wescamp is None): logging.error("No wescamp checkout specified.") sys.exit(2) diff --git a/data/tools/wesnoth/wmldata.py b/data/tools/wesnoth/wmldata.py index af42deaf83f..b42f26cbdf6 100755 --- a/data/tools/wesnoth/wmldata.py +++ b/data/tools/wesnoth/wmldata.py @@ -275,7 +275,7 @@ class DataSub(Data): bytes = "" for r in result: - if r != None: + if r is not None: # For networking, we need actual bytestream here, not unicode. if type(r) is unicode: r = r.encode("utf8") bytes += str(r) @@ -577,11 +577,11 @@ class DataSub(Data): """For the even lazier, looks for a value inside a difficulty ifdef. """ v = self.get_text_val(tag) - if v != None: return v + if v is not None: return v for ifdef in self.get_ifdefs(["EASY", "NORMAL", "HARD"][difficulty]): v = ifdef.get_text_val(tag) - if v != None: return v + if v is not None: return v return default diff --git a/data/tools/wesnoth/wmlparser.py b/data/tools/wesnoth/wmlparser.py index c9684dccda5..a6554e4f48e 100755 --- a/data/tools/wesnoth/wmlparser.py +++ b/data/tools/wesnoth/wmlparser.py @@ -378,12 +378,12 @@ class Parser: elif macro[0] == ".": dirpath = self.current_path + macro[1:] # Otherwise, try to interpret the macro as a filename in the data dir. 
- elif self.data_dir != None: + elif self.data_dir is not None: dirpath = self.data_dir + "/" + macro else: dirpath = None - if dirpath != None and os.path.exists(dirpath): + if dirpath is not None and os.path.exists(dirpath): dirpath = os.path.normpath(dirpath) if self.only_expand_pathes: if not [x for x in self.only_expand_pathes if os.path.commonprefix([dirpath, x]) == x]: @@ -675,7 +675,7 @@ class Parser: self.read_while(" ") text = self.read_lines_until("#enddef") - if text == None: + if text is None: raise Error(self, "#define without #enddef") self.macros[params[0]] = self.Macro( diff --git a/data/tools/wesnoth/wmlparser3.py b/data/tools/wesnoth/wmlparser3.py index 8e036e4f6fa..5b15f8dd8b9 100755 --- a/data/tools/wesnoth/wmlparser3.py +++ b/data/tools/wesnoth/wmlparser3.py @@ -349,7 +349,7 @@ class Parser: if data_dir: self.data_dir = os.path.abspath(data_dir) self.keep_temp_dir = None self.temp_dir = None - self.no_preprocess = (wesnoth_exe == None) + self.no_preprocess = (wesnoth_exe is None) self.preprocessed = None self.verbose = False diff --git a/utils/gdb/register_wesnoth_pretty_printers.py b/utils/gdb/register_wesnoth_pretty_printers.py index 9b970b195d1..d3e39ee7081 100644 --- a/utils/gdb/register_wesnoth_pretty_printers.py +++ b/utils/gdb/register_wesnoth_pretty_printers.py @@ -43,7 +43,7 @@ def create_wesnoth_lookup_function(pretty_printers_dict): # Get the type name. 
typename = type.tag - if typename == None: + if typename is None: return None # Iterate over local dictionary of types to determine diff --git a/utils/simulate_lobby_traffic.py b/utils/simulate_lobby_traffic.py index d5cecd477ac..782c952322f 100644 --- a/utils/simulate_lobby_traffic.py +++ b/utils/simulate_lobby_traffic.py @@ -50,7 +50,7 @@ while len(clients) > 0 and time.monotonic() < waiting_start_time + EXIT_WAIT_TIM time.sleep(1.0) clients_copy = list(clients) for c in clients_copy: - if c.poll() != None: + if c.poll() is not None: # The process has terminated, remove it from the set. clients.remove(c) diff --git a/utils/wiki_grabber.py b/utils/wiki_grabber.py index 39d3afb40a1..9a64e4dabc0 100755 --- a/utils/wiki_grabber.py +++ b/utils/wiki_grabber.py @@ -121,7 +121,7 @@ if __name__ == "__main__": res = re.compile("^" + key + " *= *(.*)$", re.M).search(data) - if res != None: + if res is not None: res = res.group(1) return res @@ -131,7 +131,7 @@ if __name__ == "__main__": page = get_value(data, "@page") order = get_value(data, "@order") - if order == None: + if order is None: order = 10000 return [page, order] -- 2.29.2 From 78cea2f7727843719c28d76d5504131d469aa0fd Mon Sep 17 00:00:00 2001 From: josteph Date: Mon, 16 Sep 2019 20:15:25 +0000 Subject: [PATCH 02/31] utils/codelist: Fix TypeError that occurred on any input (cherry picked from commit 2b1ebdc2047a75b1d0dc5724f8b91bef150d9c3c) --- utils/codelist | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/codelist b/utils/codelist index 43d41627c27..db5747fafce 100755 --- a/utils/codelist +++ b/utils/codelist @@ -39,5 +39,5 @@ def printbyrange(lst): out += "%d-%d," % tuple(elt) return out[:-1] -codepoints = [int(x.strip) for x in sys.stdin.readlines()] +codepoints = [int(x.strip()) for x in sys.stdin.readlines()] print printbyrange(rangeify(codepoints)) -- 2.29.2 From 9b2fdedd810ddba261812ed6f8c55625aba071fe Mon Sep 17 00:00:00 2001 From: josteph Date: Mon, 16 Sep 2019 20:16:50 +0000 
Subject: [PATCH 03/31] utils/codelist: Convert to python3 Issue #1508 (cherry picked from commit 4e8d8e844a7c0bd53ae2e8fe6ba5d1cd0ceaea28) --- utils/codelist | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/utils/codelist b/utils/codelist index db5747fafce..4941ad66543 100755 --- a/utils/codelist +++ b/utils/codelist @@ -1,4 +1,4 @@ -#!/usr/bin/env python2 +#!/usr/bin/env python # codelist # given list of integers, one per line, outputs a minimal list of ranges # describing the list @@ -40,4 +40,4 @@ def printbyrange(lst): return out[:-1] codepoints = [int(x.strip()) for x in sys.stdin.readlines()] -print printbyrange(rangeify(codepoints)) +print(printbyrange(rangeify(codepoints))) -- 2.29.2 From 16d1e97711020bd8028ae279444b8c7fd64fd652 Mon Sep 17 00:00:00 2001 From: Elvish_Hunter Date: Mon, 16 Sep 2019 21:59:07 +0200 Subject: [PATCH 04/31] Ported expand-terrain-macros to Python 3 (cherry picked from commit 9982afb21fef752b46a4162bb2a26ef609bd3279) --- data/core/terrain-graphics/Makefile | 2 +- data/tools/expand-terrain-macros.py | 91 ++++++++++------------------- 2 files changed, 33 insertions(+), 60 deletions(-) diff --git a/data/core/terrain-graphics/Makefile b/data/core/terrain-graphics/Makefile index 3f27c1bafdc..10b77c03b8a 100644 --- a/data/core/terrain-graphics/Makefile +++ b/data/core/terrain-graphics/Makefile @@ -2,4 +2,4 @@ # expand-terrain-macros.py tool. Here's a canned invocation that will rebuild all. 
rebuild: - for i in *; do python ../../../data/tools/expand-terrain-macros.py -a -r $$i; done + for i in *; do python3 ../../../data/tools/expand-terrain-macros.py -a -r $$i; done diff --git a/data/tools/expand-terrain-macros.py b/data/tools/expand-terrain-macros.py index 4c99c439f82..89123d7b6f1 100755 --- a/data/tools/expand-terrain-macros.py +++ b/data/tools/expand-terrain-macros.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python2 +#!/usr/bin/env python3 # expand-terrain-macros.py - Expand "meta-macros" for terrain WML # @@ -35,56 +35,30 @@ # !!! ONLY USE THIS IF YOU KNOW WHAT YOU ARE DOING !!! import sys -import getopt - -def printUsage(): - print "Usage: expand-terrain-macros.py [OPTIONS] filename1\ - [filename2 [...]]\n" - print """Options: - -i Insert the expanded sections into the input file(s) immediately after - their macro definitions. - -a Append the expanded sections to the input file(s) - -r Replace the input file(s) with the resulting output. Previously generated - expansions will be removed. Implies -i if nothing else is specified. 
- -If no options are specified, only the expanded sections will be printed to -stdout""" - -insert = False -append = False -replace = False - -try: - (opts, args) = getopt.getopt(sys.argv[1:], 'iar') -except getopt.GetoptError, e: - print 'Error parsing command-line arguments: %s' % e - printUsage() - sys.exit(1) -for (option, parameter) in opts: - if option == '-i': - insert = True - if option == '-a': - append = True - if option == '-r': - replace = True +import argparse + +parser = argparse.ArgumentParser(description="", +epilog="If no options are specified, only the expanded sections will be printed to stdout") +group = parser.add_mutually_exclusive_group() +group.add_argument("-i", "--insert", action="store_true", +help="Insert the expanded sections into the input file(s) immediately after their macro definitions.") +group.add_argument("-a", "--append", action="store_true", +help="Append the expanded sections to the input file(s).") +parser.add_argument("-r", "--replace", action="store_true", +help="Replace the input file(s) with the resulting output. Previously generated expansions will be removed. 
Implies -i if nothing else is specified.") +parser.add_argument("filenames", nargs="+") +args = parser.parse_args() + +insert = args.insert +append = args.append +replace = args.replace if replace and not append: insert = True -if insert and append: - print "Error: cannot use -i and -a at the same time" - printUsage() - sys.exit(1) - - -if len(args) == 0: - printUsage() - sys.exit(1) - -for filename in args: - f = file(filename) - content = f.readlines() - f.close() +for filename in args.filenames: + with open(filename) as f: + content = f.readlines() changed = False output = [] @@ -113,7 +87,7 @@ for filename in args: if len(split_param) == 3: optional_params.append(split_param[0]) elif len(split_param) != 1: - print "Error in line:\n" + line + print("Error in line:\n" + line) sys.exit(1) params.append(split_param) @@ -127,8 +101,8 @@ for filename in args: result.append("#generated from: " + line.strip()) result.append("#Please do not modify") - for i in xrange(2**len(optional_params) - 2, -1, -1): - enabled_map = dict([(param, i & (1< Date: Tue, 17 Sep 2019 21:43:04 +0200 Subject: [PATCH 05/31] Removed the unused scoutDefault Python tool (cherry picked from commit 2863db36cc81b5b4b8dc7346b71b16f948d80862) --- data/tools/scoutDefault.py | 278 ------------------------------------- 1 file changed, 278 deletions(-) delete mode 100755 data/tools/scoutDefault.py diff --git a/data/tools/scoutDefault.py b/data/tools/scoutDefault.py deleted file mode 100755 index fb87597b192..00000000000 --- a/data/tools/scoutDefault.py +++ /dev/null @@ -1,278 +0,0 @@ -#!/usr/bin/env python2 -# -# Automagically set the village_per_scout parameters in MP scenarios. 
- -import sys -import os -import getopt -import re - -overwrite = False -defaultValue = 4 -suffix = '' - -RE_SCENARIO = '.*?\[multiplayer\].*?\[\/multiplayer\]' -scenario_block = re.compile(RE_SCENARIO, re.DOTALL) - -side_block = re.compile('( |\t)*\[side\].*?\[\/side\]( |\t)*\n', re.DOTALL) - -ai_block = re.compile('( |\t)*\[ai\].*?\[\/ai\]( |\t)*\n', re.DOTALL) - -AI_SIDE = re.compile('(?P( |\t)*side=(\w| |\-|\,|\t)*)\n', re.DOTALL) -AI_TIME = re.compile('(?P( |\t)*time_of_day=(\w| |\-|\,|\t)*)', re.DOTALL) -AI_TURNS = re.compile('(?P( |\t)*turns=(\w| |\-|\,|\t)*)', re.DOTALL) -AI_SCOUTS = re.compile('(?P( |\t)*villages_per_scout=(\w| |\-|\,|\t)*)', re.DOTALL) -AI_SCOUTS_VALUE = re.compile('(?P(?<=villages_per_scout=)(\d)*)', re.DOTALL) -AI_CAN_RECRUIT = re.compile('(?P(?<=canrecruit=)(\d)*)', re.DOTALL) -AI_START = re.compile('(?P( |\t)*\[ai\](\w| |\t)*\n)', re.DOTALL) - -IF_TEXT = "[if]\n" -ENDIF_TEXT = "[/if]\n" -ELSE_TEXT = "[else]\n" -ENDELSE_TEXT = "[/else]" - -SCOUTS_TEXT = "\n\tvillages_per_scout=0" -AI_SCOUTS_TEXT = "\n\t[ai]%s\n\t[/ai]" % SCOUTS_TEXT.replace('\n','\n\t') - -def applySearch(text, RE, groupId): - data = RE.search(text, 0) - if data is not None: - return data.group(groupId) - else: - return "" - -def updateDescription(ai, sides): - if ai.value != "": - new = defaultValue * sides - ai.updated_description = ai.updated_description.replace(ai.value, str(new)) - -def getIndent(itemText, subitemText): - item = re.compile('^( |\t)*').search(itemText).group() - subitem = re.compile('^( |\t)*').search(subitemText).group() - if item == '' or subitem == '': - return subitem - if item[0] == '\t' and subitem[0] == '\t': - return (len(subitem) - len(item)) * '\t' - if item[0] == ' ' and subitem[0] == ' ': - return (len(subitem) - len(item)) * ' ' - return '\t' - -class wikiAi: - def __init__(self): - self.start = "" - self.scouts = "" - self.full_description = "" - self.updated_description = "" - - def addAiData(self, aiContent): - if aiContent is not 
None: - self.start = applySearch(aiContent, AI_START, 'text') - self.scouts = applySearch(aiContent, AI_SCOUTS, 'text') - self.full_description = aiContent - self.updated_description = aiContent - self.value = applySearch(aiContent, AI_SCOUTS_VALUE, 'text') - -class wikiAiList(list): - def __str__(self): - output = "" - for item in self: - output = output + item.full_description + " ; " - return output - -class wikiSide: - def __init__(self): - self.full_description = '' - self.updated_description = '' - self.side = '' - # Will only contain one element - self.ai = wikiAiList() - self.scouts_setting = False - - def addAiData(self, sideContent): - if sideContent is not None: - aiDetail = ai_block.search(sideContent, 0) - while aiDetail is not None: - if applySearch(aiDetail.group(), AI_TIME, 'text') == "" and applySearch(aiDetail.group(), AI_TURNS, 'text') == "": - self.ai.append(wikiAi()) - self.ai[self.getCurrentAiNumber()].addAiData(aiDetail.group()) - if self.ai[self.getCurrentAiNumber()].scouts != "": - self.scouts_setting = True - break - searchStart = aiDetail.end() - aiDetail = ai_block.search(sideContent, searchStart) - - - def updateAi(self, sides): - if not len(self.ai): - self.ai.append(wikiAi()) - space = re.compile('^( |\t)*').search(self.full_description).group() - indent = getIndent(self.full_description, self.side) - side_scout_text = AI_SCOUTS_TEXT.replace('\t', indent) - side_scout_text = side_scout_text.replace('\n', '\n' + space) - self.ai[self.getCurrentAiNumber()].addAiData(side_scout_text) - self.updated_description = self.updated_description.replace('\n', self.ai[self.getCurrentAiNumber()].full_description + '\n', 1) - updateDescription(self.ai[0], sides) - else: - if not self.scouts_setting: - space = re.compile('^( |\t)*').search(self.full_description).group() - indent = getIndent(self.full_description, self.side) - side_scout_text = AI_SCOUTS_TEXT.replace('\t', indent) - side_scout_text = side_scout_text.replace('\n', '\n' + space) - 
self.ai[0].updated_description = self.ai[0].updated_description.replace(self.ai[0].start, self.ai[0].start.replace('\n', side_scout_text + '\n')) - updateDescription(self.ai[0], sides) - else: - if overwrite: - updateDescription(self.ai[0], sides) - if self.ai[0].full_description != self.ai[0].updated_description: - self.updated_description = self.updated_description.replace(self.ai[0].full_description, self.ai[0].updated_description, 1) - - def getCurrentAiNumber(self): - return len(self.ai) - 1 - -class wikiSideList(list): - def __str__(self): - output = "" - for item in self: - output = output + item.full_description + " ; " - return output - -class wikiScenario: - def __init__(self): - self.side = wikiSideList() - self.full_description = '' - self.updated_description = '' - - def parseScenario (self, scenarioContent): - self.addScenarioData(scenarioContent) - sideDetail = side_block.search(scenarioContent, 0) - while sideDetail is not None: - self.addSideData(sideDetail.group()) - self.addAiData(sideDetail.group()) - searchStart = sideDetail.end() - sideDetail = side_block.search(scenarioContent, searchStart) - self.updateAi() - - def addScenarioData(self, scenarioContent): - self.full_description = scenarioContent - self.updated_description = scenarioContent - - def addSideData(self, sideContent): - canrecruit = applySearch(sideContent, AI_CAN_RECRUIT, 'text') - if canrecruit == "0": - return - self.side.append(wikiSide()) - self.side[self.getCurrentSideNumber()].full_description = sideContent - self.side[self.getCurrentSideNumber()].updated_description = sideContent - self.side[self.getCurrentSideNumber()].side = applySearch(sideContent, AI_SIDE, 'text') - - def addAiData(self, aiContent): - self.side[self.getCurrentSideNumber()].addAiData(aiContent) - - def updateAi(self): - for side in self.side: - side.updateAi(len(self.side)) - for side in self.side: - if side.full_description != side.updated_description: - self.updated_description = 
self.updated_description.replace(side.full_description, side.updated_description, 1) - - def getCurrentSideNumber(self): - return len(self.side) - 1 - -class wikiScenarioList(list): - def __str__(self): - output = "" - for scenario in self: - output = output + scenario.full_description + " ; " - return output - - def addScenario(self, scenario): - self.append(scenario) - -def parseAll(dirName, fileList): - scenarioListIndex = 0 - scenarioList = wikiScenarioList() - for fileName in fileList: - if os.path.splitext(fileName)[1] != '.cfg': - continue - if os.path.isdir(os.path.join(dirName, fileName)): - continue - f = file(os.path.join(dirName, fileName)) - fileContent = f.read() - f.close() - searchStart = 0 - scenario = scenario_block.match(fileContent, searchStart) - while scenario != None: - scenarioList.addScenario(wikiScenario()) - scenarioList[scenarioListIndex].parseScenario(scenario.group(0)) - searchStart = scenario.end() - scenario = scenario_block.search(fileContent, searchStart) - scenarioListIndex += 1 - updated_file = fileContent - for scenarioItem in scenarioList: - if scenarioItem.full_description != scenarioItem.updated_description: - updated_file = updated_file.replace(scenarioItem.full_description, scenarioItem.updated_description) - if updated_file != fileContent: - (basename_out, ext_out) = os.path.splitext(fileName) - basename_out = basename_out + suffix + ext_out - f = file(basename_out,'w') - f.write(updated_file) - f.close() - -def printUsage(): - print """scoutDefault.py [-hRO] [-d directory] [-f file] [-x extension] --h : print this message --R : recursively parse directories --O : overwrite village_per_scout value in scenario --d : directory to look for file to parse --f : name of the file to parse --x : suffix to append to filename -Example of use: - ./scoutDefault.py -h - Get help - ./scoutDefault.py -f 2p_Blitz.cfg -x _new - Run the script and write output on 2p_Blitz_new.cfg - ./scoutDefault.py -d 
/usr/local/share/wesnoth/data/scenarios - Run the script on all file under that directory - ./scoutDefault.py -R -d /usr/local/share/wesnoth - Run the script on all directories under that directory - ./scoutDefault.py -f 2p_Blitz.cfg -O - Run the script on 2p_Blitz.cfg and delete previous value""" - -recursive = False -entryPoint = os.getcwd() -entryFile = os.listdir(os.getcwd()) -resourcesFile = {} -try: - (opts, argsProper) = getopt.getopt(sys.argv[1:], 'ROhf:d:x:v:"') -except getopt.GetoptError, e: - print 'Error parsing command-line arguments: %s' % e - printUsage() - sys.exit(1) -for (option, parameter) in opts: - if option == '-h': # Print the commandline help and exit. - printUsage() - sys.exit(0) - elif option == '-R': - recursive = True - elif option == '-O': - overwrite = False - elif option == '-d': - if not os.path.exists(parameter): - print 'Error: %s directory does not exist' % parameter - sys.exit(1) - elif not os.path.isdir(parameter): - print 'Error: %s is not a directory' % parameter - sys.exit(1) - entryPoint = parameter - entryFile = os.listdir(entryPoint) - elif option == '-f': - entryFile = [] - entryFile.append(os.path.basename(parameter)) - entryPoint = os.path.dirname(parameter) - elif option == '-x': - suffix = parameter - -if recursive == True: - os.path.walk(entryPoint, parseAll, resourcesFile) -else: - parseAll(entryPoint, entryFile) -- 2.29.2 From fd386806b1abb821e8b15d21cdd73244625c9677 Mon Sep 17 00:00:00 2001 From: Elvish_Hunter Date: Thu, 19 Sep 2019 18:05:23 +0200 Subject: [PATCH 06/31] Removed unused journeylifter Python tool (cherry picked from commit b8965178348fab03718fb4b7fb798e8427d7cbb9) --- data/tools/journeylifter | 129 --------------------------------------- 1 file changed, 129 deletions(-) delete mode 100755 data/tools/journeylifter diff --git a/data/tools/journeylifter b/data/tools/journeylifter deleted file mode 100755 index a6e47d9548e..00000000000 --- a/data/tools/journeylifter +++ /dev/null @@ -1,129 +0,0 @@ 
-#!/usr/bin/env python2 - -""" -journeylifter -- turn in-line track markers into a journey file. - -Run in the scenario directory of a campaign. Generates a journey.cfg and -rewrites the files to reference it. - -All mainline campaigns have already undergone this conversion; this script -may be helpful with UMC. - -Assumes any existing journey.cfg is generated and removes it. -Assumes wmllint has been run, converting DOT/CROSS macros to the new form. -Assumes the scenario's filenames sort in the order they present. -Assumes there is only one continuous span of journey markers per file. -""" - -import os, sys, re, getopt, shutil - -top = "." -if __name__ == "__main__": - (options, arguments) = getopt.getopt(sys.argv[1:], "-d?hrc", [ - 'directory', - 'help', - 'revert', - 'clean', - ]) - revert = clean = False - for (opt, val) in options: - if opt in ('-d', '--directory'): - top = val - elif opt in ('-?', '-h', '-"# trackplacer: tracks begin\n'): - print __doc__ - sys.exit(0) - elif opt in ('-r', '--revert'): - revert = True - elif opt in ('-c', '--clean'): - clean = True - - os.chdir(top) - try: - os.remove("journey.cfg") - except OSError: - pass - files = filter(lambda x: x.endswith(".cfg"), os.listdir(".")) - files.sort() - if revert: - for name in files: - if os.path.exists(name + ".bak"): - os.rename(name + ".bak", name) - elif clean: - for name in files: - if os.path.exists(name + ".bak"): - os.remove(name + ".bak") - else: - jfp = open("journey.cfg", "w") - jfp.write("# trackplacer: tracks begin\n\n") - old_waypoint_re = re.compile("{OLD_[A-Z]+ +([0-9]+) +([0-9]+)}") - new_waypoint_re = re.compile("{NEW_[A-Z]+ +([0-9]+) +([0-9]+)}") - scenario_id_re = re.compile('id=[0-9]*_?"?([^"\n]*)') - background_re = re.compile('background=.*') - background = None - mapfile = {} - out = {} - n = 0 - id_list = [] - indent = 8 - - for name in files: - out[name] = [] - lineno = 0 - scenario_id = None - inside = False - for line in open(name): - lineno += 1 - if 
re.search(old_waypoint_re, line): - continue - elif re.search(new_waypoint_re, line): - indent = 0 - while line[indent] == ' ': - indent += 1 - if not inside: - inside = True - n += 1 - jfp.write("#define JOURNEY_STAGE%d\n" % n) - jfp.write(" # from %s, line %d\n" % (name, lineno)) - jfp.write(" " + line.lstrip()) - elif re.search(background_re, line): - mapfile["JOURNEY_STAGE%d" % (n+1,)] = (line, name, lineno) - out[name].append(line) - else: - if inside: - inside = False - jfp.write("#enddef\n\n") - out[name].append((" " * indent) + "{TO_%s}\n" % scenario_id) - id_list.append((scenario_id, name)) - out[name].append(line) - if not scenario_id: - m = scenario_id_re.search(line) - if m: - scenario_id = m.group(1).upper() - # Now edit out background lines in relevant [parts]s - for (line, name, lineno) in mapfile.values(): - i = lineno - while True: - if '[part]' in out[name][i]: - break - if 'background' in out[name][i]: - out[name] = out[name][:i] + out[name][i+1:] - break - i -= 1 - # Done processing individual files, now write the journey postamble - jfp.write("# trackplacer: tracks end\n") - jfp.write("# wmllint: no translatables\n\n") - n = 0 - for (scenario_id, name) in id_list: - n += 1 - jfp.write("#define TO_%s\n" % scenario_id) - jfp.write(" # from %s\n" % name) - segment = "JOURNEY_STAGE%d" % n - if segment in mapfile: - (line, name, lineno) = mapfile[segment] - jfp.write(" " + line.lstrip()) - jfp.write(" {%s}\n" % segment) - jfp.write("#enddef\n\n") - jfp.close() - for name in files: - os.rename(name, name + ".bak") - open(name, "w").writelines(out[name]) -- 2.29.2 From 1f29f9fe60f32d3c216adab5da3f16274b8b55c7 Mon Sep 17 00:00:00 2001 From: Steve Cotton Date: Thu, 17 Oct 2019 22:25:38 +0200 Subject: [PATCH 07/31] Remove journeylifter from data/tools/README.md Of the removed tools, it's the only one listed in this readme. The removal was b8965178348fab03718fb4b7fb798e8427d7cbb9. 
(cherry picked from commit 4c48defdd0ddfede7e4a224de51c5b5d8c46f204) --- data/tools/README.md | 5 ----- 1 file changed, 5 deletions(-) diff --git a/data/tools/README.md b/data/tools/README.md index 6a1e833438b..e29f653e3e5 100644 --- a/data/tools/README.md +++ b/data/tools/README.md @@ -4,11 +4,6 @@ also belong here. Other utils are in utils/. == Scripts == -=== journeylifter === - -A program for converting campaigns to use trackplacer-format journey files. -All mainline campaigns have already been converted; this is for lifting UMC. - === rmtrans === Remove nearly transparent pixels from images using GIMP. It currently affects -- 2.29.2 From 66e6b3e7ae6db2cb0b9d54f5072cc1b2c5c0205a Mon Sep 17 00:00:00 2001 From: Elvish_Hunter Date: Fri, 20 Sep 2019 22:01:31 +0200 Subject: [PATCH 08/31] Ported wmlflip to Python 3 (cherry picked from commit 1fe32ff69d28f3bbefde68f6b4eae848576d35c0) --- data/tools/wmlflip | 77 +++++++++++++++++++++++----------------------- 1 file changed, 38 insertions(+), 39 deletions(-) diff --git a/data/tools/wmlflip b/data/tools/wmlflip index 4b673f0a0db..6f6e5024388 100755 --- a/data/tools/wmlflip +++ b/data/tools/wmlflip @@ -1,4 +1,4 @@ -#!/usr/bin/env python2 +#!/usr/bin/env python3 """ wmlflip -- coordinate transformation for .cfg macro calls. @@ -34,8 +34,8 @@ options to pacify the argument parser. More transformations would be easy to write. """ -import sys, os, time, getopt, cStringIO, re -from wesnoth.wmltools import * +import sys, os, time, getopt, io, re +from wesnoth.wmltools3 import * class ParseArgs: "Mine macro argument locations out of a .cfg file." @@ -56,20 +56,20 @@ class ParseArgs: return self.fp.read(1) def ungetchar(self, c): if verbose: - print "pushing back", c + print("pushing back", c) self.pushback = c def parse_until(self, enders): "Parse until we reach specified terminator." 
if self.verbose: self.lead += "*" - print self.lead + " parse_until(%s) starts" % enders + print(self.lead + " parse_until(%s) starts" % enders) while True: c = self.getchar() if self.verbose: - print self.lead + "I see", c + print(self.lead + "I see", c) if c in enders: if self.verbose: - print self.lead + "parse_until(%s) ends" % enders + print(self.lead + "parse_until(%s) ends" % enders) self.lead = self.lead[:-1] return c elif c == '{': @@ -78,7 +78,7 @@ class ParseArgs: "We see a start of call." if self.verbose: self.lead += "*" - print self.lead + "parse_call()" + print(self.lead + "parse_call()") self.namestack.append(["", []]) # Fill in the name of the called macro while True: @@ -88,12 +88,12 @@ class ParseArgs: else: break if self.verbose: - print self.lead + "name", self.namestack[-1] + print(self.lead + "name", self.namestack[-1]) # Discard if no arguments if c == '}': self.namestack.pop() if self.verbose: - print self.lead + "parse_call() ends" + print(self.lead + "parse_call() ends") self.lead = self.lead[:-1] return # If non-space, this is something like a filename include; @@ -103,7 +103,7 @@ class ParseArgs: c = self.getchar() if c == '}': if self.verbose: - print self.lead + "parse_call() ends" + print(self.lead + "parse_call() ends") self.lead = self.lead[:-1] return # It's a macro call with arguments; @@ -115,21 +115,21 @@ class ParseArgs: # Record the scope we just parsed self.parsed.append(self.namestack.pop()) if self.verbose: - print self.lead + "parse_call() ends" + print(self.lead + "parse_call() ends") self.lead = self.lead[:-1] def parse_actual(self): "Parse an actual argument." 
# Skip leading whitespace if self.verbose: self.lead += "*" - print self.lead + "parse_actual() begins" + print(self.lead + "parse_actual() begins") while True: c = self.getchar() if not c.isspace(): break if c == '}': if self.verbose: - print "** parse_actual() returns False" + print("** parse_actual() returns False") self.lead = self.lead[:-1] return False # Looks like we have a real argument @@ -146,7 +146,7 @@ class ParseArgs: argend = self.fp.tell() elif c == '"': if verbose: - print self.lead + "starting string argument" + print(self.lead + "starting string argument") self.parse_until(['"']) argend = self.fp.tell() else: @@ -155,7 +155,7 @@ class ParseArgs: self.ungetchar(ender) self.namestack[-1][1].append((argstart, argend)) if self.verbose: - print self.lead + "parse_actual() returns True" + print(self.lead + "parse_actual() returns True") self.lead = self.lead[:-1] return True @@ -186,15 +186,14 @@ def relevant_macros(): def transformables(filename, relevant, verbose): "Return a list of transformable (X,Y) regions in the specified file." # Grab the content - fp = open(filename, "r") - content = fp.read() - fp.close() + with open(filename, "r") as fp: + content = fp.read() # Get argument offsets from it. - calls = ParseArgs(cStringIO.StringIO(content), verbose) + calls = ParseArgs(io.StringIO(content), verbose) # Filter out irrelevant calls. - parsed = filter(lambda x: x[0] in relevant, calls.parsed) + parsed = [x for x in calls.parsed if x[0] in relevant] # Extract coordinate pair locations from macro arguments. pairs = [] @@ -215,7 +214,7 @@ def transformables(filename, relevant, verbose): # Sort by start of the x coordinate, then reverse the list, # so later changes won't screw up earlier ones. # Presumes that coordinate pairs are never interleaved. - pairs.sort(lambda p, q: cmp(p[0][0], q[0][0])) + pairs.sort(key=lambda element: element[0][0]) pairs.reverse() # Return the file content as a string and the transformable extents in it. 
@@ -224,12 +223,13 @@ def transformables(filename, relevant, verbose): def mapsize(filename): "Return the size of a specified mapfile." x = y = 0 - for line in open(filename): - if "," in line: - y += 1 - nx = line.count(",") + 1 - assert(x == 0 or x == nx) - x = nx + with open(filename) as f: + for line in f: + if "," in line: + y += 1 + nx = line.count(",") + 1 + assert(x == 0 or x == nx) + x = nx return (x, y) if __name__ == '__main__': @@ -237,7 +237,7 @@ if __name__ == '__main__': verbose = 0 mapfile = None translate = False - (options, arguments) = getopt.getopt(sys.argv[1:], "m:txyv") + (options, arguments) = getopt.getopt(sys.argv[1:], "m:txyvh") for (switch, val) in options: if switch in ('-h', '--help'): @@ -250,23 +250,23 @@ if __name__ == '__main__': elif switch in ('-x'): flip_x = True elif switch in ('-y'): - print >>sys.stderr, "Vertical flip is not yet supported." + print("Vertical flip is not yet supported.", file=sys.stderr) sys.exit(0) elif switch == '-v': verbose += 1 if verbose: - print "Debugging output enabled." + print("Debugging output enabled.") if mapfile: (mx, my) = mapsize(mapfile) - print >>sys.stderr, "%s is %d wide by %d high" % (mapfile, mx, my) + print("%s is %d wide by %d high" % (mapfile, mx, my), file=sys.stderr) if arguments and not flip_x and not translate: - print >>sys.stderr, "No coordinate transform is specified." + print("No coordinate transform is specified.", file=sys.stderr) sys.exit(0) if flip_x and not mapfile: - print >>sys.stderr, "X flip transformation needs to know the map size.." + print("X flip transformation needs to know the map size..", file=sys.stderr) sys.exit(0) if translate: @@ -279,7 +279,7 @@ if __name__ == '__main__': # For each file named on the command line... 
for filename in arguments: if verbose: - print >>sys.stderr, "Processing file", filename + print("Processing file", filename, file=sys.stderr) (content, pairs) = transformables(filename, relevant, verbose > 1) @@ -310,13 +310,12 @@ if __name__ == '__main__': # This is generic again target.append((xn, yn)) if verbose: - print "(%d, %d) -> (%d, %d)" % (x, y, xn, yn) + print("(%d, %d) -> (%d, %d)" % (x, y, xn, yn)) # Perform the actual transformation for (((xs, xe), (ys, ye)), (xn, yn)) in zip(pairs, target): content = content[:ys] + repr(yn) + content[ye:] content = content[:xs] + repr(xn) + content[xe:] - fp = open(filename, "w") - fp.write(content) - fp.close() + with open(filename, "w") as fp: + fp.write(content) -- 2.29.2 From e493f628be6f334b10c3b6ccd64840ee6bed113c Mon Sep 17 00:00:00 2001 From: Elvish_Hunter Date: Mon, 23 Sep 2019 10:50:55 +0200 Subject: [PATCH 09/31] Updated the shebang of two Python 3 scripts (cherry picked from commit b4d855caadd230edc24048591c534028dbd30b51) --- utils/codelist | 2 +- utils/po_stat.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/utils/codelist b/utils/codelist index 4941ad66543..4dc6ad5662a 100755 --- a/utils/codelist +++ b/utils/codelist @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 # codelist # given list of integers, one per line, outputs a minimal list of ranges # describing the list diff --git a/utils/po_stat.py b/utils/po_stat.py index 599ca4f7033..da1df187f13 100755 --- a/utils/po_stat.py +++ b/utils/po_stat.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 # encoding: utf-8 import sys -- 2.29.2 From 64c1043117e05b1c7e01a4ab5d937405cd9f83d9 Mon Sep 17 00:00:00 2001 From: Elvish_Hunter Date: Mon, 23 Sep 2019 10:52:55 +0200 Subject: [PATCH 10/31] Removed utils/wescamp_import script (cherry picked from commit dbe9d60c1f174d911fb70a1f9f8af46133affc9b) --- utils/wescamp_import | 120 ------------------------------------------- 1 file changed, 120 deletions(-) delete 
mode 100755 utils/wescamp_import diff --git a/utils/wescamp_import b/utils/wescamp_import deleted file mode 100755 index 1cad20e1c9b..00000000000 --- a/utils/wescamp_import +++ /dev/null @@ -1,120 +0,0 @@ -#!/usr/bin/env python2 -# -# wescamp_import -- generate a shellscript to import a campaign from WesCamp -# -# Pipe the output of this command to sh to actually perform the operation -# -import sys, os, getopt, shutil - -if __name__ == "__main__": - def help(): - sys.stderr.write("""\ -Usage: wescamp_import {-h | wescamp-path campaign-name textdomain} - Options may be any of these: - -h, --help Emit this help message and quit - Requires as first argument a path to a WesCamp checkout - Requires as second argument a campaign name. - Call from the top level directory of mainline. -""") - - - # Process options - (options, arguments) = getopt.getopt(sys.argv[1:], "h", ['help',]) - for (switch, val) in options: - if switch in ('-h', '--help'): - help() - sys.exit(0) - if len(arguments) == 0: - sys.stderr.write("wescamp_import: a path to a local WesCamp checkout is required.\n") - sys.exit(1) - else: - wescamp_path = arguments[0] - if len(arguments) == 1: - sys.stderr.write("wescamp_import: a campaign name is required.\n") - sys.exit(1) - else: - campaign = arguments[1] - if len(arguments) == 2: - sys.stderr.write("wescamp_import: a text domain name is required.\n") - sys.exit(1) - else: - textdomain = arguments[2] - - print """\ -# Generated script to do an import of %(textdomain)s from a WesCamp checkout -# -mkdir po/%(textdomain)s -cp %(wescamp_path)s/%(campaign)s/po/*.po* po/%(textdomain)s - -cp po/wesnoth/LINGUAS po/%(textdomain)s/ - -cat > po/%(textdomain)s/FINDCFG <<'EOF' -find data/campaigns/%(campaign)s -name '*.cfg' -print -find data/campaigns/%(campaign)s -name '*.lua' -print -EOF - -cat > po/%(textdomain)s/Makevars <<'EOF' -# Makefile variables for PO directory in any package using GNU gettext. - -# Usually the message domain is the same as the package name. 
-DOMAIN = %(textdomain)s - -# These two variables depend on the location of this directory. -subdir = po/$(DOMAIN) -top_builddir = ../.. - -# These options get passed to xgettext. -XGETTEXT_OPTIONS = --from-code=UTF-8 --sort-by-file --keyword=sgettext --keyword=vgettext --keyword=_n:1,2 --keyword=sngettext:1,2 --keyword=vngettext:1,2 - -# This is the copyright holder that gets inserted into the header of the -# $(DOMAIN).pot file. Set this to the copyright holder of the surrounding -# package. (Note that the msgstr strings, extracted from the package's -# sources, belong to the copyright holder of the package.) Translators are -# expected to transfer the copyright for their translations to this person -# or entity, or to disclaim their copyright. The empty string stands for -# the public domain; in this case the translators are expected to disclaim -# their copyright. -COPYRIGHT_HOLDER = Wesnoth development team - -# This is the email address or URL to which the translators shall report -# bugs in the untranslated strings: -# - Strings which are not entire sentences, see the maintainer guidelines -# in the GNU gettext documentation, section 'Preparing Strings'. -# - Strings which use unclear terms or require additional context to be -# understood. -# - Strings which make invalid assumptions about notation of date, time or -# money. -# - Pluralisation problems. -# - Incorrect English spelling. -# - Incorrect formatting. -# It can be your email address, or a mailing list address where translators -# can write to without being subscribed, or the URL of a web page through -# which the translators can contact you. -MSGID_BUGS_ADDRESS =https://bugs.wesnoth.org/ - -# This is the list of locale categories, beyond LC_MESSAGES, for which the -# message catalogs shall be used. It is usually empty. 
-EXTRA_LOCALE_CATEGORIES = -EOF - -touch po/%(textdomain)s/POTFILES.in - -ed po/Makefile.am <<'EOF' -/^SUBDIRS/s/$/ %(textdomain)s/ -wq -EOF - -# Warning: this depends on m4/Makefile being in the list of generated files -ed configure.ac <<'EOF' -/m4\/Makefile/-1a -po/%(textdomain)s/Makefile.in -. -wq -EOF - -git add po/%(textdomain)s - -# To be able to review the commit before actually doing it -# recommended commit-msg: Wescamp import -git commit configure.ac po/Makefile.am po/%(textdomain)s -""" % locals() -- 2.29.2 From e8bcdaef155fb8d7245b438a9cf84409f809a0db Mon Sep 17 00:00:00 2001 From: Elvish_Hunter Date: Sat, 28 Sep 2019 10:59:45 +0200 Subject: [PATCH 11/31] Removed broken wmlvalidator Python script (cherry picked from commit 5654b4c929a30d8b0ee6609f2ca1085f4a3b1d1a) --- data/tools/wesnoth/wmlgrammar.py | 156 ----------------- data/tools/wmlvalidator | 282 ------------------------------- 2 files changed, 438 deletions(-) delete mode 100644 data/tools/wesnoth/wmlgrammar.py delete mode 100755 data/tools/wmlvalidator diff --git a/data/tools/wesnoth/wmlgrammar.py b/data/tools/wesnoth/wmlgrammar.py deleted file mode 100644 index 838e8b0aa2a..00000000000 --- a/data/tools/wesnoth/wmlgrammar.py +++ /dev/null @@ -1,156 +0,0 @@ -#!/usr/bin/env python2 - -""" -wmlgrammar -- parses a given schema into a more usable form -""" -from __future__ import print_function -import collections -import re - -REQUIRED = 1 -OPTIONAL = 2 -REPEATED = 3 -FORBIDDEN = 4 - - -class Grammar(object): - def __init__(self, schema): - schema = schema.get_first("schema") - self.datatypes = {} - self.elements = {} - self.categories = collections.defaultdict(list) - for type in schema.get_all_text(): - match = parse_match(type.data) - self.datatypes.update({type.name: match}) - for element in schema.get_all_subs(): - node = Node(element, self.datatypes) - self.elements.update({node.name: node}) - for element in [el for el in self.elements.values() if el.parent]: - 
element.inherit(self.elements[element.parent]) - # categories - for element in [el for el in self.elements.values() if el.category]: - self.categories[element.category].append(element) - - def get_element(self, name): - return self.elements[name] - - def get_datatype(self, name): - return self.datatypes[name] - - def get_category(self, name): - return self.categories.get(name, []) - - -class Node(object): - def __init__(self, schema, datatypes): - self.name = schema.name - self.elements = set([]) - self.ext_elements = [] # Ugh, do we really want to do this? - self.attributes = set() - self.parent = None - self.description = None - self.category = None - for item in schema.get_all_text(): - if item.name[0] == '_': - self.elements.add(Element(item)) - else: - self.attributes.add(Attribute(item, datatypes)) - for item in schema.get_all_subs(): - if item.name == "element": - print("[element] found in schema, not parsing yet") - # self.ext_elements... - elif item.name == "description": - self.description = item.get_text("text") - self.category = item.get_text("category") - else: - raise Exception("Unknown element [%s] encountered in grammar for [%s]" % (item.name, self.name)) - if ':' in self.name: - self.name, self.parent = self.name.split(':', 1) - - def inherit(self, other): - assert self.parent == other.name - self.elements.update(other.elements) - self.attributes.update(other.attributes) - self.parent = None - - def get_attributes(self): - return self.attributes - - def get_elements(self): - return self.elements - - -class Element(object): - def __init__(self, schema): - first, second = schema.data.split(" ", 1) - self.name = schema.name[1:] - self.freq = parse_frequency(first) - self.subname = second - - def match(self, name): - return self.name == name - - def __hash__(self): - return hash(self.name) - - def __cmp__(self, other): - return (isinstance(other, type(self)) or isinstance(self, type(other))) and cmp(self.name, other.name) - - -class 
ExtElement(Element): - def __init__(self, schema): - self.re = parse_match(schema.get_text("match").data) - self.freq = parse_frequency(schema.get_text("freq").data) - self.subname = schema.get_text("name").data - - def match(self, name): - return bool(self.re.match(name)) - - -class Attribute(object): - def __init__(self, schema, datatypes): - parts = schema.data.split(" ") - if parts[1] not in datatypes: - raise Exception("Unknown datatype '%s'" % parts[1]) - self.name = schema.name - self.freq = parse_frequency(parts[0]) - self.type = parts[1] - self.optionals = parts[2:] - self.re = datatypes[parts[1]] - - def match(self, name): - return self.name == name - - def validate(self, value): - return bool(self.re.match(value)) - - def __hash__(self): - return hash(self.name) - - def __cmp__(self, other): - return (isinstance(other, type(self)) or isinstance(self, type(other))) and cmp(self.name, other.name) - - -def parse_frequency(string): - if string == "required": - return REQUIRED - elif string == "optional": - return OPTIONAL - elif string == "repeated": - return REPEATED - elif string == "forbidden": - return FORBIDDEN - else: - raise Exception("Unknown frequency '%s'" % string) - - -def parse_match(string): - (matchtype, matchtext) = string.split(" ", 1) - if matchtype == "re": - match = re.compile(matchtext) - elif matchtype == "enum": - match = re.compile("^(" + matchtext.replace(',', '|') + ")$") - else: - raise Exception("Unknown datatype encountered in %s=\"%s\": '%s'" % (type.name, type.data, matchtype)) - return match -# vim: tabstop=4: shiftwidth=4: expandtab: softtabstop=4: autoindent: diff --git a/data/tools/wmlvalidator b/data/tools/wmlvalidator deleted file mode 100755 index 06591cc50b4..00000000000 --- a/data/tools/wmlvalidator +++ /dev/null @@ -1,282 +0,0 @@ -#!/usr/bin/env python2 -""" -wmltest -- tool to validate the syntax and semantics of WML. - -Use --help to see usage. 
-""" -#TODO: -#-define verbosity levels better - -from __future__ import print_function -import wesnoth.wmldata as wmldata -import wesnoth.wmlparser as wmlparser -import wesnoth.wmlgrammar as wmlgrammar -import re - -def print_indent(string, depth, char=' '): - print("%s%s" % (depth * char, string)) - -class Validator: - """ - The class that takes a wmlgrammar object to validate wml trees with - """ - def __init__(self, schema, verbosity=0): - self.schema = wmlgrammar.Grammar(schema) - self.verbosity = verbosity - self.validate_result = {} - - def validate_result_add(self, from_file, line, origin, message): - if not from_file in self.validate_result: - self.validate_result[from_file] = [] - - self.validate_result[from_file].append({'line': line, 'origin': origin, 'message': message}) - - def validate_result_print(self): - normal = '\033[0m' - bold = '\033[1m' - underline = '\033[4m' - for k, v in self.validate_result.iteritems(): - print("%s%s%s" % (bold, k, normal)) - for i in v: - print("%s#%d: %s%s %s" % (underline, i['line'], i['origin'], normal, i['message'])) - - def validate(self, node, depth=0, name=None): - """ - Validate the given DataSub node. - depth indicates how deep we've recursed into the tree, - name is a mechanism for overwriting the node name to look up in the schema, used for overloaded names. 
- """ - if not name or name == node.name: - name = node.name - verbosename = name - else: - verbosename = "%s (%s)" % (node.name, name) - - if self.verbosity > 1: - print_indent(node.name, depth) - - try: - schema = self.schema.get_element(name) - except KeyError: - print("No valid schema found for %s" % verbosename) - return - - # Validate the attributes - for attribute in schema.get_attributes(): - matches = node.get_texts(attribute.name) - - # Check frequency - nummatches = len(matches) - if attribute.freq == wmlgrammar.REQUIRED and nummatches != 1: - self.validate_result_add(node.file, node.line, "Attribute [%s] %s" % (verbosename, attribute.name), "Should appear exactly once, not %d times" % nummatches) - elif attribute.freq == wmlgrammar.OPTIONAL and nummatches > 1: - self.validate_result_add(node.file, node.line, "Attribute [%s] %s" % (verbosename, attribute.name), "Should appear at most once, not %d times" % nummatches) - elif attribute.freq == wmlgrammar.FORBIDDEN and nummatches > 0: - self.validate_result_add(node.file, node.line, "Attribute [%s] %s" % (verbosename, attribute.name), "Should not appear. 
It appears %d times" % nummatches) - - # Check use - for match in matches: - if 'translatable' in attribute.optionals and match.is_translatable() == False: - self.validate_result_add(node.file, node.line, "Attribute [%s] %s" % (verbosename, attribute.name), "Value is translatable, but haven't _ at the beginning") - elif 'translatable' not in attribute.optionals and 'optional-translatable' not in attribute.optionals and match.is_translatable() == True: - self.validate_result_add(node.file, node.line, "Attribute [%s] %s" % (verbosename, attribute.name), "Value isn't translatable, but have a _ at the beginning") - - def check_attribute_value(value, pos=None): - def gerate_message_with_pos(): - if pos is None: - return "" - else: - return " (At position %d)" % pos - - if not attribute.validate(value): - self.validate_result_add(node.file, node.line, "Attribute [%s] %s%s" % (verbosename, attribute.name, gerate_message_with_pos()), "Value should be %s, found: %s" % (attribute.type, value)) - - regex_limit = re.compile(ur'^limit\((\d+.\d+|\d+),(\d+.\d+|\d+)\)$') - check_limit = [i for i in attribute.optionals if regex_limit.search(i)] - if len(check_limit): - check_limit = check_limit[0] - number_min, number_max = regex_limit.search(check_limit).groups() - - if float(value) > float(number_max) or float(value) < float(number_min): - self.validate_result_add(node.file, node.line, "Attribute [%s] %s%s" % (verbosename, attribute.name, gerate_message_with_pos()), "Value must be between %s and %s, found : %s" % (number_min, number_max, value)) - - regex_limit_lower = re.compile(ur'^limit-lower\((\d+.\d+|\d+)\)$') - check_limit_lower = [i for i in attribute.optionals if regex_limit_lower.search(i)] - if len(check_limit_lower): - check_limit_lower = check_limit_lower[0] - number = regex_limit_lower.search(check_limit_lower).group(1) - - if float(value) < float(number): - self.validate_result_add(node.file, node.line, "Attribute [%s] %s%s" % (verbosename, attribute.name, 
gerate_message_with_pos()), "Value needs to be at least %s, found : %s" % (number, value)) - - regex_limit_max = re.compile(ur'^limit-max\((\d+.\d+|\d+)\)$') - check_limit_max = [i for i in attribute.optionals if regex_limit_max.search(i)] - if len(check_limit_max): - check_limit_max = check_limit_max[0] - number = regex_limit_max.search(check_limit_max).group(1) - - if float(value) > float(number): - self.validate_result_add(node.file, node.line, "Attribute [%s] %s%s" % (verbosename, attribute.name, gerate_message_with_pos()), "Value needs to be at max %s, found : %s" % (number, value)) - - regex_file_exist = re.compile(ur'^need-file-in\(([\w.\-\/]+)\)$') - check_file_exist = [i for i in attribute.optionals if regex_file_exist.search(i)] - if len(check_file_exist): - check_file_exist = check_file_exist[0] - directory = regex_file_exist.search(check_file_exist).group(1) - - value_directory, value_file = re.search(re.compile(ur'(?:(.*)?\/)?(.+)'), value).groups() - - import glob - if directory == '.': - sub_directory = os.path.dirname(node.file) + '/' - else: - sub_directory = os.path.dirname(node.file) + '/' + directory + '/' - - if not value_directory is None: - sub_directory += value_directory + '/' - - files_from_sub_directory = glob.glob(sub_directory + '*') - - # We just want the names of the files from directory, but... - if os.path.splitext(value_file)[1] == '': - # ... if without extension in the value_file, then it is implied. 
In this case, we do want extensions - files_from_sub_directory = [re.sub(re.compile(r'^.*\/(.*)\..*'), r'\1', i) for i in files_from_sub_directory] - else: - files_from_sub_directory = [re.sub(re.compile(r'^.*\/(.*)'), r'\1', i) for i in files_from_sub_directory] - - if not value_file in files_from_sub_directory: - self.validate_result_add(node.file, node.line, "Attribute [%s] %s%s" % (verbosename, attribute.name, gerate_message_with_pos()), "The file %s not exist in directory %s" % (value_file, sub_directory)) - - if 'list' in attribute.optionals: - pos = 1 - for i in match.data.split(","): - if i[0] == ' ': i = i[1:] - check_attribute_value(i, pos=pos) - pos += 1 - else: - check_attribute_value(match.data) - node.remove(match) # Get rid of these so we can see what's left - for attribute in node.get_all_text(): - self.validate_result_add(node.file, node.line, "Attribute [%s] %s" % (verbosename, attribute.name), "Found, which has no meaning there") - - # Validate the elements - for element in schema.get_elements(): - matches = node.get_subs(element.name) - - # Check frequency - nummatches = len(matches) - if element.freq == wmlgrammar.REQUIRED and nummatches != 1: - self.validate_result_add(node.file, node.line, "Element [%s] [%s]" % (verbosename, element.name), "Should appear exactly once, not %d times" % nummatches) - elif element.freq == wmlgrammar.OPTIONAL and nummatches > 1: - self.validate_result_add(node.file, node.line, "Element [%s] [%s]" % (verbosename, element.name), "Should appear at most once, not %d times" % nummatches) - elif element.freq == wmlgrammar.FORBIDDEN and nummatches > 0: - self.validate_result_add(node.file, node.line, "Element [%s] [%s]" % (verbosename, element.name), "Should not appear. 
It appears %d times" % nummatches) - - # Check sub - for match in matches: - self.validate(match, depth+1, element.subname) - node.remove(match) - for element in node.get_all_subs(): - self.validate_result_add(node.file, node.line, "Element [%s] [%s]" % (verbosename, element.name), "Found, which has no meaning there") - -if __name__ == '__main__': - import argparse, subprocess, os, codecs, sys - - # Ugly hack to force the output of UTF-8. - # This prevents us from crashing when we're being verbose - # and encounter a non-ascii character. - sys.stdout = codecs.getwriter('utf-8')(sys.stdout) - - ap = argparse.ArgumentParser("Usage: %(prog)s [options]") - ap.add_argument("-p", "--path", - help = "Specify Wesnoth's data dir", - dest = "path") - ap.add_argument("-u", "--userpath", - help = "Specify user data dir", - dest = "userpath") - ap.add_argument("-s", "--schema", - help = "Specify WML schema", - dest = "schema") - ap.add_argument("-v", "--verbose", - action = "count", - dest = "verbose", - help = "Increase verbosity, 4 is the maximum.") - ap.add_argument("-D", "--define", - action = "append", - dest = "defines", - default = [], - help = "Define (empty) preprocessor macros, for campaign/multiplayer inclusion.") - ap.add_argument("filename", - nargs = "*", - help = "Files to validate or directory. If it is a directory, get all the cfg files from directory") - args = ap.parse_args() - if args.path: - args.path = os.path.expanduser(args.path) - if args.userpath: - args.userpath = os.path.expanduser(args.userpath) - if args.filename: - args.filename = [os.path.expanduser(i) for i in args.filename] - - if not args.path: - try: - p = subprocess.Popen(["wesnoth", "--path"], stdout = subprocess.PIPE) - path = p.stdout.read().strip() - args.path = os.path.join(path, "data") - sys.stderr.write("No Wesnoth path given.\nAutomatically found '%s'\n" % (args.path, ) ) - except OSError: - args.path = '.' 
- sys.stderr.write("Could not determine Wesnoth path.\nAssuming '%s'\n" % (args.path, ) ) - - list_files_analyze = [] - if len(args.filename) < 1: - list_files_analyze.append(os.path.join(args.path, '_main.cfg')) - - for i in args.filename: - if os.path.isdir(i): - if i[-1] != '/': - i += '/' - cfg_from_dir = [i + cfg for cfg in os.listdir(i) if cfg[-3:] == 'cfg'] - list_files_analyze += cfg_from_dir - else: - list_files_analyze.append(i) - - if args.verbose > 1: - print("Args: %s\n"% (args, )) - - if not args.schema: - args.schema = os.path.join(args.path, 'schema.cfg') - - # Parse the schema - parser = wmlparser.Parser(args.path) - - if args.verbose > 3: - parser.verbose = True - parser.parse_file(args.schema) - - schema = wmldata.DataSub("schema") - parser.parse_top(schema) - - # Construct the validator - validator = Validator(schema, args.verbose) - - # Parse the WML - parser = wmlparser.Parser(args.path, args.userpath) - - if args.verbose > 3: - parser.verbose = True - - if args.userpath: - parser.parse_text("{~add-ons}") - for file in list_files_analyze: - parser.parse_file(file) - for macro in args.defines: - parser.parse_text("#define %s \n#enddef" % (macro, ) ) - - data = wmldata.DataSub("root") - parser.parse_top(data) - - # Validate - validator.validate(data) - validator.validate_result_print() - -# vim: tabstop=4: shiftwidth=4: expandtab: softtabstop=4: autoindent: -- 2.29.2 From 1b7bf83a0a61bbf80390c2c64748d80672defc7b Mon Sep 17 00:00:00 2001 From: Elvish_Hunter Date: Sun, 29 Sep 2019 11:06:11 +0200 Subject: [PATCH 12/31] Ported umc_dev/build/update_version to Python 3 (cherry picked from commit af64a10d50640937a6379904f8be33327225cae0) --- utils/umc_dev/build/update_version | 28 +++++++++++++++------------- 1 file changed, 15 insertions(+), 13 deletions(-) mode change 100644 => 100755 utils/umc_dev/build/update_version diff --git a/utils/umc_dev/build/update_version b/utils/umc_dev/build/update_version old mode 100644 new mode 100755 index 
ff3fa86dd75..22669589b08 --- a/utils/umc_dev/build/update_version +++ b/utils/umc_dev/build/update_version @@ -1,25 +1,27 @@ -#!/usr/bin/env python2 +#!/usr/bin/env python3 import os, sys, shutil base_path = os.path.dirname(os.path.realpath(__file__)) -usage = "usage: update_version current_version new_version \ne.g.: update_version 2.0.0 2.0.1" -files = ["org.wesnoth.dependencies.feature/feature.xml", "org.wesnoth.feature/category.xml", "org.wesnoth.feature/feature.xml", -"org.wesnoth.ui/META-INF/MANIFEST.MF","org.wesnoth/META-INF/MANIFEST.MF", "org.wesnoth/org.wesnoth.product"] +usage = """usage: update_version current_version new_version +e.g.: update_version 2.0.0 2.0.1""" +files = ["org.wesnoth.dependencies.feature/feature.xml", + "org.wesnoth.feature/category.xml", + "org.wesnoth.feature/feature.xml", + "org.wesnoth.ui/META-INF/MANIFEST.MF", + "org.wesnoth/META-INF/MANIFEST.MF", + "org.wesnoth/org.wesnoth.product"] if len(sys.argv) < 3: - print usage + print(usage) else: - print "Replacing version ", sys.argv[1], " with ", sys.argv[2], "..." 
+ print("Replacing version ", sys.argv[1], " with ", sys.argv[2], "...") stext = sys.argv[1] + ".qualifier" rtext = sys.argv[2] + ".qualifier" for file in files: sourcePath = os.path.join(os.path.join(base_path, ".."), file) targetPath = os.path.join(os.path.join(base_path, ".."), file + ".tmp") - print "Processing: ", sourcePath - input = open(sourcePath) - output = open(targetPath, "wb") - for s in input.xreadlines(): - output.write(s.replace(stext, rtext)) - input.close() - output.close() + print("Processing: ", sourcePath) + with open(sourcePath) as infile, open(targetPath, "w") as outfile: + for line in infile: + outfile.write(line.replace(stext, rtext)) shutil.move (targetPath, sourcePath) -- 2.29.2 From 0bae0a525e05a495eb310451c772fa2b55a52d4f Mon Sep 17 00:00:00 2001 From: Elvish_Hunter Date: Sat, 28 Sep 2019 11:50:30 +0200 Subject: [PATCH 13/31] Ported wmlparser to Python 3 (cherry picked from commit a830561dd03bc7dd40e67968c31c2699dbb3bc27) --- data/tools/wesnoth/wmldata.py | 42 ++++++++++------------------ data/tools/wesnoth/wmlparser.py | 49 +++++++++++++++++---------------- 2 files changed, 39 insertions(+), 52 deletions(-) diff --git a/data/tools/wesnoth/wmldata.py b/data/tools/wesnoth/wmldata.py index b42f26cbdf6..88a91089009 100755 --- a/data/tools/wesnoth/wmldata.py +++ b/data/tools/wesnoth/wmldata.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python2 +#!/usr/bin/env python3 # encoding: utf-8 """ @@ -15,9 +15,7 @@ textdomain stuff in here is therefore only useful to CampGen, as that does not allow composed strings like above. """ -from __future__ import print_function import re, sys -import wmlparser import codecs class Data: @@ -45,13 +43,12 @@ class Data: if write: # The input usually is utf8, but it also may be not - for example # if a .png image is transmitted over WML. As this is only for - # display purposes, we ask Python to replace any garbage - and then - # re-encode as utf8 for console output. 
- text = result.decode("utf8", "replace") - text = text.encode("utf8", "replace") - sys.stdout.write(text) - sys.stdout.write("\n") - + # display purposes, we ask Python to replace any garbage. + if isinstance(result, bytes): + text = result.decode("utf8", "replace") + else: + text = result + print(text) else: return result def copy(self): @@ -273,14 +270,14 @@ class DataSub(Data): if ifdef: result.append("#endif\n") - bytes = "" + output = b"" for r in result: if r is not None: # For networking, we need actual bytestream here, not unicode. - if type(r) is unicode: r = r.encode("utf8") - bytes += str(r) + if type(r) is str: r = r.encode("utf8") + output += bytes(r) - return bytes + return output def is_empty(self): return len(self.data) == 0 @@ -369,7 +366,7 @@ class DataSub(Data): def compare(self, other): if len(self.data) != len(other.data): return False - for i in xrange(self.data): + for i in range(self.data): if not self.data[i].compare(other.data[i]): return False return True @@ -592,7 +589,7 @@ class DataSub(Data): value = str(value) # read existing values q = [] - for d in xrange(3): + for d in range(3): q += [self.get_quantity(name, d, value)] q[difficulty] = value @@ -605,7 +602,7 @@ class DataSub(Data): if q[0] == q[1] == q[2]: self.set_text_val(name, value) else: - for d in xrange(3): + for d in range(3): ifdef = self.get_or_create_ifdef(["EASY", "NORMAL", "HARD"][d]) ifdef.set_text_val(name, q[d]) @@ -642,14 +639,3 @@ class DataIfDef(DataSub): copy = DataSub.copy(self) copy.type = self.type return copy - -def read_file(filename, root_name = "WML"): - """ - Read in a file from disk and return a WML data object, with the WML in the - file placed under an entry with the name root_name. 
- """ - parser = wmlparser.Parser(None) - parser.parse_file(filename) - data = DataSub(root_name) - parser.parse_top(data) - return data diff --git a/data/tools/wesnoth/wmlparser.py b/data/tools/wesnoth/wmlparser.py index a6554e4f48e..597cd9d0ab2 100755 --- a/data/tools/wesnoth/wmlparser.py +++ b/data/tools/wesnoth/wmlparser.py @@ -1,6 +1,11 @@ -#!/usr/bin/env python2 +#!/usr/bin/env python3 # encoding: utf-8 -import wmldata, os, glob, sys + +try: + from . import wmldata +except ImportError: + import wmldata +import os, glob, sys import re """ @@ -19,7 +24,7 @@ class Error(Exception): def __init__(self, parser, text): self.text = "%s:%d: %s" % (parser.filename, parser.line, text) - for i in xrange(len(parser.texts)): + for i in range(len(parser.texts)): parent = parser.texts[-1 - i] self.text += "\n " + " " * i + "from %s:%d" % (parent.filename, parent.line) @@ -97,15 +102,14 @@ class Parser: line endings. """ try: - text = file(filename).read() + with open(filename, encoding="utf8") as f: + text = f.read() + except UnicodeDecodeError: + with open(filename, encoding="latin1") as f: + text = f.read() except IOError: sys.stderr.write("Cannot open file %s!\n" % filename) return "" - try: - u = text.decode("utf8") - except UnicodeDecodeError: - u = text.decode("latin1") - text = u text = text.replace("\r\n", "\n").replace("\t", " ").replace("\r", "\n") if text == "" or text[-1] != "\n": text += "\n" @@ -644,7 +648,7 @@ class Parser: values += [value] data = [] - for i in xrange(len(variables)): + for i in range(len(variables)): try: key = wmldata.DataText(variables[i], values[i], translatable) key.set_meta(filename, line) @@ -822,7 +826,7 @@ Convert a DataSub into JSON If verbose, insert a linebreak after every brace and comma (put every item on its own line), otherwise, condense everything into a single line. 
""" - print "{", + print("{", end=" ") first = True sdepth1 = "\n" + " " * depth sdepth2 = sdepth1 + " " @@ -833,11 +837,11 @@ If verbose, insert a linebreak after every brace and comma (put every item on it sys.stdout.write(",") if verbose: sys.stdout.write(sdepth2) - print'"%s":' % child.name, + print('"%s":' % child.name, end=" ") if child.get_type() == "DataSub": jsonify(child, verbose, depth + 1) else: - print json.dumps(child.get_value()), + print(json.dumps(child.get_value()), end=" ") if verbose: sys.stdout.write(sdepth1) sys.stdout.write("}") @@ -849,16 +853,16 @@ def xmlify(tree, verbose=False, depth=0): sdepth = " " * depth for child in tree.children(): if child.get_type() == "DataSub": - print '%s<%s>' % (sdepth, child.name) + print('%s<%s>' % (sdepth, child.name)) xmlify(child, verbose, depth + 1) - print '%s' % (sdepth, child.name) + print('%s' % (sdepth, child.name)) else: if "\n" in child.get_value() or "\r" in child.get_value(): - print sdepth + '<' + child.name + '>' + \ - '' + '' + print(sdepth + '<' + child.name + '>' + \ + '' + '') else: - print sdepth + '<' + child.name + '>' + \ - escape(child.get_value()) + '' + print(sdepth + '<' + child.name + '>' + \ + escape(child.get_value()) + '') if __name__ == "__main__": import argparse, subprocess @@ -866,9 +870,6 @@ if __name__ == "__main__": except ImportError: pass else: psyco.full() - # Hack to make us not crash when we encounter characters that aren't ASCII - sys.stdout = __import__("codecs").getwriter('utf-8')(sys.stdout) - argumentparser = argparse.ArgumentParser("usage: %(prog)s [options]") argumentparser.add_argument("-p", "--path", help = "specify wesnoth data path") argumentparser.add_argument("-C", "--color", action = "store_true", @@ -893,7 +894,7 @@ if __name__ == "__main__": path = args.path else: try: - p = subprocess.Popen(["wesnoth", "--path"], stdout = subprocess.PIPE) + p = subprocess.Popen(["wesnoth", "--data-path"], stdout = subprocess.PIPE, encoding="utf8") path = 
p.stdout.read().strip() path = os.path.join(path, "data") except OSError: @@ -907,7 +908,7 @@ if __name__ == "__main__": if args.verbose: wmlparser.verbose = True def gt(domain, x): - print "gettext: '%s' '%s'" % (domain, x) + print("gettext: '%s' '%s'" % (domain, x)) return x wmlparser.gettext = gt -- 2.29.2 From 01c72117acc183e007ace3da2ecbf7baf7c870c1 Mon Sep 17 00:00:00 2001 From: Elvish_Hunter Date: Sun, 29 Sep 2019 11:07:59 +0200 Subject: [PATCH 14/31] Ported wiki_grabber to Python 3 (cherry picked from commit 4f667254c9af792147fa0740e5944d21b7e79095) --- utils/wiki_grabber.py | 37 ++++++++++++++++++------------------- 1 file changed, 18 insertions(+), 19 deletions(-) diff --git a/utils/wiki_grabber.py b/utils/wiki_grabber.py index 9a64e4dabc0..ee633b1bc36 100755 --- a/utils/wiki_grabber.py +++ b/utils/wiki_grabber.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python2 +#!/usr/bin/env python3 # vim: tabstop=4: shiftwidth=4: expandtab: softtabstop=4: autoindent: """ @@ -18,18 +18,12 @@ page which can be used in the wiki. 
[1] https://wesnoth.org/wiki/Wiki_grabber """ -from __future__ import with_statement # For python < 2.6 import operator import os import sys import re - -try: - import argparse -except ImportError: - print 'Please install argparse by running "easy_install argparse"' - sys.exit(1) +import argparse if __name__ == "__main__": # setup and parse command line arguments @@ -132,7 +126,12 @@ if __name__ == "__main__": page = get_value(data, "@page") order = get_value(data, "@order") if order is None: - order = 10000 + # in Python 2, this was an int with value 10000 + # when compared to strings, ints were always lower + # in Python 3 such comparison raises an error + # for this reason, order is now an empty string + # when compared with other strings, empty ones are always lower + order = "" return [page, order] @@ -501,15 +500,15 @@ if __name__ == "__main__": header = process_header(res[0][0]) body = process_body(res[0][1]) else: - print "File: " + current_file - print "Block:\n" + current_block - print "\n\nInvalid wiki block, discarded." + print("File: " + current_file) + print("Block:\n" + current_block) + print("\n\nInvalid wiki block, discarded.") return if not header[0]: - print "File: " + current_file - print "Block:\n" + current_block - print "\n\nNo page defined, dropped." + print("File: " + current_file) + print("Block:\n" + current_block) + print("\n\nNo page defined, dropped.") return if not header[0] in file_map: @@ -520,7 +519,7 @@ if __name__ == "__main__": def create_output(): """Generates the output""" - for file, data_list in file_map.iteritems(): + for file, data_list in file_map.items(): data_list.sort(key=operator.itemgetter(0)) with open(os.path.join(output_directory, file), "w") as fd: for i in data_list: @@ -571,7 +570,7 @@ if __name__ == "__main__": global macro_map if not macro.group(1) in macro_map: - print "Macro '%s' is not defined." % macro.group(1) + print("Macro '%s' is not defined." 
% macro.group(1)) return macro.group(0) return macro_map[macro.group(1)] @@ -586,7 +585,7 @@ if __name__ == "__main__": return data def create_macro_old(macro): - print "Found old style macro '%s'" % macro.group(1) + print("Found old style macro '%s'" % macro.group(1)) create_macro(macro) def create_macro(macro): @@ -595,7 +594,7 @@ if __name__ == "__main__": global macro_map if macro.group(1) in macro_map: - print "Macro '%s' is being redefined." % macro.group(1) + print("Macro '%s' is being redefined." % macro.group(1)) macro_map[macro.group(1)] = macro.group(2) -- 2.29.2 From 5e5ba5a70145715ebbfe5e401fff3a2339d032d5 Mon Sep 17 00:00:00 2001 From: Elvish_Hunter Date: Tue, 1 Oct 2019 11:36:35 +0200 Subject: [PATCH 15/31] Ported ai_test to Python 3 (cherry picked from commit ffc5edd3fe0f46762f3addf5b5af60c4a14abd0f) --- utils/ai_test/ai_test.cfg | 4 +-- utils/ai_test/ai_test.py | 64 +++++++++++++++++++-------------------- 2 files changed, 34 insertions(+), 34 deletions(-) diff --git a/utils/ai_test/ai_test.cfg b/utils/ai_test/ai_test.cfg index c0569ca8523..bfb71114227 100644 --- a/utils/ai_test/ai_test.cfg +++ b/utils/ai_test/ai_test.cfg @@ -7,7 +7,7 @@ title = Untitled -path_to_wesnoth_binary = ../../cmake_build_dir/wesnoth +path_to_wesnoth_binary = ../../wesnoth # this is the first parts of arguments @@ -37,7 +37,7 @@ repeat = 1 # of the AI's cfg-file: # {core/macros} ai_config1 = ai/ais/ai_default_rca.cfg -ai_config2 = ai/dev/idle_ai.cfg +ai_config2 = ai/ais/idle_ai.cfg # leave empty for random fractions diff --git a/utils/ai_test/ai_test.py b/utils/ai_test/ai_test.py index 26445cce501..97fda324a87 100755 --- a/utils/ai_test/ai_test.py +++ b/utils/ai_test/ai_test.py @@ -1,9 +1,9 @@ -#!/usr/bin/env python2 +#!/usr/bin/env python3 from subprocess import Popen, PIPE from time import clock, time import datetime import sqlite3 -import ConfigParser +import configparser import os import string import random @@ -48,7 +48,7 @@ def construct_command_line(cfg, test, 
switched_side): repeats = cfg.getint('default', 'repeat') repeats_param = '--multiplayer-repeat ' + str(repeats) if repeats > 1: - print 'Be patient, ' + str(repeats) + ' repeats are going to take a while.' + print('Be patient, ' + str(repeats) + ' repeats are going to take a while.') side1 = test.ai_config1 if not switched_side else test.ai_config2 side2 = test.ai_config2 if not switched_side else test.ai_config1 @@ -82,7 +82,7 @@ def do_filter(str, substring): def run_game(cfg, test, switched_side): command_line = construct_command_line(cfg, test, switched_side) - print 'Running: ' + command_line + print('Running: ' + command_line) game_results = [] game_result = None @@ -90,7 +90,7 @@ def run_game(cfg, test, switched_side): faction2 = '' debugout = '' - p = Popen(command_line, shell=True, bufsize=10000000, stdout=PIPE, stderr=PIPE) + p = Popen(command_line, shell=True, bufsize=10000000, stdout=PIPE, stderr=PIPE, encoding="utf8") for line in p.stderr: l = filter_non_printable(line.strip()) @@ -111,7 +111,7 @@ def run_game(cfg, test, switched_side): test.version_string = s continue - n, s = do_filter(l , 'info ai/testing: AI_IDENTIFIER1:') + n, s = do_filter(l , 'info ai/testing: AI_IDENTIFIER 1:') if(n > -1): if switched_side: test.ai_ident2 = s @@ -126,7 +126,7 @@ def run_game(cfg, test, switched_side): game_result['is_success'] = False continue - n, s = do_filter(l , 'info ai/testing: AI_IDENTIFIER2:') + n, s = do_filter(l , 'info ai/testing: AI_IDENTIFIER 2:') if(n > -1): if switched_side: test.ai_ident1 = s @@ -140,8 +140,8 @@ def run_game(cfg, test, switched_side): winner = 1 else: winner = 2 - print 'AND THE WINNER IS: ' + str(winner) - if game_result.has_key('winner'): + print('AND THE WINNER IS: ' + str(winner)) + if 'winner' in game_result: game_result['is_success'] = False break game_result['winner'] = winner @@ -150,8 +150,8 @@ def run_game(cfg, test, switched_side): n, s = do_filter(l , 'info ai/testing: DRAW:') if(n > -1): - print 'AND THE WINNER IS: 
DRAW' - if game_result.has_key('winner'): + print('AND THE WINNER IS: DRAW') + if 'winner' in game_result: game_result['is_success'] = False break game_result['winner'] = 0 @@ -164,9 +164,9 @@ def run_game(cfg, test, switched_side): # so we do some checking here and adding # game_result to game_results. - print 'AND THE VICTORY_TURN IS: ' + s + print('AND THE VICTORY_TURN IS: ' + s) - if game_result.has_key('end_turn'): + if 'end_turn' in game_result: game_result['is_success'] = False break @@ -183,7 +183,7 @@ def run_game(cfg, test, switched_side): n, s = do_filter(l , 'error') if(n > -1): # forward errors from stderr. - print 'stderr give: error ' + s + print('stderr give: error ' + s) continue @@ -192,14 +192,14 @@ def run_game(cfg, test, switched_side): return game_results def print_error(debugout): - print 'Warning: not success!' - print '====================' - print 'stderr:' - print debugout - print '====================' + print('Warning: not success!') + print('====================') + print('stderr:') + print(debugout) + print('====================') def save_result_logfile(cfg, test, game_result, log_file): - print 'Saving to log file....' + print('Saving to log file....') log_file.write('"' + test.ai_config1 + '", "' + test.ai_config2 + '", "' + test.ai_ident1 + '", "' + @@ -214,10 +214,10 @@ def save_result_logfile(cfg, test, game_result, log_file): str(game_result['winner']) + '"\n'); log_file.flush(); - print 'Saved to log file' + print('Saved to log file') def save_result_database(cfg, test, game_result, sqlite_file): - print 'Saving to DB....' 
+ print('Saving to DB....') query = ('INSERT INTO games("test_id","faction1","faction2","switched_side","is_success","end_turn","winner")' + 'VALUES (?,?,?,?,?,?,?)') @@ -233,7 +233,7 @@ def save_result_database(cfg, test, game_result, sqlite_file): game_result['winner'])) conn.commit() conn.close() - print 'Saved to DB' + print('Saved to DB') def executions(cfg, test): structured = cfg.getboolean('default', 'structured_test') @@ -242,7 +242,7 @@ def executions(cfg, test): i = 1 for faction1 in factions: for faction2 in factions: - print 'EXECUTION: ' + str(i) + '/36 --- ' + faction1 + ' against ' + faction2 + print('EXECUTION: ' + str(i) + '/36 --- ' + faction1 + ' against ' + faction2) test.faction1 = faction1 test.faction2 = faction2 game_results = run_game(cfg, test, False) @@ -257,7 +257,7 @@ def executions(cfg, test): for i in range(0, n): switched_side = (random.randint(0, 1) == 1) if randomize else False - print 'EXECUTION ' + str(i + 1) + print('EXECUTION ' + str(i + 1)) game_results = run_game(cfg, test, switched_side) yield game_results @@ -268,7 +268,7 @@ def executions(cfg, test): # main -cfg = ConfigParser.ConfigParser() +cfg = configparser.ConfigParser(interpolation=None) cfg.read('ai_test.cfg') ai1 = cfg.get('default', 'ai_config1').strip() @@ -285,7 +285,7 @@ test = Test(ai1, ai2, faction1, faction2, map, title) # only 'test the test' with GUI / start one game then exit if len(sys.argv) > 1 and sys.argv[1] == '-p': - executions(cfg, test).next() + next(executions(cfg, test)) sys.exit(0) log_file = None @@ -327,11 +327,11 @@ for game_results in executions(cfg, test): ai1_won = ai1_won + 1 elif game_result['winner'] == 2: ai2_won = ai2_won + 1 - print '\n=====Status=====' - print 'Total games: ' + str(total) - print 'AI1(' + ai1 + ') won: ' + str(ai1_won) + "/" + str(ai1_won * 100 / total) + '%' - print 'AI2(' + ai2 + ') won: ' + str(ai2_won) + "/" + str(ai2_won * 100 / total) + '%' - print 'Draws: ' + str(draw) + "/" + str(draw * 100 / total) + 
'%\n' + print('\n=====Status=====') + print('Total games: ' + str(total)) + print('AI1(' + ai1 + ') won: ' + str(ai1_won) + "/" + str(ai1_won * 100 / total) + '%') + print('AI2(' + ai2 + ') won: ' + str(ai2_won) + "/" + str(ai2_won * 100 / total) + '%') + print('Draws: ' + str(draw) + "/" + str(draw * 100 / total) + '%\n') if sqlite_file: conn = sqlite3.connect(sqlite_file) cur = conn.cursor() -- 2.29.2 From 6e5cf500d4f5e85d96647256ca195b152543a959 Mon Sep 17 00:00:00 2001 From: Elvish_Hunter Date: Tue, 1 Oct 2019 11:37:26 +0200 Subject: [PATCH 16/31] Ported unused_functions to Python 3 (cherry picked from commit 636f29b0b96bc9dae3637a7e6d259a9e4b788fa1) --- utils/unused_functions.py | 22 ++++++++++------------ 1 file changed, 10 insertions(+), 12 deletions(-) diff --git a/utils/unused_functions.py b/utils/unused_functions.py index 13ac06b5a2e..64ba08d1376 100755 --- a/utils/unused_functions.py +++ b/utils/unused_functions.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python2 +#!/usr/bin/env python3 """ This script is used to detect functions in the source code which are no longer @@ -19,16 +19,13 @@ def nm(filename): return os.popen("nm -C %s" % filename).read() output1 = [] -for o in glob.glob("src/*.o") + glob.glob("src/*/*.o") + \ - glob.glob("src/*/*/*.o") + glob.glob("src/*/*/*/*.o"): +for o in glob.glob("build/release/*.o") + glob.glob("build/release/*/*.o") + \ + glob.glob("build/release/*/*/*.o") + glob.glob("build/release/*/*/*/*.o"): output1.append((o, nm(o))) -output2 = nm("src/wesnoth") -output2 += nm("src/campaignd") -output2 += nm("src/exploder") -output2 += nm("src/cutter") -output2 += nm("src/wesnothd") -output2 += nm("src/test") +output2 = nm("wesnoth") +output2 += nm("campaignd") +output2 += nm("wesnothd") def extract(line): return line[line.find(" T ") + 3:] @@ -47,6 +44,7 @@ for o in output1: found += [symbol] if found: - print "%s:" % o[0] - print "\n".join(found) - print + print("%s:" % o[0]) + print("\n".join(found)) + print() + -- 2.29.2 From 
f1b7b3ceca3a68f7c78486747bebd2e4885c176f Mon Sep 17 00:00:00 2001 From: Steve Cotton Date: Mon, 14 Oct 2019 06:26:55 +0200 Subject: [PATCH 17/31] trackplacer3 a utility to export and import tmx files (#4365) The python2 trackplacer included both the handling of the file format, and the GUI application. This trackplacer3 is a library for the file format, without the GUI. The new tmx_trackplacer is a command-line tool for exporting the data to Tiled's .tmx format, and re-importing it back to .cfg files, so that the GUI of Tiled can be used to avoid reimplementing the GUI of Trackplacer in Python 3. The implementation uses Tiled's Object Layers (not Tile Layers). This allows additional journey markers to be added with the "Insert Tile" tool, and additional journeys to be added as new layers. It can also read in a .cfg and then re-export it to a new .cfg file, to see if the data is preserved. The format is chosen by the output filename. The old trackplacer2 isn't removed in this commit - before removing it, I think trackplacer3 needs some way to preview the animation. ----- Comments on the mainline campaigns: ----- AToTB, DM, LoW, NR and THoT will work with this. But: Northern Rebirth's bigmap.cfg has a track RECOVERY whose STAGE1 starts with an OLD_REST - that isn't handled by trackplacer, it must have been hand-edited. That OLD_REST will be lost when read by either trackplacer2 or trackplacer3, similarly the OLD_BATTLE of LoW's SAURIANS track will be lost. Delfador's Memoirs SEARCH_STAGE1 is omitted from all subsequent parts of SEARCH. Also in DM, SEARCH_STAGE3 has a point which is then moved in STAGE4 onwards - I guess a hand edit. Both of this will be overwritten if the file is edited with either this tool or with the python2 trackplacer. SotA's journey_chapter*.cfg files and WoV's bigmap.cfg file have some of the trackplacer comments removed, they won't be handled by this tool, at least not until better error handling is added. 
(cherry picked from commit 3a8dc9c361d8f5f54a764147cff3a81fbf0e4aed) --- data/tools/tmx_trackplacer | 71 ++++++ data/tools/wesnoth/trackplacer3/__init__.py | 11 + .../wesnoth/trackplacer3/cfgfileformat.py | 208 +++++++++++++++++ data/tools/wesnoth/trackplacer3/datatypes.py | 128 ++++++++++ .../wesnoth/trackplacer3/tmxfileformat.py | 219 ++++++++++++++++++ 5 files changed, 637 insertions(+) create mode 100755 data/tools/tmx_trackplacer create mode 100644 data/tools/wesnoth/trackplacer3/__init__.py create mode 100644 data/tools/wesnoth/trackplacer3/cfgfileformat.py create mode 100755 data/tools/wesnoth/trackplacer3/datatypes.py create mode 100644 data/tools/wesnoth/trackplacer3/tmxfileformat.py diff --git a/data/tools/tmx_trackplacer b/data/tools/tmx_trackplacer new file mode 100755 index 00000000000..52653d19f87 --- /dev/null +++ b/data/tools/tmx_trackplacer @@ -0,0 +1,71 @@ +#!/usr/bin/env python3 +# encoding: utf-8 +""" +tmx_trackplacer -- a tool for placing map tracks using Tiled + +This creates .tmx files (Tiled's format) based on the trackplacer's journey data. +The journey can be edited in Tiled using the "Insert Tile" tool, and then the .tmx +data converted back to the trackplacer parts of the .cfg file. + +It can also read in a .cfg and then re-export it to a new .cfg file, to see if the +data is preserved. The format is chosen by the filename given to the --output option. + +If a .jpg or .png file is given as the input, then the output will be a template .tmx +or .cfg file for drawing a track on top of that image. 
+ +Example usage: +* data/tools/tmx_trackplacer images/campaign_map.png --output temp.tmx +* data/tools/tmx_trackplacer data/campaigns/Northern_Rebirth/utils/bigmap.cfg --output temp.tmx +* data/tools/tmx_trackplacer data/campaigns/Northern_Rebirth/utils/bigmap.cfg --output temp.cfg +""" + +import wesnoth.trackplacer3 as tp3 + +import argparse + +if __name__ == "__main__": + ap = argparse.ArgumentParser(usage=__doc__) + ap.add_argument("file", metavar="string", help="Read input from this file") + ap.add_argument("-o", "--output", metavar="string", + help='Write output into the specified file') + ap.add_argument("--data-dir", metavar="dir", + help='Same as Wesnoth’s “--data-dir” argument') + options = ap.parse_args() + + if options.data_dir is None: + import os, sys + APP_DIR,APP_NAME=os.path.split(os.path.realpath(sys.argv[0])) + WESNOTH_ROOT_DIR=os.sep.join(APP_DIR.split(os.sep)[:-2]) # pop out "data" and "tools" + options.data_dir=os.path.join(WESNOTH_ROOT_DIR,"data") + + journey = None + if options.file: + if options.file.endswith(".cfg"): + reader = tp3.CfgFileFormat() + (journey, metadata) = reader.read(options.file) + elif options.file.endswith(".tmx"): + reader = tp3.TmxFileFormat(wesnoth_data_dir=options.data_dir) + (journey, metadata) = reader.read(options.file) + elif options.file.endswith(".png") or options.file.endswith(".jpg"): + journey = tp3.Journey() + journey.mapfile = options.file + metadata = None + else: + raise RuntimeError("Don't know how to handle input from this file type") + + if journey: + print("Read data:", str(journey)) + else: + raise RuntimeError("Failed to read journey data") + + if options.output: + if options.output.endswith(".cfg"): + print("Exporting as cfg") + exporter = tp3.CfgFileFormat() + exporter.write(options.output, journey, metadata) + elif options.output.endswith(".tmx"): + print("Exporting as tmx") + exporter = tp3.TmxFileFormat(wesnoth_data_dir=options.data_dir) + exporter.write(options.output, journey, metadata) + 
else: + raise RuntimeError("Don't know how to handle output to this file type") diff --git a/data/tools/wesnoth/trackplacer3/__init__.py b/data/tools/wesnoth/trackplacer3/__init__.py new file mode 100644 index 00000000000..81aaf0086db --- /dev/null +++ b/data/tools/wesnoth/trackplacer3/__init__.py @@ -0,0 +1,11 @@ +# When Python looks for a package, it considers all directories with +# a file named __init__.py inside them. Therefore we need this file. +# The code below is executed on "import wesnoth.trackplacer3", importing +# the main classes that are intended to be public. + +from wesnoth.trackplacer3.datatypes import Journey +from wesnoth.trackplacer3.datatypes import Track +from wesnoth.trackplacer3.datatypes import Waypoint + +from wesnoth.trackplacer3.cfgfileformat import CfgFileFormat +from wesnoth.trackplacer3.tmxfileformat import TmxFileFormat diff --git a/data/tools/wesnoth/trackplacer3/cfgfileformat.py b/data/tools/wesnoth/trackplacer3/cfgfileformat.py new file mode 100644 index 00000000000..cb83ffe9a85 --- /dev/null +++ b/data/tools/wesnoth/trackplacer3/cfgfileformat.py @@ -0,0 +1,208 @@ +#!/usr/bin/env python3 +# encoding: utf-8 +"""Module for reading and writing .cfg files containing journey data. + +It will look for track information enclosed in special comments that look like +this: + + # trackplacer: tracks begin + # trackplacer: tracks end + +trackplacer will alter only what it finds inside these comments, except that it +will also generate a file epilog for undefining local symbols. The +epilog will begin with this comment: + + # trackplacer: epilog begins + +TODO: what to do about the epilog? It needs to come after the scenarios, otherwise +the undefs happen before the scenarios can use the journey data. + +Special comments may appear in the track section, looking like this: + + # trackplacer: = + +These set properties that trackplacer may use. 
At present there is +only one such property: "map", which records the name of the mapfile on +which your track is laid. + +Original (python2 + pygtk) implementation by Eric S. Raymond for the Battle For Wesnoth project, October 2008 +""" + +import re + +from wesnoth.trackplacer3.datatypes import * + +class IOException(Exception): + """Exception thrown while reading a track file.""" + def __init__(self, message, path, lineno=None): + self.message = message + self.path = path + self.lineno = lineno + +class CfgFileMetadata: + """Trackplacer is intended to write .cfg files that may also contain other information. + +When it reads a file, in addition to the Journey it keeps some other information so that +it can write back to the same file while preserving the other data in the .cfg.""" + def __init__(self): + self.properties = {} + self.before = self.after = "" + +class CfgFileFormat(FileFormatHandler): + """Translate a Journey to/from a .cfg file, preserving non-trackplacer data.""" + def __init__(self): + pass + + def read(self, fp): + if type(fp) == type(""): + try: + fp = open(fp, "r") + except IOError: + raise IOException("Cannot read file.", fp) + journey = Journey() + metadata = CfgFileMetadata() + selected_track = None + if not fp.name.endswith(".cfg"): + raise IOException("Cannot read this filetype.", fp.name) + waypoint_re = re.compile("{NEW_(" + "|".join(icon_presentation_order) + ")" \ + + " +([0-9]+) +([0-9]+)}") + property_re = re.compile("# *trackplacer: ([^=]+)=(.*)") + define_re = re.compile("#define (.*)_STAGE[0-9]+(_END|_COMPLETE)?") + state = "before" + ignore = True # True when the most recent define_re match found an END or COMPLETE group, or before any track has been defined + for line in fp: + if line.startswith("# trackplacer: epilog begins"): + break + # This is how we ignore stuff outside of track sections + if state == "before": + if line.startswith("# trackplacer: tracks begin"): + state = "tracks" # And fall through... 
+ else: + metadata.before += line + continue + elif state == "after": + metadata.after += line + continue + elif line.startswith("# trackplacer: tracks end"): + state = "after" + continue + # Which track are we appending to? + m = re.search(define_re, line) + if m: + selected_track = journey.findTrack(m.group(1)) + if selected_track == None: + selected_track = Track(m.group(1)) + journey.tracks.append(selected_track) + ignore = bool(m.group(2)) + continue + # Is this a track marker? + m = re.search(waypoint_re, line) + if m and not ignore: + try: + tag = m.group(1) + x = int(m.group(2)) + y = int(m.group(3)) + selected_track.waypoints.append(Waypoint(tag, x, y)) + continue + except ValueError: + raise IOException("Invalid coordinate field.", fp.name, i+1) + # \todo: Northern Rebirth has some tracks that start with an OLD_REST + # before any of the NEW_JOURNEY markers. Maybe add a special-case for + # that, that adds them if and only if len(selected_track.waypoints)==0 + + # Is it a property setting? + m = re.search(property_re, line) + if m: + metadata.properties[m.group(1)] = m.group(2) + continue + if "map" in metadata.properties: + journey.mapfile = metadata.properties['map'] + else: + raise IOException("Missing map declaration.", fp.name) + fp.close() + return (journey, metadata) + + def write(self, filename, journey, metadata=None): + if metadata is None: + metadata = CfgFileMetadata() + + if not filename.endswith(".cfg"): + raise IOException("File must have .cfg extension.", fp.name) + + # If we're writing to an existing file, preserve the non-trackplacer parts + # by ignoring the provided metadata and re-reading it from the file. 
+ try: + ignored, metadata = self.read(filename) + print("Preserving non-trackplacer data from the destination file") + except: + pass + + fp = open(filename, "w") + fp.write(metadata.before) + fp.write("# trackplacer: tracks begin\n#\n") + fp.write("# Hand-hack this section strictly at your own risk.\n") + fp.write("#\n") + if not metadata.before and not metadata.after: + fp.write("#\n# wmllint: no translatables\n\n") + if journey.mapfile: + metadata.properties["map"] = journey.mapfile + for (key, val) in list(metadata.properties.items()): + fp.write("# trackplacer: %s=%s\n" % (key, val)) + fp.write("#\n") + definitions = [] + for track in journey.tracks: + if len(track.waypoints) == 0: + print("Warning: track {name} has no waypoints".format(name=track.name)) + continue + name = track.name + endpoints = [] + for i in range(0, len(track.waypoints)): + if track.waypoints[i].action in segmenters: + endpoints.append(i) + if track.waypoints[-1].action not in segmenters: + endpoints.append(len(track.waypoints)-1) + outname = name.replace(" ", "_").upper() + for (i, e) in enumerate(endpoints): + stagename = "%s_STAGE%d" % (outname, i+1,) + definitions.append(stagename) + fp.write("#define %s\n" % stagename) + for j in range(0, e+1): + age="OLD" + if i == 0 or j > endpoints[i-1]: + age = "NEW" + waypoint = track.waypoints[j] + marker = " {%s_%s %d %d}\n" % (age, waypoint.action, waypoint.x, waypoint.y) + fp.write(marker) + fp.write("#enddef\n\n") + endname = "%s_END" % stagename + fp.write("#define %s\n" % endname) + definitions.append(endname) + for j in range(0, e+1): + age="OLD" + if j == endpoints[i]: + age = "NEW" + waypoint = track.waypoints[j] + marker = " {%s_%s %d %d}\n" % (age, waypoint.action, waypoint.x, waypoint.y) + fp.write(marker) + fp.write("#enddef\n\n") + completename = "%s_COMPLETE" % outname + fp.write("#define %s\n" % completename) + definitions.append(completename) + for waypoint in track.waypoints: + marker = " {%s_%s %d %d}\n" % ("OLD", 
waypoint.action, waypoint.x, waypoint.y) + fp.write(marker) + fp.write("#enddef\n\n") + fp.write("# trackplacer: tracks end\n") + fp.write(metadata.after) + + # \todo: what to do about the epilogue? It must wait until after the scenarios have used the journey data. + # fp.write ("# trackplacer: epilog begins\n\n") + # for name in definitions: + # if "{" + name + "}" not in metadata.after: + # fp.write("#undef %s\n" % name) + # fp.write ("\n# trackplacer: epilog ends\n") + + fp.close() + +if __name__ == "__main__": + print("This isn't intended to be run directly") diff --git a/data/tools/wesnoth/trackplacer3/datatypes.py b/data/tools/wesnoth/trackplacer3/datatypes.py new file mode 100755 index 00000000000..178b1aafda9 --- /dev/null +++ b/data/tools/wesnoth/trackplacer3/datatypes.py @@ -0,0 +1,128 @@ +#!/usr/bin/env python3 +# encoding: utf-8 +""" +trackplacer3.datatypes -- file-format-independent handling of journeys + +A journey is an object containing a map file name and a (possibly +empty) list of tracks, each with a name and each consisting of a +sequence of track markers. + +Original (python2 + pygtk) implementation by Eric S. Raymond for the Battle For Wesnoth project, October 2008 +""" + +import math + +# All dependencies on the shape of the data tree live here +# The code does no semantic interpretation of these icons at all; +# to add new ones, just fill in a dictionary entry. 
+imagedir = "core/images/" +selected_icon_dictionary = { + "JOURNEY": imagedir + "misc/new-journey.png", + "BATTLE": imagedir + "misc/new-battle.png", + "REST": imagedir + "misc/flag-red.png", + } +unselected_icon_dictionary = { + "JOURNEY": imagedir + "misc/dot-white.png", + "BATTLE": imagedir + "misc/cross-white.png", + "REST": imagedir + "misc/flag-white.png", + } +icon_presentation_order = ("JOURNEY", "BATTLE", "REST") +segmenters = ("BATTLE","REST") + +# Basic functions for bashing points and rectangles + +def _distance(point1, point2): + "Euclidean distance between two waypoints." + return math.sqrt((point1.x - point2.x)**2 + (point1.y - point2.y)**2) + +class Waypoint: + """Represents a single dot, battle or restpoint.""" + def __init__(self, action, x, y): + self.action = action + self.x = x + self.y = y + + def __str__(self): + return "".format(action=self.action, x=self.x, y=self.y) + +class Track: + """An ordered list of Waypoints, users are expected to directly access the data members.""" + def __init__(self, name): + self.name = name + self.waypoints = [] + + def insert_at_best_fit(self, w): + """Utility function to add a new Waypoint, working out from its coordinates where in the sequence it should be added. + + If the new point should definitely be at the end, you can use waypoints.append instead of this method. 
+        """ + if len(self.waypoints) < 2: + self.waypoints.append(w) + return + + # Find the index of the member of self.waypoints nearest to the new point + closest = min(range(len(self.waypoints)), key=lambda i: _distance(w, self.waypoints[i])) + if closest == 0: + if _distance(self.waypoints[0], self.waypoints[1]) < _distance(w, self.waypoints[1]): + self.waypoints.insert(0, w) + else: + self.waypoints.insert(1, w) + elif closest == len(self.waypoints)-1: + if _distance(self.waypoints[-1], self.waypoints[-2]) < _distance(w, self.waypoints[-2]): + self.waypoints.append(w) + else: + self.waypoints.insert(-1, w) + elif len(self.waypoints) == 2: + self.waypoints.insert(1, w) + elif _distance(w, self.waypoints[closest-1]) < _distance(self.waypoints[closest], self.waypoints[closest-1]): + self.waypoints.insert(closest, w) + else: + self.waypoints.insert(closest+1, w) + +class Journey: + """Collection of all Tracks, and the corresponding background image""" + def __init__(self): + self.mapfile = None # Map background of the journey + self.tracks = [] # ordered list of Tracks + + def findTrack(self, name): + for track in self.tracks: + if name == track.name: + return track + return None + + def __str__(self): + return "" % (self.mapfile, + ",".join([track.name for track in self.tracks])) + +class FileFormatHandler: + """Interface for reading and writing files""" + + def __init__(self): + raise NotImplementedError() + + def read(self, file_or_filename): + """Return a (Journey, metadata) pair. + + The metadata may be None, and is information about the source file that + isn't represented in the Journey object. The purpose of it is to check + whether data is lost by reading a file and then writing the data to a new + file. + """ + raise NotImplementedError() + + def write(self, file_or_filename, journey, metadata=None): + """Create a new file or overwrite an existing file. + + When overwriting an existing file, this may try to preserve non-journey + data. 
+ + When creating a new file, if the metadata is non-None then this may try + to recreate the non-journey data from the original file. This is intended + to be used for checking what data is lost in a round-trip, by making the + copy of a file with only the journey parts changed. + + If metadata is non-None when overwriting an existing file, it's currently + implementation defined which set of data (or neither) will be preserved. + """ + raise NotImplementedError() diff --git a/data/tools/wesnoth/trackplacer3/tmxfileformat.py b/data/tools/wesnoth/trackplacer3/tmxfileformat.py new file mode 100644 index 00000000000..186fc6abe0e --- /dev/null +++ b/data/tools/wesnoth/trackplacer3/tmxfileformat.py @@ -0,0 +1,219 @@ +#!/usr/bin/env python3 +# encoding: utf-8 + +"""Module for Tiled (Tile Editor) .tmx files containing journey data. + +Uses Tiled's Object Layers (not Tile Layers). This allows additional journey +markers to be added with the "Insert Tile" tool, and additional journeys to be +added as new layers. +""" + +from wesnoth.trackplacer3.datatypes import * + +import xml.etree.ElementTree as ET + +# Although this doesn't show images, it needs to read their width and height +import PIL.Image + +class ParseException(Exception): + """There's a lot of expectations about the .tmx file, this generally + means that one of those assumptions didn't hold. + """ + def __init__(self, message, element=None): + self.message = message + self.element = element + +class _IdCounter: + """It seems that .tmx has several independent sequences of ids, and can have the same id used for different purposes. 
However, they can all have gaps in the sequence, for simplicity this code has only one ID counter, and makes each id unique.""" + def __init__(self): + self.counter = 1 + + def peek(self): + """Return the id that the next call to get() will return, without changing the id""" + return str(self.counter) + + def get(self): + counter = self.counter + self.counter += 1 + return str(counter) + +class _TmxTileset: + def read(tileset): + """Returns a dict mapping tiles' gid attributes to actions. + + Argument should be the ETree element for the tileset.""" + base_id = int(tileset.attrib["firstgid"]) + tileset_to_action = {} + for tile in tileset.findall("tile"): + tile_id = base_id + int(tile.attrib["id"]) + image_source = tile.find("image").attrib["source"] + action = None + # This matches by endswith, so that changes to the wesnoth_data_dir won't + # cause unnecessary breakage of .tmx files. + for k in selected_icon_dictionary: + if image_source.endswith(selected_icon_dictionary[k]): + action = k + break + if action is None: + raise ParseException("unrecognised action in tileset") + tileset_to_action[str(tile_id)] = action + return tileset_to_action + +class TmxFileFormat(FileFormatHandler): + """Translate a Journey to and from a Tiled (Tile Editor) TMX file.""" + def __init__(self, wesnoth_data_dir): + """The data dir is the same as wesnoth's --data-dir argument. + + The data dir is only used for the journey marker images. 
+ """ + self.wesnoth_data_dir = wesnoth_data_dir + + def write(self, file_or_filename, journey, metadata=None): + id_counter = _IdCounter() + + # Read the size of the background image + try: + with PIL.Image.open(journey.mapfile) as image: + background_width, background_height = image.size + except: + print("Can't open background image, assuming 1024x768", journey.mapfile) + background_width, background_height = 1024, 768 + + tmxmap = ET.Element("map", attrib={ + "version": "1.2", + "orientation":"orthogonal", + "renderorder":"right-down", + # There's no problem if the width and height don't exactly match the image, this + # just determines the size of the grid shown in the UI and used for tile layers. + # For track placement, tmx_trackplacer uses object layers instead of tile layers. + "width": str(int(background_width / 32)), + "height": str(int(background_height / 32)), + "tilewidth":"32", + "tileheight":"32", + "infinite":"0" + }) + + # Wesnoth's NEW_JOURNEY (etc) macros use the coordinates as the center of the image, + # Tiled uses them as the bottom-left corner of the image. This is a dictionary of + # [adjustment to x, adjustment to y] pairs, in the direction wesnoth -> tmx. + # If any of these images can't be found then there's no point continuing. 
+ image_offset = {} + for action in selected_icon_dictionary: + with PIL.Image.open(self.wesnoth_data_dir + "/" + selected_icon_dictionary[action]) as image: + image_offset[action] = [int (- image.size[0] / 2), int (image.size[1] / 2)] + + # embed a tileset in the map, corresponding to the journey icons + action_to_tileset = {} + base_id = id_counter.get() + tileset = ET.SubElement(tmxmap, "tileset", attrib={ + "firstgid":base_id, + "name":"wesnoth journey icons", + "tilewidth":"1", + "tileheight":"1", + "tilecount":"3" + }) + ET.SubElement(tileset, "grid", attrib={"orientation":"orthogonal", "width":"1", "height":"1"}) + for i, action in enumerate(selected_icon_dictionary): + action_to_tileset[action] = {"gid":str(int(base_id) + i)} + tile = ET.SubElement(tileset, "tile", attrib={"id":str(i)}) + ET.SubElement(tile, "image", attrib={ + "source":self.wesnoth_data_dir + "/" + selected_icon_dictionary[action] + }) + # increment the id_counter + id_counter.get() + + # background image + layer = ET.SubElement(tmxmap, "imagelayer", attrib={"id": "1", "name": "background"}) + ET.SubElement(layer, "image", attrib={"source": journey.mapfile}) + + # journey tracks + for track in journey.tracks: + name = track.name + layer = ET.SubElement(tmxmap, "objectgroup", attrib={"id": id_counter.get(), "name": name}) + for point in track.waypoints: + if point.action not in action_to_tileset: + raise KeyError("Unknown action: " + point.action) + attrib = action_to_tileset[point.action] + attrib["id"] = id_counter.get() + attrib["x"] = str(point.x + image_offset[point.action][0]) + attrib["y"] = str(point.y + image_offset[point.action][1]) + o = ET.SubElement(layer, "object", attrib=attrib) + + # The points in each journey need to be kept in the correct order, so that the animation + # shows movement in the correct direction. If we know which points are newly-added, then the + # logic in Track.insert_at_best_fit will handle them. 
With Tiled, ids are never reused, so + # we can use the id_counter to work out which points are newly added. + custom_properties = ET.SubElement(tmxmap, "properties") + ET.SubElement(custom_properties, "property", attrib={"name":"tmx_trackplacer_export_id", "type":"int", "value":id_counter.get()}) + + # These need to be higher than all ids used elsewhere in the file, so add these after everything else's id is assigned + tmxmap.set("nextlayerid", id_counter.get()) + tmxmap.set("nextobjectid", id_counter.get()) + + tree = ET.ElementTree(tmxmap) + tree.write(file_or_filename, encoding="UTF-8", xml_declaration=True) + + def read(self, fp): + if type(fp) == type(""): + # if this raises IOError, let it pass to the caller + fp = open(fp, "r") + + tree = ET.parse(fp) + tmxmap = tree.getroot() + + journey = Journey() + + if tmxmap.attrib["orientation"] != "orthogonal": + raise ParseException("expected an orthogonal TMX map") + + # parse the tileset + if len(tmxmap.findall("tileset")) != 1: + raise ParseException("expected exactly one tileset") + tileset_to_action = _TmxTileset.read(tmxmap.find("tileset")) + + # Wesnoth's NEW_JOURNEY (etc) macros use the coordinates as the center of the image, + # Tiled uses them as the bottom-left corner of the image. This is a dictionary of + # [adjustment to x, adjustment to y] pairs, in the direction tmx -> wesnoth. + # If any of these images can't be found then there's no point continuing. 
+        image_offset = {}
+        for action in selected_icon_dictionary:
+            with PIL.Image.open(self.wesnoth_data_dir + "/" + selected_icon_dictionary[action]) as image:
+                image_offset[action] = [int (image.size[0] / 2), int (- image.size[1] / 2)]
+
+
+        # background image
+        if len(tmxmap.findall("imagelayer")) != 1 or tmxmap.find("imagelayer").attrib["name"] != "background":
+            raise ParseException("expected exactly one imagelayer")
+        if len(tmxmap.findall("imagelayer/image")) != 1:
+            raise ParseException("expected exactly one image in the imagelayer")
+        if tmxmap.find("imagelayer/image").attrib["source"] is None:
+            raise ParseException("expected a background image")
+        journey.mapfile = tmxmap.find("imagelayer/image").attrib["source"]
+
+        # metadata that was added in write(), to track which points have been added in Tiled
+        export_id_prop = tmxmap.find("properties/property[@name='tmx_trackplacer_export_id']")
+        if export_id_prop is not None:
+            export_id = int(export_id_prop.attrib["value"])
+            def added_in_tiled(item):
+                return export_id < int(item.attrib["id"])
+        else:
+            def added_in_tiled(item):
+                return True
+
+        # journey tracks
+        for layer in tmxmap.findall("objectgroup"):
+            track = Track(layer.attrib["name"])
+            for point in layer.findall("object"):
+                gid = point.attrib["gid"]
+                if gid not in tileset_to_action:
+                    raise KeyError("Unknown action gid: " + gid)
+                action = tileset_to_action[gid]
+                x = round(float(point.attrib["x"])) + image_offset[action][0]
+                y = round(float(point.attrib["y"])) + image_offset[action][1]
+                if added_in_tiled(point):
+                    track.insert_at_best_fit(Waypoint(action, x, y))
+                else:
+                    track.waypoints.append(Waypoint(action, x, y))
+            journey.tracks.append(track)
+
+        return (journey, None)
-- 
2.29.2

From 81d45b74936f19dc66c084c17d82bf2deebb8742 Mon Sep 17 00:00:00 2001
From: Steve Cotton
Date: Mon, 18 Nov 2019 18:23:12 +0100
Subject: [PATCH 18/31] Update changelog and readme for trackplacer python3 port

---
 data/tools/README.md | 11 ++++++++---
 1 file
changed, 8 insertions(+), 3 deletions(-) diff --git a/data/tools/README.md b/data/tools/README.md index e29f653e3e5..a14d8a140b7 100644 --- a/data/tools/README.md +++ b/data/tools/README.md @@ -10,10 +10,15 @@ Remove nearly transparent pixels from images using GIMP. It currently affects only one image at a time. Batch processing is available within GIMP, but it would be useful to expand this to skip files where the pixels did not change. -=== trackplacer === +=== `tmx_trackplacer` === -A visual editor for journey tracks, the icon sequences that appear on -Wesnoth story screens. +Converter for journey track files, the .cfg files which control the icon +sequences that appear on Wesnoth story screens. This can convert them to +or from the format needed for editing in Tiled. + +=== trackviewer.pyw === + +Previews the animation of journey tracks. === wesnoth_addon_manager === -- 2.29.2 From d6fe6cf34c81bc591f4dc730224cf470ae140a9d Mon Sep 17 00:00:00 2001 From: loonycyborg Date: Mon, 14 Oct 2019 16:14:46 +0300 Subject: [PATCH 19/31] Added tool to automatically add tags to appdata (cherry picked from commit 3ed601bb916c1b7b2525184e0fa9e6fdb37745d7) --- data/tools/update_appdata | 37 +++++++++++++++++++++++++++++++++++++ 1 file changed, 37 insertions(+) create mode 100755 data/tools/update_appdata diff --git a/data/tools/update_appdata b/data/tools/update_appdata new file mode 100755 index 00000000000..b521dbe8914 --- /dev/null +++ b/data/tools/update_appdata @@ -0,0 +1,37 @@ +#!/bin/env python + +import sys, requests, argparse +from xml.dom import minidom + +def fetch_date(version): + tag_info = requests.get("https://api.github.com/repos/wesnoth/wesnoth/git/refs/tags/"+version) + tag_info.raise_for_status() + url = tag_info.json()["object"]["url"] + result = requests.get(url) + result.raise_for_status() + return result.json()["tagger"]["date"] + +def update_appdata(version, appdata_path): + date = fetch_date(version) + + doc = minidom.parse(appdata_path) + releases = 
doc.getElementsByTagName("releases") + if releases: + releases = releases[0] + else: + releases = doc.getElementsByTagName("component")[-1].appendChild(doc.createElement("releases")) + + release = doc.createElement("release") + release.setAttribute("version", version) + release.setAttribute("date", date) + releases.insertBefore(release, releases.firstChild) + + doc.writexml(open(appdata_path, "w")) + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + parser.add_argument('version', metavar='VERSION') + parser.add_argument('appdata', metavar='APPDATA_FILE') + + args = parser.parse_args() + update_appdata(args.version, args.appdata) -- 2.29.2 From 9cc580810df73b27df8b5e2f534afc38bd7139e3 Mon Sep 17 00:00:00 2001 From: loonycyborg Date: Wed, 6 Nov 2019 19:39:38 +0300 Subject: [PATCH 20/31] Add dockerfile for making mingw cross-compile builds (cherry picked from commit 42293897e563d0caad814aea260342662d74096c) --- utils/dockerbuilds/make_mingw_build | 5 +++++ utils/dockerbuilds/mingw/Dockerfile | 20 ++++++++++++++++++++ utils/dockerbuilds/mingw/get_dlls.py | 18 ++++++++++++++++++ 3 files changed, 43 insertions(+) create mode 100755 utils/dockerbuilds/make_mingw_build create mode 100644 utils/dockerbuilds/mingw/Dockerfile create mode 100755 utils/dockerbuilds/mingw/get_dlls.py diff --git a/utils/dockerbuilds/make_mingw_build b/utils/dockerbuilds/make_mingw_build new file mode 100755 index 00000000000..92faea8ff55 --- /dev/null +++ b/utils/dockerbuilds/make_mingw_build @@ -0,0 +1,5 @@ +#!/bin/sh -xe + +cd mingw +docker build -t mingw-wesnoth . 
+docker run -it -v "$PWD"/../../..:/wesnoth -v "$PWD"/../mingwbuild:/output mingw-wesnoth diff --git a/utils/dockerbuilds/mingw/Dockerfile b/utils/dockerbuilds/mingw/Dockerfile new file mode 100644 index 00000000000..a424563ff1d --- /dev/null +++ b/utils/dockerbuilds/mingw/Dockerfile @@ -0,0 +1,20 @@ +FROM rwgrim/msys2-cross +ENV DEBIAN_FRONTEND=noninteractive + +RUN apt-get update && \ + apt-get install -y scons g++-mingw-w64-x86-64 pkg-config python3-pefile && \ + update-alternatives --set x86_64-w64-mingw32-g++ /usr/bin/x86_64-w64-mingw32-g++-posix && \ + apt-get clean && \ + rm -rf /var/lib/apt/lists + +RUN pacman-cross -S --noconfirm \ + mingw-w64-x86_64-boost \ + mingw-w64-x86_64-SDL2 \ + mingw-w64-x86_64-SDL2_image \ + mingw-w64-x86_64-SDL2_mixer \ + mingw-w64-x86_64-SDL2_ttf \ + mingw-w64-x86_64-pango + +COPY get_dlls.py /scripts/get_dlls.py + +ENTRYPOINT mkdir /build && cd /build && scons -j `nproc` arch=x86-64 prefix=/windows/mingw64 gtkdir=/windows/mingw64 host=x86_64-w64-mingw32 -Y /wesnoth && cp /build/wesnoth.exe /output/ && cd /output && python3 /scripts/get_dlls.py diff --git a/utils/dockerbuilds/mingw/get_dlls.py b/utils/dockerbuilds/mingw/get_dlls.py new file mode 100755 index 00000000000..efc5481e458 --- /dev/null +++ b/utils/dockerbuilds/mingw/get_dlls.py @@ -0,0 +1,18 @@ +#!/bin/env python + +import pefile, pathlib, shutil + +dlls = set() +dllpath = pathlib.Path('/windows/mingw64/bin') +pe_modules = set([pefile.PE('wesnoth.exe')]) + +while pe_modules: + pe = pe_modules.pop() + for entry in pe.DIRECTORY_ENTRY_IMPORT: + path = dllpath / pathlib.Path(entry.dll.decode()) + if path not in dlls and path.exists(): + dlls.add(path) + pe_modules.add(pefile.PE(path)) + +for dll in dlls: + shutil.copy(dll, ".") -- 2.29.2 From 9645454d722301f0566119e8ea2b77edd666a4be Mon Sep 17 00:00:00 2001 From: loonycyborg Date: Tue, 15 Oct 2019 16:13:36 +0300 Subject: [PATCH 21/31] Move appdata script to a more appropriate place (cherry picked from commit 
d2ee0f4b73eec0ea6702b4dad168d569836aec12) --- data/tools/update_appdata | 37 ------------------------------------- utils/update_appdata | 37 +++++++++++++++++++++++++++++++++++++ 2 files changed, 37 insertions(+), 37 deletions(-) delete mode 100755 data/tools/update_appdata create mode 100755 utils/update_appdata diff --git a/data/tools/update_appdata b/data/tools/update_appdata deleted file mode 100755 index b521dbe8914..00000000000 --- a/data/tools/update_appdata +++ /dev/null @@ -1,37 +0,0 @@ -#!/bin/env python - -import sys, requests, argparse -from xml.dom import minidom - -def fetch_date(version): - tag_info = requests.get("https://api.github.com/repos/wesnoth/wesnoth/git/refs/tags/"+version) - tag_info.raise_for_status() - url = tag_info.json()["object"]["url"] - result = requests.get(url) - result.raise_for_status() - return result.json()["tagger"]["date"] - -def update_appdata(version, appdata_path): - date = fetch_date(version) - - doc = minidom.parse(appdata_path) - releases = doc.getElementsByTagName("releases") - if releases: - releases = releases[0] - else: - releases = doc.getElementsByTagName("component")[-1].appendChild(doc.createElement("releases")) - - release = doc.createElement("release") - release.setAttribute("version", version) - release.setAttribute("date", date) - releases.insertBefore(release, releases.firstChild) - - doc.writexml(open(appdata_path, "w")) - -if __name__ == '__main__': - parser = argparse.ArgumentParser() - parser.add_argument('version', metavar='VERSION') - parser.add_argument('appdata', metavar='APPDATA_FILE') - - args = parser.parse_args() - update_appdata(args.version, args.appdata) diff --git a/utils/update_appdata b/utils/update_appdata new file mode 100755 index 00000000000..b521dbe8914 --- /dev/null +++ b/utils/update_appdata @@ -0,0 +1,37 @@ +#!/bin/env python + +import sys, requests, argparse +from xml.dom import minidom + +def fetch_date(version): + tag_info = 
requests.get("https://api.github.com/repos/wesnoth/wesnoth/git/refs/tags/"+version) + tag_info.raise_for_status() + url = tag_info.json()["object"]["url"] + result = requests.get(url) + result.raise_for_status() + return result.json()["tagger"]["date"] + +def update_appdata(version, appdata_path): + date = fetch_date(version) + + doc = minidom.parse(appdata_path) + releases = doc.getElementsByTagName("releases") + if releases: + releases = releases[0] + else: + releases = doc.getElementsByTagName("component")[-1].appendChild(doc.createElement("releases")) + + release = doc.createElement("release") + release.setAttribute("version", version) + release.setAttribute("date", date) + releases.insertBefore(release, releases.firstChild) + + doc.writexml(open(appdata_path, "w")) + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + parser.add_argument('version', metavar='VERSION') + parser.add_argument('appdata', metavar='APPDATA_FILE') + + args = parser.parse_args() + update_appdata(args.version, args.appdata) -- 2.29.2 From b7f96b908b145006d3fc10b96a6843f13e420de4 Mon Sep 17 00:00:00 2001 From: Steve Cotton Date: Sat, 16 Nov 2019 20:36:08 +0100 Subject: [PATCH 22/31] Add trackviewer, remove trackplacer Trackviewer shows a journey without needing to start Wesnoth and refresh the cache. Combined with tmx_trackplacer, the python3 versions now have feature-parity with the old trackplacer, without the reliance on python2 or pygtk (fixes #4365). At least on Linux you can have both Tiled and this open on the same file. Save the file in Tiled, alt+tab to this, and press enter to reload the file. 
(cherry picked from commit 2c9caab3cee5c36e4612d27cc50ec6ae0a458eb4) --- data/tools/trackplacer | 1275 ------------------------------------ data/tools/trackviewer.pyw | 232 +++++++ 2 files changed, 232 insertions(+), 1275 deletions(-) delete mode 100755 data/tools/trackplacer create mode 100755 data/tools/trackviewer.pyw diff --git a/data/tools/trackplacer b/data/tools/trackplacer deleted file mode 100755 index 2b390a1a7c8..00000000000 --- a/data/tools/trackplacer +++ /dev/null @@ -1,1275 +0,0 @@ -#!/usr/bin/env python2 -''' -trackplacer -- map journey track editor. - -usage: trackplacer [-vh?] [filename] - -A journey is an object containing a map file name and a (possibly -empty) list of tracks, each with a name and each consisting of a -sequence of track markers. This program exists to visually edit -journeys represented as specially delimited sections in .cfg files. - -If the .cfg filename is not specified, trackplacer will enter a loop -in which it repeatedly pops up a file selector. Canceling the file -select ends the program; Selecting a file takes you to a main screen. -For command help on the main screen, click the Help button. - -Can be started with a map image, in which case we are editing a new journey. -Can be started with a .cfg file, in which case it will look for -track information enclosed in special comments that look like this: - - # trackplacer: tracks begin - # trackplacer: tracks end - -trackplacer will alter only what it finds inside these comments, except tht it -will also generate a file epilog for undefining local symbols. The -epilog will begin with this comment: - - # trackplacer: epilog begins - -Special comments may appear in the track section, looking like this: - - # trackplacer: = - -These set properties that trackplacer may use. At present there is -only one such property: "map", which records the name of the mapfile on -which your track is laid. 
- -Normally, trackplacer assumes it is running within a Battle for -Wesnoth source tree and changes directory to the root of the -tree. Paths saved in track files are relative to the tree root. All -pathnames in help and error messages are also relativized to that -root. - -The -v option enables verbose logging to standard error. - -The -d option sets the root directory to use. - -The -h or -? options display this summary. - -For details on the editing controls, click the Help button in the trackplacer -GUI. -''' - -gui_help = '''\ -You are editing or creating a set of named tracks; at any given time there will one track that is selected for editing. For campaigns with a linear narrative there will be only one track, always selected, and you will not have to concern yourself about its name. If your campaign has a non-linear structure, you will want to create one track for each segment. - -The radio buttons near the top left corner control which icon is placed by a left click. The two rightmost are special; when the trashcan is clicked a left click deletes already-placed icons, and the convert/copy icon tries to copy a nearby icon from an unselected track onto the selected one, preserving its pixel coordinates exactly. Every time you place an icon, it is added to the currently selected track. You may also drag icons with the middle button. - -The rule for adding markers to the selected track is as follows: if the two markers closest to the mouse pointer are adjacent on the track, insert the new marker between them in the track order. Otherwise, append it to the end of the track. - -Click the right button to examine features overlapping the pointer. Each marker on both selected and unselected tracks will be reported. - -The Animate button clears the icons off the map and places them with a delay after each placement, so you can see what order they are drawn in. If you have multiple tracks, only those currently visible will be animated. 
- -The Save button pops up a file selector asking you to supply a filename to which the track should be saved in .cfg format, as a series of macros suitable for inclusion in WML. Any other extension than .cfg on the filename will raise an error. - -The Properties button pops up a list of track properties - key/value pairs associated with the track. All tracks have the property "map" with their associated map name as the value. - -The Tracks button pops up a list of controls, one for each track. You can change the state of the checkboxes to control which tracks are visible. The radiobuttons can be used to select a track for editing. You can also add and rename tracks here. Hover over the controls for tooltips. - -The Help button displays this message. - -The Quit button ends your session, asking for confirmation if you have unsaved changes. -''' - -gui_about = '''\ -This is trackplacer, an editor for visually editing sets of journey tracks on Battle For Wesnoth maps. - -By Eric S. Raymond for the Battle For Wesnoth project, October 2008 -''' - - -import sys, os, re, math, time, exceptions, getopt - -import pygtk -pygtk.require('2.0') -import gtk - -import wesnoth.wmltools - -# All dependencies on the shape of the data tree live here -# The code does no semantic interpretation of these icons at all; -# to add new ones, just fill in a dictionary entry. -imagedir = "data/core/images/" -default_map = imagedir + "maps/wesnoth.png" -selected_icon_dictionary = { - "JOURNEY": imagedir + "misc/new-journey.png", - "BATTLE": imagedir + "misc/new-battle.png", - "REST": imagedir + "misc/flag-red.png", - } -unselected_icon_dictionary = { - "JOURNEY": imagedir + "misc/dot-white.png", - "BATTLE": imagedir + "misc/cross-white.png", - "REST": imagedir + "misc/flag-white.png", - } -icon_presentation_order = ("JOURNEY", "BATTLE", "REST") -segmenters = ("BATTLE","REST") - -class IOException(exceptions.Exception): - "Exception thrown while reading a track file." 
- def __init__(self, message, path, lineno=None): - self.message = message - self.path = path - self.lineno = lineno - -# Basic functions for bashing points and rectangles - -def distance(x1, y1, x2, y2): - "Euclidean distance." - return math.sqrt((x1 - x2)**2 + abs(y1 - y2)**2) - -def within(x, y, (l, t, r, d)): - "Is point within specified rectangle?" - if x >= l and x <= l + r - 1 and y >= t and y <= t + d - 1: - return True - return False - -def overlaps(p1, p2): - "Do two rectangles overlap?" - (x1,y1,x1d,y1d) = p1 - (x2,y2,x2d,y2d) = p2 - return within(x1, y1, p2) or \ - within(x1+x1d, y1, p2) or \ - within(x1, y1+y1d, p2) or \ - within(x1+x1d, y1+y1d, p2) or \ - within(x2, y2, p1) or \ - within(x2+x2d, y2, p1) or \ - within(x2, y2+y2d, p1) or \ - within(x2+x2d, y2+y2d, p1) - -class JourneyTracks: - "Represent a set of named journey tracks on a map." - def __init__(self): - self.mapfile = None # Map background of the journey - self.tracks = {} # Dict of lists of (action, x, y) tuples - self.selected_id = None - self.modifications = 0 - self.track_order = [] - self.properties = {} - self.modified = 0 - self.before = self.after = "" - def selected_track(self): - "Select a track for modification" - return self.tracks[self.selected_id] - def set_selected_track(self, name): - self.selected_id = name - def write(self, filename): - "Record a set of named journey tracks." 
- if filename.endswith(".cfg"): - fp = open(filename, "w") - fp.write(self.before) - fp.write("# trackplacer: tracks begin\n#\n") - fp.write("# Hand-hack this section strictly at your own risk.\n") - fp.write("#\n") - if not self.before and not self.after: - fp.write("#\n# wmllint: no translatables\n\n") - for (key, val) in self.properties.items(): - fp.write("# trackplacer: %s=%s\n" % (key, val)) - fp.write("#\n") - definitions = [] - for name in self.track_order: - track = self.tracks[name] - index_tuples = zip(xrange(len(track)), track) - index_tuples = filter(lambda (i, (a, x, y)): a in segmenters, - index_tuples) - endpoints = map(lambda (i, t): i, index_tuples) - if track[-1][0] not in segmenters: - endpoints.append(len(track)-1) - outname = name.replace(" ", "_").upper() - for (i, e) in enumerate(endpoints): - stagename = "%s_STAGE%d" % (outname, i+1,) - definitions.append(stagename) - fp.write("#define %s\n" % stagename) - for j in xrange(0, e+1): - age="OLD" - if i == 0 or j > endpoints[i-1]: - age = "NEW" - waypoint = (age,) + tuple(track[j]) - marker = " {%s_%s %d %d}\n" % waypoint - fp.write(marker) - fp.write("#enddef\n\n") - endname = "%s_END" % stagename - fp.write("#define %s\n" % endname) - definitions.append(endname) - for j in xrange(0, e+1): - age="OLD" - if j == endpoints[i]: - age = "NEW" - waypoint = (age,) + tuple(track[j]) - marker = " {%s_%s %d %d}\n" % waypoint - fp.write(marker) - fp.write("#enddef\n\n") - completename = "%s_COMPLETE" % name - fp.write("#define %s\n" % completename) - definitions.append(completename) - for j in xrange(len(track)): - waypoint = track[j] - fp.write(" {OLD_%s %d %d}\n" % tuple(waypoint)) - fp.write("#enddef\n\n") - fp.write("# trackplacer: tracks end\n") - fp.write(self.after) - fp.write ("# trackplacer: epilog begins\n\n") - for name in definitions: - if "{" + name + "}" not in self.after: - fp.write("#undef %s\n" % name) - fp.write ("\n# trackplacer: epilog ends\n") - fp.close() - self.modified = 0 - 
else: - raise IOException("File must have .cfg extension.", fp.name) - def read(self, fp): - "Initialize a journey from map and track information." - if type(fp) == type(""): - try: - fp = open(fp, "rU") - except IOError: - raise IOException("Cannot read file.", fp) - if self.tracks: - raise IOException("Reading with tracks nonempty.", fp.name) - if fp.name.endswith(".png") or fp.name.endswith(".jpg"): - self.mapfile = self.properties['map'] = fp.name - self.selected_id = "JOURNEY" - self.add_track(self.selected_id) - self.modified = 0 - return - if not fp.name.endswith(".cfg"): - raise IOException("Cannot read this filetype.", fp.name) - waypoint_re = re.compile("{NEW_(" + "|".join(icon_presentation_order) + ")" \ - + " +([0-9]+) +([0-9]+)}") - property_re = re.compile("# *trackplacer: ([^=]+)=(.*)") - define_re = re.compile("#define (.*)_STAGE[0-9]+(_END|_COMPLETE)?") - state = "before" - ignore = False - for line in fp: - if line.startswith("# trackplacer: epilog begins"): - break - # This is how we ignore stuff outside of track sections - if state == "before": - if line.startswith("# trackplacer: tracks begin"): - state = "tracks" # And fall through... - else: - self.before += line - continue - elif state == "after": - self.after += line - continue - elif line.startswith("# trackplacer: tracks end"): - state = "after" - continue - # Which track are we appending to? - m = re.search(define_re, line) - if m: - self.selected_id = m.group(1) - ignore = m.group(2) - if self.selected_id not in self.track_order: - self.track_order.append(self.selected_id) - self.tracks[self.selected_id] = [] - continue - # Is this a track marker? - m = re.search(waypoint_re, line) - if m and not ignore: - try: - tag = m.group(1) - x = int(m.group(2)) - y = int(m.group(3)) - self.tracks[self.selected_id].append((tag, x, y)) - continue - except ValueError: - raise IOException("Invalid coordinate field.", fp.name, i+1) - # Is it a property setting? 
- m = re.search(property_re, line) - if m: - self.properties[m.group(1)] = m.group(2) - continue - if "map" in self.properties: - self.mapfile = self.properties['map'] - else: - raise IOException("Missing map declaration.", fp.name) - fp.close() - self.modified = 0 - def __getitem__(self, n): - return self.tracks[self.selected_id][n] - def __setitem__(self, n, v): - if self.tracks[self.selected_id][n] != v: - self.modified += 1 - self.tracks[self.selected_id][n] = v - def add_track(self, name): - if name not in self.track_order: - self.tracks[name] = [] - self.track_order.append(name) - if self.selected_id is None: - self.selected_id = name - self.modified += 1 - def remove_track(self, name): - if name in self.track_order: - del self.tracks[name] - self.track_order.remove(name) - if not self.track_order: - self.add_track("JOURNEY") - self.modified += 1 - def rename_track(self, oldname, newname): - if oldname in self.tracklist and newname not in self.tracklist: - self.tracks[newname] = self.tracks[oldname] - self.track_order[self.track_order.index(oldname)] = newname - def has_unsaved_changes(self): - return self.modified - def neighbors(self, x, y): - "Return list of neighbors on selected track, enumerated and sorted by distance." - neighbors = [] - candidates = zip(xrange(len(self.selected_track())), self.selected_track()) - candidates.sort(lambda (i1, (a1, x1, y1)), (i2, (a2, x2, y2)): cmp(distance(x, y, x1, y1), distance(x, y, x2, y2))) - return candidates - def find(self, x, y): - "Find all actions at the given pointin in the selected track." - candidates = [] - for (i, (tag, xt, yt)) in enumerate(self.selected_track()): - if x == xt and y == yt: - candidates.append(i) - return candidates - def insert(self, (action, x, y)): - "Insert a feature in the selected track." 
- neighbors = self.neighbors(x, y) - # There are two or more markers and we're not nearest the end one - if len(neighbors) >= 2 and neighbors[0][0] != len(neighbors)-1: - closest = neighbors[0] - next_closest = neighbors[1] - # If the neighbors are adjacent, insert between them - if abs(closest[0] - next_closest[0]) == 1: - self.selected_track().insert(max(closest[0], next_closest[0]), (action, x, y)) - self.modified += 1 - return - # Otherwise, append - self.selected_track().append((action, x, y)) - self.modified += 1 - def remove(self, x, y): - "Remove a feature from the selected track." - found = self.find(x, y) - if found: - # Prefer to delete the most recent feature - track = self.selected_track() - self.tracks[self.selected_id] = track[:found[-1]] + track[found[-1]+1:] - self.modified += 1 - def __str__(self): - rep = self.mapfile + repr(self.track_order) + "\n" - for name in self.track_order: - track = self.tracks[name] - rep += name + ": " + repr(track) + ":\n" - return rep - -class ContextPopup: - def __init__(self, editor): - self.editor = editor - self.window = gtk.Window(gtk.WINDOW_POPUP) - self.window.set_transient_for(None) - self.window.set_position(gtk.WIN_POS_CENTER_ALWAYS) - self.window.set_name("trackplacer info") - self.frame=gtk.Frame() - self.window.add(self.frame) - self.frame.show() - self.vbox = gtk.VBox(False, 0) - self.frame.add(self.vbox) - self.vbox.show() - self.window.show() - self.position = gtk.Label() - self.vbox.pack_start(self.position, expand=False, fill=False) - self.position.show() - def inform(self, x, y): - self.position.set_text("At (%d, %d):" % (x, y)) - save_selected = self.editor.journey.selected_id - local = [] - for name in self.editor.journey.track_order: - # Gather info - self.editor.journey.set_selected_track(name) - for (possible, item) in self.editor.journey.neighbors(x, y): - if within(x, y, self.editor.box(item)): - stagecount = 0 - for i in xrange(possible): - (action, xn, yn) = self.editor.journey[i] - if 
action in segmenters: - stagecount += 1 - local.append((name, possible, self.editor.journey[possible], stagecount)) - self.editor.journey.set_selected_track(save_selected) - # Display it - if local: - for (name, index, (action, x, y), sc) in local: - legend = "%s at (%d, %d) is %s[%d], stage %d" \ - % (action.capitalize(), x,y, name, index, sc+1) - label = gtk.Label(legend) - label.show() - self.vbox.add(label) - else: - label = gtk.Label("No features") - label.show() - self.vbox.add(label) - def destroy(self): - self.window.destroy() - -class TrackEditorIcon: - def __init__(self, action, path): - self.action = action - # We need an image for the toolbar... - self.image = gtk.Image() - self.image.set_from_file(path) - # ...and a pixbuf for drawing on the map with. - self.icon = gtk.gdk.pixbuf_new_from_file(path) - self.icon_width = self.icon.get_width() - self.icon_height = self.icon.get_height() - def bounding_box(self, x, y): - "Return a bounding box for this icon when centered at (x, y)." - # The +1 is a slop factor allowing for even-sized icons - return (x-self.icon_width/2, y-self.icon_height/2, - self.icon_width+1, self.icon_height+1) - -class TrackController: - "Object for controlling an individual track in the Tracks dialog." 
- def __init__(self, editor, track_id, trackbox, basebutton): - self.editor = editor - self.track_id = track_id - self.hbox = gtk.HBox() - trackbox.add(self.hbox) - self.hbox.show() - self.radiobutton = gtk.RadioButton(basebutton) - self.radiobutton.set_active(track_id == editor.journey.selected_id) - self.radiobutton.connect("toggled", - editor.track_activity_callback, track_id) - self.radiobutton.set_tooltip_text("Select %s for editing" % track_id) - self.radiobutton.show() - self.hbox.add(self.radiobutton) - self.checkbox = gtk.CheckButton() - self.checkbox.set_active(track_id in editor.visible_set) - self.checkbox.connect("toggled", - editor.track_visibility_callback, track_id) - self.hbox.add(self.checkbox) - self.checkbox.set_tooltip_text("Toggle visibility of %s" % track_id) - self.checkbox.show() - self.rename = gtk.Entry() - self.rename.set_text(track_id) - self.rename.connect("activate", self.track_rename_handler, track_id) - self.rename.set_tooltip_text("Change name of track %s" % track_id) - self.rename.show() - self.hbox.add(self.rename) - # We really should have been able to do this: - # self.deleter = gtk.Button(stock=gtk.STOCK_DELETE, label="") - # Instead, we have to writhe and faint in coils because the - # stock argument forces the label. 
- self.deleter = gtk.Button() - delimage = gtk.Image() - delimage.set_from_stock(gtk.STOCK_DELETE, gtk.ICON_SIZE_SMALL_TOOLBAR) - bbox = gtk.HBox() - self.deleter.add(bbox) - bbox.add(delimage) - delimage.show() - bbox.show() - - self.deleter.connect("clicked", self.track_delete_handler, track_id) - self.hbox.add(self.deleter) - self.deleter.set_tooltip_text("Delete track %s" % track_id) - self.deleter.show() - editor.controller[track_id] = self - def track_delete_handler(self, w, track_id): - if track_id in self.editor.visible_set: - self.editor.visible_set.remove(track_id) - if track_id == self.editor.journey.selected_id: - self.editor.track_select(w, self.editor.visible_set[-1]) - # FIXME: This redraw fails when we delete the last track. - self.editor.redraw(self.editor.drawing_area) - self.editor.journey.remove_track(track_id) - self.hbox.hide() - del self.editor.controller[track_id] - def track_rename_handler(self, w, track_id): - editor.journey.rename(track_id, w.get_text()) - self.editor.controller[w.get_text()] = self.editor.controller[track_id] - del self.editor.controller[track_id] - -class TracksEditor: - def __init__(self, path=None, verbose=False, force_save=False): - self.verbose = verbose - self.force_save = force_save - # Initialize our info about the map and track - self.journey = JourneyTracks() - self.last_read = None - self.journey.read(path) - self.time_last_io = time.time() - if path.endswith(".cfg"): - self.last_read = path - self.log("Initial track is %s" % self.journey) - self.action = "JOURNEY" - self.selected = None - self.visible_set = self.journey.track_order[:] - self.context_popup = None - self.pixmap = None # Backing pixmap for drawing area - - # Grab the map into a pixmap - self.log("about to read map %s" % self.journey.mapfile) - try: - self.map = gtk.gdk.pixbuf_new_from_file(self.journey.mapfile) - self.map_width = self.map.get_width() - self.map_height = self.map.get_height() - self.map = self.map.render_pixmap_and_mask()[0] - 
except: - self.fatal_error("Error while reading background map %s" % self.journey.mapfile) - # Now get the icons we'll need for scribbling on the map with. - try: - self.selected_dictionary = {} - for (action, path) in selected_icon_dictionary.items(): - icon = TrackEditorIcon(action, path) - self.log("selected %s icon has size %d, %d" % \ - (action, icon.icon_width, icon.icon_height)) - self.selected_dictionary[action] = icon - self.unselected_dictionary = {} - for (action, path) in unselected_icon_dictionary.items(): - icon = TrackEditorIcon(action, path) - self.log("unselected %s icon has size %d, %d" % \ - (action, icon.icon_width, icon.icon_height)) - self.unselected_dictionary[action] = icon - except: - self.fatal_error("error while reading icons") - - # Window-layout time - self.window = gtk.Window(gtk.WINDOW_TOPLEVEL) - self.window.set_name ("trackplacer") - - vbox = gtk.VBox(False, 0) - self.window.add(vbox) - vbox.show() - - self.window.connect("destroy", lambda w: gtk.main_quit()) - - # Set up toolbar style - toolbar = gtk.Toolbar() - toolbar.set_orientation(gtk.ORIENTATION_HORIZONTAL) - toolbar.set_style(gtk.TOOLBAR_BOTH) - toolbar.set_border_width(1) - vbox.pack_start(toolbar, expand = False) - toolbar.show() - - # Toolbar widget has a fit when we try to pack these separately. 
- radiobox1 = gtk.ToolItem() - radiobox = gtk.HBox() - radiobox1.add(radiobox) - radiobox1.show() - radiobox.show() - toolbar.insert(radiobox1, -1) - - # Marker selection - basebutton = None - for action in icon_presentation_order: - icon = self.selected_dictionary[action] - button = gtk.RadioButton(basebutton) - bbox = gtk.HBox() - button.add(bbox) - bbox.add(icon.image) - icon.image.show() - bbox.show() - if not basebutton: - button.set_active(True) - basebutton = button - button.connect("toggled", self.button_callback, icon.action) - radiobox.pack_start(button, padding=7) - button.show() - button.set_tooltip_text("Place %s markers" % action.lower()) - - # The delete button and its label - button = gtk.RadioButton(button) - delimage = gtk.Image() - delimage.set_from_stock(gtk.STOCK_DELETE, gtk.ICON_SIZE_SMALL_TOOLBAR) - bbox = gtk.HBox() - button.add(bbox) - bbox.add(delimage) - delimage.show() - bbox.show() - button.connect("toggled", self.button_callback, "DELETE") - radiobox.pack_start(button, padding=7) - button.show() - button.set_tooltip_text("Remove markers") - - # The copy button and its label - button = gtk.RadioButton(button) - copyimage = gtk.Image() - copyimage.set_from_stock(gtk.STOCK_CONVERT, gtk.ICON_SIZE_SMALL_TOOLBAR) - bbox = gtk.HBox() - button.add(bbox) - bbox.add(copyimage) - copyimage.show() - bbox.show() - button.connect("toggled", self.button_callback, "COPY") - radiobox.pack_start(button, padding=7) - button.show() - button.set_tooltip_text("Copy marker from an unselected track") - - # Sigh - causes elements to jumop around in the toolbar, - # because when it's not there the application wants the - # extra space for buttons. 
- #self.coordwin = gtk.Label("") - #coordwrapper = gtk.ToolItem() - #coordwrapper.add(self.coordwin) - #toolbar.add(coordwrapper) - #coordwrapper.set_expand(True) - #self.coordwin.show() - #coordwrapper.show() - - spacer = gtk.SeparatorToolItem() - toolbar.add(spacer) - spacer.set_draw(False) - spacer.set_expand(True) - spacer.show() - - quit = gtk.ToolButton(gtk.STOCK_QUIT) - toolbar.insert(quit, -1) - quit.set_tooltip_text("Leave this program.") - quit.connect("clicked", self.quit) - quit.show() - - save = gtk.ToolButton(gtk.STOCK_SAVE) - toolbar.insert(save, -1) - save.set_tooltip_text("Save journey tracks.") - save.connect("clicked", self.save_handler) - save.show() - - properties = gtk.ToolButton(gtk.STOCK_PROPERTIES) - toolbar.insert(properties, -1) - properties.set_tooltip_text("St properties of the tracks.") - properties.connect("clicked", self.properties_handler) - properties.show() - - animate = gtk.ToolButton(gtk.STOCK_REFRESH) - animate.set_label(label="Animate") - toolbar.insert(animate, -1) - animate.set_tooltip_text("Animate tracks as in story parts.") - animate.connect("clicked", self.animate_handler) - animate.show() - - tracks = gtk.ToolButton(gtk.STOCK_INDEX) - tracks.set_label(label="Tracks") - toolbar.insert(tracks, -1) - tracks.set_tooltip_text("Add, edit, delete and rename tracks.") - tracks.connect("clicked", self.tracks_handler) - tracks.show() - - help = gtk.ToolButton(gtk.STOCK_HELP) - toolbar.insert(help, -1) - help.set_tooltip_text("Get command help for this program.") - help.connect("clicked", self.help_handler) - help.show() - - about = gtk.ToolButton(gtk.STOCK_ABOUT) - toolbar.insert(about, -1) - about.set_tooltip_text("See credits for this program.") - about.connect("clicked", self.about_handler) - about.show() - - # Create the drawing area on a viewport that scrolls, if needed. 
- self.drawing_area = gtk.DrawingArea() - self.drawing_area.set_size_request(self.map_width, self.map_height) - screen_width = gtk.gdk.screen_width() - screen_height = gtk.gdk.screen_height() - if self.map_width < 0.75 * screen_width and self.map_height < 0.75 * screen_width: - # Screen is large relative to the image. Grab enough - # space to display the entire map. and never scroll. - # There should be enough space around the edges for window - # decorations, task bars, etc. - vbox.pack_start(self.drawing_area, expand=True, fill=True, padding=0) - self.drawing_area.show() - else: - # Screen is small. Grab all the space the window manager will - # give us and deal with scrolling. - scroller = gtk.ScrolledWindow() - scroller.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC) - scroller.add_with_viewport(self.drawing_area) - vbox.pack_start(scroller) - self.window.maximize() - self.drawing_area.show() - scroller.show() - - # Signals used to handle backing pixmap - self.drawing_area.connect("expose_event", self.expose_event) - self.drawing_area.connect("configure_event", self.configure_event) - - # Event signals - self.drawing_area.connect("motion_notify_event", self.motion_notify_event) - self.drawing_area.connect("button_press_event", self.button_press_event) - - self.drawing_area.connect("button_release_event", self.button_release_event) - - self.drawing_area.connect("leave_notify_event", self.leave_area_event) - - self.drawing_area.set_events(gtk.gdk.EXPOSURE_MASK - | gtk.gdk.LEAVE_NOTIFY_MASK - | gtk.gdk.BUTTON_PRESS_MASK - | gtk.gdk.BUTTON_RELEASE_MASK - | gtk.gdk.POINTER_MOTION_MASK - | gtk.gdk.POINTER_MOTION_HINT_MASK) - - - self.window.show() - gtk.main() - self.log("initialization successful") - - def button_callback(self, widget, data=None): - "Radio button callback, changes selected editing action." 
- if widget.get_active(): - self.action = data - - def refresh_map(self, x=0, y=0, xs=-1, ys=-1): - "Refresh part of the drawing area with the appropriate map rectangle." - self.log("Refreshing map in (%d, %d, %d, %d, %d, %d}" % (x,y,x,y,xs,ys)) - self.pixmap.draw_drawable(self.default_gc, self.map, x, y, x, y, xs, ys) - - def box(self, (action, x, y)): - "Compute the bounding box for an icon of type ACTION at X, Y." - # Assumes selected and unselected icons are the same size - return self.selected_dictionary[action].bounding_box(x, y) - - def snap_to(self, x, y): - "Snap a location to the nearest feature on the selected track whose bounding box holds it." - self.log("Neighbors of %d, %d are %s" % (x, y, self.journey.neighbors(x, y))) - for (i, item) in self.journey.neighbors(x, y): - if within(x, y, self.box(item)): - return i - else: - return None - - def neighbors(self, (action, x, y)): - "Return all track items with bounding boxes overlapping this one:" - rect = self.selected_dictionary[action].bounding_box(x, y) - return filter(lambda item: overlaps(rect, self.box(item)), - self.journey.selected_track()) - - def erase_feature(self, widget, (action, x, y)): - "Erase specified (active) icon from the map." - # Erase all nearby features that might have been damaged. - save_select = self.journey.selected_id - for (id, track) in self.journey.tracks.items(): - if id not in self.visible_set: - continue - self.journey.set_selected_track(id) - neighbors = self.neighbors((action, x, y)) - for (na, nx, ny) in neighbors: - rect = self.box((na, nx, ny)) - self.log("Erasing action=%s, dest=%s" % (na, rect)) - self.refresh_map(*rect) - widget.queue_draw_area(*rect) - # Redraw all nearby features except what we're erasing. 
- for (na, nx, ny) in neighbors: - if x != nx and y != ny: - self.log("Redrawing action=%s" % ((na, nx, ny),)) - self.draw_feature(widget, - (na, nx, ny), - save_select == self.journey.selected_id) - self.journey.set_selected_track(save_select) - - def draw_feature(self, widget, (action, x, y), selected): - "Draw specified icon on the map." - rect = self.box((action, x, y)) - self.log("Drawing action=%s (%s), dest=%s" % (action, selected, rect)) - if selected: - icon = self.selected_dictionary[action].icon - else: - icon = self.unselected_dictionary[action].icon - self.pixmap.draw_pixbuf(self.default_gc, icon, 0, 0, *rect) - widget.queue_draw_area(*rect) - - def flush(self, widget): - "Force pending events out." - self.expose_event(widget) - while gtk.events_pending(): - gtk.main_iteration(False) - - def redraw(self, widget, delay=0): - "Redraw the map and tracks." - self.refresh_map() - for track_id in self.journey.track_order: - if track_id not in self.visible_set: - continue - for item in self.journey.tracks[track_id]: - self.draw_feature(widget, item, track_id == self.journey.selected_id) - if delay: - time.sleep(delay) - self.flush(widget) - # To ensure items on selected track are on top, redraw them - if self.journey.track_order: - for item in self.journey.selected_track(): - self.draw_feature(widget, item, True) - self.flush(widget) - - def configure_event(self, widget, event): - "Create a new backing pixmap of the appropriate size." 
- x, y, width, height = widget.get_allocation() - self.pixmap = gtk.gdk.Pixmap(widget.window, width, height) - self.default_gc = self.drawing_area.get_style().fg_gc[gtk.STATE_NORMAL] - self.redraw(widget) - return True - - def expose_event(self, widget, event=None): - "Redraw the screen from the backing pixmap" - if event: - x , y, width, height = event.area - else: - x, y, width, height = widget.get_allocation() - widget.window.draw_drawable(self.default_gc, - self.pixmap, x, y, x, y, width, height) - return False - - def button_press_event(self, widget, event): - if self.pixmap is None: - return - if self.journey.selected_track() is None: - w = gtk.MessageDialog(type=gtk.MESSAGE_ERROR, buttons=gtk.BUTTONS_OK) - w.set_markup("No track to edit!") - w.run() - return - # Pick up state information whatever button is pressed - a = self.action - x = int(event.x) - y = int(event.y) - self.selected = self.snap_to(x, y) - # Event button 1 - draw - if event.button == 1: - # Skip the redraw in half the cases - self.log("Action %s at (%d, %d): feature = %s" % (self.action, x, y, self.selected)) - if self.selected == None and self.action == "COPY": - save_selected = self.journey.selected_id - most_recent = None - for name in self.journey.track_order: - if name != save_selected: - self.journey.set_selected_track(name) - possible = self.snap_to(x, y) - if possible is not None: - print "Found possible on", name - most_recent = (name, possible, self.journey[possible]) - self.journey.set_selected_track(save_selected) - if most_recent: - (nn, np, (an, xn, yn)) = most_recent - self.log("Copy feature: %s[%d] = %s" % (nn, np, (an,xn,yn))) - (a, x, y) = (an, xn, yn) - else: - return - if (self.selected == None) and (a == "DELETE"): - return - if (self.selected != None) and (a != "DELETE"): - return - # Actual drawing and mutation of the journey track happens here - if not self.selected and a != "DELETE": - self.draw_feature(widget, (a, x, y), True) - self.journey.insert((a, x, y)) - 
elif self.selected != None and a == "DELETE": - (a, x, y) = self.journey[self.selected] - self.log("Deletion snapped to feature %d %s" % (self.selected,(a,x,y))) - self.erase_feature(widget, (a, x, y)) - self.journey.remove(x, y) - self.log("Tracks are %s" % self.journey) - # Event button 3 - query - if event.button == 3: - self.context_popup = ContextPopup(self) - self.context_popup.inform(x, y) - return True - - def button_release_event(self, widget, event): - if self.context_popup is not None: - self.context_popup.destroy() - - def motion_notify_event(self, widget, event): - if event.is_hint: - x, y, state = event.window.get_pointer() - else: - x = event.x - y = event.y - #self.coordwin.set_text("(%d, %d)" % (x, y)) - state = event.state - - # This code enables dragging icons wit h the middle button. - if state & gtk.gdk.BUTTON2_MASK and self.pixmap != None: - if self.selected is not None: - (action, lx, ly) = self.journey[self.selected] - self.erase_feature(widget, (action, lx, ly)) - self.journey[self.selected] = (action, x, y) - self.journey.modified += 1 - self.draw_feature(widget, (action, x, y), True) - self.log("Tracks are %s" % self.journey) - return True - - def leave_area_event(self, w, e): - if self.context_popup: - self.context_popup.destroy() - #self.coordwin.set_text("") - - def quit(self, w): - if self.journey.has_unsaved_changes(): - self.quit_check = gtk.Dialog(title="Really quit?", - parent=None, - flags=gtk.DIALOG_MODAL, - buttons=(gtk.STOCK_CANCEL, gtk.RESPONSE_REJECT, - gtk.STOCK_OK, gtk.RESPONSE_ACCEPT)) - label = gtk.Label("Track has unsaved changes. 
OK to quit?") - self.quit_check.vbox.pack_start(label) - label.show() - response = self.quit_check.run() - self.quit_check.destroy() - if response == gtk.RESPONSE_ACCEPT: - sys.exit(0) - else: - sys.exit(0) - - def save_handler(self, w): - "Save track data," - if not self.journey.has_unsaved_changes() and not self.force_save: - w = gtk.MessageDialog(type=gtk.MESSAGE_INFO, - flags=gtk.DIALOG_DESTROY_WITH_PARENT, - buttons=gtk.BUTTONS_OK) - w.set_markup("You have no unsaved changes.") - w.run() - w.destroy() - else: - # Request save file name - dialog = gtk.FileChooserDialog("Save track macros", - None, - gtk.FILE_CHOOSER_ACTION_SAVE, - (gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL, - gtk.STOCK_SAVE, gtk.RESPONSE_OK)) - dialog.set_default_response(gtk.RESPONSE_CANCEL) - if self.last_read: - dialog.set_filename(self.last_read) - dialog.set_show_hidden(False) - - sfilter = gtk.FileFilter() - sfilter.set_name("Track files") - sfilter.add_pattern("*.cfg") - dialog.add_filter(sfilter) - - response = dialog.run() - filename = dialog.get_filename() - dialog.destroy() - if response == gtk.RESPONSE_CANCEL: - return - - # Relativize file path to current directory - if filename.startswith(os.getcwd() + os.sep): - filename = filename[len(os.getcwd())+1:] - - # Request overwrite confirmation in some circumstances - confirmation_required = None - if os.path.exists(filename): - if filename != self.last_read: - confirmation_required = "You have requested saving "\ - "to a file other than %s, " \ - "and that file already exists." \ - % self.last_read - elif os.stat(filename).st_mtime > self.time_last_io: - confirmation_required = "File has changed "\ - "since last read or written." - if confirmation_required: - confirmation_required += "\nReally overwrite %s?" 
% filename - save_check = gtk.Dialog(title="Really overwrite?", - parent=None, - flags=gtk.DIALOG_MODAL, - buttons=(gtk.STOCK_CANCEL, - gtk.RESPONSE_REJECT, - gtk.STOCK_OK, - gtk.RESPONSE_ACCEPT)) - label = gtk.Label(confirmation_required) - save_check.vbox.pack_start(label) - label.show() - response = save_check.run() - save_check.destroy() - if response == gtk.RESPONSE_REJECT: - return - - # Actual I/O - self.log("Writing track data to %s" % filename) - try: - self.journey.write(filename) - if not self.journey.mapfile: - self.journey.mapfile = filename - self.time_last_io = time.time() - except IOError: - w = gtk.MessageDialog(type=gtk.MESSAGE_INFO, - flags=gtk.DIALOG_DESTROY_WITH_PARENT, - buttons=gtk.BUTTONS_OK) - w.set_markup("Cannot write" + filename) - w.run() - w.destroy() - - def help_handler(self, w): - "Display help." - w = gtk.MessageDialog(type=gtk.MESSAGE_INFO, - flags=gtk.DIALOG_DESTROY_WITH_PARENT, - buttons=gtk.BUTTONS_OK) - w.set_markup(gui_help) - w.run() - w.destroy() - - def about_handler(self, w): - "Display about information." - w = gtk.MessageDialog(type=gtk.MESSAGE_INFO, - flags=gtk.DIALOG_DESTROY_WITH_PARENT, - buttons=gtk.BUTTONS_OK) - w.set_markup(gui_about) - w.run() - w.destroy() - - def tracks_handler(self, w): - "Modify the visible set of tracks." - self.visibility = gtk.Dialog(title="Edit track visibility", - buttons=(gtk.STOCK_CLOSE, - gtk.RESPONSE_ACCEPT)) - label = gtk.Label("The radiobuttons select a track for editing.") - self.visibility.vbox.pack_start(label) - self.visibility_toggles = {} - label.show() - label = gtk.Label("The checkbuttons toggle the visibility of tracks.") - self.visibility.vbox.pack_start(label) - label.show() - self.controller = {} - basebutton = None - self.trackbox = gtk.VBox() - self.visibility.vbox.add(self.trackbox) - self.trackbox.show() - basebutton = gtk.RadioButton() # Dummy, don't show it. 
- for (i, track_id) in enumerate(self.journey.track_order): - TrackController(self, track_id, self.trackbox, basebutton) - movebox = gtk.HBox() - label = gtk.Label("The up and down buttons change the track order.") - self.visibility.vbox.pack_start(label) - label.show() - upbutton = gtk.Button(stock=gtk.STOCK_GO_UP) - movebox.pack_start(upbutton, expand=True, fill=True) - upbutton.connect("clicked", lambda w: self.track_move(backward=True)) - upbutton.show() - downbutton = gtk.Button(stock=gtk.STOCK_GO_DOWN) - movebox.pack_start(downbutton, expand=True, fill=True) - downbutton.connect("clicked", lambda w: self.track_move(backward=False)) - downbutton.show() - movebox.show() - addbox = gtk.HBox() - addbox.show() - addlabel = gtk.Label("Add New Track:") - addlabel.show() - addbox.add(addlabel) - addentry = gtk.Entry() - addentry.show() - addbox.add(addentry) - addentry.connect("activate", self.track_add_callback, basebutton) - self.visibility.vbox.add(movebox) - self.visibility.vbox.add(addbox) - self.visibility.connect("response", self.track_visibility_revert) - self.visibility.show() - def track_move(self, backward): - where = self.journey.track_order.index(self.journey.selected_id) - if backward: - where += (len(self.journey.track_order) - 1) - else: - where += 1 - where %= len(self.journey.track_order) - self.journey.track_order.remove(self.journey.selected_id) - self.journey.track_order.insert(where, self.journey.selected_id) - self.trackbox.reorder_child(self.controller[self.journey.selected_id].hbox, where) - def track_select(self, w, track_id): - "Make the specified track the selected one for editing." 
- self.journey.set_selected_track(track_id) - self.controller[track_id].checkbox.set_active(True) - self.controller[track_id].radiobutton.set_active(True) - if track_id not in self.visible_set: - self.track_visibility_callback(self.controller[track_id], track_id) - else: - self.redraw(self.drawing_area) - def track_activity_callback(self, w, track_id): - "Called (twice) when a track activity radiobutton is toggled." - if w.get_active(): - self.track_select(w, track_id) - def track_visibility_callback(self, w, track_id): - "Called when a track visibility checkbutton is toggled." - if len(self.visible_set) <= 1 and track_id in self.visible_set: - w = gtk.MessageDialog(type=gtk.MESSAGE_INFO, - flags=gtk.DIALOG_DESTROY_WITH_PARENT, - buttons=gtk.BUTTONS_OK) - w.set_markup("At least one track must remain visible.") - self.controller[track_id].checkbox.set_active(True) - w.run() - w.destroy() - return - self.log("Toggling visibility of %s" % track_id) - if track_id in self.visible_set: - self.visible_set.remove(track_id) - else: - self.visible_set.append(track_id) - self.log("Visibility set is now %s" % self.visible_set) - if self.journey.selected_id not in self.visible_set: - self.controller[track_id].radiobutton.set_active(False) - self.journey.set_selected_track(self.visible_set[-1]) - self.controller[self.visible_set[-1]].radiobutton.set_active(True) - else: - self.redraw(self.drawing_area) - def track_add_callback(self, w, basebutton): - "Add a new track, and the controller for it, and select it." - track_id = w.get_text() - w.set_text("") - TrackController(self, track_id, self.trackbox, basebutton) - self.journey.add_track(track_id) - self.track_select(w, track_id) - def track_visibility_revert(self, w, response_id): - "On response or window distruction, restore visibility set." 
- self.visible_set = self.journey.track_order - self.redraw(self.drawing_area) - self.visibility.destroy() - - def properties_handler(self, w): - "Display a dialog for editing track properties." - w = gtk.Dialog(title="Track properties editor", - parent=None, - flags=gtk.DIALOG_MODAL, - buttons=(gtk.STOCK_CANCEL, gtk.RESPONSE_REJECT, - gtk.STOCK_OK, gtk.RESPONSE_ACCEPT)) - label = gtk.Label("You can enter a key/value pair for a new property on the last line.") - label.show() - w.vbox.pack_start(label) - table = gtk.Table(len(self.journey.properties)+1, 2) - table.show() - w.vbox.pack_start(table) - keys = self.journey.properties.keys() - keys.sort() - labels = [] - entries = [] - for (i, key) in enumerate(keys): - labels.append(gtk.Label(key)) - labels[-1].show() - table.attach(labels[-1], 0, 1, i, i+1) - entries.append(gtk.Entry()) - entries[-1].set_text(self.journey.properties[key]) - entries[-1].set_width_chars(50) - entries[-1].show() - table.attach(entries[-1], 1, 2, i, i+1) - new_key = gtk.Entry() - new_key.set_width_chars(12) - new_key.show() - table.attach(new_key, 0, 1, len(keys)+1, len(keys)+2) - new_value = gtk.Entry() - new_value.set_width_chars(50) - table.attach(new_value, 1, 2, len(keys)+1, len(keys)+2) - new_value.show() - response = w.run() - w.destroy() - if response == gtk.RESPONSE_ACCEPT: - for (label, entry) in zip(labels, entries): - self.journey.properties[label.get_text()] = entry.get_text() - if new_key.get_text() and new_label.get_text(): - self.journey.properties[new_key.get_text()] = new_entry.get_text() - - def animate_handler(self, w): - "Animate dot placing as though on a storyboard." - self.refresh_map() - self.expose_event(self.drawing_area) - self.redraw(self.drawing_area, 0.5) - - def log(self, msg): - "Debugging report." - if self.verbose: - print >>sys.stderr, "trackplacer:", msg - - def fatal_error(self, msg): - "Notify user of error and die." 
- w = gtk.MessageDialog(type=gtk.MESSAGE_ERROR, buttons=gtk.BUTTONS_OK) - w.set_markup(msg) - w.run() - sys.exit(1) - -if __name__ == "__main__": - (options, arguments) = getopt.getopt(sys.argv[1:], "d:fhv?", - ['directory=', 'force', 'help', 'verbose']) - verbose = force_save = False - top = None - for (opt, val) in options: - if opt in ('-d', '--directory'): - top = val - elif opt in ('-f', '--force'): - force_save = True - elif opt in ('-?', '-h', '--help'): - print __doc__ - sys.exit(0) - elif opt in ('-v', '--verbose'): - verbose = True - - here = os.getcwd() - if top: - os.chdir(top) - else: - wesnoth.wmltools.pop_to_top("trackplacer") - if arguments: - try: - filename = os.path.join(here, arguments[0]) - # Relativize file path to current directory - if filename.startswith(os.getcwd() + os.sep): - filename = filename[len(os.getcwd())+1:] - TracksEditor(path=filename, verbose=verbose, force_save=force_save) - except IOException, e: - if e.lineno: - sys.stderr.write(('"%s", line %d: ' % (e.path, e.lineno)) + e.message + "\n") - else: - sys.stderr.write(e.path + ": " + e.message + "\n") - else: - while True: - try: - dialog = gtk.FileChooserDialog("Open track file", - None, - gtk.FILE_CHOOSER_ACTION_OPEN, - (gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL, - gtk.STOCK_OPEN, gtk.RESPONSE_OK)) - dialog.set_default_response(gtk.RESPONSE_OK) - dialog.set_filename(default_map) - dialog.set_show_hidden(False) - - ofilter = gtk.FileFilter() - ofilter.set_name("Images and Tracks") - ofilter.add_mime_type("image/png") - ofilter.add_mime_type("image/jpeg") - ofilter.add_mime_type("image/gif") - ofilter.add_pattern("*.png") - ofilter.add_pattern("*.jpg") - ofilter.add_pattern("*.gif") - ofilter.add_pattern("*.tif") - ofilter.add_pattern("*.xpm") - ofilter.add_pattern("*.cfg") - dialog.add_filter(ofilter) - - ofilter = gtk.FileFilter() - ofilter.set_name("Images only") - ofilter.add_mime_type("image/png") - ofilter.add_mime_type("image/jpeg") - ofilter.add_mime_type("image/gif") - 
ofilter.add_pattern("*.png") - ofilter.add_pattern("*.jpg") - ofilter.add_pattern("*.gif") - ofilter.add_pattern("*.tif") - ofilter.add_pattern("*.xpm") - dialog.add_filter(ofilter) - - ofilter = gtk.FileFilter() - ofilter.set_name("Tracks only") - ofilter.add_pattern("*.cfg") - dialog.add_filter(ofilter) - - response = dialog.run() - if response == gtk.RESPONSE_OK: - filename = dialog.get_filename() - elif response == gtk.RESPONSE_CANCEL: - sys.exit(0) - dialog.destroy() - - # Relativize file path to current directory - if filename.startswith(os.getcwd() + os.sep): - filename = filename[len(os.getcwd())+1:] - - TracksEditor(filename, verbose=verbose, force_save=force_save) - except IOException, e: - w = gtk.MessageDialog(type=gtk.MESSAGE_ERROR, - flags=gtk.DIALOG_DESTROY_WITH_PARENT, - buttons=gtk.BUTTONS_OK) - if e.lineno: - errloc = '"%s", line %d:' % (e.path, e.lineno) - # Emacs friendliness - sys.stderr.write(errloc + " " + e.message + "\n") - else: - errloc = e.path + ":" - w.set_markup(errloc + "\n\n" + e.message) - w.run() - w.destroy() diff --git a/data/tools/trackviewer.pyw b/data/tools/trackviewer.pyw new file mode 100755 index 00000000000..731dbf6ae43 --- /dev/null +++ b/data/tools/trackviewer.pyw @@ -0,0 +1,232 @@ +#!/usr/bin/env python3 +# encoding: utf-8 +""" +%(prog)s example.cfg +%(prog)s example.tmx --track JOURNEY_PART1 + +Map journey track animation preview tool, shows the journey without needing to +start Wesnoth and refresh the cache. Currently this does not have editing +functions, instead it’s a support program which assumes you’re either editing +the .cfg file with a text editor or using Tiled with tmx_trackplacer. + +At least on Linux you can have both Tiled and this open on the same file. Save +the file in Tiled, alt+tab to this, and press enter to reload the file. +""" + +# Gtk version by Eric S. Raymond for the Battle For Wesnoth project, October 2008. +# Tkinter version by Steve Cotton for the Battle For Wesnoth project, 2019. 
+ +import wesnoth.trackplacer3 as tp3 + +import argparse +import tkinter +import tkinter.ttk as ttk +import PIL.Image +import PIL.ImageTk + +def waypoint_generator(journey): + for track in journey.tracks: + for point in track.waypoints: + yield (track, point) + +class ReloadFactory: + """Encapsulates both the file-handling and any command line options that affect + which tracks will be shown. + """ + def __init__(self): + raise NotImplementedError() + + def load_waypoints(self): + """Parsing the most recent version of the file, return + an iterator for the waypoints that should be drawn. + """ + raise NotImplementedError() + +class MapAndWaypointCanvas: + def __init__(self, window, journey, reload_factory, wesnoth_data_dir): + self.window = window + self.journey = journey + self.reload_factory = reload_factory + self.waypoint_generator = None + self.animation_loop_id = None + self.latest_drawn = [tkinter.StringVar(), tkinter.StringVar(), tkinter.StringVar()] + + try: + mapimage = PIL.Image.open(self.journey.mapfile) + except: + try: + mapimage = PIL.Image.open(wesnoth_data_dir + "/../" + self.journey.mapfile) + except: + raise Exception("Can’t open map image") + + background_width, background_height = mapimage.size + self.canvas = tkinter.Canvas(self.window, scrollregion=(0, 0, background_width, background_height)) + self.mapphotoimage = PIL.ImageTk.PhotoImage(mapimage) + self.canvas.create_image((0, 0), anchor="nw", image=self.mapphotoimage) + + # hope this fits on screen + self.canvas.configure(width=background_width, height=background_height) + + self.action_image = {} + for action in tp3.datatypes.selected_icon_dictionary: + with PIL.Image.open(wesnoth_data_dir + "/" + tp3.datatypes.selected_icon_dictionary[action]) as image: + self.action_image[action] = PIL.ImageTk.PhotoImage(image) + + # start with all waypoints visible + for (track, point) in reload_factory.load_waypoints(): + self.draw_waypoint(point) + + def get_widget(self): + return self.canvas + + def 
get_lastest_drawn(self): + """Returns an array of StringVars which will be updated with the details + of the most recently drawn point.""" + return self.latest_drawn + + def draw_waypoint(self, point): + if point.action in self.action_image: + self.canvas.create_image((point.x, point.y), image=self.action_image[point.action], tags="waypoint") + else: + self.canvas.create_text((point.x, point.y), text=point.action, fill="red", tags="waypoint") + + def clear_all_drawn_waypoints(self): + self.canvas.delete("waypoint") + + def _next_frame(self): + try: + (track, point) = next(self.waypoint_generator) + self.draw_waypoint(point) + self.latest_drawn[0].set(track.name) + self.latest_drawn[1].set("{x}, {y}".format(x=point.x, y=point.y)) + self.latest_drawn[2].set(point.action) + self.animation_loop_id = self.window.after(500, self._next_frame) + except StopIteration: + self.animation_loop_id = None + pass + + def toggle_pause(self): + if self.animation_loop_id: + self.window.after_cancel(self.animation_loop_id) + self.animation_loop_id = None + elif self.waypoint_generator is not None: + self.animation_loop_id = self.window.after(0, self._next_frame) + + def restart_animation(self): + """This rereads the file each time, in case the file has been edited.""" + if self.animation_loop_id: + self.window.after_cancel(self.animation_loop_id) + self.animation_loop_id = None + self.clear_all_drawn_waypoints() + self.waypoint_generator = self.reload_factory.load_waypoints() + self.animation_loop_id = self.window.after(0, self._next_frame) + +class Controls: + def __init__(self, window, mapAndWaypointCanvas): + self.window = window + self.canvas = mapAndWaypointCanvas + self.frame = tkinter.Frame(self.window) + + self.window.bind("", self.quit) + self.window.bind("", self.toggle_pause) + self.window.bind("", self.restart_animation) + self.window.bind("", self.restart_animation) + + button = ttk.Button(self.frame, takefocus=False, text="Quit\n(escape)", command=self.quit) + 
button.pack() + + button = ttk.Button(self.frame, takefocus=False, text="Pause\n(space)", command=self.toggle_pause) + button.pack() + + button = ttk.Button(self.frame, takefocus=False, text="Reload\n(enter)", command=self.restart_animation) + button.pack() + + label = ttk.Label(self.frame, text="\n".join([ + "This tool only", + "previews the", + "animation, it does", + "not have editing", + "capabilities", + "", + "To change the data,", + "either edit the", + ".cfg file or use", + "tmx_trackplacer."])) + label.pack() + + for x in self.canvas.get_lastest_drawn(): + label = ttk.Label(self.frame, textvariable=x) + label.pack() + + def get_widget(self): + return self.frame + + def _debug_log_event(self, event): + print(repr(event)) + + def quit(self, event=None): + self.window.quit() + + def toggle_pause(self, event=None): + self.canvas.toggle_pause() + + def restart_animation(self, event=None): + self.canvas.restart_animation() + +class LoaderImpl(ReloadFactory): + def __init__(self, options): + self.options = options + + if options.file is None: + raise RuntimeError("Need a filename to read from") + + if options.file.endswith(".cfg"): + self.reader = tp3.CfgFileFormat() + elif options.file.endswith(".tmx"): + self.reader = tp3.TmxFileFormat(wesnoth_data_dir=options.data_dir) + else: + raise RuntimeError("Don’t know how to handle input from this file type") + + def load_journey(self): + (journey, metadata) = self.reader.read(self.options.file) + return journey + + def load_waypoints(self): + journey = self.load_journey() + if options.track is not None: + found_track = None + for track in journey.tracks: + if track.name == options.track: + found_track = track + if found_track is None: + raise RuntimeError("Named track not found in journey") + journey.tracks = [found_track] + return waypoint_generator(journey) + +if __name__ == "__main__": + ap = argparse.ArgumentParser(usage=__doc__) + ap.add_argument("file", metavar="filename", help="Read input from this file") + 
ap.add_argument("--data-dir", metavar="dir", + help='Same as Wesnoth’s “--data-dir” argument') + ap.add_argument("--track", metavar="track_name", + help='Only show the animation for the named track') + options = ap.parse_args() + + if options.data_dir is None: + import os, sys + APP_DIR,APP_NAME=os.path.split(os.path.realpath(sys.argv[0])) + WESNOTH_ROOT_DIR=os.sep.join(APP_DIR.split(os.sep)[:-2]) # pop out "data" and "tools" + options.data_dir=os.path.join(WESNOTH_ROOT_DIR,"data") + + reload_factory = LoaderImpl(options) + + journey = reload_factory.load_journey() + print("Read data:", str(journey)) + + window = tkinter.Tk() + window.title("trackplacer animation preview") + canvas = MapAndWaypointCanvas(window, journey, reload_factory, options.data_dir) + controls = Controls(window, canvas) + controls.get_widget().pack(side=tkinter.LEFT) + canvas.get_widget().pack(side=tkinter.LEFT) + window.mainloop() -- 2.29.2 From 946363d62a3ec6537ba2afc78d268261ff5f44b2 Mon Sep 17 00:00:00 2001 From: Steve Cotton Date: Wed, 29 Jul 2020 14:35:54 +0200 Subject: [PATCH 23/31] Disambiguate some python shebang lines to be "python3" Loonycyborg already uses these tools with Python 3. The python launcher tool for Windows has magic handling for some shebang lines, however `#!/bin/env python` isn't recognised without the `/usr`. Had the `/usr` been included with the old code then these scripts would likely have been run with Python 2. 
https://docs.python.org/dev/using/windows.html#shebang-lines (cherry picked from commit c1e4178338a3d5b65c152de75e59999380d060f7) --- utils/dockerbuilds/mingw/get_dlls.py | 2 +- utils/update_appdata | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/utils/dockerbuilds/mingw/get_dlls.py b/utils/dockerbuilds/mingw/get_dlls.py index efc5481e458..c514b620b90 100755 --- a/utils/dockerbuilds/mingw/get_dlls.py +++ b/utils/dockerbuilds/mingw/get_dlls.py @@ -1,4 +1,4 @@ -#!/bin/env python +#!/usr/bin/env python3 import pefile, pathlib, shutil diff --git a/utils/update_appdata b/utils/update_appdata index b521dbe8914..483fd16dd6e 100755 --- a/utils/update_appdata +++ b/utils/update_appdata @@ -1,4 +1,4 @@ -#!/bin/env python +#!/usr/bin/env python3 import sys, requests, argparse from xml.dom import minidom -- 2.29.2 From 82b8b59cf3bba28c6d98a0e8e207210fe7c5e87b Mon Sep 17 00:00:00 2001 From: Steve Cotton Date: Thu, 30 Jul 2020 22:19:52 +0200 Subject: [PATCH 24/31] Remove the python2 rmtrans tool (#5014) It seems unused, as there's: * Wiki: no mention of rmtrans * main Git repo: only the brief paragraph in the tools readme * resources repo: no mention of it * forums: only as part of a general list of Python files * Github issues and PRs: only as part of a general list of Python files FWIW, I think it's already possible to do with Gimp's default tools ("Colors", "Curves ...", select channel "Alpha", select curve type "Freehand", click in the bottom-left of the box and sweep down, then to the left edge of the box, then as far right as required). 
(cherry picked from commit ebbd9c5dfa41e2330624a73d494f733b81d77695) --- data/tools/README.md | 6 ----- data/tools/rmtrans/README.md | 7 ------ data/tools/rmtrans/rmtrans.py | 41 ----------------------------------- 3 files changed, 54 deletions(-) delete mode 100644 data/tools/rmtrans/README.md delete mode 100755 data/tools/rmtrans/rmtrans.py diff --git a/data/tools/README.md b/data/tools/README.md index a14d8a140b7..19f7c1c467f 100644 --- a/data/tools/README.md +++ b/data/tools/README.md @@ -4,12 +4,6 @@ also belong here. Other utils are in utils/. == Scripts == -=== rmtrans === - -Remove nearly transparent pixels from images using GIMP. It currently affects -only one image at a time. Batch processing is available within GIMP, but it -would be useful to expand this to skip files where the pixels did not change. - === `tmx_trackplacer` === Converter for journey track files, the .cfg files which control the icon diff --git a/data/tools/rmtrans/README.md b/data/tools/rmtrans/README.md deleted file mode 100644 index b3fad703328..00000000000 --- a/data/tools/rmtrans/README.md +++ /dev/null @@ -1,7 +0,0 @@ -This is a GIMP script, written in python. -See the following address to install the script: -https://en.wikibooks.org/wiki/GIMP/Installing_Plugins#Copying_the_plugin_to_the_GIMP_plugin_directory - -When installed, the script will be listed under the "Colors" menu. - -It requires a relatively recent version of GIMP. 2.6 is too old. 
diff --git a/data/tools/rmtrans/rmtrans.py b/data/tools/rmtrans/rmtrans.py deleted file mode 100755 index 4cbc57c8c59..00000000000 --- a/data/tools/rmtrans/rmtrans.py +++ /dev/null @@ -1,41 +0,0 @@ -#!/usr/bin/env python2 - -from gimpfu import * - -def rmtrans(img,tdrawable): - pdb.gimp_image_undo_group_start(img) - if pdb.gimp_selection_is_empty(img): - pdb.gimp_selection_all(img) - selection = pdb.gimp_selection_save(img) - pdb.gimp_selection_none(img) - alpha,temp,temp,temp = pdb.plug_in_decompose(img,tdrawable,"Alpha",1) # get alpha channel - alpha = pdb.gimp_image_get_active_layer(alpha) # turn it into a layer - pdb.gimp_edit_copy(alpha) - pdb.gimp_floating_sel_to_layer(pdb.gimp_edit_paste(tdrawable,TRUE)) - alpha = pdb.gimp_image_get_active_layer(img) # move alpha layer into image (copy-paste) - pdb.gimp_context_set_antialias(False) - pdb.gimp_context_set_sample_threshold(0.0) # configuration for color selection - for i in xrange(10): - pdb.gimp_image_select_color(img,CHANNEL_OP_REPLACE,alpha,(i,i,i)) # select alpha values <= 10 - pdb.gimp_image_select_item(img,CHANNEL_OP_INTERSECT,selection) # bound it to the previously selected area (before plugin execution) - if not(pdb.gimp_selection_is_empty(img)): - pdb.gimp_edit_clear(tdrawable) # and clear it - print(alpha) - pdb.gimp_image_remove_layer(img,alpha) - pdb.gimp_selection_none(img) - pdb.gimp_image_undo_group_end(img) - -register( - "python_fu_rmtrans", - "Remove all pixels under a given alpha threshold.", - "Remove all pixels under a given alpha threshold.", - "Samuel Kim", - "Samuel Kim", - "2012", - "/Colors/Remove almost-transparent pixels", - "*", - [], - [], - rmtrans) - -main() -- 2.29.2 From 019f55170cd203ced49339ab4a60c7324c3c1b7d Mon Sep 17 00:00:00 2001 From: Steve Cotton Date: Wed, 5 Aug 2020 21:56:12 +0200 Subject: [PATCH 25/31] Final part of the Python 2 removal, including updating install-pytools (#5027) Update the list of files installed by "scons install-pytools", dropping Python2 
versions and adding their Python3 replacements. This doesn't update any `import` statements, as the tools have already been ported to use the Python3 modules - the scons script was out of date and was still bundling the wrong versions. Note for anyone looking at the history - although most of the current tools have a '3' in the name just to indicate that they're the Python 3 version, there's a double-meaning to that number in wmlparser3.py's name. There was a `wmlparser2.py`, where the `2` indicates a redesign of the original. The data/tools/README.md file was renamed from plain README without being converted to Markdown format. This commit touches about half of that file; headings in that half are updated to be Markdown, but to reduce the diff the other headings aren't updated. (cherry picked from commit 935fa41379b936ee1633868e6798176505ef29f6) --- SConstruct | 2 +- data/tools/README.md | 75 ++- data/tools/wesnoth/README.md | 28 +- data/tools/wesnoth/wmliterator.py | 505 ---------------- data/tools/wesnoth/wmlparser.py | 2 +- data/tools/wesnoth/wmlparser2.py | 815 -------------------------- data/tools/wesnoth/wmltools.py | 938 ------------------------------ 7 files changed, 62 insertions(+), 2303 deletions(-) delete mode 100644 data/tools/wesnoth/wmliterator.py delete mode 100755 data/tools/wesnoth/wmlparser2.py delete mode 100644 data/tools/wesnoth/wmltools.py diff --git a/SConstruct b/SConstruct index 6ad09cb1c60..0c99140a1dd 100755 --- a/SConstruct +++ b/SConstruct @@ -698,7 +698,7 @@ else: env["localedir"] = "$datadir/$localedirname" pythontools = Split("wmlscope wmllint wmlindent wesnoth_addon_manager") -pythonmodules = Split("wmltools.py wmlparser.py wmldata.py wmliterator.py campaignserver_client.py __init__.py") +pythonmodules = Split("wmltools3.py wmlparser.py wmlparser3.py wmldata.py wmliterator3.py campaignserver_client.py __init__.py") def CopyFilter(fn): "Filter out data-tree things that shouldn't be installed." 
diff --git a/data/tools/README.md b/data/tools/README.md index 19f7c1c467f..a4d984b2887 100644 --- a/data/tools/README.md +++ b/data/tools/README.md @@ -70,62 +70,93 @@ terrain type to mainline. A script that generates general information about mainline campaigns and outputs wiki-ready output -== Python API == +Python API +---------- -=== wmltools.py === +The `3` in `wmltools3` and `wmliterator3` names refers to them being the +Python3 versions of the tool. + +Both `wmlparser` and `wmlparser3` are Python3, with wmlparser3 being a rewrite +with a different design of the implementation. Both versions are kept as they +have different APIs. + +Historical note - the redesign of wmlparser was originally called wmlparser2; +both were originally written in Python2. For readability this document ignores +that detail and refers to the rewrite as wmlparser3. + +### wmltools3.py The main facility in this module is a cross-referencer class. It also contains utility methods for working with the data tree. See the header comment of wmltools.py for details -=== wmliterator.py === +### wmliterator3.py A WML codewalker class. Not a full parser, but that's useful because it doesn't get confused by unbalanced macros; instead it lets you walk through lines in a text file parsing out tags and elements -=== wmlparser.py === +### wmlparser3.py + +This provides an interface for parsing WML files. The implementation uses the +game engine's `--preprocess` option to handle preprocessing of the input, so +it requires the C++ engine to be available if the WML needs preprocessing. + +The API currently is very sparsely documented, but several of the tools can be +used for reference. + +### wmlparser.py This python module contains code originally developed for CampGen - it contains -a general WML Parser written in Python, just like the Perl one. So if you want -to develop tools in Python instead of Perl and need a WML parser, it may save -some time. +a general WML Parser written in Python. 
So if you want to develop tools in +Python instead of Perl and need a WML parser, it may save some time. -The API currently is very sparsely documented, but I plan to extend this. In -general, wmlparser.py contains the WML Parser, which is used like: +The API currently is very sparsely documented. In general, wmlparser.py +is used like this: -parser = wmlparser.Parser(datapath) + parser = wmlparser.Parser(datapath) Then: -parser.parse_file(filename) + parser.parse_file(filename) + or -parser.parse_stream(file) + + parser.parse_stream(file) + or -parser.parse_text(string) + + parser.parse_text(string) to set what to parse, and finally: -wmldata = parser.parse() + wmldata = parser.parse() to read everything into a Python representation of the parsed data. -=== wmldata.py === +### wmldata.py -This file has several utility methods defined to access and manipulate -WML data. Most of them will not be generally useful. An example: +This module has several utility methods defined to access and manipulate +WML data, and is part of the API for wmlparser.py (not wmlparser3.py). -for unit in wmldata.get_all("unit"): - print unit.get_text_val("id") + for unit in wmldata.get_all("unit"): + print unit.get_text_val("id") -== Standalone use == +Standalone use +-------------- -=== wmlparser === +### wmlparser If called standalone, wmlparser.py will parse whatever you give it and dump back to stdout. For example: -python wmlparser.py -e {game.cfg} > game.dump + python wmlparser.py -e {game.cfg} > game.dump Should produce a nice several 100000 lines file of the complete configuration with all included files and macros expanded to a single file. + +### wmlparser3 + +If called standalone, wmlparser3.py will parse whatever you give it and +dump back to stdout. Running it with the argument `--help` lists the supported +arguments. 
diff --git a/data/tools/wesnoth/README.md b/data/tools/wesnoth/README.md index f085b0a7dfa..02cacb1afb0 100644 --- a/data/tools/wesnoth/README.md +++ b/data/tools/wesnoth/README.md @@ -1,10 +1,12 @@ -# @file README +# README The programs in this directory data/tools/wesnoth -are for checking, analysing and maintenance of WML-files, -written in python. +are for checking, analysing and maintenance of WML-files. -### +The modules intended to be imported by other programs are +documented in the parent directory's README.md. These are +wmltools3.py, wmldata.py, wmlparser.py, wmlparser3.py, and +wmliterator3.py. __init__.py Cause Python to execute any code in this directory on "import wesnoth". @@ -18,19 +20,7 @@ wescamp.py as base) * update the translations in a campaign (in the packed campaign) -wmldata.py - This module represents the internal appearance of WML. - -wmliterator.py - Python routines for navigating a Battle For Wesnoth WML tree - -wmlparser.py - Module implementing a WML parser. - -wmltools.py - Python routines for working with a Battle For Wesnoth WML tree - -### +-------------------------------------------------- From IRC #wesnoth-dev - 2007-11-27 @@ -55,7 +45,3 @@ From IRC #wesnoth-dev - 2007-11-27 it just says "reading x.cfg" and "y lines read" right, no errors it iterated successfully - - -# vim: tabstop=4: shiftwidth=4: expandtab: softtabstop=4: autoindent: - diff --git a/data/tools/wesnoth/wmliterator.py b/data/tools/wesnoth/wmliterator.py deleted file mode 100644 index 9de20d4b994..00000000000 --- a/data/tools/wesnoth/wmliterator.py +++ /dev/null @@ -1,505 +0,0 @@ -#!/usr/bin/env python2 - -""" -wmliterator.py -- Python routines for navigating a Battle For Wesnoth WML tree -Author: Sapient (Patrick Parker), 2007 - -Purpose: - The WmlIterator class can be used to analyze and search the structure of WML - files non-invasively (i.e. 
preserving existing line structure), and its main - use is to determine when a transformation of deprecated content needs to take - place. (I wrote it was because wmllint was trying to do a lot of things with - regular expressions which really required a more algorithmic approach. Also, - wmllint was often inconsistent with correct handling of comments and values - inside strings.) - -Limitations: - The WmlIterator does not attempt to expand macros, only to recognize them as - another level of nesting. Also, the handling of multiple assignment syntax - is somewhat limited (for similar reasons). Adding seamless support for these - would be ideal, but it presents a design challenge since the iteration is - supposed to be non-invasive. Thus, the current behavior is considered good - enough for now. -""" - -from __future__ import print_function, unicode_literals, division -from future_builtins import filter, map, zip -input = raw_input -range = xrange - -from functools import total_ordering -import sys, re, copy, codecs -keyPattern = re.compile('(\w+)(,\s?\w+)*\s*=') -keySplit = re.compile(r'[=,\s]') -tagPattern = re.compile(r'(^|(?= 0: - endquote = -1 - beginofend = beginquote+2 - while endquote < 0: - endquote = text.find('>>', beginofend) - if endquote < 0: - if self.lineno + span >= len(lines): - self.printError('reached EOF due to unterminated string at line', self.lineno+1) - return text, span - beginofend = len(text) - text += lines[self.lineno + span] - span += 1 - begincomment = text.find('#', endquote+2) - if begincomment < 0: - begincomment = None - beginquote = text[:begincomment].find('<<', endquote+2) - beginquote = text[:begincomment].find('"') - while beginquote >= 0: - endquote = -1 - beginofend = beginquote+1 - while endquote < 0: - endquote = text.find('"', beginofend) - if endquote < 0: - if self.lineno + span >= len(lines): - self.printError('reached EOF due to unterminated string at line', self.lineno+1) - return text, span - beginofend = 
len(text) - text += lines[self.lineno + span] - span += 1 - begincomment = text.find('#', endquote+1) - if begincomment < 0: - begincomment = None - beginquote = text[:begincomment].find('"', endquote+1) - return text, span - - def closeScope(self, scopes, closerElement): - """Close the most recently opened scope. Return false if not enough scopes. - note: directives close all the way back to the last open directive - non-directives cannot close a directive and will no-op in that case.""" - try: - if isDirective(closerElement): - while not isDirective(scopes.pop()): - pass - elif (closerElement==closeMacroType): - elem = '' - while not elem.startswith('{'): - closed = scopes.pop() - elem = closed - if isinstance(closed, WmlIterator): - elem = closed.element - if isDirective(elem): - self.printScopeError(closerElement) - scopes.append(closed) # to reduce additional errors (hopefully) - return True - elif not isDirective(scopes[-1]): - closed = scopes.pop() - elem = closed - if isinstance(closed, WmlIterator): - elem = closed.element - if (elem.startswith('{') and closerElement != closeMacroType): - scopes.append(closed) - elif (isOpener(elem) and closerElement != '[/'+elem[1:] - and '+'+closerElement != elem[1]+'[/'+elem[2:]): - self.printError('reached', closerElement, 'at line', self.lineno+1, 'before closing scope', elem) - scopes.append(closed) # to reduce additional errors (hopefully) - return True - except IndexError: - return False - - def parseElements(self, text): - """Remove any closed scopes, return a list of element names - and list of new unclosed scopes - Element Types: - tags: one of "[tag_name]" or "[/tag_name]" - [tag_name] - opens a scope - [/tag_name] - closes a scope - keys: either "key=" or ("key1=", "key2=") for multi-assignment - key= - does not affect the scope - key1,key2= - multi-assignment returns multiple elements - directives: one of "#ifdef", "#ifndef", "#ifhave", "#ifnhave", "#ifver", "#ifnver", "#else", "#endif", "#define", "#enddef" 
- #ifdef - opens a scope - #ifndef - opens a scope - #ifhave - opens a scope - #ifnhave - opens a scope - #ifver - opens a scope - #ifnver - opens a scope - #else - closes a scope, also opens a new scope - #endif - closes a scope - #define - opens a scope - #enddef - closes a scope - macro calls: "{MACRO_NAME}" - {MACRO_NAME - opens a scope - } - closes a scope - """ - elements = [] #(elementType, sortPos, scopeDelta) - # first remove any lua strings - beginquote = text.find('<<') - while beginquote >= 0: - endquote = text.find('>>', beginquote+2) - if endquote < 0: - text = text[:beginquote] - beginquote = -1 #terminate loop - else: - text = text[:beginquote] + text[endquote+2:] - beginquote = text.find('<<') - # remove any quoted strings - beginquote = text.find('"') - while beginquote >= 0: - endquote = text.find('"', beginquote+1) - if endquote < 0: - text = text[:beginquote] - beginquote = -1 #terminate loop - else: - text = text[:beginquote] + text[endquote+1:] - beginquote = text.find('"') - # next remove any comments - text = text.lstrip() - commentSearch = 1 - if text.startswith('#ifdef'): - return (['#ifdef'],)*2 - elif text.startswith('#ifndef'): - return (['#ifndef'],)*2 - elif text.startswith('#ifhave'): - return (['#ifhave'],)*2 - elif text.startswith('#ifnhave'): - return (['#ifnhave'],)*2 - elif text.startswith('#ifver'): - return (['#ifver'],)*2 - elif text.startswith('#ifnver'): - return (['#ifnver'],)*2 - elif text.startswith('#else'): - if not self.closeScope(self.scopes, '#else'): - self.printScopeError('#else') - return (['#else'],)*2 - elif text.startswith('#endif'): - if not self.closeScope(self.scopes, '#endif'): - self.printScopeError('#endif') - return ['#endif'], [] - elif text.startswith('#define'): - return (['#define'],)*2 - elif text.find('#enddef') >= 0: - elements.append(('#enddef', text.find('#enddef'), -1)) - elif text.startswith('#po:') or text.startswith('# po:'): - elements.append(("#po", 0, 0)) - else: - commentSearch = 0 - 
begincomment = text.find('#', commentSearch) - if begincomment >= 0: - text = text[:begincomment] - #now find elements in a loop - for m in tagPattern.finditer(text): - delta = 1 - if isCloser(m.group(2)): - delta = -1 - elements.append((m.group(2), m.start(), delta)) - for m in keyPattern.finditer(text): - for i, k in enumerate(keySplit.split(m.group(0))): - if k: - elements.append((k+'=', m.start()+i, 0)) - for m in macroOpenPattern.finditer(text): - elements.append((m.group(1), m.start(), 1)) - for m in macroClosePattern.finditer(text): - elements.append((closeMacroType, m.start(), -1)) - #sort by start position - elements.sort(key=lambda x:x[1]) - resultElements = [] - openedScopes = [] - for elem, sortPos, scopeDelta in elements: - while scopeDelta < 0: - if not(self.closeScope(openedScopes, elem)\ - or self.closeScope(self.scopes, elem)): - self.printScopeError(elem) - scopeDelta += 1 - while scopeDelta > 0: - openedScopes.append(elem) - scopeDelta -= 1 - resultElements.append(elem) - return resultElements, openedScopes - - def printScopeError(self, elementType): - """Print out warning if a scope was unable to close""" - self.printError('attempt to close empty scope at', elementType, 'line', self.lineno+1) - - def __iter__(self): - """The magic iterator method""" - return self - - def __eq__(self, other): - return (self.fname, self.lineno, self.element) == \ - (other.fname, other.lineno, other.element) - - def __gt__(self, other): - return (self.fname, self.lineno, self.element) > \ - (other.fname, other.lineno, other.element) - - def reset(self): - """Reset any line tracking information to defaults""" - self.lineno = -1 - self.scopes = [] - self.nextScopes = [] - self.text = "" - self.span = 1 - self.element = "" - return self - - def seek(self, lineno, clearEnd=True): - """Move the iterator to a specific line number""" - if clearEnd: - self.endScope = None - if lineno < self.lineno: - for scope in reversed(self.scopes): - # if moving backwards, try to 
re-use a scope iterator - if scope.lineno <= lineno: - # copy the scope iterator's state to self - self.__dict__ = dict(scope.__dict__) - self.scopes = scope.scopes[:] - self.nextScopes = scope.nextScopes[:] - break - else: - # moving backwards past all scopes forces a reset - self.reset() - while self.lineno + self.span - 1 < lineno: - self.next() - return self - - def ancestors(self): - """Return a list of tags enclosing this location, outermost first.""" - return tuple([x.element for x in self.scopes]) - - def hasNext(self): - """Some loops may wish to check this method instead of calling next() - and handling StopIteration... note: inaccurate for ScopeIterators""" - return len(self.lines) > self.lineno + self.span - - def copy(self): - """Return a copy of this iterator""" - itor = copy.copy(self) - itor.scopes = self.scopes[:] - itor.nextScopes = self.nextScopes[:] - return itor - - def __str__(self): - """Return a pretty string representation""" - if self.lineno == -1: - return 'beginning of file' - loc = ' at line ' + str(self.lineno+1) - if self.element: - return str(self.element) + loc - if self.text.strip(): - return 'text' + loc - return 'whitespace' + loc - - def __repr__(self): - """Return a very basic string representation""" - return 'WmlIterator<' + repr(self.element) +', line %d>'%(self.lineno+1) - - def next(self): - """Move the iterator to the next line number - note: May raise StopIteration""" - if not self.hasNext(): - if self.scopes: - self.printError("reached EOF with open scopes", self.scopes) - raise StopIteration - self.lineno = self.lineno + self.span - self.text, self.span = self.parseQuotes(self.lines) - self.scopes.extend(self.nextScopes) - self.element, nextScopes = self.parseElements(self.text) - self.nextScopes = [] - for elem in nextScopes: - # remember scopes by storing a copy of the iterator - copyItor = self.copy() - copyItor.element = elem - self.nextScopes.append(copyItor) - copyItor.nextScopes.append(copyItor) - 
if(len(self.element) == 1): - # currently we only wish to handle simple single assignment syntax - self.element = self.element[0] - if self.endScope is not None and not self.scopes.count(self.endScope): - raise StopIteration - return self - - def isOpener(self): - return isOpener(self) - - def isCloser(self): - return isCloser(self) - - def isExtender(self): - return isExtender(self) - - def isMacroOpener(self): - return isMacroOpener(self) - - def isMacroCloser(self): - return isMacroCloser(self) - - def isAttribute(self): - return isAttribute(self) - - def iterScope(self): - """Return an iterator for the current scope""" - if not self.scopes: - return WmlIterator(self.lines, self.fname) - scopeItor = self.scopes[-1].copy() - scopeItor.endScope = self.scopes[-1] - return scopeItor - - def printError(nav, *misc): - """Print error associated with a given file; avoid printing duplicates""" - if nav.fname: - silenceValue = ' '.join(map(str, misc)) - if nav.fname not in silenceErrors: - print(nav.fname, file=sys.stderr) - silenceErrors[nav.fname] = set() - elif silenceValue in silenceErrors[nav.fname]: - return # do not print a duplicate error for this file - silenceErrors[nav.fname].add(silenceValue) - print('wmliterator:', end=" ", file=sys.stderr) - for item in misc: - print(item, end=" ", file=sys.stderr) - print("", file=sys.stderr) #terminate line - -if __name__ == '__main__': - """Perform a test run on a file or directory""" - import os, glob - didSomething = False - flist = sys.argv[1:] - if not flist: - print('Current directory is', os.getcwd()) - flist = glob.glob(os.path.join(os.getcwd(), input('Which file(s) would you like to test?\n'))) - while flist: - fname = flist.pop() - if os.path.isdir(fname): - flist += glob.glob(fname + os.path.sep + '*') - continue - if not os.path.isfile(fname) or os.path.splitext(fname)[1] != '.cfg': - continue - print('Reading', fname+'...') - didSomething = True - with codecs.open(fname, "r", "utf8") as f: - itor = 
WmlIterator(f.readlines()) - for i in itor: - pass - print(itor.lineno + itor.span, 'lines read.') - if not didSomething: - print('That is not a valid .cfg file') - if os.name == 'nt' and os.path.splitext(__file__)[0].endswith('wmliterator') and not sys.argv[1:]: - os.system('pause') - -# wmliterator.py ends here diff --git a/data/tools/wesnoth/wmlparser.py b/data/tools/wesnoth/wmlparser.py index 597cd9d0ab2..5961587f5df 100755 --- a/data/tools/wesnoth/wmlparser.py +++ b/data/tools/wesnoth/wmlparser.py @@ -9,7 +9,7 @@ import os, glob, sys import re """ -NOTE: You should use wmlparser2.py instead which uses the C++ +NOTE: You should use wmlparser3.py instead which uses the C++ preprocessor. Module implementing a WML parser in pure python. diff --git a/data/tools/wesnoth/wmlparser2.py b/data/tools/wesnoth/wmlparser2.py deleted file mode 100755 index 161991bc313..00000000000 --- a/data/tools/wesnoth/wmlparser2.py +++ /dev/null @@ -1,815 +0,0 @@ -#!/usr/bin/env python2 -# encoding: utf-8 - -""" -This parser uses the --preprocess option of wesnoth so a working -wesnoth executable must be available at runtime. - -If you are using this you shold instead use wmlparser3.py and upgrade -your code to Python 3. -""" - -import os, glob, sys, re, subprocess, argparse, tempfile, shutil -import atexit - -tempdirs_to_clean = [] - -@atexit.register -def cleaner(): - for temp_dir in tempdirs_to_clean: - shutil.rmtree(temp_dir, ignore_errors=True) - -class WMLError(Exception): - """ - Catch this exception to retrieve the first error message from - the parser. - """ - def __init__(self, parser=None, message=None): - if parser: - self.line = parser.parser_line - self.wml_line = parser.last_wml_line - self.message = message - self.preprocessed = parser.preprocessed - - def __str__(self): - return """WMLError: - %s %s - %s - %s -""" % (str(self.line), self.preprocessed, self.wml_line, self.message) - -class StringNode: - """ - One part of an attribute's value. 
Because a single WML string - can be made from multiple translatable strings we need to model - it this way (as a list of several StringNode). - """ - def __init__(self, data): - self.textdomain = None # non-translatable by default - self.data = data - - def debug(self): - if self.textdomain: - return "_<%s>%s" % (self.textdomain, repr(self.data)) - else: - return repr(self.data) - -class AttributeNode: - """ - A WML attribute. For example the "id=Elfish Archer" in: - [unit] - id=Elfish Archer - [/unit] - """ - def __init__(self, name, location=None): - self.name = name - self.location = location - self.value = [] # List of StringNode - - def debug(self): - return self.name + "=" + " .. ".join( - [v.debug() for v in self.value]) - - def get_text(self, translation=None): - r = u"" - for s in self.value: - ustr = s.data.decode("utf8", "ignore") - if translation: - r += translation(ustr, s.textdomain) - else: - r += ustr - return r - -class TagNode: - """ - A WML tag. For example the "unit" in this example: - [unit] - id=Elfish Archer - [/unit] - """ - def __init__(self, name, location=None): - self.name = name - self.location = location - # List of child elements, which are either of type TagNode or - # AttributeNode. - self.data = [] - - self.speedy_tags = {} - - def debug(self): - s = "[%s]\n" % self.name - for sub in self.data: - for subline in sub.debug().splitlines(): - s += " %s\n" % subline - s += "[/%s]\n" % self.name - return s - - def get_all(self, **kw): - """ - This gets all child tags or child attributes of the tag. - For example: - - [unit] - name=A - name=B - [attack] - [/attack] - [attack] - [/attack] - [/unit] - - unit.get_all(att = "name") - will return two nodes for "name=A" and "name=B" - - unit.get_all(tag = "attack") - will return two nodes for the two [attack] tags. - - unit.get_all() - will return 4 nodes for all 4 sub-elements. - - unit.get_all(att = "") - Will return the two attribute nodes. 
- - unit.get_all(tag = "") - Will return the two tag nodes. - - If no elements are found an empty list is returned. - """ - if len(kw) == 1 and "tag" in kw and kw["tag"]: - return self.speedy_tags.get(kw["tag"], []) - - r = [] - for sub in self.data: - ok = True - for k, v in kw.items(): - if k == "tag": - if not isinstance(sub, TagNode): ok = False - elif v != "" and sub.name != v: ok = False - elif k == "att": - if not isinstance(sub, AttributeNode): ok = False - elif v != "" and sub.name != v: ok = False - if ok: - r.append(sub) - return r - - def get_text_val(self, name, default=None, translation=None, val=-1): - """ - Returns the value of the specified attribute. If the attribute - is given multiple times, the value number val is returned (default - behaviour being to return the last value). If the - attribute is not found, the default parameter is returned. - - If a translation is specified, it should be a function which - when passed a unicode string and text-domain returns a - translation of the unicode string. The easiest way is to pass - it to gettext.translation if you have the binary message - catalogues loaded. - """ - x = self.get_all(att=name) - if not x: return default - return x[val].get_text(translation) - - def append(self, node): - self.data.append(node) - - if isinstance(node, TagNode): - if node.name not in self.speedy_tags: - self.speedy_tags[node.name] = [] - self.speedy_tags[node.name].append(node) - -class RootNode(TagNode): - """ - The root node. There is exactly one such node. - """ - def __init__(self): - TagNode.__init__(self, None) - - def debug(self): - s = "" - for sub in self.data: - for subline in sub.debug().splitlines(): - s += subline + "\n" - return s - -class Parser: - def __init__(self, wesnoth_exe, config_dir, data_dir, - no_preprocess): - """ - path - Path to the file to parse. - wesnoth_exe - Wesnoth executable to use. This should have been - configured to use the desired data and config directories. 
- """ - self.wesnoth_exe = wesnoth_exe - self.config_dir = None - if config_dir: self.config_dir = os.path.abspath(config_dir) - self.data_dir = None - if data_dir: self.data_dir = os.path.abspath(data_dir) - self.keep_temp_dir = None - self.temp_dir = None - self.no_preprocess = no_preprocess - self.preprocessed = None - self.verbose = False - - self.last_wml_line = "?" - self.parser_line = 0 - self.line_in_file = 42424242 - self.chunk_start = "?" - - def parse_file(self, path, defines=""): - self.path = path - if not self.no_preprocess: - self.preprocess(defines) - self.parse() - - def parse_text(self, text, defines=""): - temp = tempfile.NamedTemporaryFile(prefix="wmlparser_", - suffix=".cfg") - temp.write(text) - temp.flush() - self.path = temp.name - if not self.no_preprocess: - self.preprocess(defines) - self.parse() - - def preprocess(self, defines): - """ - Call wesnoth --preprocess to get preprocessed WML which we - can subsequently parse. - - If this is not called then the .parse method will assume the - WML is already preprocessed. 
- """ - if self.keep_temp_dir: - output = self.keep_temp_dir - else: - output = tempfile.mkdtemp(prefix="wmlparser_") - tempdirs_to_clean.append(output) - - self.temp_dir = output - commandline = [self.wesnoth_exe] - if self.data_dir: - commandline += ["--data-dir", self.data_dir] - if self.config_dir: - commandline += ["--config-dir", self.config_dir] - commandline += ["--preprocess", self.path, output] - if defines: - commandline += ["--preprocess-defines", defines] - if self.verbose: - print(" ".join(commandline)) - p = subprocess.Popen(commandline, - stdout=subprocess.PIPE, stderr=subprocess.PIPE) - out, err = p.communicate() - if self.verbose: - print(out + err) - self.preprocessed = output + "/" + os.path.basename(self.path) +\ - ".plain" - if not os.path.exists(self.preprocessed): - first_line = open(self.path).readline().strip() - raise WMLError(self, "Preprocessor error:\n" + - " ".join(commandline) + "\n" + - "First line: " + first_line + "\n" + - out + - err) - - def parse_line_without_commands(self, line): - """ - Once the .plain commands are handled WML lines are passed to - this. 
- """ - if not line: return - - if line.strip(): - self.skip_newlines_after_plus = False - - if self.in_tag: - self.handle_tag(line) - return - - if self.in_arrows: - arrows = line.find('>>') - if arrows >= 0: - self.in_arrows = False - self.temp_string += line[:arrows] - self.temp_string_node = StringNode(self.temp_string) - self.temp_string = "" - self.temp_key_nodes[self.commas].value.append( - self.temp_string_node) - self.in_arrows = False - self.parse_line_without_commands(line[arrows + 2:]) - else: - self.temp_string += line - return - - quote = line.find('"') - - if not self.in_string: - arrows = line.find('<<') - if arrows >= 0 and (quote < 0 or quote > arrows): - self.parse_line_without_commands(line[:arrows]) - self.in_arrows = True - self.parse_line_without_commands(line[arrows + 2:]) - return - - if quote >= 0: - if self.in_string: - # double quote - if quote < len(line) - 1 and line[quote + 1] == '"': - self.temp_string += line[:quote + 1] - self.parse_line_without_commands(line[quote + 2:]) - return - self.temp_string += line[:quote] - self.temp_string_node = StringNode(self.temp_string) - if self.translatable: - self.temp_string_node.textdomain = self.textdomain - self.translatable = False - self.temp_string = "" - if not self.temp_key_nodes: - raise WMLError(self, "Unexpected string value.") - - self.temp_key_nodes[self.commas].value.append( - self.temp_string_node) - - self.in_string = False - self.parse_line_without_commands(line[quote + 1:]) - else: - self.parse_outside_strings(line[:quote]) - self.in_string = True - self.parse_line_without_commands(line[quote + 1:]) - else: - if self.in_string: - self.temp_string += line - else: - self.parse_outside_strings(line) - - def parse_outside_strings(self, line): - """ - Parse a WML fragment outside of strings. 
- """ - if not line: return - if not self.temp_key_nodes: - line = line.lstrip() - if not line: return - - if line.startswith("#textdomain "): - self.textdomain = line[12:].strip() - return - - # Is it a tag? - if line[0] == "[": - self.handle_tag(line) - # No tag, must be an attribute. - else: - self.handle_attribute(line) - else: - for i, segment in enumerate(line.split("+")): - segment = segment.lstrip(" \t") - - if i > 0: - # If the last segment is empty (there was a plus sign - # at the end) we need to skip newlines. - self.skip_newlines_after_plus = not segment.strip() - - if not segment: continue - - if segment.rstrip() == '_': - self.translatable = True - segment = segment[1:].lstrip(" ") - if not segment: continue - self.handle_value(segment) - - - def handle_tag(self, line): - end = line.find("]") - if end < 0: - if line.endswith("\n"): - raise WMLError(self, "Expected closing bracket.") - self.in_tag += line - return - tag = (self.in_tag + line[:end])[1:] - self.in_tag = "" - if tag[0] == "/": - self.parent_node = self.parent_node[:-1] - else: - node = TagNode(tag, location=(self.line_in_file, self.chunk_start)) - if self.parent_node: - self.parent_node[-1].append(node) - self.parent_node.append(node) - self.parse_outside_strings(line[end + 1:]) - - def handle_attribute(self, line): - assign = line.find("=") - remainder = None - if assign >= 0: - remainder = line[assign + 1:] - line = line[:assign] - - self.commas = 0 - self.temp_key_nodes = [] - for att in line.split(","): - att = att.strip() - node = AttributeNode(att, location=(self.line_in_file, self.chunk_start)) - self.temp_key_nodes.append(node) - if self.parent_node: - self.parent_node[-1].append(node) - - if remainder: - self.parse_outside_strings(remainder) - - def handle_value(self, segment): - def add_text(segment): - segment = segment.rstrip() - if not segment: return - n = len(self.temp_key_nodes) - maxsplit = n - self.commas - 1 - if maxsplit < 0: maxsplit = 0 - for subsegment in 
segment.split(",", maxsplit): - self.temp_string += subsegment.strip() - self.temp_string_node = StringNode(self.temp_string) - self.temp_string = "" - self.temp_key_nodes[self.commas].value.append( - self.temp_string_node) - if self.commas < n - 1: - self.commas += 1 - - # Finish assignment on newline, except if there is a - # plus sign before the newline. - add_text(segment) - if segment[-1] == "\n" and not self.skip_newlines_after_plus: - self.temp_key_nodes = [] - - def parse(self): - """ - Parse preprocessed WML into a tree of tags and attributes. - """ - - # parsing state - self.temp_string = "" - self.temp_string_node = None - self.commas = 0 - self.temp_key_nodes = [] - self.in_string = False - self.in_arrows = False - self.textdomain = "wesnoth" - self.translatable = False - self.root = RootNode() - self.parent_node = [self.root] - self.skip_newlines_after_plus = False - self.in_tag = "" - - command_marker_byte = chr(254) - - input = self.preprocessed - if not input: input = self.path - - for rawline in open(input, "rb"): - compos = rawline.find(command_marker_byte) - self.parser_line += 1 - # Everything from chr(254) to newline is the command. 
- if compos != 0: - self.line_in_file += 1 - if compos >= 0: - self.parse_line_without_commands(rawline[:compos]) - self.handle_command(rawline[compos + 1:-1]) - else: - self.parse_line_without_commands(rawline) - - if self.keep_temp_dir is None and self.temp_dir: - if self.verbose: - print("removing " + self.temp_dir) - shutil.rmtree(self.temp_dir, ignore_errors=True) - - def handle_command(self, com): - if com.startswith("line "): - self.last_wml_line = com[5:] - _ = self.last_wml_line.split(" ") - self.chunk_start = [(_[i+1], int(_[i])) for i in range(0, len(_), 2)] - self.line_in_file = self.chunk_start[0][1] - elif com.startswith("textdomain "): - self.textdomain = com[11:] - else: - raise WMLError(self, "Unknown parser command: " + com) - - def get_all(self, **kw): - return self.root.get_all(**kw) - - def get_text_val(self, name, default=None, translation=None): - return self.root.get_text_val(name, default, translation) - - -import json -def jsonify(tree, verbose=False, depth=0): - """ -Convert a DataSub into JSON - -If verbose, insert a linebreak after every brace and comma (put every item on its own line), otherwise, condense everything into a single line. 
-""" - print "{", - first = True - sdepth1 = "\n" + " " * depth - sdepth2 = sdepth1 + " " - for pair in tree.speedy_tags.iteritems(): - if first: - first = False - else: - sys.stdout.write(",") - if verbose: - sys.stdout.write(sdepth2) - print '"%s":' % pair[0], - if verbose: - sys.stdout.write(sdepth1) - print '[', - first_tag = True - for tag in pair[1]: - if first_tag: - first_tag = False - else: - sys.stdout.write(",") - if verbose: - sys.stdout.write(sdepth2) - jsonify(tag, verbose, depth + 2) - if verbose: - sys.stdout.write(sdepth2) - sys.stdout.write("]") - for child in tree.data: - if isinstance(child, TagNode): - continue - if first: - first = False - else: - sys.stdout.write(",") - if verbose: - sys.stdout.write(sdepth2) - print '"%s":' % child.name, - print json.dumps(child.get_text()), - if verbose: - sys.stdout.write(sdepth1) - sys.stdout.write("}") - -from xml.sax.saxutils import escape -def xmlify(tree, verbose=False, depth=0): - sdepth = "" - if verbose: - sdepth = " " * depth - for child in tree.data: - if isinstance(child, TagNode): - print '%s<%s>' % (sdepth, child.name) - xmlify(child, verbose, depth + 1) - print '%s' % (sdepth, child.name) - else: - if "\n" in child.get_text() or "\r" in child.get_text(): - print sdepth + '<' + child.name + '>' + \ - '' + '' - else: - print sdepth + '<' + child.name + '>' + \ - escape(child.get_text()) + '' - -if __name__ == "__main__": - # Hack to make us not crash when we encounter characters that aren't ASCII - sys.stdout = __import__("codecs").getwriter('utf-8')(sys.stdout) - arg = argparse.ArgumentParser() - arg.add_argument("-a", "--data-dir", help="directly passed on to wesnoth.exe") - arg.add_argument("-c", "--config-dir", help="directly passed on to wesnoth.exe") - arg.add_argument("-i", "--input", help="a WML file to parse") - arg.add_argument("-k", "--keep-temp", help="specify directory where to keep temp files") - arg.add_argument("-t", "--text", help="WML text to parse") - arg.add_argument("-w", 
"--wesnoth", help="path to wesnoth.exe") - arg.add_argument("-d", "--defines", help="comma separated list of WML defines") - arg.add_argument("-T", "--test", action="store_true") - arg.add_argument("-j", "--to-json", action="store_true") - arg.add_argument("-n", "--no-preprocess", action="store_true") - arg.add_argument("-v", "--verbose", action="store_true") - arg.add_argument("-x", "--to-xml", action="store_true") - args = arg.parse_args() - - if not args.input and not args.text and not args.test: - sys.stderr.write("No input given. Use -h for help.\n") - sys.exit(1) - - if not args.no_preprocess and (not args.wesnoth or not - os.path.exists(args.wesnoth)): - sys.stderr.write("Wesnoth executable not found.\n") - sys.exit(1) - - if args.test: - print("Running tests") - p = Parser(args.wesnoth, args.config_dir, - args.data_dir, args.no_preprocess) - if args.keep_temp: - p.keep_temp_dir = args.keep_temp - if args.verbose: p.verbose = True - - only = None - def test2(input, expected, note, function): - if only and note != only: return - input = input.strip() - expected = expected.strip() - p.parse_text(input) - output = function(p).strip() - if output != expected: - print("__________") - print("FAILED " + note) - print("INPUT:") - print(input) - print("OUTPUT:") - print(output) - print("EXPECTED:") - print(expected) - print("__________") - else: - print("PASSED " + note) - - def test(input, expected, note): - test2(input, expected, note, lambda p: p.root.debug()) - - test( -""" -[test] -a=1 -[/test] -""", """ -[test] - a='1' -[/test] -""", "simple") - - test( -""" -[test] -a, b, c = 1, 2, 3 -[/test] -""", """ -[test] - a='1' - b='2' - c='3' -[/test] -""", "multi assign") - - test( -""" -[test] -a, b = 1, 2, 3 -[/test] -""", """ -[test] - a='1' - b='2, 3' -[/test] -""", "multi assign 2") - - test( -""" -[test] -a, b, c = 1, 2 -[/test] -""", """ -[test] - a='1' - b='2' - c= -[/test] -""", "multi assign 3") - - test( -""" -#textdomain A -#define X - _ "abc" -#enddef 
-#textdomain B -[test] -x = _ "abc" + {X} -[/test] -""", """ -[test] - x=_'abc' .. _'abc' -[/test] -""", "textdomain") - - test( -""" -[test] -x,y = _1,_2 -[/test] -""", """ -[test] - x='_1' - y='_2' -[/test] -""", "underscores") - - test( -""" -[test] -a = "a ""quoted"" word" -[/test] -""", -""" -[test] - a='a "quoted" word' -[/test] -""", "quoted") - - test( -""" -[test] -code = << - "quotes" here - ""blah"" ->> -[/test] -""", -""" -[test] - code='\\n "quotes" here\\n ""blah""\\n' -[/test] -""", "quoted2") - - test( -""" -foo="bar"+ - - - -"baz" -""", -""" -foo='bar' .. 'baz' -""", "multi line string") - - test( -""" -#define baz - -"baz" -#enddef -foo="bar"+{baz} -""", -""" -foo='bar' .. 'baz' -""", "defined multi line string") - - test( -""" -foo="bar" + "baz" # blah -""", -""" -foo='bar' .. 'baz' -""", "comment after +") - - test( -""" -#define baz -"baz" -#enddef -foo="bar" {baz} -""", -""" -foo='bar' .. 'baz' -""", "defined string concatenation") - - test( -""" -#define A BLOCK -[{BLOCK}] -[/{BLOCK}] -#enddef -{A blah} -""", -""" -[blah] -[/blah] -""", "defined tag") - - test2( -""" -[test] - a=1 - b=2 - a=3 - b=4 -[/test] -""", "3, 4", "multiatt", - lambda p: - p.get_all(tag = "test")[0].get_text_val("a") + ", " + - p.get_all(tag = "test")[0].get_text_val("b")) - - sys.exit(0) - - p = Parser(args.wesnoth, args.config_dir, args.data_dir, - args.no_preprocess) - if args.keep_temp: - p.keep_temp_dir = args.keep_temp - if args.verbose: p.verbose = True - if args.input: p.parse_file(args.input, args.defines) - elif args.text: p.parse_text(args.text, args.defines) - if args.to_json: - jsonify(p.root, True) - print - elif args.to_xml: - print '' - print '' - xmlify(p.root, True, 1) - print '' - else: - print(p.root.debug()) diff --git a/data/tools/wesnoth/wmltools.py b/data/tools/wesnoth/wmltools.py deleted file mode 100644 index 88dcadfb875..00000000000 --- a/data/tools/wesnoth/wmltools.py +++ /dev/null @@ -1,938 +0,0 @@ -#!/usr/bin/env python2 - -""" 
-wmltools.py -- Python routines for working with a Battle For Wesnoth WML tree - -""" - -from __future__ import print_function, unicode_literals, division -from future_builtins import filter, map, zip -input = raw_input -range = xrange - -from functools import total_ordering -import collections, codecs -import sys, os, re, sre_constants, hashlib, glob, gzip -import string - -map_extensions = ("map", "mask") -image_extensions = ("png", "jpg", "jpeg") -sound_extensions = ("ogg", "wav") -vc_directories = (".git", ".svn") -l10n_directories = ("l10n",) -resource_extensions = map_extensions + image_extensions + sound_extensions -image_reference = r"[A-Za-z0-9{}.][A-Za-z0-9_/+{}.-]*\.(png|jpe?g)(?=(~.*)?)" - -def is_root(dirname): - "Is the specified path the filesystem root?" - return dirname == os.sep or (os.sep == '\\' and dirname.endswith(':\\')) - -def pop_to_top(whoami): - "Pop upward to the top-level directory." - upwards = os.getcwd().split(os.sep) - upwards.reverse() - for pathpart in upwards: - # Loose match because people have things like git trees. - if os.path.basename(pathpart).find("wesnoth") > -1: - break - else: - os.chdir("..") - else: - print(whoami + ": must be run from within a Battle " - "for Wesnoth source tree.", file=sys.stderr) - sys.exit(1) - -def string_strip(value): - "String-strip the value" - if value.startswith('"'): - value = value[1:] - if value.endswith('"'): - value = value[:-1] - if value.startswith("'"): - value = value[1:] - if value.endswith("'"): - value = value[:-1] - return value - -def attr_strip(value): - "Strip away an (optional) translation mark and string quotes." - value = value.strip() - if value.startswith('_'): - value = value[1:] - value = value.strip() - return string_strip(value) - -def comma_split(csstring, list=None, strip="r"): - "Split a comma-separated string, and append the entries to a list if specified." 
- vallist = [x.lstrip() for x in csstring.split(",") if x.lstrip()] - # strip=: utils::split will remove trailing whitespace from items in comma- - # separated lists but the wml-tags.lua split function only removes leading - # whitespace. So two flags are offered to change default behavior: one to - # lstrip() only, the other to warn about trailing whitespace. - if 'w' in strip: - for item in vallist: - if re.search('\s$', item): - print('Trailing whitespace may be problematic: "%s" in "%s"' % (item, csstring)) - if 'l' not in strip: - vallist = [x.rstrip() for x in vallist] - if list is not None: - list.extend(vallist) - else: - return vallist - -def parse_attribute(line): - "Parse a WML key-value pair from a line." - if '=' not in line or line.find("#") > -1 and line.find("#") < line.find("="): - return None - where = line.find("=") - leader = line[:where] - after = line[where+1:] - after = after.lstrip() - if re.search("\s#", after): - where = len(re.split("\s+#", after)[0]) - value = after[:where] - comment = after[where:] - else: - value = after.rstrip() - comment = "" - # Return four fields: stripped key, part of line before value, - # value, trailing whitespace and comment. - return (leader.strip(), leader+"=", string_strip(value), comment) - -class Forest: - "Return an iterable directory forest object." - def __init__(self, dirpath, exclude=None): - "Get the names of all files under dirpath, ignoring version-control directories." 
- self.forest = [] - self.dirpath = dirpath - roots = ["campaigns", "add-ons"] - for directory in dirpath: - subtree = [] - rooted = False - if os.path.isdir(directory): # So we skip .cfgs in a UMC mirror - oldmain = os.path.join(os.path.dirname(directory), os.path.basename(directory) + '.cfg') - if os.path.isfile(oldmain): - subtree.append(oldmain) - base = os.path.basename(os.path.dirname(os.path.abspath(directory))) - if base in roots or base == "core": - rooted = True - for root, dirs, files in os.walk(directory): - dirs.sort() - dirlist = [x for x in dirs] - # Split out individual campaigns/add-ons into their own subtrees - if not rooted: - if os.path.basename(root) == "core": - rooted = True - elif os.path.basename(root) in roots: - for subdir in dirlist: - if subdir + '.cfg' in files: - files.remove(subdir + '.cfg') - dirs.remove(subdir) - dirpath.append(os.path.join(root, subdir)) - rooted = True - elif "_info.cfg" in files or "info.cfg" in files: - rooted = True - roots.append(os.path.basename(os.path.dirname(os.path.abspath(root)))) - else: - stop = min(len(dirs), 5) - count = 0 - for subdir in dirlist[:stop]: - if os.path.isfile(os.path.join(root, subdir, '_info.cfg')): - count += 1 - elif os.path.isfile(os.path.join(root, subdir, 'info.cfg')): - if os.path.isfile(os.path.join(root, subdir, 'COPYING.txt')): - count += 1 - if count >= (stop // 2): - roots.append(os.path.basename(root)) - for subdir in dirlist: - if subdir + '.cfg' in files: - files.remove(subdir + '.cfg') - dirs.remove(subdir) - dirpath.append(os.path.join(root, subdir)) - subtree.extend([os.path.normpath(os.path.join(root, x)) for x in files]) - # Always look at _main.cfg first - maincfgs = [elem for elem in subtree if elem.endswith("_main.cfg")] - rest = [elem for elem in subtree if not elem.endswith("_main.cfg")] - subtree = sorted(maincfgs) + sorted(rest) - self.forest.append(subtree) - for i in self.forest: - # Ignore version-control subdirectories and Emacs tempfiles - for dirkind 
in vc_directories + l10n_directories: - i = [x for x in i if dirkind not in x] - i = [x for x in i if '.#' not in x] - i = [x for x in i if not os.path.isdir(x)] - if exclude: - i = [x for x in i if not re.search(exclude, x)] - i = [x for x in i if not x.endswith("-bak")] - # Compute cliques (will be used later for visibility checks) - self.clique = {} - counter = 0 - for tree in self.forest: - for filename in tree: - self.clique[filename] = counter - counter += 1 - def parent(self, filename): - "Return the directory root that caused this path to be included." - return self.dirpath[self.clique[filename]] - def neighbors(self, fn1, fn2): - "Are two files from the same tree?" - return self.clique[fn1] == self.clique[fn2] - def flatten(self): - "Return a flattened list of all files in the forest." - allfiles = [] - for tree in self.forest: - allfiles += tree - return allfiles - def generator(self): - "Return a generator that walks through all files." - for (directory, tree) in zip(self.dirpath, self.forest): - for filename in tree: - yield (directory, filename) - -def iswml(filename): - "Is the specified filename WML?" - return filename.endswith(".cfg") - -def issave(filename): - "Is the specified filename a WML save? (Detects compressed saves too.)" - if isresource(filename): - return False - if filename.endswith(".gz"): - with gzip.open(filename) as content: - firstline = content.readline() - else: - try: - with codecs.open(filename, "r", "utf8") as content: - firstline = content.readline() - except UnicodeDecodeError: - # our saves are in UTF-8, so this file shouldn't be one - return False - return firstline.startswith("label=") - -def isresource(filename): - "Is the specified name a resource?" 
- (root, ext) = os.path.splitext(filename) - return ext and ext[1:] in resource_extensions - -def parse_macroref(start, line): - brackdepth = parendepth = 0 - instring = False - args = [] - arg = "" - for i in range(start, len(line)): - if instring: - if line[i] == '"': - instring = False - arg += line[i] - elif line[i] == '"': - instring = not instring - arg += line[i] - elif line[i] == "{": - if brackdepth > 0: - arg += line[i] - brackdepth += 1 - elif line[i] == "}": - brackdepth -= 1 - if brackdepth == 0: - if not line[i-1].isspace(): - arg = arg.strip() - if arg.startswith('"') and arg.endswith('"'): - arg = arg[1:-1].strip() - args.append(arg) - arg = "" - break - else: - arg += line[i] - elif line[i] == "(": - parendepth += 1 - elif line[i] == ")": - parendepth -= 1 - elif not line[i-1].isspace() and \ - line[i].isspace() and \ - brackdepth == 1 and \ - parendepth == 0: - arg = arg.strip() - if arg.startswith('"') and arg.endswith('"'): - arg = arg[1:-1].strip() - args.append(arg) - arg = "" - elif not line[i].isspace() or parendepth > 0: - arg += line[i] - return (args, brackdepth, parendepth) - -def formaltype(f): - # Deduce the expected type of the formal - if f.startswith("_"): - f = f[1:] - if f == "SIDE" or f.endswith("_SIDE") or re.match("SIDE[0-9]", f): - ftype = "side" - elif f in ("SIDE", "X", "Y", "RED", "GREEN", "BLUE", "TURN", "PROB", "LAYER", "TIME", "DURATION") or f.endswith("NUMBER") or f.endswith("AMOUNT") or f.endswith("COST") or f.endswith("RADIUS") or f.endswith("_X") or f.endswith("_Y") or f.endswith("_INCREMENT") or f.endswith("_FACTOR") or f.endswith("_TIME") or f.endswith("_SIZE"): - ftype = "numeric" - elif f.endswith("PERCENTAGE"): - ftype = "percentage" - elif f in ("POSITION",) or f.endswith("_POSITION") or f == "BASE": - ftype = "position" - elif f.endswith("_SPAN"): - ftype = "span" - elif f == "SIDES" or f.endswith("_SIDES"): - ftype = "alliance" - elif f in ("RANGE",): - ftype = "range" - elif f in ("ALIGN",): - ftype = 
"alignment" - elif f in ("TYPES"): - ftype = "types" - elif f.startswith("ADJACENT") or f.startswith("TERRAINLIST") or f == "RESTRICTING": - ftype = "terrain_pattern" - elif f.startswith("TERRAIN") or f.endswith("TERRAIN"): - ftype = "terrain_code" - elif f in ("NAME", "NAMESPACE", "VAR", "IMAGESTEM", "ID", "FLAG", "BUILDER") or f.endswith("_NAME") or f.endswith("_ID") or f.endswith("_VAR") or f.endswith("_OVERLAY"): - ftype = "name" - elif f in ("ID_STRING", "NAME_STRING", "DESCRIPTION", "IPF"): - ftype = "optional_string" - elif f in ("STRING", "TYPE", "TEXT") or f.endswith("_STRING") or f.endswith("_TYPE") or f.endswith("_TEXT"): - ftype = "string" - elif f.endswith("IMAGE") or f == "PROFILE": - ftype = "image" - elif f.endswith("MUSIC",) or f.endswith("SOUND"): - ftype = "sound" - elif f.endswith("FILTER",): - ftype = "filter" - elif f == "WML" or f.endswith("_WML"): - ftype = "wml" - elif f in ("AFFIX", "POSTFIX", "ROTATION") or f.endswith("AFFIX"): - ftype = "affix" - # The regexp case avoids complaints about some wacky terrain macros. 
- elif f.endswith("VALUE") or re.match("[ARS][0-9]", f): - ftype = "any" - else: - ftype = None - return ftype - -def actualtype(a): - if a is None: - return None - # Deduce the type of the actual - if a.isdigit() or a.startswith("-") and a[1:].isdigit(): - atype = "numeric" - elif re.match(r"0\.[0-9]+\Z", a): - atype = "percentage" - elif re.match(r"-?[0-9]+,-?[0-9]+\Z", a): - atype = "position" - elif re.match(r"([0-9]+\-[0-9]+,?|[0-9]+,?)+\Z", a): - atype = "span" - elif a in ("melee", "ranged"): - atype = "range" - elif a in ("lawful", "neutral", "chaotic", "liminal"): - atype = "alignment" - elif a.startswith("{") or a.endswith("}") or a.startswith("$"): - atype = None # Can't tell -- it's a macro expansion - elif re.match(image_reference, a) or a == "unit_image": - atype = "image" - elif re.match(r"(\*|[A-Z][a-z]+)\^([A-Z][a-z\\|/]+\Z)?", a): - atype = "terrain_code" - elif a.endswith(".wav") or a.endswith(".ogg"): - atype = "sound" - elif a.startswith('"') and a.endswith('"') or (a.startswith("_") and a[1] not in string.ascii_lowercase): - atype = "stringliteral" - elif "=" in a: - atype = "filter" - elif re.match(r"[A-Z][a-z][a-z]?\Z", a): - atype = "shortname" - elif a == "": - atype = "empty" - elif not ' ' in a: - atype = "name" - else: - atype = "string" - return atype - -def argmatch(formals, actuals): - if len(formals) != len(actuals): - return False - for (f, a) in zip(formals, actuals): - # Here's the compatibility logic. First, we catch the situations - # in which a more restricted actual type matches a more general - # formal one. Then we have a fallback rule checking for type - # equality or wildcarding. 
- ftype = formaltype(f) - atype = actualtype(a) - if ftype == "any": - pass - elif (atype == "numeric" or a == "global") and ftype == "side": - pass - elif atype in ("filter", "empty") and ftype == "wml": - pass - elif atype in ("numeric", "position") and ftype == "span": - pass - elif atype in ("shortname", "name", "empty", "stringliteral") and ftype == "affix": - pass - elif atype in ("shortname", "name", "stringliteral") and ftype == "string": - pass - elif atype in ("shortname", "name", "string", "stringliteral", "empty") and ftype == "optional_string": - pass - elif atype in ("shortname",) and ftype == "terrain_code": - pass - elif atype in ("numeric", "position", "span", "empty") and ftype == "alliance": - pass - elif atype in ("terrain_code", "shortname", "name") and ftype == "terrain_pattern": - pass - elif atype in ("string", "shortname", "name") and ftype == "types": - pass - elif atype in ("numeric", "percentage") and ftype == "percentage": - pass - elif atype == "range" and ftype == "name": - pass - elif atype != ftype and ftype is not None and atype is not None: - return False - return True - -# the total_ordering decorator from functools allows to define only two comparison -# methods, and Python generates the remaining methods -# it comes with a speed penalty, but the alternative is defining six methods by hand... -@total_ordering -class Reference: - "Describes a location by file and line." - def __init__(self, namespace, filename, lineno=None, docstring=None, args=None): - self.namespace = namespace - self.filename = filename - self.lineno = lineno - self.docstring = docstring - self.args = args - self.references = collections.defaultdict(list) - self.undef = None - - def append(self, fn, n, a=None): - self.references[fn].append((n, a)) - - def dump_references(self): - "Dump all known references to this definition." 
- for (file, refs) in self.references.items(): - print(" %s: %s" % (file, repr([x[0] for x in refs])[1:-1])) - - def __eq__(self, other): - return self.filename == other.filename and self.lineno == other.lineno - - def __gt__(self, other): - # Major sort by file, minor by line number. This presumes that the - # files correspond to coherent topics and gives us control of the - # sequence. - if self.filename == other.filename: - return self.lineno > other.lineno - else: - return self.filename > other.filename - - def mismatches(self): - copy = Reference(self.namespace, self.filename, self.lineno, self.docstring, self.args) - copy.undef = self.undef - for filename in self.references: - mis = [(ln,a) for (ln,a) in self.references[filename] if a is not None and not argmatch(self.args, a)] - if mis: - copy.references[filename] = mis - return copy - def __str__(self): - if self.lineno: - return '"%s", line %d' % (self.filename, self.lineno) - else: - return self.filename - __repr__ = __str__ - -class CrossRef: - macro_reference = re.compile(r"\{([A-Z_][A-Za-z0-9_:]*)(?!\.)\b") - file_reference = re.compile(r"[A-Za-z0-9{}.][A-Za-z0-9_/+{}.@-]*\.(" + "|".join(resource_extensions) + ")(?=(~.*)?)") - tag_parse = re.compile("\s*([a-z_]+)\s*=(.*)") - def mark_matching_resources(self, pattern, fn, n): - "Mark all definitions matching a specified pattern with a reference." - pattern = pattern.replace("+", r"\+") - pattern = os.sep + pattern + "$" - if os.sep == "\\": - pattern = pattern.replace("\\", "\\\\") - try: - pattern = re.compile(pattern) - except sre_constants.error: - print("wmlscope: confused by %s" % pattern, file=sys.stderr) - return None - key = None - for trial in self.fileref: - if pattern.search(trial) and self.visible_from(trial, fn, n): - key = trial - self.fileref[key].append(fn, n) - return key - def visible_from(self, defn, fn, n): - "Is specified definition visible from the specified file and line?" 
- if isinstance(defn, basestring): - defn = self.fileref[defn] - if defn.undef is not None: - # Local macros are only visible in the file where they were defined - return defn.filename == fn and n >= defn.lineno and n <= defn.undef - if self.exports(defn.namespace): - # Macros and resources in subtrees with export=yes are global - return True - elif not self.filelist.neighbors(defn.filename, fn): - # Otherwise, must be in the same subtree. - return False - else: - # If the two files are in the same subtree, assume visibility. - # This doesn't match the actual preprocessor semantics. - # It means any macro without an undef is visible anywhere in the - # same argument directory. - # - # We can't do better than this without a lot of hairy graph- - # coloring logic to simulate include path interpretation. - # If that logic ever gets built, it will go here. - return True - def scan_for_definitions(self, namespace, filename): - ignoreflag = False - conditionalsflag = False - with codecs.open(filename, "r", "utf8") as dfp: - state = "outside" - latch_unit = in_base_unit = in_theme = False - for (n, line) in enumerate(dfp): - if self.warnlevel > 1: - print(repr(line)[1:-1]) - if line.strip().startswith("#textdomain"): - continue - m = re.search("# *wmlscope: warnlevel ([0-9]*)", line) - if m: - self.warnlevel = int(m.group(1)) - print('"%s", line %d: warnlevel set to %d (definition-gathering pass)' \ - % (filename, n+1, self.warnlevel)) - continue - m = re.search("# *wmlscope: set *([^=]*)=(.*)", line) - if m: - prop = m.group(1).strip() - value = m.group(2).strip() - if namespace not in self.properties: - self.properties[namespace] = {} - self.properties[namespace][prop] = value - m = re.search("# *wmlscope: prune (.*)", line) - if m: - name = m.group(1) - if self.warnlevel >= 2: - print('"%s", line %d: pruning definitions of %s' \ - % (filename, n+1, name )) - if name not in self.xref: - print("wmlscope: can't prune undefined macro %s" % name, file=sys.stderr) - else: - 
self.xref[name] = self.xref[name][:1] - continue - if "# wmlscope: start conditionals" in line: - if self.warnlevel > 1: - print('"%s", line %d: starting conditionals' \ - % (filename, n+1)) - conditionalsflag = True - elif "# wmlscope: stop conditionals" in line: - if self.warnlevel > 1: - print('"%s", line %d: stopping conditionals' \ - % (filename, n+1)) - conditionalsflag = False - if "# wmlscope: start ignoring" in line: - if self.warnlevel > 1: - print('"%s", line %d: starting ignoring (definition pass)' \ - % (filename, n+1)) - ignoreflag = True - elif "# wmlscope: stop ignoring" in line: - if self.warnlevel > 1: - print('"%s", line %d: stopping ignoring (definition pass)' \ - % (filename, n+1)) - ignoreflag = False - elif ignoreflag: - continue - if line.strip().startswith("#define"): - tokens = line.split() - if len(tokens) < 2: - print('"%s", line %d: malformed #define' \ - % (filename, n+1), file=sys.stderr) - else: - name = tokens[1] - here = Reference(namespace, filename, n+1, line, args=tokens[2:]) - here.hash = hashlib.md5() - here.docstring = line.lstrip()[8:] # Strip off #define_ - state = "macro_header" - continue - elif state != 'outside' and line.strip().endswith("#enddef"): - here.hash.update(line.encode("utf8")) - here.hash = here.hash.digest() - if name in self.xref: - for defn in self.xref[name]: - if not self.visible_from(defn, filename, n+1): - continue - elif conditionalsflag: - continue - elif defn.hash != here.hash: - print("%s: overrides different %s definition at %s" \ - % (here, name, defn), file=sys.stderr) - elif self.warnlevel > 0: - print("%s: duplicates %s definition at %s" \ - % (here, name, defn), file=sys.stderr) - if name not in self.xref: - self.xref[name] = [] - self.xref[name].append(here) - state = "outside" - elif state == "macro_header" and line.strip() and line.strip()[0] != "#": - state = "macro_body" - if state == "macro_header": - # Ignore macro header commends that are pragmas - if "wmlscope" not in line and 
"wmllint:" not in line: - here.docstring += line.lstrip()[1:] - if state in ("macro_header", "macro_body"): - here.hash.update(line.encode("utf8")) - elif line.strip().startswith("#undef"): - tokens = line.split() - name = tokens[1] - if name in self.xref and self.xref[name]: - self.xref[name][-1].undef = n+1 - else: - print("%s: unbalanced #undef on %s" \ - % (Reference(namespace, filename, n+1), name)) - if state == 'outside': - if '[unit_type]' in line: - latch_unit = True - elif '[/unit_type]' in line: - latch_unit = False - elif '[base_unit]' in line: - in_base_unit = True - elif '[/base_unit]' in line: - in_base_unit = False - elif '[theme]' in line: - in_theme = True - elif '[/theme]' in line: - in_theme = False - elif latch_unit and not in_base_unit and not in_theme and "id" in line: - m = CrossRef.tag_parse.search(line) - if m and m.group(1) == "id": - uid = m.group(2) - if uid not in self.unit_ids: - self.unit_ids[uid] = [] - self.unit_ids[uid].append(Reference(namespace, filename, n+1)) - latch_unit= False - def __init__(self, dirpath=[], exclude="", warnlevel=0, progress=False): - "Build cross-reference object from the specified filelist." 
- self.filelist = Forest(dirpath, exclude) - self.dirpath = [x for x in dirpath if not re.search(exclude, x)] - self.warnlevel = warnlevel - self.xref = {} - self.fileref = {} - self.noxref = False - self.properties = {} - self.unit_ids = {} - all_in = [] - if self.warnlevel >=2 or progress: - print("*** Beginning definition-gathering pass...") - for (namespace, filename) in self.filelist.generator(): - all_in.append((namespace, filename)) - if self.warnlevel > 1: - print(filename + ":") - if progress: - print(filename) - if isresource(filename): - self.fileref[filename] = Reference(namespace, filename) - elif iswml(filename): - # It's a WML file, scan for macro definitions - self.scan_for_definitions(namespace, filename) - elif filename.endswith(".def"): - # It's a list of names to be considered defined - self.noxref = True - with codecs.open(filename, "r", "utf8") as dfp: - for line in dfp: - self.xref[line.strip()] = True - # Next, decorate definitions with all references from the filelist. 
- self.unresolved = [] - self.missing = [] - formals = [] - state = "outside" - if self.warnlevel >=2 or progress: - print("*** Beginning reference-gathering pass...") - for (ns, fn) in all_in: - if progress: - print(filename) - if iswml(fn): - with codecs.open(fn, "r", "utf8") as rfp: - attack_name = None - beneath = 0 - ignoreflag = False - for (n, line) in enumerate(rfp): - if line.strip().startswith("#define"): - formals = line.strip().split()[2:] - elif line.startswith("#enddef"): - formals = [] - comment = "" - if '#' in line: - if "# wmlscope: start ignoring" in line: - if self.warnlevel > 1: - print('"%s", line %d: starting ignoring (reference pass)' \ - % (fn, n+1)) - ignoreflag = True - elif "# wmlscope: stop ignoring" in line: - if self.warnlevel > 1: - print('"%s", line %d: stopping ignoring (reference pass)' \ - % (fn, n+1)) - ignoreflag = False - m = re.search("# *wmlscope: self.warnlevel ([0-9]*)", line) - if m: - self.warnlevel = int(m.group(1)) - print('"%s", line %d: self.warnlevel set to %d (reference-gathering pass)' \ - % (fn, n+1, self.warnlevel)) - continue - fields = line.split('#') - line = fields[0] - if len(fields) > 1: - comment = fields[1] - if ignoreflag or not line: - continue - # Find references to macros - for match in re.finditer(CrossRef.macro_reference, line): - name = match.group(1) - candidates = [] - if self.warnlevel >=2: - print('"%s", line %d: seeking definition of %s' \ - % (fn, n+1, name)) - if name in formals: - continue - elif name in self.xref: - # Count the number of actual arguments. 
- # Set args to None if the call doesn't - # close on this line - (args, brackdepth, parendepth) = parse_macroref(match.start(0), line) - if brackdepth > 0 or parendepth > 0: - args = None - else: - args.pop(0) - #if args: - # print('"%s", line %d: args of %s is %s' \ - # % (fn, n+1, name, args)) - # Figure out which macros might resolve this - for defn in self.xref[name]: - if self.visible_from(defn, fn, n+1): - defn.append(fn, n+1, args) - candidates.append(str(defn)) - if len(candidates) > 1: - print("%s: more than one definition of %s is visible here (%s)." % (Reference(ns, fn, n), name, "; ".join(candidates))) - if len(candidates) == 0: - self.unresolved.append((name,Reference(ns,fn,n+1))) - # Don't be fooled by HTML image references in help strings. - if "" in line: - continue - # Find references to resource files - for match in re.finditer(CrossRef.file_reference, line): - name = match.group(0) - # Catches maps that look like macro names. - if (name.endswith(".map") or name.endswith(".mask")) and name[0] == '{': - name = name[1:] - if os.sep == "\\": - name = name.replace("/", "\\") - key = None - # If name is already in our resource list, it's easy. - if name in self.fileref and self.visible_from(name, fn, n): - self.fileref[name].append(fn, n+1) - continue - # If the name contains substitutable parts, count - # it as a reference to everything the substitutions - # could potentially match. - elif '{' in name or '@' in name: - pattern = re.sub(r"(\{[^}]*\}|@R0|@V)", '.*', name) - key = self.mark_matching_resources(pattern, fn,n+1) - if key: - self.fileref[key].append(fn, n+1) - else: - candidates = [] - for trial in self.fileref: - if trial.endswith(os.sep + name) and self.visible_from(trial, fn, n): - key = trial - self.fileref[trial].append(fn, n+1) - candidates.append(trial) - if len(candidates) > 1: - print("%s: more than one resource matching %s is visible here (%s)." 
% (Reference(ns,fn, n), name, ", ".join(candidates))) - if not key: - self.missing.append((name, Reference(ns,fn,n+1))) - # Notice implicit references through attacks - if state == "outside": - if "[attack]" in line: - beneath = 0 - attack_name = default_icon = None - have_icon = False - elif "name=" in line and not "no-icon" in comment: - attack_name = line[line.find("name=")+5:].strip() - default_icon = os.path.join("attacks", attack_name + ".png") - elif "icon=" in line and beneath == 0: - have_icon = True - elif "[/attack]" in line: - if attack_name and not have_icon: - candidates = [] - key = None - for trial in self.fileref: - if trial.endswith(os.sep + default_icon) and self.visible_from(trial, fn, n): - key = trial - self.fileref[trial].append(fn, n+1) - candidates.append(trial) - if len(candidates) > 1: - print("%s: more than one definition of %s is visible here (%s)." % (Reference(ns,fn, n), name, ", ".join(candidates))) - if not key: - self.missing.append((default_icon, Reference(ns,fn,n+1))) - elif line.strip().startswith("[/"): - beneath -= 1 - elif line.strip().startswith("["): - beneath += 1 - # Check whether each namespace has a defined export property - for namespace in self.dirpath: - if namespace not in self.properties or "export" not in self.properties[namespace]: - print("warning: %s has no export property" % namespace) - def exports(self, namespace): - return namespace in self.properties and self.properties[namespace].get("export") == "yes" - def subtract(self, filelist): - - "Transplant file references in files from filelist to a new CrossRef." 
- smallref = CrossRef() - for filename in self.fileref: - for (referrer, referlines) in self.fileref[filename].references.items(): - if referrer in filelist: - if filename not in smallref.fileref: - smallref.fileref[filename] = Reference(None, filename) - smallref.fileref[filename].references[referrer] = referlines - del self.fileref[filename].references[referrer] - return smallref - def refcount(self, name): - "Return a reference count for the specified resource." - try: - return len(self.fileref[name].references) - except KeyError: - return 0 - -# -# String translations from po files. The advantage of this code is that it -# does not require the gettext binary message catalogs to have been compiled. -# The disadvantage is that it eats lots of core! -# - - -class TranslationError(Exception): - def __init__(self, textdomain, isocode): - self.isocode = isocode - self.textdomain = textdomain - def __str__(self): - return "No translations found for %s/%s.\n" % ( - self.textdomain, self.isocode) - -class Translation(dict): - "Parses a po file to create a translation dictionary." 
- def __init__(self, textdomain, isocode, topdir=""): - self.textdomain = textdomain - self.isocode = isocode - self.gettext = {} - if self.isocode != "C": - isocode2 = isocode[:isocode.rfind("_")] - for code in [isocode, isocode2]: - fn = "po/%s/%s.po" % (textdomain, code) - if topdir: fn = os.path.join(topdir, fn) - try: - f = file(fn) - break - except IOError: - pass - else: - raise TranslationError(textdomain, self.isocode) - - expect = False - fuzzy = "#, fuzzy\n" - gettext = f.read().decode("utf8") - matches = re.compile("""(msgid|msgstr)((\s*".*?")+)""").finditer(gettext) - msgid = "" - for match in matches: - text = "".join(re.compile('"(.*?)"').findall(match.group(2))) - if match.group(1) == "msgid": - msgid = text.replace("\\n", "\n") - expect = gettext[match.start(1) - len(fuzzy):match.start(1)] != fuzzy - elif expect: - self.gettext[msgid] = text.replace("\\n", "\n") - def get(self, key, dflt): - if self.isocode == "C": - if key: - return key[key.find("^") + 1:] - return "?" - else: - t = self.gettext.get(key, dflt) - if not t: - if key: - return key[key.find("^") + 1:] - return "?" - return t - def __getitem__(self, key): - if self.isocode == "C": - return key - else: - return self.gettext[key] - def __contains__(self, key): - if self.isocode == "C": - return True - else: - return key in self.gettext - -class Translations: - "Wraps around Translation to support multiple languages and domains." 
- def __init__(self, topdir = ""): - self.translations = {} - self.topdir = topdir - def get(self, textdomain, isocode, key, default): - t = (textdomain, isocode) - if not t in self.translations: - try: - self.translations[t] = Translation(textdomain, isocode, self.topdir) - except TranslationError as e: - print(str(e), file=sys.stderr) - self.translations[t] = Translation(textdomain, "C", self.topdir) - result = self.translations[t].get(key, default) - return result - -## Namespace management -# -# This is the only part of the code that actually knows about the -# shape of the data tree. - -def scopelist(): - "Return a list of (separate) package scopes, core first." - return ["data/core"] + glob.glob("data/campaigns/*") - -def is_namespace(name): - "Is the name either a valid campaign name or core?" - return name in map(os.path.basename, scopelist()) - -def namespace_directory(name): - "Go from namespace to directory." - if name == "core": - return "data/core/" - else: - return "data/campaigns/" + name + "/" - -def directory_namespace(path): - "Go from directory to namespace." - if path.startswith("data/core/"): - return "core" - elif path.startswith("data/campaigns/"): - return path.split("/")[2] - else: - return None - -def namespace_member(path, namespace): - "Is a path in a specified namespace?" - ns = directory_namespace(path) - return ns is not None and ns == namespace - -def resolve_unit_cfg(namespace, utype, resource=None): - "Get the location of a specified unit in a specified scope." - if resource: - resource = os.path.join(utype, resource) - else: - resource = utype - loc = namespace_directory(namespace) + "units/" + resource - if not loc.endswith(".cfg"): - loc += ".cfg" - return loc - -def resolve_unit_image(namespace, subdir, resource): - "Construct a plausible location for given resource in specified namespace." 
- return os.path.join(namespace_directory(namespace), "images/units", subdir, resource) - -# And this is for code that does syntax transformation -baseindent = " " - -# wmltools.py ends here -- 2.29.2 From f5b54eac069c7b270826978e2341455c840de103 Mon Sep 17 00:00:00 2001 From: Elvish_Hunter Date: Sat, 21 Sep 2019 14:59:04 +0200 Subject: [PATCH 26/31] Fixed two bugs in wmlscope's square braces expansion The first bug happened when there were two or more square braces in a file name: only one expansion was performed, instead of all of them being applied at once. The second bug happened when a square brace range had leading zeros: these were just removed, resulting in false positives. (cherry picked from commit 20d1cf9a184d6fa74fec8099586c77a920212ee3) --- data/tools/wesnoth/wmltools3.py | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/data/tools/wesnoth/wmltools3.py b/data/tools/wesnoth/wmltools3.py index 5633634c78e..0694ae35b0a 100644 --- a/data/tools/wesnoth/wmltools3.py +++ b/data/tools/wesnoth/wmltools3.py @@ -67,8 +67,16 @@ if no expansion could be performed""" match_range = re.match("(\d+)~(\d+)", token) # range syntax, eg [1~4] if match_range: before, after = int(match_range.group(1)), int(match_range.group(2)) + # does one of the limits have leading zeros? If so, detect the length of the numbers used + if match_range.group(1).startswith("0") or match_range.group(2).startswith("0"): + leading_zeros = max(len(match_range.group(1)), len(match_range.group(2))) + else: + leading_zeros = 0 incr = 1 if before <= after else -1 # to allow iterating in reversed order, eg. 
[4~1] - substitutions[i].extend([Substitution(str(n), match.start(0), match.end(0)) for n in range(before, after + incr, incr)]) + # previously this code used a mere casting to str + # string formatting allows proper handling of leading zeros, if any + fmt_string = "{:0" + str(leading_zeros) + "d}" + substitutions[i].extend([Substitution(fmt_string.format(n), match.start(0), match.end(0)) for n in range(before, after + incr, incr)]) continue substitutions[i].append(Substitution(token, match.start(0), match.end(0))) # no operator found @@ -81,7 +89,8 @@ if no expansion could be performed""" for sub_array in substitutions: new_string = path for sub in reversed(sub_array): # to avoid creating "holes" in the strings - yield new_string[:sub.start] + sub.sub + new_string[sub.end:] # these are the expanded strings + new_string = new_string[:sub.start] + sub.sub + new_string[sub.end:] # these are the expanded strings + yield new_string def is_root(dirname): "Is the specified path the filesystem root?" 
-- 2.29.2 From d4da5bbfa16d6499f9cf1ac48fd6cf89c5bd60fb Mon Sep 17 00:00:00 2001 From: elias-pschernig Date: Mon, 25 Feb 2019 04:29:42 -0500 Subject: [PATCH 27/31] fix wmlunits crash (#3931) * fix wmlunits crash (cherry picked from commit a7e65de2dedd144fe879543d9027e09c85b555dc) --- data/tools/unit_tree/html_output.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/data/tools/unit_tree/html_output.py b/data/tools/unit_tree/html_output.py index 5b90a51fe21..3c8112077c7 100644 --- a/data/tools/unit_tree/html_output.py +++ b/data/tools/unit_tree/html_output.py @@ -326,7 +326,11 @@ class GroupByFaction: if group[1]: faction = era.faction_lookup[group[1]] name = T(faction, "name") - name = name[name.rfind("=") + 1:] + if name: + name = name[name.rfind("=") + 1:] + else: + name = "missing" + error_message("Warning: %s has no faction name\n" % group[1]) else: name = "factionless" return name -- 2.29.2 From 94ddf33874f7e30887a7438d08136e9a8c7a9549 Mon Sep 17 00:00:00 2001 From: Allefant Date: Wed, 6 Mar 2019 11:45:32 -0500 Subject: [PATCH 28/31] fix for #3965 (cherry picked from commit f117a4164602209000ee0e865ae07849a290d462) --- data/tools/unit_tree/html_output.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/data/tools/unit_tree/html_output.py b/data/tools/unit_tree/html_output.py index 3c8112077c7..f85920c1b2d 100644 --- a/data/tools/unit_tree/html_output.py +++ b/data/tools/unit_tree/html_output.py @@ -595,6 +595,8 @@ class HTMLOutput: rname = race.get_text_val("id") if race else "none" if not rname: rname = "none" + if not racename: + racename = rname r[m][racename] = rname racenames = sorted(r[0].items()) if list(r[1].items()): -- 2.29.2 From 7605cd6c37b4eff1f20f78d635e2fad2e0f74a4d Mon Sep 17 00:00:00 2001 From: gfgtdf Date: Mon, 19 Oct 2020 18:05:43 +0200 Subject: [PATCH 29/31] wmlindent: don't indent if #endarg is on the same line this might change behaviour of the arg value (cherry picked from commit 
ae9b533925204156c00bb4286d90ae13443cf556) --- data/tools/wmlindent | 1 + 1 file changed, 1 insertion(+) diff --git a/data/tools/wmlindent b/data/tools/wmlindent index b759e25ed73..8b198b49e03 100755 --- a/data/tools/wmlindent +++ b/data/tools/wmlindent @@ -69,6 +69,7 @@ opener_prefixes = ["{FOREACH "] def is_directive(str): "Identify things that shouldn't be indented." + if "#endarg" in str: return True return str.startswith(("#ifdef", "#ifndef", "#ifhave", "#ifnhave", "#ifver", "#ifnver", "#else", "#endif", "#define", "#enddef", "#undef", "#arg", "#endarg")) def closer(str): -- 2.29.2 From 881b5ddde09ef43190e74654fcd45b07debd4707 Mon Sep 17 00:00:00 2001 From: Gunter Labes Date: Sun, 16 Aug 2020 22:43:14 +0200 Subject: [PATCH 30/31] Use __WMLUNITS__ define for all parse calls Enables workarounds for wmlunits in core unit files. (cherry picked from commit 6e60db597d25b4c04671b376d929fe534e118b37) --- data/tools/wmlunits | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/data/tools/wmlunits b/data/tools/wmlunits index 0a2c3c4cb64..12bc261f40c 100755 --- a/data/tools/wmlunits +++ b/data/tools/wmlunits @@ -374,7 +374,7 @@ def process_campaign_or_era(addon, cid, define, batchlist): wesnoth.batchlist = batchlist wesnoth.cid = cid - wesnoth.parser.parse_text("{core/units.cfg}", "NORMAL") + wesnoth.parser.parse_text("{core/units.cfg}", "__WMLUNITS__,NORMAL") wesnoth.add_units("mainline") if define == "MULTIPLAYER": @@ -640,7 +640,7 @@ if __name__ == '__main__': parser = wmlparser3.Parser(options.wesnoth, options.config_dir, options.data_dir) - parser.parse_text("{languages}") + parser.parse_text("{languages}", "__WMLUNITS__") for locale in parser.get_all(tag="locale"): isocode = locale.get_text_val("locale") -- 2.29.2 From 4c49b8bcc3aa28c7fa2628f16e92cc570f3a4c70 Mon Sep 17 00:00:00 2001 From: hexagonrecursion Date: Wed, 13 Jan 2021 16:44:54 +0000 Subject: [PATCH 31/31] Add a note about wmllint-1.4 (cherry picked from commit 
d9ec9b9b55a5063a7709c52e6f28edaedad918a3) --- data/tools/README.md | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/data/tools/README.md b/data/tools/README.md index a4d984b2887..aaad23d2570 100644 --- a/data/tools/README.md +++ b/data/tools/README.md @@ -27,10 +27,12 @@ in a specified way. Now supports only flipping the map around the Y axis, but provides a framework that should make other transformations easy. -=== wmllint === +=== wmllint and wmllint-1.4 === This tool lifts WML from older dialects to current and performs sanity checks. See the header comment of wmllint for a description and invocation options. +wmllint-1.4 is an older version of wmllint kept alive because wmllint is +incompatible with pre-1.4 syntax. === GUI.pyw === -- 2.29.2