diff options
author | Muthu Subramanian <sumuthu@novell.com> | 2010-11-17 19:28:42 +0530 |
---|---|---|
committer | Muthu Subramanian <sumuthu@novell.com> | 2010-11-17 19:28:42 +0530 |
commit | 6c2803b624ff8b19d518adb2fa042e853c1e15bc (patch) | |
tree | e92b436edf90b990ecdd2a5a606dd4b9c6fced38 /helpcontent2 | |
parent | fbe2b95824dd3bd2f14dfb5af5c3344925c77592 (diff) |
Changes to include localization strings from .sdf files.
Diffstat (limited to 'helpcontent2')
-rwxr-xr-x | helpcontent2/to-wiki/convall.py | 5 | ||||
-rwxr-xr-x | helpcontent2/to-wiki/wikiconv2.py | 52 |
2 files changed, 48 insertions, 9 deletions
diff --git a/helpcontent2/to-wiki/convall.py b/helpcontent2/to-wiki/convall.py index afc510e3dc..5e72666185 100755 --- a/helpcontent2/to-wiki/convall.py +++ b/helpcontent2/to-wiki/convall.py @@ -3,6 +3,7 @@ import os, sys titles = [[]] +localization = "" def loadallfiles(filename): global titles @@ -12,6 +13,8 @@ def loadallfiles(filename): titles.append(title) loadallfiles("alltitles.csv") +if len(sys.argv) > 1: + localization = sys.argv[1] for title in titles: command = "" @@ -20,7 +23,7 @@ for title in titles: if len(title) > 1: outfile = "wiki/"+title[1].strip() infile = title[0].strip() - command = "python to-wiki/wikiconv2.py "+infile+" "+title[1].strip()+" > "+outfile + command = "python to-wiki/wikiconv2.py "+infile+" "+title[1].strip()+" "+localization+" > "+outfile try: file = open(outfile,"r") diff --git a/helpcontent2/to-wiki/wikiconv2.py b/helpcontent2/to-wiki/wikiconv2.py index 55e938f4c7..112032f7c2 100755 --- a/helpcontent2/to-wiki/wikiconv2.py +++ b/helpcontent2/to-wiki/wikiconv2.py @@ -2,11 +2,14 @@ import sys, signal import xml.parsers.expat +import codecs root="source/" titles = [] +localization_data = [[]] + # list of elements that we can directly convert to wiki text replace_element = \ {'start':{'br': '<br/>', @@ -101,14 +104,14 @@ help_file_name = "" all_help_id_mappings = [[]] def load_all_help_ids(): - file = open("helpers/help_hid.lst") + file = codecs.open("helpers/help_hid.lst", "r", "utf-8") for line in file: ids = line.strip().upper().split(",") if len(ids) >= 2: all_help_id_mappings.append(ids) def get_help_id_res2(name): - file = open("helpers/hid.lst") + file = codecs.open("helpers/hid.lst", "r", "utf-8") for line in file: ids = line.strip().upper().split(" ") if len(ids) >= 2: @@ -148,6 +151,27 @@ def replace_text(text): text = text.replace(i[0],i[1]) return text +def load_localization_data(sdf_file): + try: + file = codecs.open(sdf_file, "r", "utf-8") + for line in file: + line = line.strip() + # TODO: Check if multiple \t needs
to be merged + if line.find("#") == 0: + continue + localization_data.append(line.split("\t")) + except: + return + +def get_localized_text(id, text): + for line in localization_data: + try: + if line[4].strip() == id.strip(): + return line[10] + except: + pass + return text + def href_to_fname_id(href): link = href.replace('"', '') fname = link @@ -198,7 +222,7 @@ class ElementBase: # construct the wiki representation of this object, including the objects # held in self.objects (here only the text of the objects) def get_all(self): - text = '' + text = u'' for i in self.objects: text = text + i.get_all() return text @@ -299,7 +323,7 @@ class Bookmark(ElementBase): @staticmethod def save_bookmarks(): - file = open("bookmarks.h","a") + file = codecs.open("bookmarks.h", "a", "utf-8") for i in Bookmark.bookmarks_list: file.write(i.encode('ascii','replace')+"\n") file.close() @@ -601,6 +625,11 @@ class Paragraph(ElementBase): except: self.role = 'paragraph' + try: + self.id = attrs['id'] + except: + self.id = "" + try: self.level=int(attrs['level']) except: @@ -647,7 +676,7 @@ class Paragraph(ElementBase): pass def char_data(self, parser, data): - self.objects.append(Text(data)) + self.objects.append(Text(get_localized_text(self.id, data))) def get_all(self): role = self.role @@ -713,7 +742,7 @@ class XhpParser: self.filename = filename self.follow_embed = follow_embed - file = open(filename, "r") + file = codecs.open(filename, "r", "utf-8") p = xml.parsers.expat.ParserCreate() p.StartElementHandler = self.start_element @@ -741,7 +770,7 @@ class XhpParser: def loadallfiles(filename): global titles - file = open(filename, "r") + file = codecs.open(filename, "r", "utf-8") for line in file: title = line.split(";", 2) titles.append(title) @@ -752,12 +781,15 @@ def signal_handler(signal, frame): signal.signal(signal.SIGINT, signal_handler) if len(sys.argv) < 2: - print "wikiconv2.py <inputfile.xph>" + print "wikiconv2.py <inputfile.xph> [Help Filename] [localize.sdf]"
sys.exit(1) if len(sys.argv) > 2: help_file_name = sys.argv[2] +if len(sys.argv) > 3: + load_localization_data(sys.argv[3]) + # TODO: Currently the following files are loaded for every # file which is converted. Combine the batch converter with # this file to generate quicker help files. @@ -765,6 +797,10 @@ load_all_help_ids() loadallfiles("alltitles.csv") parser = XhpParser(sys.argv[1], True) +# Enable these lines once the convall.py is combined with this one +# file1 = codecs.open(helpfilename, "wb", "utf-8") +# file1.write(parser.get_all())) +# file1.close() print parser.get_all().encode('ascii','replace') Bookmark.save_bookmarks() |