2017-05-12 09:59:18 +00:00
|
|
|
#!/usr/bin/env python2
|
2011-04-27 10:05:43 +00:00
|
|
|
#############################################################################
|
|
|
|
##
|
2020-01-09 19:47:23 +00:00
|
|
|
## Copyright (C) 2020 The Qt Company Ltd.
|
2016-01-15 12:36:27 +00:00
|
|
|
## Contact: https://www.qt.io/licensing/
|
2011-04-27 10:05:43 +00:00
|
|
|
##
|
|
|
|
## This file is part of the test suite of the Qt Toolkit.
|
|
|
|
##
|
2016-01-15 12:36:27 +00:00
|
|
|
## $QT_BEGIN_LICENSE:GPL-EXCEPT$
|
2012-09-19 12:28:29 +00:00
|
|
|
## Commercial License Usage
|
|
|
|
## Licensees holding valid commercial Qt licenses may use this file in
|
|
|
|
## accordance with the commercial license agreement provided with the
|
|
|
|
## Software or, alternatively, in accordance with the terms contained in
|
2015-01-28 08:44:43 +00:00
|
|
|
## a written agreement between you and The Qt Company. For licensing terms
|
2016-01-15 12:36:27 +00:00
|
|
|
## and conditions see https://www.qt.io/terms-conditions. For further
|
|
|
|
## information use the contact form at https://www.qt.io/contact-us.
|
2012-09-19 12:28:29 +00:00
|
|
|
##
|
2016-01-15 12:36:27 +00:00
|
|
|
## GNU General Public License Usage
|
|
|
|
## Alternatively, this file may be used under the terms of the GNU
|
|
|
|
## General Public License version 3 as published by the Free Software
|
|
|
|
## Foundation with exceptions as appearing in the file LICENSE.GPL3-EXCEPT
|
|
|
|
## included in the packaging of this file. Please review the following
|
|
|
|
## information to ensure the GNU General Public License requirements will
|
|
|
|
## be met: https://www.gnu.org/licenses/gpl-3.0.html.
|
2011-04-27 10:05:43 +00:00
|
|
|
##
|
|
|
|
## $QT_END_LICENSE$
|
|
|
|
##
|
|
|
|
#############################################################################
|
2017-05-23 13:24:35 +00:00
|
|
|
"""Script to generate C++ code from CLDR data in qLocaleXML form
|
|
|
|
|
|
|
|
See ``cldr2qlocalexml.py`` for how to generate the qLocaleXML data itself.
|
|
|
|
Pass the output file from that as first parameter to this script; pass
|
|
|
|
the root of the qtbase check-out as second parameter.
|
|
|
|
"""
|
2011-04-27 10:05:43 +00:00
|
|
|
|
|
|
|
import os
|
|
|
|
import sys
|
|
|
|
import tempfile
|
|
|
|
import datetime
|
|
|
|
import xml.dom.minidom
|
2018-08-13 12:32:18 +00:00
|
|
|
from enumdata import language_aliases, country_aliases, script_aliases
|
2011-04-27 10:05:43 +00:00
|
|
|
|
2017-05-30 13:50:47 +00:00
|
|
|
from localexml import Locale
|
|
|
|
|
2017-01-14 16:53:31 +00:00
|
|
|
# TODO: Make calendars a command-line parameter
# map { CLDR name: Qt file name }
# Keys name the calendars read from the qLocaleXML data (passed to
# Locale.fromXmlData below); values are presumably the names used for Qt's
# generated per-calendar data files — TODO confirm against the calendar
# generation code at the end of this script.
calendars = {'gregorian': 'roman', 'persian': 'jalali', 'islamic': 'hijri',} # 'hebrew': 'hebrew',
|
2017-01-14 16:53:31 +00:00
|
|
|
|
2019-07-26 13:42:04 +00:00
|
|
|
generated_template = """
|
|
|
|
/*
|
|
|
|
This part of the file was generated on %s from the
|
|
|
|
Common Locale Data Repository v%s
|
|
|
|
|
|
|
|
http://www.unicode.org/cldr/
|
|
|
|
|
|
|
|
Do not edit this section: instead regenerate it using
|
|
|
|
cldr2qlocalexml.py and qlocalexml2cpp.py on updated (or
|
|
|
|
edited) CLDR data; see qtbase/util/locale_database/.
|
|
|
|
*/
|
|
|
|
|
|
|
|
"""
|
|
|
|
|
2011-04-27 10:05:43 +00:00
|
|
|
class Error(Exception):
    """Error type for this script's own failure reports.

    The original class did not derive from Exception; raising instances of
    such a class is deprecated in Python 2 and invalid in Python 3.
    Deriving from Exception keeps all existing raise/except/str usage
    working while making the class a proper exception.
    """
    def __init__(self, msg):
        super(Error, self).__init__(msg)
        # Keep .msg available: existing code reads it directly.
        self.msg = msg

    def __str__(self):
        return self.msg
|
|
|
|
|
|
|
|
def wrap_list(lst):
    """Join the strings in lst with commas, 20 entries per output line."""
    def chunks(seq, step=20):
        # Yield successive step-sized slices of seq.
        for start in range(0, len(seq), step):
            yield seq[start:start + step]
    return ",\n".join(", ".join(chunk) for chunk in chunks(lst))
|
2011-04-27 10:05:43 +00:00
|
|
|
|
2017-06-08 10:19:23 +00:00
|
|
|
def isNodeNamed(elt, name, TYPE=xml.dom.minidom.Node.ELEMENT_NODE):
    """Return True precisely if elt is a DOM element node tagged name."""
    if elt.nodeType != TYPE:
        return False
    return elt.nodeName == name
|
|
|
|
|
2011-04-27 10:05:43 +00:00
|
|
|
def firstChildElt(parent, name):
    """Return parent's first child element with the given tag name.

    Raises Error when parent has no such child.
    """
    node = parent.firstChild
    while node is not None:
        if isNodeNamed(node, name):
            return node
        node = node.nextSibling

    raise Error('No %s child found' % name)
|
|
|
|
|
|
|
|
def eachEltInGroup(parent, group, key):
    """Iterate the key-named elements inside parent's group-named child.

    Yields each element tagged key that is a direct child of the first
    element tagged group under parent; yields nothing when parent has no
    such group child.
    """
    try:
        node = firstChildElt(parent, group).firstChild
    except Error:
        return

    while node:
        if isNodeNamed(node, key):
            yield node
        node = node.nextSibling
|
2011-04-27 10:05:43 +00:00
|
|
|
|
2017-05-31 19:42:11 +00:00
|
|
|
def eltWords(elt):
    """Yield the text of each direct text-node child of elt, in order."""
    node = elt.firstChild
    while node is not None:
        if node.nodeType == elt.TEXT_NODE:
            yield node.nodeValue
        node = node.nextSibling
|
2017-05-31 19:42:11 +00:00
|
|
|
|
|
|
|
def firstChildText(elt, key):
    """Return the text of elt's first child element named key.

    The child's direct text nodes are joined with single spaces.
    """
    words = eltWords(firstChildElt(elt, key))
    return ' '.join(words)
|
2011-04-27 10:05:43 +00:00
|
|
|
|
2017-06-08 10:30:22 +00:00
|
|
|
def loadMap(doc, category):
    """Build {id: (name, code)} from the document's category list.

    Reads each <category> element under the <categoryList> child of the
    document root; keys are the integer <id> texts, values pair the <name>
    and <code> texts.
    """
    table = {}
    for entry in eachEltInGroup(doc.documentElement,
                                category + 'List', category):
        ident = int(firstChildText(entry, 'id'))
        table[ident] = (firstChildText(entry, 'name'),
                        firstChildText(entry, 'code'))
    return table
|
2011-04-27 10:05:43 +00:00
|
|
|
|
2012-11-19 17:12:58 +00:00
|
|
|
def loadLikelySubtagsMap(doc):
    """Read the likely sub-tags table from the document.

    Returns {position: {'from': triple, 'to': triple}} where each triple is
    the (language, script, country) texts of the matching element and
    position is the entry's index in document order.
    """
    def read_triple(element):
        # Collect the three sub-tag texts of one <from> or <to> element.
        return tuple(firstChildText(element, tag)
                     for tag in ('language', 'script', 'country'))

    table = {}
    for position, elt in enumerate(eachEltInGroup(doc.documentElement,
                                                  'likelySubtags', 'likelySubtag')):
        table[position] = {'from': read_triple(firstChildElt(elt, "from")),
                           'to': read_triple(firstChildElt(elt, "to"))}
    return table
|
2011-04-27 10:05:43 +00:00
|
|
|
|
|
|
|
def fixedScriptName(name, dupes):
    """Turn a raw script name into its enum-member form.

    Upper-cases the first letter of every word, concatenates the words and
    ensures a 'Script' suffix.  Exits the program if the result collides
    with a name in dupes.
    """
    # Keep the tail of each word untouched: some names are already
    # camel-case (see enumdata.py), so .capitalize() would mangle them.
    name = ''.join(w[0].upper() + w[1:] for w in name.split())
    if not name.endswith("Script"):
        name += "Script"

    if name in dupes:
        sys.stderr.write("\n\n\nERROR: The script name '%s' is messy" % name)
        sys.exit(1)
    return name
|
|
|
|
|
|
|
|
def fixedCountryName(name, dupes):
    """Strip spaces from a country name.

    Appends 'Country' when the name also occurs in dupes (i.e. is shared
    with a language name) so the enum members stay distinct.
    """
    compact = name.replace(" ", "")
    return compact + "Country" if name in dupes else compact
|
|
|
|
|
|
|
|
def fixedLanguageName(name, dupes):
    """Strip spaces from a language name.

    Appends 'Language' when the name also occurs in dupes (i.e. is shared
    with a country name) so the enum members stay distinct.
    """
    squashed = name.replace(" ", "")
    if name in dupes:
        return squashed + "Language"
    return squashed
|
|
|
|
|
|
|
|
def findDupes(country_map, language_map):
    """Return the set of display names used both as a country name and as
    a language name (callers add suffixes to disambiguate these)."""
    country_names = set(v[0] for v in country_map.values())
    language_names = set(v[0] for v in language_map.values())
    return country_names & language_names
|
|
|
|
|
|
|
|
def languageNameToId(name, language_map):
    """Return the id whose language_map entry is named name, or -1."""
    for ident, entry in language_map.items():
        if entry[0] == name:
            return ident
    return -1
|
|
|
|
|
|
|
|
def scriptNameToId(name, script_map):
    """Return the id whose script_map entry is named name, or -1."""
    matches = (ident for ident, entry in script_map.items()
               if entry[0] == name)
    return next(matches, -1)
|
|
|
|
|
|
|
|
def countryNameToId(name, country_map):
    """Return the id whose country_map entry is named name, or -1."""
    for ident in country_map:
        if country_map[ident][0] == name:
            return ident
    return -1
|
|
|
|
|
2012-11-21 04:08:24 +00:00
|
|
|
def loadLocaleMap(doc, language_map, script_map, country_map, likely_subtags_map):
    """Read every locale from the qLocaleXML document.

    Returns a dict mapping (language id, script id, country id) triples to
    Locale objects.  Names with no known id are reported on stderr (and
    keep id -1).  When a locale's script is unspecified (id 0), the likely
    sub-tags table is consulted to infer a default script for its
    language/country, falling back to the language alone.
    """
    result = {}

    for locale_elt in eachEltInGroup(doc.documentElement, "localeList", "locale"):
        locale = Locale.fromXmlData(lambda k: firstChildText(locale_elt, k), calendars.keys())
        language_id = languageNameToId(locale.language, language_map)
        if language_id == -1:
            sys.stderr.write("Cannot find a language id for '%s'\n" % locale.language)
        script_id = scriptNameToId(locale.script, script_map)
        if script_id == -1:
            sys.stderr.write("Cannot find a script id for '%s'\n" % locale.script)
        country_id = countryNameToId(locale.country, country_map)
        if country_id == -1:
            sys.stderr.write("Cannot find a country id for '%s'\n" % locale.country)

        # Language id 1 is the C locale, which gets no inference:
        if language_id != 1: # C
            if country_id == 0:
                sys.stderr.write("loadLocaleMap: No country id for '%s'\n" % locale.language)

            if script_id == 0:
                # find default script for a given language and country (see http://www.unicode.org/reports/tr35/#Likely_Subtags)
                for key in likely_subtags_map.keys():
                    tmp = likely_subtags_map[key]
                    if tmp["from"][0] == locale.language and tmp["from"][1] == "AnyScript" and tmp["from"][2] == locale.country:
                        locale.script = tmp["to"][1]
                        script_id = scriptNameToId(locale.script, script_map)
                        break

            if script_id == 0 and country_id != 0:
                # try with no country
                for key in likely_subtags_map.keys():
                    tmp = likely_subtags_map[key]
                    if tmp["from"][0] == locale.language and tmp["from"][1] == "AnyScript" and tmp["from"][2] == "AnyCountry":
                        locale.script = tmp["to"][1]
                        script_id = scriptNameToId(locale.script, script_map)
                        break

        result[(language_id, script_id, country_id)] = locale

    return result
|
|
|
|
|
|
|
|
def compareLocaleKeys(key1, key2):
    """cmp()-style comparator for (language, script, country) id triples.

    Orders by language id first; within one language, the locale whose
    country is the default for its language/script (per the likely
    sub-tags data) sorts first, then remaining entries order by script id
    and finally by country id.  The caller must set
    compareLocaleKeys.locale_map and compareLocaleKeys.default_map before
    sorting with this comparator.
    """
    if key1 == key2:
        return 0

    if key1[0] == key2[0]:
        l1 = compareLocaleKeys.locale_map[key1]
        l2 = compareLocaleKeys.locale_map[key2]

        if (l1.language, l1.script) in compareLocaleKeys.default_map.keys():
            default = compareLocaleKeys.default_map[(l1.language, l1.script)]
            # The language's default country sorts to the front:
            if l1.country == default:
                return -1
            if l2.country == default:
                return 1

        if key1[1] != key2[1]:
            # Scripts differ; also consult l2's language/script default:
            if (l2.language, l2.script) in compareLocaleKeys.default_map.keys():
                default = compareLocaleKeys.default_map[(l2.language, l2.script)]
                if l2.country == default:
                    return 1
                if l1.country == default:
                    return -1

            # NOTE(review): this re-check is redundant inside the branch
            # above; preserved as-is.
            if key1[1] != key2[1]:
                return key1[1] - key2[1]
    else:
        return key1[0] - key2[0]

    return key1[2] - key2[2]
|
|
|
|
|
|
|
|
|
|
|
|
def languageCount(language_id, locale_map):
    """Count the locale_map keys whose language id is language_id."""
    return sum(1 for key in locale_map if key[0] == language_id)
|
|
|
|
|
|
|
|
def unicode2hex(s):
    """Return the UTF-16 code units of s, each as a hex string.

    Characters outside the BMP are expanded into surrogate pairs; the
    arithmetic mirrors qchar.h.
    """
    units = []
    for ch in s:
        code = ord(ch)
        if code > 0xFFFF:
            # make a surrogate pair
            # copied from qchar.h
            units.append(hex((code >> 10) + 0xd7c0))   # high surrogate
            units.append(hex(code % 0x400 + 0xdc00))   # low surrogate
        else:
            units.append(hex(code))
    return units
|
|
|
|
|
|
|
|
class StringDataToken:
    # Describes one sub-string of a StringData table for the generated C++
    # index entries: where the sub-string starts and how many UTF-16 code
    # units it spans.
    def __init__(self, index, length, bits):
        # index must fit the quint16 start-offset field; length must fit
        # in the given number of bits of the generated bit-field.
        if index > 0xffff:
            print "\n\n\n#error Data index is too big!", index
            raise ValueError("Start-index (%d) exceeds the uint16 range!" % index)
        if length >= (1 << bits):
            print "\n\n\n#error Range length is too big!", length
            raise ValueError("Data size (%d) exceeds the %d-bit range!" % (length, bits))

        self.index = index
        self.length = length
|
|
|
|
|
|
|
|
class StringData:
    """Accumulates distinct strings into one shared UTF-16 data table.

    Each appended string is stored once and identified by a
    StringDataToken recording its start index and length; where possible
    an existing occurrence of the same code-unit sequence is re-used
    instead of storing a fresh copy.  write() emits the table as a static
    C++ char16_t array named after the instance.
    """
    def __init__(self, name):
        self.data = []    # UTF-16 code units of all stored strings, as hex strings
        self.hash = {}    # each appended string -> its StringDataToken
        self.name = name  # C++ identifier for the generated array
        self.text = ''    # Used in quick-search for matches in data

    def append(self, s, bits=8):
        """Return a token for s, storing it on first encounter.

        bits is the width of the bit-field the length must fit into.
        """
        try:
            token = self.hash[s]
        except KeyError:
            token = self.__store(s, bits)
            self.hash[s] = token
        return token

    def __store(self, s, bits):
        """Add string s to known data.

        Seeks to avoid duplication, where possible.
        For example, short-forms may be prefixes of long-forms.
        """
        if not s:
            return StringDataToken(0, 0, bits)
        ucs2 = unicode2hex(s)
        try:
            # Quick check via self.text, then verify the code units match
            # at some actual offset in self.data:
            index = self.text.index(s) - 1
            matched = 0
            while matched < len(ucs2):
                index, matched = self.data.index(ucs2[0], index + 1), 1
                if index + len(ucs2) >= len(self.data):
                    raise ValueError # not found after all !
                while matched < len(ucs2) and self.data[index + matched] == ucs2[matched]:
                    matched += 1
        except ValueError:
            # No existing occurrence: append at the end.
            index = len(self.data)
            self.data += ucs2
            self.text += s

        assert index >= 0
        try:
            return StringDataToken(index, len(ucs2), bits)
        except ValueError as e:
            # Add context so the failure names the table and string:
            e.args += (self.name, s)
            raise

    def write(self, fd):
        """Emit the table as a static const char16_t array on fd.

        Raises ValueError when the table has outgrown what a quint16
        start-index can address.
        """
        if len(self.data) > 0xffff:
            # Bug fix: the original format string had no conversion
            # specifier, so the % operation raised TypeError instead of
            # this intended ValueError.
            raise ValueError("Data size (%d) is too big for quint16 index to its end!"
                             % len(self.data),
                             self.name)
        fd.write("\nstatic const char16_t %s[] = {\n" % self.name)
        fd.write(wrap_list(self.data))
        fd.write("\n};\n")
|
|
|
|
|
2011-04-27 10:05:43 +00:00
|
|
|
def escapedString(s):
    """Render s as C string-literal text, hex-escaping non-ASCII chars.

    Returns one or more double-quoted fragments (content longer than 80
    characters is broken across newline-separated fragments), with an
    explicit \\0 appended to terminate the string.
    """
    # Keep embedded double-quotes valid inside the literal:
    result = ""
    i = 0
    while i < len(s):
        if s[i] == '"':
            result += '\\"'
            i += 1
        else:
            result += s[i]
            i += 1
    s = result

    line = ""
    # need_escape is True when the previous character was emitted as a \x
    # escape.  C's \x consumes *every* following hex digit, so the next
    # character must also be escaped if it is any hex digit.
    need_escape = False
    result = ""
    for c in s:
        # Bug fix: the original only re-escaped a-f/A-F after an escape;
        # decimal digits 0-9 also extend a C \x escape and corrupted the
        # generated literal, so they are now escaped too.
        if ord(c) < 128 and not (need_escape and c.lower() in "0123456789abcdef"):
            line += c
            need_escape = False
        else:
            line += "\\x%02x" % (ord(c))
            need_escape = True
        if len(line) > 80:
            result = result + "\n" + '"' + line + '"'
            line = ""
    line += "\\0"
    result = result + "\n" + '"' + line + '"'
    if result[0] == "\n":
        result = result[1:]
    return result
|
|
|
|
|
|
|
|
def printEscapedString(s):
    """Print s in C string-literal form (see escapedString)."""
    print escapedString(s)
|
2011-04-27 10:05:43 +00:00
|
|
|
|
|
|
|
def currencyIsoCodeData(s):
    """Render a currency ISO code as a C++ brace-initializer.

    Lists the character values of s; an empty or missing code yields all
    zeros.
    """
    if not s:
        return "{0,0,0}"
    return '{' + ",".join(str(ord(ch)) for ch in s) + '}'
|
2011-04-27 10:05:43 +00:00
|
|
|
|
|
|
|
def usage():
    """Print a usage synopsis and exit with a failure status."""
    print "Usage: qlocalexml2cpp.py <path-to-locale.xml> <path-to-qtbase-src-tree>"
    sys.exit(1)
|
|
|
|
|
|
|
|
# Markers delimiting the machine-generated section of qlocale_data_p.h;
# main() preserves everything outside these lines when regenerating.
GENERATED_BLOCK_START = "// GENERATED PART STARTS HERE\n"
GENERATED_BLOCK_END = "// GENERATED PART ENDS HERE\n"
|
|
|
|
|
|
|
|
def main():
|
|
|
|
if len(sys.argv) != 3:
|
|
|
|
usage()
|
|
|
|
|
|
|
|
localexml = sys.argv[1]
|
|
|
|
qtsrcdir = sys.argv[2]
|
|
|
|
|
2017-05-31 19:42:11 +00:00
|
|
|
if not (os.path.isdir(qtsrcdir)
|
2019-10-23 14:37:22 +00:00
|
|
|
and all(os.path.isfile(os.path.join(qtsrcdir, 'src', 'corelib', 'text', leaf))
|
2017-05-31 19:42:11 +00:00
|
|
|
for leaf in ('qlocale_data_p.h', 'qlocale.h', 'qlocale.qdoc'))):
|
2011-04-27 10:05:43 +00:00
|
|
|
usage()
|
|
|
|
|
2020-01-09 19:36:58 +00:00
|
|
|
(data_temp_file, data_temp_file_path) = tempfile.mkstemp("qlocale_data_p.h", dir=qtsrcdir)
|
2011-04-27 10:05:43 +00:00
|
|
|
data_temp_file = os.fdopen(data_temp_file, "w")
|
2019-05-27 17:13:54 +00:00
|
|
|
qlocaledata_file = open(qtsrcdir + "/src/corelib/text/qlocale_data_p.h", "r")
|
2011-04-27 10:05:43 +00:00
|
|
|
s = qlocaledata_file.readline()
|
|
|
|
while s and s != GENERATED_BLOCK_START:
|
|
|
|
data_temp_file.write(s)
|
|
|
|
s = qlocaledata_file.readline()
|
|
|
|
data_temp_file.write(GENERATED_BLOCK_START)
|
|
|
|
|
|
|
|
doc = xml.dom.minidom.parse(localexml)
|
2017-06-08 10:30:22 +00:00
|
|
|
language_map = loadMap(doc, 'language')
|
|
|
|
script_map = loadMap(doc, 'script')
|
|
|
|
country_map = loadMap(doc, 'country')
|
2012-11-19 17:12:58 +00:00
|
|
|
likely_subtags_map = loadLikelySubtagsMap(doc)
|
|
|
|
default_map = {}
|
|
|
|
for key in likely_subtags_map.keys():
|
|
|
|
tmp = likely_subtags_map[key]
|
2012-11-21 04:08:24 +00:00
|
|
|
if tmp["from"][1] == "AnyScript" and tmp["from"][2] == "AnyCountry" and tmp["to"][2] != "AnyCountry":
|
|
|
|
default_map[(tmp["to"][0], tmp["to"][1])] = tmp["to"][2]
|
|
|
|
locale_map = loadLocaleMap(doc, language_map, script_map, country_map, likely_subtags_map)
|
2011-04-27 10:05:43 +00:00
|
|
|
dupes = findDupes(language_map, country_map)
|
|
|
|
|
2017-05-31 19:42:11 +00:00
|
|
|
cldr_version = firstChildText(doc.documentElement, "version")
|
2019-07-26 13:42:04 +00:00
|
|
|
data_temp_file.write(generated_template % (datetime.date.today(), cldr_version))
|
2011-04-27 10:05:43 +00:00
|
|
|
|
2012-11-21 04:08:24 +00:00
|
|
|
# Likely subtags map
|
|
|
|
data_temp_file.write("static const QLocaleId likely_subtags[] = {\n")
|
|
|
|
index = 0
|
|
|
|
for key in likely_subtags_map.keys():
|
|
|
|
tmp = likely_subtags_map[key]
|
|
|
|
from_language = languageNameToId(tmp["from"][0], language_map)
|
|
|
|
from_script = scriptNameToId(tmp["from"][1], script_map)
|
|
|
|
from_country = countryNameToId(tmp["from"][2], country_map)
|
|
|
|
to_language = languageNameToId(tmp["to"][0], language_map)
|
|
|
|
to_script = scriptNameToId(tmp["to"][1], script_map)
|
|
|
|
to_country = countryNameToId(tmp["to"][2], country_map)
|
|
|
|
|
|
|
|
cmnt_from = ""
|
|
|
|
if from_language != 0:
|
|
|
|
cmnt_from = cmnt_from + language_map[from_language][1]
|
|
|
|
else:
|
|
|
|
cmnt_from = cmnt_from + "und"
|
|
|
|
if from_script != 0:
|
|
|
|
if cmnt_from:
|
|
|
|
cmnt_from = cmnt_from + "_"
|
|
|
|
cmnt_from = cmnt_from + script_map[from_script][1]
|
|
|
|
if from_country != 0:
|
|
|
|
if cmnt_from:
|
|
|
|
cmnt_from = cmnt_from + "_"
|
|
|
|
cmnt_from = cmnt_from + country_map[from_country][1]
|
|
|
|
cmnt_to = ""
|
|
|
|
if to_language != 0:
|
|
|
|
cmnt_to = cmnt_to + language_map[to_language][1]
|
|
|
|
else:
|
2013-03-16 07:23:32 +00:00
|
|
|
cmnt_to = cmnt_to + "und"
|
2012-11-21 04:08:24 +00:00
|
|
|
if to_script != 0:
|
|
|
|
if cmnt_to:
|
|
|
|
cmnt_to = cmnt_to + "_"
|
|
|
|
cmnt_to = cmnt_to + script_map[to_script][1]
|
|
|
|
if to_country != 0:
|
|
|
|
if cmnt_to:
|
|
|
|
cmnt_to = cmnt_to + "_"
|
|
|
|
cmnt_to = cmnt_to + country_map[to_country][1]
|
|
|
|
|
|
|
|
data_temp_file.write(" ")
|
2020-01-09 19:36:58 +00:00
|
|
|
data_temp_file.write("{ %3d, %3d, %3d }, { %3d, %3d, %3d }" %
|
|
|
|
(from_language, from_script, from_country, to_language, to_script, to_country))
|
2012-11-21 04:08:24 +00:00
|
|
|
index += 1
|
|
|
|
if index != len(likely_subtags_map):
|
|
|
|
data_temp_file.write(",")
|
|
|
|
else:
|
|
|
|
data_temp_file.write(" ")
|
|
|
|
data_temp_file.write(" // %s -> %s\n" % (cmnt_from, cmnt_to))
|
|
|
|
data_temp_file.write("};\n")
|
|
|
|
|
|
|
|
data_temp_file.write("\n")
|
|
|
|
|
2011-04-27 10:05:43 +00:00
|
|
|
# Locale index
|
|
|
|
data_temp_file.write("static const quint16 locale_index[] = {\n")
|
|
|
|
index = 0
|
|
|
|
for key in language_map.keys():
|
|
|
|
i = 0
|
|
|
|
count = languageCount(key, locale_map)
|
|
|
|
if count > 0:
|
|
|
|
i = index
|
|
|
|
index += count
|
|
|
|
data_temp_file.write("%6d, // %s\n" % (i, language_map[key][0]))
|
|
|
|
data_temp_file.write(" 0 // trailing 0\n")
|
2017-05-31 19:42:11 +00:00
|
|
|
data_temp_file.write("};\n\n")
|
2011-04-27 10:05:43 +00:00
|
|
|
|
2017-05-31 14:17:54 +00:00
|
|
|
list_pattern_part_data = StringData('list_pattern_part_data')
|
2020-01-13 14:46:13 +00:00
|
|
|
single_character_data = StringData('single_character_data')
|
2017-05-31 14:17:54 +00:00
|
|
|
date_format_data = StringData('date_format_data')
|
|
|
|
time_format_data = StringData('time_format_data')
|
|
|
|
days_data = StringData('days_data')
|
|
|
|
am_data = StringData('am_data')
|
|
|
|
pm_data = StringData('pm_data')
|
Add byte-based units to CLDR data
Scan CLDR for {,kilo,mega,giga,tera,peta,exa}byte forms and their IEC
equivalents, providing SI and IEC defaults when missing (which all of
IEC are) in addition to the usual numeric data. Extrapolate from any
present data (e.g. French's ko, Mo, Go, To imply Po, Eo and, for IEC,
Kio, Mio, etc.), since CLDR only goes up to tera. Propagate this data
to QLocale's database ready for use by QLocale::formattedDataSize().
Change-Id: Ie6ee978948c68be9f71ab784a128cbfae3d80ee1
Reviewed-by: Shawn Rutledge <shawn.rutledge@qt.io>
2017-05-30 12:55:33 +00:00
|
|
|
byte_unit_data = StringData('byte_unit_data')
|
2017-05-31 14:17:54 +00:00
|
|
|
currency_symbol_data = StringData('currency_symbol_data')
|
|
|
|
currency_display_name_data = StringData('currency_display_name_data')
|
|
|
|
currency_format_data = StringData('currency_format_data')
|
|
|
|
endonyms_data = StringData('endonyms_data')
|
2011-04-27 10:05:43 +00:00
|
|
|
|
|
|
|
# Locale data
|
2012-03-26 16:23:21 +00:00
|
|
|
data_temp_file.write("static const QLocaleData locale_data[] = {\n")
|
2017-05-12 10:00:55 +00:00
|
|
|
# Table headings: keep each label centred in its field, matching line_format:
|
|
|
|
data_temp_file.write(' // '
|
|
|
|
# Width 6 + comma:
|
|
|
|
+ ' lang ' # IDs
|
|
|
|
+ 'script '
|
|
|
|
+ ' terr '
|
2020-01-09 13:48:21 +00:00
|
|
|
|
|
|
|
# Range entries (all start-indices, then all sizes):
|
|
|
|
# Width 5 + comma:
|
|
|
|
+ 'lStrt ' # List pattern
|
|
|
|
+ 'lpMid '
|
|
|
|
+ 'lpEnd '
|
|
|
|
+ 'lPair '
|
2020-01-13 14:46:13 +00:00
|
|
|
+ 'lDelm ' # List delimiter
|
|
|
|
# Representing numbers:
|
|
|
|
+ ' dec '
|
|
|
|
+ 'group '
|
|
|
|
+ 'prcnt '
|
|
|
|
+ ' zero '
|
|
|
|
+ 'minus '
|
|
|
|
+ 'plus '
|
|
|
|
+ ' exp '
|
|
|
|
# Quotation marks
|
|
|
|
+ 'qtOpn '
|
|
|
|
+ 'qtEnd '
|
|
|
|
+ 'altQO '
|
|
|
|
+ 'altQE '
|
2020-01-09 13:48:21 +00:00
|
|
|
+ 'lDFmt ' # Date format
|
|
|
|
+ 'sDFmt '
|
|
|
|
+ 'lTFmt ' # Time format
|
|
|
|
+ 'sTFmt '
|
|
|
|
+ 'slDay ' # Day names
|
|
|
|
+ 'lDays '
|
|
|
|
+ 'ssDys '
|
|
|
|
+ 'sDays '
|
|
|
|
+ 'snDay '
|
|
|
|
+ 'nDays '
|
|
|
|
+ ' am ' # am/pm indicators
|
|
|
|
+ ' pm '
|
|
|
|
+ ' byte '
|
|
|
|
+ 'siQnt '
|
|
|
|
+ 'iecQn '
|
|
|
|
+ 'crSym ' # Currency formatting:
|
|
|
|
+ 'crDsp '
|
|
|
|
+ 'crFmt '
|
|
|
|
+ 'crFNg '
|
|
|
|
+ 'ntLng ' # Name of language in itself, and of territory:
|
|
|
|
+ 'ntTer '
|
|
|
|
# Width 3 + comma for each size; no header
|
2020-01-13 14:46:13 +00:00
|
|
|
+ ' ' * 37
|
2020-01-09 13:48:21 +00:00
|
|
|
|
|
|
|
# Strays (char array, bit-fields):
|
2017-05-12 10:00:55 +00:00
|
|
|
# Width 8+4 + comma
|
|
|
|
+ ' currISO '
|
|
|
|
# Width 6 + comma:
|
2020-01-09 13:48:21 +00:00
|
|
|
+ 'curDgt ' # Currency digits
|
|
|
|
+ 'curRnd ' # Currencty rounding (unused: QTBUG-81343)
|
2017-05-12 10:00:55 +00:00
|
|
|
+ 'dow1st ' # First day of week
|
|
|
|
+ ' wknd+ ' # Week-end start/end days:
|
|
|
|
+ ' wknd-'
|
|
|
|
# No trailing space on last entry (be sure to
|
|
|
|
# pad before adding anything after it).
|
|
|
|
+ '\n')
|
2011-04-27 10:05:43 +00:00
|
|
|
|
|
|
|
locale_keys = locale_map.keys()
|
|
|
|
compareLocaleKeys.default_map = default_map
|
|
|
|
compareLocaleKeys.locale_map = locale_map
|
|
|
|
locale_keys.sort(compareLocaleKeys)
|
|
|
|
|
2017-05-12 10:00:55 +00:00
|
|
|
line_format = (' { '
|
|
|
|
# Locale-identifier:
|
|
|
|
+ '%6d,' * 3
|
2020-01-13 14:46:13 +00:00
|
|
|
# Offsets for starts of ranges:
|
|
|
|
+ '%5d,' * 37
|
2020-01-09 13:48:21 +00:00
|
|
|
# Sizes for the same:
|
2020-01-13 14:46:13 +00:00
|
|
|
+ '%3d,' * 37
|
2020-01-09 13:48:21 +00:00
|
|
|
|
2017-05-12 10:00:55 +00:00
|
|
|
# Currency ISO code:
|
|
|
|
+ ' %10s, '
|
|
|
|
# Currency formatting:
|
|
|
|
+ '%6d,%6d'
|
|
|
|
# Day of week and week-end:
|
|
|
|
+ ',%6d' * 3
|
|
|
|
+ ' }')
|
2011-04-27 10:05:43 +00:00
|
|
|
for key in locale_keys:
|
|
|
|
l = locale_map[key]
|
2020-01-09 13:48:21 +00:00
|
|
|
# Sequence of StringDataToken:
|
2020-01-13 14:46:13 +00:00
|
|
|
ranges = (tuple(list_pattern_part_data.append(p) for p in # 5 entries:
|
2020-01-09 13:48:21 +00:00
|
|
|
(l.listPatternPartStart, l.listPatternPartMiddle,
|
2020-01-13 14:46:13 +00:00
|
|
|
l.listPatternPartEnd, l.listPatternPartTwo, l.listDelim)) +
|
|
|
|
tuple(single_character_data.append(p) for p in # 11 entries
|
|
|
|
(l.decimal, l.group, l.percent, l.zero, l.minus, l.plus, l.exp,
|
|
|
|
l.quotationStart, l.quotationEnd,
|
|
|
|
l.alternateQuotationStart, l.alternateQuotationEnd)) +
|
2020-01-09 13:48:21 +00:00
|
|
|
tuple (date_format_data.append(f) for f in # 2 entries:
|
|
|
|
(l.longDateFormat, l.shortDateFormat)) +
|
|
|
|
tuple(time_format_data.append(f) for f in # 2 entries:
|
|
|
|
(l.longTimeFormat, l.shortTimeFormat)) +
|
|
|
|
tuple(days_data.append(d) for d in # 6 entries:
|
|
|
|
(l.standaloneLongDays, l.longDays,
|
|
|
|
l.standaloneShortDays, l.shortDays,
|
|
|
|
l.standaloneNarrowDays, l.narrowDays)) +
|
|
|
|
(am_data.append(l.am), pm_data.append(l.pm)) + # 2 entries:
|
|
|
|
tuple(byte_unit_data.append(b) for b in # 3 entries:
|
|
|
|
(l.byte_unit, l.byte_si_quantified, l.byte_iec_quantified)) +
|
|
|
|
(currency_symbol_data.append(l.currencySymbol),
|
|
|
|
currency_display_name_data.append(l.currencyDisplayName),
|
|
|
|
currency_format_data.append(l.currencyFormat),
|
|
|
|
currency_format_data.append(l.currencyNegativeFormat),
|
|
|
|
endonyms_data.append(l.languageEndonym),
|
|
|
|
endonyms_data.append(l.countryEndonym)) # 6 entries
|
2020-01-13 14:46:13 +00:00
|
|
|
) # Total: 37 entries
|
|
|
|
assert len(ranges) == 37
|
2020-01-09 13:48:21 +00:00
|
|
|
|
2017-05-12 10:00:55 +00:00
|
|
|
data_temp_file.write(line_format
|
2020-01-13 14:46:13 +00:00
|
|
|
% ((key[0], key[1], key[2]) +
|
2020-01-09 13:48:21 +00:00
|
|
|
tuple(r.index for r in ranges) +
|
|
|
|
tuple(r.length for r in ranges) +
|
|
|
|
(currencyIsoCodeData(l.currencyIsoCode),
|
2011-04-27 10:05:43 +00:00
|
|
|
l.currencyDigits,
|
2020-01-09 19:36:58 +00:00
|
|
|
l.currencyRounding, # unused (QTBUG-81343)
|
2011-04-27 10:05:43 +00:00
|
|
|
l.firstDayOfWeek,
|
|
|
|
l.weekendStart,
|
2020-01-09 13:48:21 +00:00
|
|
|
l.weekendEnd))
|
2017-05-12 10:00:55 +00:00
|
|
|
+ ", // %s/%s/%s\n" % (l.language, l.script, l.country))
|
|
|
|
data_temp_file.write(line_format # All zeros, matching the format:
|
2020-01-13 14:46:13 +00:00
|
|
|
% ( (0,) * 3 + (0,) * 37 * 2
|
2017-05-12 10:00:55 +00:00
|
|
|
+ (currencyIsoCodeData(0),)
|
2020-01-09 13:48:21 +00:00
|
|
|
+ (0,) * 2
|
|
|
|
+ (0,) * 3)
|
2020-01-09 19:36:58 +00:00
|
|
|
+ " // trailing zeros\n")
|
2011-04-27 10:05:43 +00:00
|
|
|
data_temp_file.write("};\n")
|
|
|
|
|
2017-05-31 14:17:54 +00:00
|
|
|
# StringData tables:
|
2020-01-13 14:46:13 +00:00
|
|
|
for data in (list_pattern_part_data, single_character_data,
|
|
|
|
date_format_data, time_format_data, days_data,
|
Add byte-based units to CLDR data
Scan CLDR for {,kilo,mega,giga,tera,peta,exa}byte forms and their IEC
equivalents, providing SI and IEC defaults when missing (which all of
IEC are) in addition to the usual numeric data. Extrapolate from any
present data (e.g. French's ko, Mo, Go, To imply Po, Eo and, for IEC,
Kio, Mio, etc.), since CLDR only goes up to tera. Propagate this data
to QLocale's database ready for use by QLocale::formattedDataSize().
Change-Id: Ie6ee978948c68be9f71ab784a128cbfae3d80ee1
Reviewed-by: Shawn Rutledge <shawn.rutledge@qt.io>
2017-05-30 12:55:33 +00:00
|
|
|
byte_unit_data, am_data, pm_data, currency_symbol_data,
|
2017-05-31 14:17:54 +00:00
|
|
|
currency_display_name_data, currency_format_data,
|
|
|
|
endonyms_data):
|
2017-01-14 16:53:31 +00:00
|
|
|
data.write(data_temp_file)
|
2011-04-27 10:05:43 +00:00
|
|
|
|
|
|
|
data_temp_file.write("\n")
|
|
|
|
|
|
|
|
# Language name list
|
|
|
|
data_temp_file.write("static const char language_name_list[] =\n")
|
2017-05-12 09:59:18 +00:00
|
|
|
data_temp_file.write('"Default\\0"\n')
|
2011-04-27 10:05:43 +00:00
|
|
|
for key in language_map.keys():
|
|
|
|
if key == 0:
|
|
|
|
continue
|
2017-05-12 09:59:18 +00:00
|
|
|
data_temp_file.write('"' + language_map[key][0] + '\\0"\n')
|
2011-04-27 10:05:43 +00:00
|
|
|
data_temp_file.write(";\n")
|
|
|
|
|
|
|
|
data_temp_file.write("\n")
|
|
|
|
|
|
|
|
# Language name index
|
|
|
|
data_temp_file.write("static const quint16 language_name_index[] = {\n")
|
|
|
|
data_temp_file.write(" 0, // AnyLanguage\n")
|
|
|
|
index = 8
|
|
|
|
for key in language_map.keys():
|
|
|
|
if key == 0:
|
|
|
|
continue
|
|
|
|
language = language_map[key][0]
|
|
|
|
data_temp_file.write("%6d, // %s\n" % (index, language))
|
|
|
|
index += len(language) + 1
|
|
|
|
data_temp_file.write("};\n")
|
|
|
|
|
|
|
|
data_temp_file.write("\n")
|
|
|
|
|
|
|
|
# Script name list
|
|
|
|
data_temp_file.write("static const char script_name_list[] =\n")
|
2017-05-12 09:59:18 +00:00
|
|
|
data_temp_file.write('"Default\\0"\n')
|
2011-04-27 10:05:43 +00:00
|
|
|
for key in script_map.keys():
|
|
|
|
if key == 0:
|
|
|
|
continue
|
2017-05-12 09:59:18 +00:00
|
|
|
data_temp_file.write('"' + script_map[key][0] + '\\0"\n')
|
2011-04-27 10:05:43 +00:00
|
|
|
data_temp_file.write(";\n")
|
|
|
|
|
|
|
|
data_temp_file.write("\n")
|
|
|
|
|
|
|
|
# Script name index
|
|
|
|
data_temp_file.write("static const quint16 script_name_index[] = {\n")
|
|
|
|
data_temp_file.write(" 0, // AnyScript\n")
|
|
|
|
index = 8
|
|
|
|
for key in script_map.keys():
|
|
|
|
if key == 0:
|
|
|
|
continue
|
|
|
|
script = script_map[key][0]
|
|
|
|
data_temp_file.write("%6d, // %s\n" % (index, script))
|
|
|
|
index += len(script) + 1
|
|
|
|
data_temp_file.write("};\n")
|
|
|
|
|
|
|
|
data_temp_file.write("\n")
|
|
|
|
|
|
|
|
# Country name list
|
|
|
|
data_temp_file.write("static const char country_name_list[] =\n")
|
2017-05-12 09:59:18 +00:00
|
|
|
data_temp_file.write('"Default\\0"\n')
|
2011-04-27 10:05:43 +00:00
|
|
|
for key in country_map.keys():
|
|
|
|
if key == 0:
|
|
|
|
continue
|
2017-05-12 09:59:18 +00:00
|
|
|
data_temp_file.write('"' + country_map[key][0] + '\\0"\n')
|
2011-04-27 10:05:43 +00:00
|
|
|
data_temp_file.write(";\n")
|
|
|
|
|
|
|
|
data_temp_file.write("\n")
|
|
|
|
|
|
|
|
# Country name index
|
|
|
|
data_temp_file.write("static const quint16 country_name_index[] = {\n")
|
|
|
|
data_temp_file.write(" 0, // AnyCountry\n")
|
|
|
|
index = 8
|
|
|
|
for key in country_map.keys():
|
|
|
|
if key == 0:
|
|
|
|
continue
|
|
|
|
country = country_map[key][0]
|
|
|
|
data_temp_file.write("%6d, // %s\n" % (index, country))
|
|
|
|
index += len(country) + 1
|
|
|
|
data_temp_file.write("};\n")
|
|
|
|
|
|
|
|
data_temp_file.write("\n")
|
|
|
|
|
|
|
|
# Language code list: each entry is the locale's 2- or 3-letter language
# code, padded with an escaped NUL to a fixed three bytes.
data_temp_file.write("static const unsigned char language_code_list[] =\n")
for key in language_map.keys():
    code = language_map[key][1]
    if len(code) == 2:
        code += r"\0"  # pad two-letter codes to three bytes
    data_temp_file.write('"%2s" // %s\n' % (code, language_map[key][0]))
data_temp_file.write(";\n\n")
|
|
|
|
|
|
|
|
# Script code list: each entry is the script's code, padded with escaped
# NULs up to four bytes.  Note: each "\\0" appended is the two characters
# backslash + zero in the generated C source, so the pad count is computed
# once from the code's original length.
data_temp_file.write("static const unsigned char script_code_list[] =\n")
for key in script_map.keys():
    code = script_map[key][1]
    for _ in range(4 - len(code)):
        code += "\\0"
    data_temp_file.write('"%2s" // %s\n' % (code, script_map[key][0]))
data_temp_file.write(";\n")
|
|
|
|
|
|
|
|
# Country code list: 2- or 3-letter codes, padded with an escaped NUL to
# three bytes, then the end-of-generated-block marker.
data_temp_file.write("static const unsigned char country_code_list[] =\n")
for key in country_map.keys():
    code = country_map[key][1]
    if len(code) == 2:
        code += "\\0"  # pad two-letter codes to three bytes
    data_temp_file.write('"%2s" // %s\n' % (code, country_map[key][0]))
data_temp_file.write(";\n\n")
data_temp_file.write(GENERATED_BLOCK_END)
|
|
|
|
# Skip the old generated block in the source header, then copy everything
# after it verbatim into the temp file.
s = qlocaledata_file.readline()
while s and s != GENERATED_BLOCK_END:
    s = qlocaledata_file.readline()
s = qlocaledata_file.readline()  # drop the end-marker line itself
while s:
    data_temp_file.write(s)
    s = qlocaledata_file.readline()
data_temp_file.close()
qlocaledata_file.close()

# Replace the old header with the freshly generated one.
os.remove(qtsrcdir + "/src/corelib/text/qlocale_data_p.h")
os.rename(data_temp_file_path, qtsrcdir + "/src/corelib/text/qlocale_data_p.h")
|
2011-04-27 10:05:43 +00:00
|
|
|
|
2017-01-14 16:53:31 +00:00
|
|
|
# Generate calendar data: for each supported calendar, rewrite the
# generated block of its q*calendar_data_p.h with a QCalendarLocale table
# (month-name string indices and lengths per locale) plus the shared
# months_data string table.
calendar_format = ' {%6d,%6d,%6d' + ',%5d' * 6 + ',%3d' * 6 + ' },'
for calendar, stem in calendars.items():
    months_data = StringData('months_data')
    calendar_data_file = "q%scalendar_data_p.h" % stem
    calendar_template_file = open(os.path.join(qtsrcdir, 'src', 'corelib', 'time',
                                               calendar_data_file), "r")
    (calendar_temp_file, calendar_temp_file_path) = tempfile.mkstemp(calendar_data_file, dir=qtsrcdir)
    calendar_temp_file = os.fdopen(calendar_temp_file, "w")
    # Copy the template up to (excluding) the start of the generated block:
    s = calendar_template_file.readline()
    while s and s != GENERATED_BLOCK_START:
        calendar_temp_file.write(s)
        s = calendar_template_file.readline()
    calendar_temp_file.write(GENERATED_BLOCK_START)
    calendar_temp_file.write(generated_template % (datetime.date.today(), cldr_version))
    calendar_temp_file.write("static const QCalendarLocale locale_data[] = {\n")
    calendar_temp_file.write(' // '
                             # IDs, width 7 (6 + comma)
                             + ' lang '
                             + ' script'
                             + ' terr '
                             # Month-name start-indices, width 6 (5 + comma):
                             + 'sLng '
                             + 'long '
                             + 'sSrt '
                             + 'shrt '
                             + 'sNrw '
                             + 'naro '
                             # No individual headers for the sizes.
                             + 'Sizes...'
                             + '\n')
    for key in locale_keys:
        l = locale_map[key]
        # Sequence of StringDataToken:
        try:
            # Twelve long month names can add up to more than 256 (e.g. kde_TZ: 264)
            ranges = (tuple(months_data.append(m[calendar], 16) for m in
                            (l.standaloneLongMonths, l.longMonths)) +
                      tuple(months_data.append(m[calendar]) for m in
                            (l.standaloneShortMonths, l.shortMonths,
                             l.standaloneNarrowMonths, l.narrowMonths)))
        except ValueError as e:
            # Annotate the error with which locale/calendar failed:
            e.args += (l.language, l.script, l.country, stem)
            raise
        calendar_temp_file.write(
            calendar_format
            % ((key[0], key[1], key[2]) +
               tuple(r.index for r in ranges) +
               tuple(r.length for r in ranges))
            + "// %s/%s/%s\n" % (l.language, l.script, l.country))
    # All-zero sentinel row terminating the table:
    calendar_temp_file.write(calendar_format % ( (0,) * (3 + 6 * 2) )
                             + '// trailing zeros\n')
    calendar_temp_file.write("};\n")
    months_data.write(calendar_temp_file)
    # Skip the old generated block; the tail (starting with the end-marker
    # line itself) is copied verbatim:
    s = calendar_template_file.readline()
    while s and s != GENERATED_BLOCK_END:
        s = calendar_template_file.readline()
    while s:
        calendar_temp_file.write(s)
        s = calendar_template_file.readline()
    # Bug fix: close both files before renaming.  Previously neither was
    # closed, so the buffered tail of the temp file could still be
    # unflushed when it was renamed into place, and both handles leaked
    # on every loop iteration.
    calendar_temp_file.close()
    calendar_template_file.close()
    os.rename(calendar_temp_file_path,
              os.path.join(qtsrcdir, 'src', 'corelib', 'time', calendar_data_file))
|
|
|
|
|
2011-04-27 10:05:43 +00:00
|
|
|
# qlocale.h: regenerate the Language/Script/Country enums in place.
(qlocaleh_temp_file, qlocaleh_temp_file_path) = tempfile.mkstemp("qlocale.h", dir=qtsrcdir)
qlocaleh_temp_file = os.fdopen(qlocaleh_temp_file, "w")
qlocaleh_file = open(qtsrcdir + "/src/corelib/text/qlocale.h", "r")

# Copy the header up to (excluding) the generated block, then open a
# fresh generated block:
s = qlocaleh_file.readline()
while s and s != GENERATED_BLOCK_START:
    qlocaleh_temp_file.write(s)
    s = qlocaleh_file.readline()
qlocaleh_temp_file.write(GENERATED_BLOCK_START)
qlocaleh_temp_file.write("// see qlocale_data_p.h for more info on generated data\n")
|
|
|
|
|
|
|
|
# Language enum
|
|
|
|
qlocaleh_temp_file.write(" enum Language {\n")
|
2018-08-13 12:32:18 +00:00
|
|
|
language = None
|
|
|
|
for key, value in language_map.items():
|
|
|
|
language = fixedLanguageName(value[0], dupes)
|
2011-04-27 10:05:43 +00:00
|
|
|
qlocaleh_temp_file.write(" " + language + " = " + str(key) + ",\n")
|
2018-08-13 12:32:18 +00:00
|
|
|
|
|
|
|
qlocaleh_temp_file.write("\n " +
|
|
|
|
",\n ".join('%s = %s' % pair
|
|
|
|
for pair in sorted(language_aliases.items())) +
|
|
|
|
",\n")
|
2016-03-19 19:51:57 +00:00
|
|
|
qlocaleh_temp_file.write("\n")
|
2011-04-27 10:05:43 +00:00
|
|
|
qlocaleh_temp_file.write(" LastLanguage = " + language + "\n")
|
2020-01-09 19:36:58 +00:00
|
|
|
qlocaleh_temp_file.write(" };\n\n")
|
2011-04-27 10:05:43 +00:00
|
|
|
|
|
|
|
# Script enum
|
|
|
|
qlocaleh_temp_file.write(" enum Script {\n")
|
2018-08-13 12:32:18 +00:00
|
|
|
script = None
|
|
|
|
for key, value in script_map.items():
|
|
|
|
script = fixedScriptName(value[0], dupes)
|
2011-04-27 10:05:43 +00:00
|
|
|
qlocaleh_temp_file.write(" " + script + " = " + str(key) + ",\n")
|
2018-08-13 12:32:18 +00:00
|
|
|
qlocaleh_temp_file.write("\n " +
|
|
|
|
",\n ".join('%s = %s' % pair
|
|
|
|
for pair in sorted(script_aliases.items())) +
|
|
|
|
",\n")
|
2016-03-19 19:51:57 +00:00
|
|
|
qlocaleh_temp_file.write("\n")
|
2011-04-27 10:05:43 +00:00
|
|
|
qlocaleh_temp_file.write(" LastScript = " + script + "\n")
|
2020-01-09 19:36:58 +00:00
|
|
|
qlocaleh_temp_file.write(" };\n\n")
|
2011-04-27 10:05:43 +00:00
|
|
|
|
|
|
|
# Country enum
|
|
|
|
qlocaleh_temp_file.write(" enum Country {\n")
|
2018-08-13 12:32:18 +00:00
|
|
|
country = None
|
|
|
|
for key, value in country_map.items():
|
|
|
|
country = fixedCountryName(value[0], dupes)
|
2011-04-27 10:05:43 +00:00
|
|
|
qlocaleh_temp_file.write(" " + country + " = " + str(key) + ",\n")
|
2018-08-13 12:32:18 +00:00
|
|
|
qlocaleh_temp_file.write("\n " +
|
|
|
|
",\n ".join('%s = %s' % pair
|
|
|
|
for pair in sorted(country_aliases.items())) +
|
|
|
|
",\n")
|
2016-03-19 19:51:57 +00:00
|
|
|
qlocaleh_temp_file.write("\n")
|
2011-04-27 10:05:43 +00:00
|
|
|
qlocaleh_temp_file.write(" LastCountry = " + country + "\n")
|
|
|
|
qlocaleh_temp_file.write(" };\n")
|
|
|
|
|
|
|
|
qlocaleh_temp_file.write(GENERATED_BLOCK_END)
|
|
|
|
# Skip the remainder of the old generated block, copy the tail verbatim,
# then install the regenerated header over the original.
s = qlocaleh_file.readline()
while s and s != GENERATED_BLOCK_END:
    s = qlocaleh_file.readline()
s = qlocaleh_file.readline()  # drop the end-marker line itself
while s:
    qlocaleh_temp_file.write(s)
    s = qlocaleh_file.readline()
qlocaleh_temp_file.close()
qlocaleh_file.close()

os.remove(qtsrcdir + "/src/corelib/text/qlocale.h")
os.rename(qlocaleh_temp_file_path, qtsrcdir + "/src/corelib/text/qlocale.h")
|
2011-04-27 10:05:43 +00:00
|
|
|
|
|
|
|
# qlocale.qdoc: refresh the CLDR version number quoted in the docs.
(qlocaleqdoc_temp_file, qlocaleqdoc_temp_file_path) = tempfile.mkstemp("qlocale.qdoc", dir=qtsrcdir)
qlocaleqdoc_temp_file = os.fdopen(qlocaleqdoc_temp_file, "w")
qlocaleqdoc_file = open(qtsrcdir + "/src/corelib/text/qlocale.qdoc", "r")
DOCSTRING = " QLocale's data is based on Common Locale Data Repository "
s = qlocaleqdoc_file.readline()
while s:
    if DOCSTRING in s:
        # Rewrite the single line that names the CLDR version:
        qlocaleqdoc_temp_file.write(DOCSTRING + "v" + cldr_version + ".\n")
    else:
        qlocaleqdoc_temp_file.write(s)
    s = qlocaleqdoc_file.readline()
qlocaleqdoc_temp_file.close()
qlocaleqdoc_file.close()

os.remove(qtsrcdir + "/src/corelib/text/qlocale.qdoc")
os.rename(qlocaleqdoc_temp_file_path, qtsrcdir + "/src/corelib/text/qlocale.qdoc")
|
2011-04-27 10:05:43 +00:00
|
|
|
|
|
|
|
# Script entry point: regenerate the locale-data headers in-place.
if __name__ == "__main__":
    main()
|