aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorAndroid Chromium Automerger <chromium-automerger@android>2014-04-03 15:03:02 +0000
committerAndroid Chromium Automerger <chromium-automerger@android>2014-04-03 15:03:02 +0000
commit7bb353b32e503652cab6087a93042bbc7ab2f4b6 (patch)
tree771ef406967cb68d25d99fd1ab4ea550e027928f
parent4cebc3a557e2390264e09fb4c63337a52f89f54a (diff)
parentbf1b697651f655172662c9defd04e323089477d2 (diff)
downloadgrit-7bb353b32e503652cab6087a93042bbc7ab2f4b6.tar.gz
Merge tools/grit from https://chromium.googlesource.com/external/grit-i18n.git at bf1b697651f655172662c9defd04e323089477d2
This commit was generated by merge_from_chromium.py. Change-Id: I4844601ec276955ae8ca32d4e4261c29cc4ba22c
-rwxr-xr-xgrit/format/data_pack.py113
-rw-r--r--grit/format/data_pack_unittest.py29
-rwxr-xr-xgrit/format/repack.py22
3 files changed, 119 insertions, 45 deletions
diff --git a/grit/format/data_pack.py b/grit/format/data_pack.py
index 0cdbbd8..779a862 100755
--- a/grit/format/data_pack.py
+++ b/grit/format/data_pack.py
@@ -3,9 +3,9 @@
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
-'''Support for formatting a data pack file used for platform agnostic resource
+"""Support for formatting a data pack file used for platform agnostic resource
files.
-'''
+"""
import collections
import exceptions
@@ -19,7 +19,6 @@ from grit import util
from grit.node import include
from grit.node import message
from grit.node import structure
-from grit.node import misc
PACK_FILE_VERSION = 4
@@ -37,7 +36,7 @@ DataPackContents = collections.namedtuple(
def Format(root, lang='en', output_dir='.'):
- '''Writes out the data pack file format (platform agnostic resource file).'''
+ """Writes out the data pack file format (platform agnostic resource file)."""
data = {}
for node in root.ActiveDescendants():
with node:
@@ -55,9 +54,9 @@ def ReadDataPack(input_file):
original_data = data
# Read the header.
- version, num_entries, encoding = struct.unpack("<IIB", data[:HEADER_LENGTH])
+ version, num_entries, encoding = struct.unpack('<IIB', data[:HEADER_LENGTH])
if version != PACK_FILE_VERSION:
- print "Wrong file version in ", input_file
+ print 'Wrong file version in ', input_file
raise WrongFileVersion
resources = {}
@@ -68,22 +67,21 @@ def ReadDataPack(input_file):
data = data[HEADER_LENGTH:]
kIndexEntrySize = 2 + 4 # Each entry is a uint16 and a uint32.
for _ in range(num_entries):
- id, offset = struct.unpack("<HI", data[:kIndexEntrySize])
+ id, offset = struct.unpack('<HI', data[:kIndexEntrySize])
data = data[kIndexEntrySize:]
- next_id, next_offset = struct.unpack("<HI", data[:kIndexEntrySize])
+ next_id, next_offset = struct.unpack('<HI', data[:kIndexEntrySize])
resources[id] = original_data[offset:next_offset]
return DataPackContents(resources, encoding)
def WriteDataPackToString(resources, encoding):
- """Write a map of id=>data into a string in the data pack format and return
- it."""
+ """Returns a string with a map of id=>data in the data pack format."""
ids = sorted(resources.keys())
ret = []
# Write file header.
- ret.append(struct.pack("<IIB", PACK_FILE_VERSION, len(ids), encoding))
+ ret.append(struct.pack('<IIB', PACK_FILE_VERSION, len(ids), encoding))
HEADER_LENGTH = 2 * 4 + 1 # Two uint32s and one uint8.
# Each entry is a uint16 + a uint32s. We have one extra entry for the last
@@ -93,10 +91,10 @@ def WriteDataPackToString(resources, encoding):
# Write index.
data_offset = HEADER_LENGTH + index_length
for id in ids:
- ret.append(struct.pack("<HI", id, data_offset))
+ ret.append(struct.pack('<HI', id, data_offset))
data_offset += len(resources[id])
- ret.append(struct.pack("<HI", 0, data_offset))
+ ret.append(struct.pack('<HI', 0, data_offset))
# Write data.
for id in ids:
@@ -105,39 +103,78 @@ def WriteDataPackToString(resources, encoding):
def WriteDataPack(resources, output_file, encoding):
- """Write a map of id=>data into output_file as a data pack."""
+ """Writes a map of id=>data into output_file as a data pack."""
content = WriteDataPackToString(resources, encoding)
- with open(output_file, "wb") as file:
+ with open(output_file, 'wb') as file:
file.write(content)
-def RePack(output_file, input_files):
- """Write a new data pack to |output_file| based on a list of filenames
- (|input_files|)"""
+def RePack(output_file, input_files, whitelist_file=None):
+ """Write a new data pack file by combining input pack files.
+
+ Args:
+ output_file: path to the new data pack file.
+ input_files: a list of paths to the data pack files to combine.
+ whitelist_file: path to the file that contains the list of resource IDs
+ that should be kept in the output file or None to include
+ all resources.
+
+ Raises:
+ KeyError: if there are duplicate keys or resource encoding is
+ inconsistent.
+ """
+ input_data_packs = [ReadDataPack(filename) for filename in input_files]
+ whitelist = None
+ if whitelist_file:
+ whitelist = util.ReadFile(whitelist_file, util.RAW_TEXT).strip().split('\n')
+ whitelist = map(int, whitelist)
+ resources, encoding = RePackFromDataPackStrings(input_data_packs, whitelist)
+ WriteDataPack(resources, output_file, encoding)
+
+
+def RePackFromDataPackStrings(inputs, whitelist):
+ """Returns a data pack string that combines the resources from inputs.
+
+ Args:
+ inputs: a list of data pack strings that need to be combined.
+  whitelist: a list of resource IDs that should be kept in the output string
+ or None to include all resources.
+
+ Returns:
+ DataPackContents: a tuple containing the new combined data pack and its
+ encoding.
+
+ Raises:
+ KeyError: if there are duplicate keys or resource encoding is
+ inconsistent.
+ """
resources = {}
encoding = None
- for filename in input_files:
- new_content = ReadDataPack(filename)
-
+ for content in inputs:
# Make sure we have no dups.
- duplicate_keys = set(new_content.resources.keys()) & set(resources.keys())
- if len(duplicate_keys) != 0:
- raise exceptions.KeyError("Duplicate keys: " + str(list(duplicate_keys)))
+ duplicate_keys = set(content.resources.keys()) & set(resources.keys())
+ if duplicate_keys:
+ raise exceptions.KeyError('Duplicate keys: ' + str(list(duplicate_keys)))
# Make sure encoding is consistent.
if encoding in (None, BINARY):
- encoding = new_content.encoding
- elif new_content.encoding not in (BINARY, encoding):
- raise exceptions.KeyError("Inconsistent encodings: " +
- str(encoding) + " vs " +
- str(new_content.encoding))
-
- resources.update(new_content.resources)
+ encoding = content.encoding
+ elif content.encoding not in (BINARY, encoding):
+ raise exceptions.KeyError('Inconsistent encodings: ' + str(encoding) +
+ ' vs ' + str(content.encoding))
+
+ if whitelist:
+ whitelisted_resources = dict([(key, content.resources[key])
+ for key in content.resources.keys()
+ if key in whitelist])
+ resources.update(whitelisted_resources)
+ else:
+ resources.update(content.resources)
# Encoding is 0 for BINARY, 1 for UTF8 and 2 for UTF16
if encoding is None:
encoding = BINARY
- WriteDataPack(resources, output_file, encoding)
+ return DataPackContents(resources, encoding)
# Temporary hack for external programs that import data_pack.
@@ -157,14 +194,14 @@ def main():
data = ReadDataPack(sys.argv[1])
print data.encoding
for (resource_id, text) in data.resources.iteritems():
- print "%s: %s" % (resource_id, text)
+ print '%s: %s' % (resource_id, text)
else:
# Just write a simple file.
- data = { 1: "", 4: "this is id 4", 6: "this is id 6", 10: "" }
- WriteDataPack(data, "datapack1.pak", UTF8)
- data2 = { 1000: "test", 5: "five" }
- WriteDataPack(data2, "datapack2.pak", UTF8)
- print "wrote datapack1 and datapack2 to current directory."
+ data = {1: '', 4: 'this is id 4', 6: 'this is id 6', 10: ''}
+ WriteDataPack(data, 'datapack1.pak', UTF8)
+ data2 = {1000: 'test', 5: 'five'}
+ WriteDataPack(data2, 'datapack2.pak', UTF8)
+ print 'wrote datapack1 and datapack2 to current directory.'
if __name__ == '__main__':
diff --git a/grit/format/data_pack_unittest.py b/grit/format/data_pack_unittest.py
index d210c99..f6e9edc 100644
--- a/grit/format/data_pack_unittest.py
+++ b/grit/format/data_pack_unittest.py
@@ -28,10 +28,37 @@ class FormatDataPackUnittest(unittest.TestCase):
'\x0a\x00\x3f\x00\x00\x00' # index entry 10
'\x00\x00\x3f\x00\x00\x00' # extra entry for the size of last
'this is id 4this is id 6') # data
- input = { 1: "", 4: "this is id 4", 6: "this is id 6", 10: "" }
+ input = {1: '', 4: 'this is id 4', 6: 'this is id 6', 10: ''}
output = data_pack.WriteDataPackToString(input, data_pack.UTF8)
self.failUnless(output == expected)
+ def testRePackUnittest(self):
+ expected_with_whitelist = {
+ 1: 'Never gonna', 10: 'give you up', 20: 'Never gonna let',
+ 30: 'you down', 40: 'Never', 50: 'gonna run around and',
+ 60: 'desert you'}
+ expected_without_whitelist = {
+ 1: 'Never gonna', 10: 'give you up', 20: 'Never gonna let', 65: 'Close',
+ 30: 'you down', 40: 'Never', 50: 'gonna run around and', 4: 'click',
+ 60: 'desert you', 6: 'chirr', 32: 'oops, try again', 70: 'Awww, snap!'}
+ inputs = [{1: 'Never gonna', 4: 'click', 6: 'chirr', 10: 'give you up'},
+ {20: 'Never gonna let', 30: 'you down', 32: 'oops, try again'},
+ {40: 'Never', 50: 'gonna run around and', 60: 'desert you'},
+ {65: 'Close', 70: 'Awww, snap!'}]
+ whitelist = [1, 10, 20, 30, 40, 50, 60]
+ inputs = [data_pack.DataPackContents(input, data_pack.UTF8) for input
+ in inputs]
+
+ # RePack using whitelist
+ output, _ = data_pack.RePackFromDataPackStrings(inputs, whitelist)
+ self.assertDictEqual(expected_with_whitelist, output,
+ 'Incorrect resource output')
+
+ # RePack a None whitelist
+ output, _ = data_pack.RePackFromDataPackStrings(inputs, None)
+ self.assertDictEqual(expected_without_whitelist, output,
+ 'Incorrect resource output')
+
if __name__ == '__main__':
unittest.main()
diff --git a/grit/format/repack.py b/grit/format/repack.py
index e42acdb..337b7af 100755
--- a/grit/format/repack.py
+++ b/grit/format/repack.py
@@ -9,19 +9,29 @@ http://dev.chromium.org/developers/design-documents/linuxresourcesandlocalizedst
for details about the file format.
"""
+import optparse
import os
import sys
+
if __name__ == '__main__':
sys.path.append(os.path.join(os.path.dirname(__file__), '../..'))
import grit.format.data_pack
+
def main(argv):
- if len(argv) < 3:
- print ("Usage:\n %s <output_filename> <input_file1> [input_file2] ... " %
- argv[0])
- sys.exit(-1)
- grit.format.data_pack.RePack(argv[1], argv[2:])
+  parser = optparse.OptionParser('usage: %prog [options] <output_filename> '
+                                 '<input_file1> [input_file2] ...')
+ parser.add_option('--whitelist', action='store', dest='whitelist',
+                    default=None, help='Full path to the whitelist used to '
+                    'filter output pak file resource IDs')
+ options, file_paths = parser.parse_args(argv)
+
+ if len(file_paths) < 2:
+ parser.error('Please specify output and at least one input filenames')
+
+ grit.format.data_pack.RePack(file_paths[0], file_paths[1:],
+ whitelist_file=options.whitelist)
if '__main__' == __name__:
- main(sys.argv)
+ main(sys.argv[1:])