Delete bpt

This project hasn't been in our manifest in a long time, and
the only usage of bpttool we had was deleted in aosp/2654584.

A partner was asking about this repository, so delete it just to clear
up any confusion.

The tool is also written in python 2, which we want to remove.

Test: Presubmits
Change-Id: Ibb4d38b3e0661f787096d07924f929174cddd41b
diff --git a/Android.mk b/Android.mk
deleted file mode 100644
index f5b8506..0000000
--- a/Android.mk
+++ /dev/null
@@ -1,29 +0,0 @@
-#
-# Copyright (C) 2016 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-LOCAL_PATH := $(my-dir)
-
-include $(CLEAR_VARS)
-LOCAL_SRC_FILES := bpttool
-LOCAL_MODULE_CLASS := EXECUTABLES
-LOCAL_IS_HOST_MODULE := true
-LOCAL_MODULE := bpttool
-include $(BUILD_PREBUILT)
-
-include $(CLEAR_VARS)
-LOCAL_SRC_FILES := bpt_test
-LOCAL_MODULE_TAGS := debug
-include $(BUILD_HOST_PREBUILT)
diff --git a/README b/README
deleted file mode 100644
index 67c0b43..0000000
--- a/README
+++ /dev/null
@@ -1,253 +0,0 @@
-This directory contains a tool for partitioning disk images for Brillo
-and Android.
-
--- FILES AND DIRECTORIES
-
- Android.mk
-
-   Build instructions.
-
- bpttool
-
-   A tool written in Python for partioning disk images.
-
- bpttool_test
-
-   Unit tests for bpttool.
-
- test/
-
-   Contains test data used in unit tests.
-
--- PARTITION DEFINITION FILES
-
-Partitioning directives are expressed in JSON and the file extension
-".bpt" is typically used. A typical .bpt-file looks like this
-
-  {
-      "settings": {
-          "disk_size": "4 GiB"
-      },
-      "partitions": [
-          {
-              "ab": true,
-              "label": "boot",
-              "size": "32 MiB",
-              "guid": "auto",
-              "type_guid": "brillo_boot"
-          },
-          {
-              "ab": true,
-              "label": "system",
-              "size": "512 MiB",
-              "guid": "auto",
-              "type_guid": "brillo_system"
-          },
-          [...]
-          {
-              "label": "userdata",
-              "grow": true,
-              "guid": "auto",
-              "type_guid": "brillo_userdata"
-          }
-      ]
-  }
-
-The two root objects are "settings" and "partitions". Well-known
-key/value-pairs for "settings" are
-
- ab_suffixes:    List of suffixes to use for A/B partitions, for example
-                 ["-A", "-B"] or ["0", "1", "2"]. Default is ["_a",
-                 "_b"].
-
- disk_size:      The size of the target disk to use, in bytes. This has
-                 no default value.
-
- partitions_offset_begin :
-                 The size of the disk partitions offset begin, in bytes.
-                 Default is 0.
-
- disk_alignment: The alignment, in bytes, to use when laying out
-                 partitions. Default is 4096.
-
- disk_guid:      The GUID to use for the disk or 'auto' to make bpttool
-                 generate it. The default is 'auto'.
-
-Each of these settings can be overriden or set using command-line
-options in bpttool.
-
-The "partitions" object is an array of objects with the following
-well-known key/value pairs:
-
- label:       The label of the partition. This must be able to fit in 36
-              UTF-16 code-points.
-
- offset:      Offset of the partition, in bytes. This will always be
-              re-calculated when using bpttool.
-
- size:        Size of the partition, in bytes.
-
- grow:        If 'true', the partition will be grown to use available free
-              space. Default value is 'false'.
-
- guid:        The GPT instance GUID. Default value is 'auto'.
-
- type_guid:   The GPT type GUID. A RFC-4122 style GUID is accepted as
-              are the following special values:
-
-                brillo_boot
-                brillo_bootloader
-                brillo_system
-                brillo_odm
-                brillo_oem
-                brillo_userdata
-                brillo_misc
-                brillo_vbmeta
-                brillo_vendor_specific
-                linux_fs
-                ms_basic_data
-
-              for well-known GPT partition GUIDs. If unset, the value
-              'brillo_vendor_specific' is used.
-
- flags:       A 64-bit integer (decimal or hexadecimal representations are
-              accepted) for GPT flags. Default value is 0.
-
- persist:     If 'true', the parition will be marked persistant. This will
-              set bit 0 of flags to 1. This will OR with the value of
-              'flags'.
-
- ignore:      If 'true', the partition will not be included in the final
-              output.
-
- ab:          Set to 'true' if the partition should be expanded for A/B.
-              Default value is 'false'.
-
- ab_expanded: Set to 'true' only if this partition has already been
-              expanded for A/B. Default value is 'false'.
-
- position:    The position for the partition, used to determine the sequence
-              of partitions (and, indirectly, partition numbers) or 0 if
-              position doesn't matter. Partitions with low 'position'
-              numbers will be laid out before partitions with high
-              'position' numbers. Default value is 0.
-
-For key/value-pairs involving sizes, either integers can be used, e.g.
-
- "size": 1048576
-
-means 1 mebibyte. Strings with base-10 units (kB, MB, GB, TB, PB) and
-base-2 units (KiB, MiB, GiB, TiB, PiB) are also supported. For
-example:
-
- "size": "1 MiB"
-
-means 1,048,576 bytes and
-
- "size": "1 MB"
-
-means 1,000,000 bytes.
-
-If a partition size is not a multiple of the disk sector size, it will
-be rounded up and if a disk size is not a multiple, it will be rounded
-down.
-
-The bpttool program reads one or more of .bpt-files and the order
-matters. Partitions are identified by label and if a partition has
-already been mentioned in a previous file, directives in the latter
-file will override the existing entry. This allows for setups where
-e.g. a file with the following content
-
-  {
-      "partitions": [
-          {
-              "label": "system",
-              "size": "128 MiB"
-          }
-      ]
-  }
-
-can be used on top to specify that system partitions should be 128 MiB
-instead of 512 MiB. Similarly, a file with the content
-
-  {
-      "partitions": [
-          {
-              "label": "my_app_data",
-              "size": "512 MiB",
-              "type_guid": "linux_fs"
-          }
-      ]
-  }
-
-can be used on top to have a new 512 MiB partition "my_app_data" in
-addition to existing partitions. Notably the "userdata" data partition
-will be shrunk in order to make room for the newly added partition.
-
-Additionally, 'bpttool make_table' generates a .bpt-file - in addition
-to the binary GPT partition tables - that is complete in the sense
-that it can be used as input to generate the same output without
-additional command-line options.
-
-Also, expanded A/B partitions in input files are folded and then
-expanded again. This allows for setups as the following:
-
- $ bpttool make_table \
-    --input prev_output.bpt \
-    --ab_suffixes "-A,-B,-C" \
-    --output_json new_output.bpt
-    [...]
-
-where if prev_output.bpt contained the partitions "system_a",
-"system_b" (for the default A/B suffixes) then new_output.bpt would
-contain partitions "system-A", "system-B", and "system-C".
-
--- DISK IMAGE GENERATION
-
-Disk images may be created given an unfolded .bpt file. 'bpttool
-make_disk_image' generates the output disk image file.
-
-To generate a disk image, use the following subcommand:
-
-  $ bpttool make_disk_image \
-      --output disk-image.bin \
-      --input /path/to/bpt-file.bpt \
-      --image system_a:/path/to/system.img \
-      --image boot_a:/path/to/boot.img \
-      [...]
-
-where the 'output' argument specifies the name and location of the outputted
-disk image and the 'input' argument is the .bpt file containing valid labels and
-offsets for each partition.  The 'image' argument specifies a mapping  from
-partition name/label to the path of the corresponding image partition image.
-All partitions specified in the .bpt file must be passed in via the 'image'
-argument.
-
-Typically, each of the 'image' argument files are located in the
-ANDROID_PRODUCT_OUT directory after a build is complete.
-
--- BUILD SYSTEM INTEGRATION NOTES
-
-To generate partition tables in the Android build system, simply add
-the path to a .bpt file to the BOARD_BPT_INPUT_FILES variable.
-
- BOARD_BPT_INPUT_FILES += "hardware/bsp/vendor/soc/board/board-specific.bpt"
-
-The variable BOARD_BPT_DISK_SIZE can be used to specify or override
-the disk size, for example:
-
- BOARD_BPT_DISK_SIZE := "10 GiB"
-
-Additional arguments to 'bpttool make_table' can be specified in the
-variable BOARD_BPT_MAKE_TABLE_ARGS.
-
-If BOARD_BPT_INPUT_FILES is set, the build system generates two files
-
- partition-table.img
- partition-table.bpt
-
-in ${ANDROID_PRODUCT_OUT} using 'bpttool make_table'. The former is
-the binary partition tables generated using bptool's --output_gpt
-option and the latter is a JSON file generated using the --output_json
-option. These files will also be put in the IMAGES/ directory of
-target-files.zip when running 'm dist'.
diff --git a/bpt_test b/bpt_test
deleted file mode 100755
index f79146b..0000000
--- a/bpt_test
+++ /dev/null
@@ -1,21 +0,0 @@
-#!/usr/bin/python
-
-# Copyright 2016, The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from bpt_unittest import *
-import unittest
-
-if __name__ == '__main__':
-  unittest.main()
diff --git a/bpt_unittest.py b/bpt_unittest.py
deleted file mode 100755
index 3266a7e..0000000
--- a/bpt_unittest.py
+++ /dev/null
@@ -1,705 +0,0 @@
-#!/usr/bin/python
-
-# Copyright 2016, The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-"""Unit-test for bpttool."""
-
-
-import imp
-import json
-import os
-import sys
-import tempfile
-import unittest
-
-sys.dont_write_bytecode = True
-bpttool = imp.load_source('bpttool', './bpttool')
-
-
-class FakeGuidGenerator(object):
-  """A GUID generator that dispenses predictable GUIDs.
-
-  This is used in place of the usual GUID generator in bpttool which
-  is generating random GUIDs as per RFC 4122.
-  """
-
-  def dispense_guid(self, partition_number):
-    """Dispenses a new GUID."""
-
-    uuid = '01234567-89ab-cdef-0123-%012x' % partition_number
-    return uuid
-
-class PatternPartition(object):
-  """A partition image file containing a predictable pattern.
-
-  This holds file data about a partition image file for binary pattern.
-  testing.
-  """
-  def __init__(self, char='', file=None, partition_name=None, obj=None):
-    self.char = char
-    self.file = file
-    self.partition_name = partition_name
-    self.obj = obj
-
-class RoundToMultipleTest(unittest.TestCase):
-  """Unit tests for the RoundToMultiple() function."""
-
-  def testRoundUp(self):
-    """Checks that we round up correctly."""
-    self.assertEqual(bpttool.RoundToMultiple(100, 10), 100)
-    self.assertEqual(bpttool.RoundToMultiple(189, 10), 190)
-    self.assertEqual(bpttool.RoundToMultiple(190, 10), 190)
-    self.assertEqual(bpttool.RoundToMultiple(191, 10), 200)
-    self.assertEqual(bpttool.RoundToMultiple(199, 10), 200)
-    self.assertEqual(bpttool.RoundToMultiple(200, 10), 200)
-    self.assertEqual(bpttool.RoundToMultiple(201, 10), 210)
-    self.assertEqual(bpttool.RoundToMultiple(-18, 10), -10)
-    self.assertEqual(bpttool.RoundToMultiple(-19, 10), -10)
-    self.assertEqual(bpttool.RoundToMultiple(-20, 10), -20)
-    self.assertEqual(bpttool.RoundToMultiple(-21, 10), -20)
-
-  def testRoundDown(self):
-    """Checks that we round down correctly."""
-    self.assertEqual(bpttool.RoundToMultiple(100, 10, True), 100)
-    self.assertEqual(bpttool.RoundToMultiple(189, 10, True), 180)
-    self.assertEqual(bpttool.RoundToMultiple(190, 10, True), 190)
-    self.assertEqual(bpttool.RoundToMultiple(191, 10, True), 190)
-    self.assertEqual(bpttool.RoundToMultiple(199, 10, True), 190)
-    self.assertEqual(bpttool.RoundToMultiple(200, 10, True), 200)
-    self.assertEqual(bpttool.RoundToMultiple(201, 10, True), 200)
-    self.assertEqual(bpttool.RoundToMultiple(-18, 10, True), -20)
-    self.assertEqual(bpttool.RoundToMultiple(-19, 10, True), -20)
-    self.assertEqual(bpttool.RoundToMultiple(-20, 10, True), -20)
-    self.assertEqual(bpttool.RoundToMultiple(-21, 10, True), -30)
-
-
-class ParseSizeTest(unittest.TestCase):
-  """Unit tests for the ParseSize() function."""
-
-  def testIntegers(self):
-    """Checks parsing of integers."""
-    self.assertEqual(bpttool.ParseSize(123), 123)
-    self.assertEqual(bpttool.ParseSize(17179869184), 1<<34)
-    self.assertEqual(bpttool.ParseSize(0x1000), 4096)
-
-  def testRawNumbers(self):
-    """Checks parsing of raw numbers."""
-    self.assertEqual(bpttool.ParseSize('123'), 123)
-    self.assertEqual(bpttool.ParseSize('17179869184'), 1<<34)
-    self.assertEqual(bpttool.ParseSize('0x1000'), 4096)
-
-  def testDecimalUnits(self):
-    """Checks that decimal size units are interpreted correctly."""
-    self.assertEqual(bpttool.ParseSize('1 kB'), pow(10, 3))
-    self.assertEqual(bpttool.ParseSize('1 MB'), pow(10, 6))
-    self.assertEqual(bpttool.ParseSize('1 GB'), pow(10, 9))
-    self.assertEqual(bpttool.ParseSize('1 TB'), pow(10, 12))
-    self.assertEqual(bpttool.ParseSize('1 PB'), pow(10, 15))
-
-  def testBinaryUnits(self):
-    """Checks that binary size units are interpreted correctly."""
-    self.assertEqual(bpttool.ParseSize('1 KiB'), pow(2, 10))
-    self.assertEqual(bpttool.ParseSize('1 MiB'), pow(2, 20))
-    self.assertEqual(bpttool.ParseSize('1 GiB'), pow(2, 30))
-    self.assertEqual(bpttool.ParseSize('1 TiB'), pow(2, 40))
-    self.assertEqual(bpttool.ParseSize('1 PiB'), pow(2, 50))
-
-  def testFloatAndUnits(self):
-    """Checks that floating point numbers can be used with units."""
-    self.assertEqual(bpttool.ParseSize('0.5 kB'), 500)
-    self.assertEqual(bpttool.ParseSize('0.5 KiB'), 512)
-    self.assertEqual(bpttool.ParseSize('0.5 GB'), 500000000)
-    self.assertEqual(bpttool.ParseSize('0.5 GiB'), 536870912)
-    self.assertEqual(bpttool.ParseSize('0.1 MiB'), 104858)
-
-class MakeDiskImageTest(unittest.TestCase):
-  """Unit tests for 'bpttool make_disk_image'."""
-
-  def setUp(self):
-    """Set-up method."""
-    self.bpt = bpttool.Bpt()
-
-  def _BinaryPattern(self, bpt_file_name, partition_patterns):
-    """Checks that a binary pattern may be written to a specified partition.
-
-    This checks individual partion image writes to portions of a disk.  Known
-    patterns are written into certain partitions and are verified after each
-    pattern has been written to.
-
-    Arguments:
-      bpt_file_name: File name of bpt JSON containing partition information.
-      partition_patterns: List of tuples with each tuple having partition name
-                          as the first argument, and character pattern as the
-                          second argument.
-
-    """
-    bpt_file = open(bpt_file_name, 'r')
-    partitions_string, _ = self.bpt.make_table([bpt_file])
-    bpt_tmp = tempfile.NamedTemporaryFile()
-    bpt_tmp.write(partitions_string)
-    bpt_tmp.seek(0)
-    partitions, _ = self.bpt._read_json([bpt_tmp])
-
-    # Declare list of partition images to be written and compared on disk.
-    pattern_images = [PatternPartition(
-                      char=pp[1],
-                      file=tempfile.NamedTemporaryFile(),
-                      partition_name=pp[0])
-                      for pp in partition_patterns]
-
-    # Store partition object and write a known character pattern image.
-    for pi in pattern_images:
-      pi.obj = [p for p in partitions if str(p.label) == pi.partition_name][0]
-      pi.file.write(bytearray(pi.char * int(pi.obj.size)))
-
-    # Create the disk containing the partition filled with a known character
-    # pattern, seek to it's position and compare it to the supposed pattern.
-    with tempfile.NamedTemporaryFile() as generated_disk_image:
-      bpt_tmp.seek(0)
-      self.bpt.make_disk_image(generated_disk_image,
-                               bpt_tmp,
-                               [p.partition_name + ':' + p.file.name
-                                for p in pattern_images])
-
-      for pi in pattern_images:
-        generated_disk_image.seek(pi.obj.offset)
-        pi.file.seek(0)
-
-        self.assertEqual(generated_disk_image.read(pi.obj.size),
-                    pi.file.read())
-        pi.file.close()
-
-    bpt_file.close()
-    bpt_tmp.close()
-
-  def _LargeBinary(self, bpt_file_name):
-    """Helper function to write large partition images to disk images.
-
-    This is a simple call to make_disk_image, passing a large in an image
-    which exceeds the it's size as specfied in the bpt file.
-
-    Arguments:
-      bpt_file_name: File name of bpt JSON containing partition information.
-
-    """
-    with open(bpt_file_name, 'r') as bpt_file, \
-         tempfile.NamedTemporaryFile() as bpt_tmp, \
-         tempfile.NamedTemporaryFile() as generated_disk_image, \
-         tempfile.NamedTemporaryFile() as large_partition_image:
-        partitions_string, _ = self.bpt.make_table([bpt_file])
-        bpt_tmp.write(partitions_string)
-        bpt_tmp.seek(0)
-        partitions, _ = self.bpt._read_json([bpt_tmp])
-
-        # Create the over-sized partition image.
-        large_partition_image.write(bytearray('0' *
-          int(1.1*partitions[0].size + 1)))
-
-        bpt_tmp.seek(0)
-
-        # Expect exception here.
-        self.bpt.make_disk_image(generated_disk_image, bpt_tmp,
-          [p.label + ':' + large_partition_image.name for p in partitions])
-
-  def testBinaryPattern(self):
-    """Checks patterns written to partitions on disk images."""
-    self._BinaryPattern('test/pattern_partition_single.bpt', [('charlie', 'c')])
-    self._BinaryPattern('test/pattern_partition_multi.bpt', [('alpha', 'a'),
-                        ('beta', 'b')])
-
-  def testExceedPartitionSize(self):
-    """Checks that exceedingly large partition images are not accepted."""
-    try:
-      self._LargeBinary('test/pattern_partition_exceed_size.bpt')
-    except bpttool.BptError as e:
-      assert 'exceeds the partition size' in e.message
-
-  def testSparseImage(self):
-    """Checks that sparse input is unsparsified."""
-    bpt_file = open('test/test_sparse_image.bpt', 'r')
-    bpt_json, _ = self.bpt.make_table([bpt_file])
-    bpt_json_file = tempfile.NamedTemporaryFile()
-    bpt_json_file.write(bpt_json)
-    bpt_json_file.seek(0)
-    partitions, _ = self.bpt._read_json([bpt_json_file])
-
-    # Generate a disk image where one of the inputs is a sparse disk
-    # image. See below for details about test/test_file.bin and
-    # test/test_file.bin.sparse.
-    generated_disk_image = tempfile.NamedTemporaryFile()
-    bpt_json_file.seek(0)
-    self.bpt.make_disk_image(generated_disk_image,
-                             bpt_json_file,
-                             ['sparse_data:test/test_file.bin.sparse'])
-
-    # Get offset and size of the generated partition.
-    part = json.loads(bpt_json)['partitions'][0]
-    part_offset = int(part['offset'])
-    part_size = int(part['size'])
-
-    # Load the unsparsed data.
-    unsparse_file = open('test/test_file.bin', 'r')
-    unsparse_data = unsparse_file.read()
-    unsparse_size = unsparse_file.tell()
-
-    # Check that the unsparse image doesn't take up all the space.
-    self.assertLess(unsparse_size, part_size)
-
-    # Check that the sparse image was unsparsified correctly.
-    generated_disk_image.seek(part_offset)
-    disk_image_data = generated_disk_image.read(unsparse_size)
-    self.assertItemsEqual(disk_image_data, unsparse_data)
-
-    # Check that the remainder of the partition has zeroes.
-    trailing_size = part_size - unsparse_size
-    trailing_data = generated_disk_image.read(trailing_size)
-    self.assertItemsEqual(trailing_data, '\0'*trailing_size)
-
-
-class MakeTableTest(unittest.TestCase):
-  """Unit tests for 'bpttool make_table'."""
-
-  def setUp(self):
-    """Reset GUID generator for every test."""
-    self.fake_guid_generator = FakeGuidGenerator()
-
-  def _MakeTable(self, input_file_names,
-                 expected_json_file_name,
-                 expected_gpt_file_name=None,
-                 partitions_offset_begin=None,
-                 disk_size=None,
-                 disk_alignment=None,
-                 disk_guid=None,
-                 ab_suffixes=None):
-    """Helper function to create partition tables.
-
-    This is a simple wrapper for Bpt.make_table() that compares its
-    output with an expected output when given a set of inputs.
-
-    Arguments:
-      input_file_names: A list of .bpt files to process.
-      expected_json_file_name: File name of the file with expected JSON output.
-      expected_gpt_file_name: File name of the file with expected binary
-                              output or None.
-      partitions_offset_begin: if not None, the size of the disk
-                               partitions offset begin to use.
-      disk_size: if not None, the size of the disk to use.
-      disk_alignment: if not None, the disk alignment to use.
-      disk_guid: if not None, the disk GUID to use.
-      ab_suffixes: if not None, a list of the A/B suffixes (as a
-                   comma-separated string) to use.
-
-    """
-
-    inputs = []
-    for file_name in input_file_names:
-      inputs.append(open(file_name))
-
-    bpt = bpttool.Bpt()
-    (json_str, gpt_bin) = bpt.make_table(
-        inputs,
-        partitions_offset_begin=partitions_offset_begin,
-        disk_size=disk_size,
-        disk_alignment=disk_alignment,
-        disk_guid=disk_guid,
-        ab_suffixes=ab_suffixes,
-        guid_generator=self.fake_guid_generator)
-    self.assertEqual(json_str, open(expected_json_file_name).read())
-
-    # Check that bpttool generates same JSON if being fed output it
-    # just generated without passing any options (e.g. disk size,
-    # alignment, suffixes). This verifies that we include all
-    # necessary information in the generated JSON.
-    (json_str2, _) = bpt.make_table(
-        [open(expected_json_file_name, 'r')],
-        guid_generator=self.fake_guid_generator)
-    self.assertEqual(json_str, json_str2)
-
-    if expected_gpt_file_name:
-      self.assertEqual(gpt_bin, open(expected_gpt_file_name).read())
-
-  def testBase(self):
-    """Checks that the basic layout is as expected."""
-    self._MakeTable(['test/base.bpt'],
-                    'test/expected_json_base.bpt',
-                    disk_size=bpttool.ParseSize('10 GiB'))
-
-  def testSize(self):
-    """Checks that disk size can be changed on the command-line."""
-    self._MakeTable(['test/base.bpt'],
-                    'test/expected_json_size.bpt',
-                    disk_size=bpttool.ParseSize('20 GiB'))
-
-  def testAlignment(self):
-    """Checks that disk alignment can be changed on the command-line."""
-    self._MakeTable(['test/base.bpt'],
-                    'test/expected_json_alignment.bpt',
-                    disk_size=bpttool.ParseSize('10 GiB'),
-                    disk_alignment=1048576)
-
-  def testPartitionsOffsetBegin(self):
-    """Checks that disk partitions offset begin
-       can be changed on the command-line."""
-    self._MakeTable(['test/base.bpt'],
-                    'test/expected_json_partitions_offset_begin.bpt',
-                    partitions_offset_begin=bpttool.ParseSize('1 MiB'),
-                    disk_size=bpttool.ParseSize('10 GiB'))
-
-  def testDiskGuid(self):
-    """Checks that disk GUID can be changed on the command-line."""
-    self._MakeTable(['test/base.bpt'],
-                    'test/expected_json_disk_guid.bpt',
-                    disk_size=bpttool.ParseSize('10 GiB'),
-                    disk_guid='01234567-89ab-cdef-0123-00000000002a')
-
-  def testPersist(self):
-    """Checks that persist flags are generated"""
-    self._MakeTable(['test/persist.bpt'],
-                    'test/expected_json_persist.bpt',
-                    disk_size=bpttool.ParseSize('10 GiB'))
-
-  def testSuffixes(self):
-    """Checks that A/B-suffixes can be changed on the command-line."""
-    self._MakeTable(['test/base.bpt'],
-                    'test/expected_json_suffixes.bpt',
-                    disk_size=bpttool.ParseSize('10 GiB'),
-                    ab_suffixes='-A,-B')
-
-  def testStackedIgnore(self):
-    """Checks that partitions can be ignored through stacking."""
-    self._MakeTable(['test/base.bpt', 'test/ignore_userdata.bpt'],
-                    'test/expected_json_stacked_ignore.bpt',
-                    disk_size=bpttool.ParseSize('10 GiB'))
-
-  def testStackedSize(self):
-    """Checks that partition size can be changed through stacking."""
-    self._MakeTable(['test/base.bpt', 'test/change_odm_size.bpt'],
-                    'test/expected_json_stacked_size.bpt',
-                    disk_size=bpttool.ParseSize('10 GiB'))
-
-  def testStackedSettings(self):
-    """Checks that settings can be changed through stacking."""
-    self._MakeTable(['test/base.bpt', 'test/override_settings.bpt'],
-                    'test/expected_json_stacked_override_settings.bpt')
-
-  def testStackedNewPartition(self):
-    """Checks that a new partition can be added through stacking."""
-    self._MakeTable(['test/base.bpt', 'test/new_partition.bpt'],
-                    'test/expected_json_stacked_new_partition.bpt',
-                    disk_size=bpttool.ParseSize('10 GiB'))
-
-  def testStackedChangeFlags(self):
-    """Checks that we can change partition flags through stacking."""
-    self._MakeTable(['test/base.bpt', 'test/change_userdata_flags.bpt'],
-                    'test/expected_json_stacked_change_flags.bpt',
-                    disk_size=bpttool.ParseSize('10 GiB'))
-
-  def testStackedDisableAB(self):
-    """Checks that we can change disable A/B on partitions through stacking."""
-    self._MakeTable(['test/base.bpt', 'test/disable_ab.bpt'],
-                    'test/expected_json_stacked_disable_ab.bpt',
-                    disk_size=bpttool.ParseSize('10 GiB'))
-
-  def testStackedNewPartitionOnTop(self):
-    """Checks that we can add a new partition only given the output JSON.
-
-    This also tests that 'userdata' is shrunk (it has a size in the
-    input file 'expected_json_base.bpt') to make room for the new
-    partition. This works because the 'grow' attribute is preserved.
-    """
-    self._MakeTable(['test/expected_json_base.bpt',
-                     'test/new_partition_on_top.bpt'],
-                    'test/expected_json_stacked_new_partition_on_top.bpt')
-
-  def testStackedSizeAB(self):
-    """Checks that AB partition size can be changed given output JSON.
-
-    This verifies that we un-expand A/B partitions when importing
-    output JSON. E.g. the output JSON has system_a, system_b which is
-    rewritten to just system as part of the import.
-    """
-    self._MakeTable(['test/expected_json_base.bpt',
-                     'test/change_system_size.bpt'],
-                    'test/expected_json_stacked_change_ab_size.bpt')
-
-  def testPositionAttribute(self):
-    """Checks that it's possible to influence partition order."""
-    self._MakeTable(['test/base.bpt',
-                     'test/positions.bpt'],
-                    'test/expected_json_stacked_positions.bpt',
-                    disk_size=bpttool.ParseSize('10 GiB'))
-
-  def testBinaryOutput(self):
-    """Checks binary partition table output.
-
-    This verifies that we generate the binary partition tables
-    correctly.
-    """
-    self._MakeTable(['test/expected_json_stacked_change_flags.bpt'],
-                    'test/expected_json_stacked_change_flags.bpt',
-                    'test/expected_json_stacked_change_flags.bin')
-
-  def testFileWithSyntaxErrors(self):
-    """Check that we catch errors in JSON files in a structured way."""
-    try:
-      self._MakeTable(['test/file_with_syntax_errors.bpt'],
-                      'file_with_syntax_errors.bpt')
-    except bpttool.BptParsingError as e:
-      self.assertEqual(e.filename, 'test/file_with_syntax_errors.bpt')
-
-
-class QueryPartitionTest(unittest.TestCase):
-  """Unit tests for 'bpttool query_partition'."""
-
-  def setUp(self):
-    """Set-up method."""
-    self.bpt = bpttool.Bpt()
-
-  def testQuerySize(self):
-    """Checks query for size."""
-    self.assertEqual(
-        self.bpt.query_partition(
-            open('test/expected_json_stacked_change_flags.bpt'),
-            'userdata', 'size', False),
-        '7449042944')
-
-  def testQueryOffset(self):
-    """Checks query for offset."""
-    self.assertEqual(
-        self.bpt.query_partition(
-            open('test/expected_json_stacked_change_flags.bpt'),
-            'userdata', 'offset', False),
-        '3288354816')
-
-  def testQueryGuid(self):
-    """Checks query for GUID."""
-    self.assertEqual(
-        self.bpt.query_partition(
-            open('test/expected_json_stacked_change_flags.bpt'),
-            'userdata', 'guid', False),
-        '01234567-89ab-cdef-0123-000000000007')
-
-  def testQueryTypeGuid(self):
-    """Checks query for type GUID."""
-    self.assertEqual(
-        self.bpt.query_partition(
-            open('test/expected_json_stacked_change_flags.bpt'),
-            'userdata', 'type_guid', False),
-        '0bb7e6ed-4424-49c0-9372-7fbab465ab4c')
-
-  def testQueryFlags(self):
-    """Checks query for flags."""
-    self.assertEqual(
-        self.bpt.query_partition(
-            open('test/expected_json_stacked_change_flags.bpt'),
-            'userdata', 'flags', False),
-        '0x0420000000000000')
-
-  def testQueryPersistTrue(self):
-    """Checks query for persist."""
-    self.assertEqual(
-        self.bpt.query_partition(
-            open('test/persist.bpt'),
-            'true_persist', 'persist', False), 'True')
-
-  def testQueryPersistFalse(self):
-    """Checks query for persist."""
-    self.assertEqual(
-        self.bpt.query_partition(
-            open('test/persist.bpt'),
-            'false_persist', 'persist', False), 'False')
-
-  def testQuerySizeCollapse(self):
-    """Checks query for size when collapsing A/B partitions."""
-    self.assertEqual(
-        self.bpt.query_partition(
-            open('test/expected_json_stacked_change_flags.bpt'),
-            'odm', 'size', True),
-        '1073741824')
-
-
-# The file test_file.bin and test_file.bin.sparse are generated using
-# the following python code:
-#
-#  with open('test_file.bin', 'w+b') as f:
-#    f.write('Barfoo43'*128*12)
-#  os.system('img2simg test_file.bin test_file.bin.sparse')
-#  image = bpttool.ImageHandler('test_file.bin.sparse')
-#  image.append_dont_care(12*1024)
-#  image.append_fill('\x01\x02\x03\x04', 12*1024)
-#  image.append_raw('Foobar42'*128*12)
-#  image.append_dont_care(12*1024)
-#  del image
-#  os.system('rm -f test_file.bin')
-#  os.system('simg2img test_file.bin.sparse test_file.bin')
-#
-# and manually verified to be correct. The content of the raw and
-# sparse files are as follows (the line with "Fill with 0x04030201" is
-# a simg_dump.py bug):
-#
-# $ hexdump -C test_file.bin
-# 00000000  42 61 72 66 6f 6f 34 33  42 61 72 66 6f 6f 34 33  |Barfoo43Barfoo43|
-# *
-# 00003000  00 00 00 00 00 00 00 00  00 00 00 00 00 00 00 00  |................|
-# *
-# 00006000  01 02 03 04 01 02 03 04  01 02 03 04 01 02 03 04  |................|
-# *
-# 00009000  46 6f 6f 62 61 72 34 32  46 6f 6f 62 61 72 34 32  |Foobar42Foobar42|
-# *
-# 0000c000  00 00 00 00 00 00 00 00  00 00 00 00 00 00 00 00  |................|
-# *
-# 0000f000
-#
-# $ system/core/libsparse/simg_dump.py -v test_file.bin.sparse
-# test_file.bin.sparse: Total of 15 4096-byte output blocks in 5 input chunks.
-#             input_bytes      output_blocks
-# chunk    offset     number  offset  number
-#    1         40      12288       0       3 Raw data
-#    2      12340          0       3       3 Don't care
-#    3      12352          4       6       3 Fill with 0x04030201
-#    4      12368      12288       9       3 Raw data
-#    5      24668          0      12       3 Don't care
-#           24668                 15         End
-#
-class ImageHandler(unittest.TestCase):
-
-  TEST_FILE_SPARSE_PATH = 'test/test_file.bin.sparse'
-  TEST_FILE_PATH = 'test/test_file.bin'
-  TEST_FILE_SIZE = 61440
-  TEST_FILE_BLOCK_SIZE = 4096
-
-  def _file_contents_equal(self, path1, path2, size):
-    f1 = open(path1, 'r')
-    f2 = open(path2, 'r')
-    if f1.read(size) != f2.read(size):
-      return False
-    return True
-
-  def _file_size(self, f):
-    old_pos = f.tell()
-    f.seek(0, os.SEEK_END)
-    size = f.tell()
-    f.seek(old_pos)
-    return size
-
-  def _clone_sparse_file(self):
-    f = tempfile.NamedTemporaryFile()
-    f.write(open(self.TEST_FILE_SPARSE_PATH).read())
-    f.flush()
-    return f
-
-  def _unsparsify(self, path):
-    f = tempfile.NamedTemporaryFile()
-    os.system('simg2img {} {}'.format(path, f.name))
-    return f
-
-  def testRead(self):
-    """Checks that reading from a sparse file works as intended."""
-    ih = bpttool.ImageHandler(self.TEST_FILE_SPARSE_PATH)
-
-    # Check that we start at offset 0.
-    self.assertEqual(ih.tell(), 0)
-
-    # Check that reading advances the cursor.
-    self.assertEqual(ih.read(14), bytearray('Barfoo43Barfoo'))
-    self.assertEqual(ih.tell(), 14)
-    self.assertEqual(ih.read(2), bytearray('43'))
-    self.assertEqual(ih.tell(), 16)
-
-    # Check reading in the middle of a fill chunk gets the right data.
-    ih.seek(0x6000 + 1)
-    self.assertEqual(ih.read(4), bytearray('\x02\x03\x04\x01'))
-
-    # Check we can cross the chunk boundary correctly.
-    ih.seek(0x3000 - 10)
-    self.assertEqual(ih.read(12), bytearray('43Barfoo43\x00\x00'))
-    ih.seek(0x9000 - 3)
-    self.assertEqual(ih.read(5), bytearray('\x02\x03\x04Fo'))
-
-    # Check reading at end of file is a partial read.
-    ih.seek(0xf000 - 2)
-    self.assertEqual(ih.read(16), bytearray('\x00\x00'))
-
-  def testTruncate(self):
-    """Checks that we can truncate a sparse file correctly."""
-    # Check truncation at all possible boundaries (including start and end).
-    for size in range(0, self.TEST_FILE_SIZE + self.TEST_FILE_BLOCK_SIZE,
-                      self.TEST_FILE_BLOCK_SIZE):
-      sparse_file = self._clone_sparse_file()
-      ih = bpttool.ImageHandler(sparse_file.name)
-      ih.truncate(size)
-      unsparse_file = self._unsparsify(sparse_file.name)
-      self.assertEqual(self._file_size(unsparse_file), size)
-      self.assertTrue(self._file_contents_equal(unsparse_file.name,
-                                                self.TEST_FILE_PATH,
-                                                size))
-
-    # Check truncation to grow the file.
-    grow_size = 8192
-    sparse_file = self._clone_sparse_file()
-    ih = bpttool.ImageHandler(sparse_file.name)
-    ih.truncate(self.TEST_FILE_SIZE + grow_size)
-    unsparse_file = self._unsparsify(sparse_file.name)
-    self.assertEqual(self._file_size(unsparse_file),
-                     self.TEST_FILE_SIZE + grow_size)
-    self.assertTrue(self._file_contents_equal(unsparse_file.name,
-                                              self.TEST_FILE_PATH,
-                                              self.TEST_FILE_SIZE))
-    unsparse_file.seek(self.TEST_FILE_SIZE)
-    self.assertEqual(unsparse_file.read(), '\0'*grow_size)
-
-  def testAppendRaw(self):
-    """Checks that we can append raw data correctly."""
-    sparse_file = self._clone_sparse_file()
-    ih = bpttool.ImageHandler(sparse_file.name)
-    data = 'SomeData'*4096
-    ih.append_raw(data)
-    unsparse_file = self._unsparsify(sparse_file.name)
-    self.assertTrue(self._file_contents_equal(unsparse_file.name,
-                                              self.TEST_FILE_PATH,
-                                              self.TEST_FILE_SIZE))
-    unsparse_file.seek(self.TEST_FILE_SIZE)
-    self.assertEqual(unsparse_file.read(), data)
-
-  def testAppendFill(self):
-    """Checks that we can append fill data correctly."""
-    sparse_file = self._clone_sparse_file()
-    ih = bpttool.ImageHandler(sparse_file.name)
-    data = 'ABCD'*4096
-    ih.append_fill('ABCD', len(data))
-    unsparse_file = self._unsparsify(sparse_file.name)
-    self.assertTrue(self._file_contents_equal(unsparse_file.name,
-                                              self.TEST_FILE_PATH,
-                                              self.TEST_FILE_SIZE))
-    unsparse_file.seek(self.TEST_FILE_SIZE)
-    self.assertEqual(unsparse_file.read(), data)
-
-  def testDontCare(self):
-    """Checks that we can append DONT_CARE data correctly."""
-    sparse_file = self._clone_sparse_file()
-    ih = bpttool.ImageHandler(sparse_file.name)
-    data = '\0'*40960
-    ih.append_dont_care(len(data))
-    unsparse_file = self._unsparsify(sparse_file.name)
-    self.assertTrue(self._file_contents_equal(unsparse_file.name,
-                                              self.TEST_FILE_PATH,
-                                              self.TEST_FILE_SIZE))
-    unsparse_file.seek(self.TEST_FILE_SIZE)
-    self.assertEqual(unsparse_file.read(), data)
-
-
-if __name__ == '__main__':
-  unittest.main()
diff --git a/bpttool b/bpttool
deleted file mode 100755
index a29d737..0000000
--- a/bpttool
+++ /dev/null
@@ -1,1649 +0,0 @@
-#!/usr/bin/env python
-
-# Copyright 2016, The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-"""Command-line tool for partitioning Brillo images."""
-
-
-import argparse
-import bisect
-import copy
-import json
-import math
-import numbers
-import os
-import struct
-import sys
-import uuid
-import zlib
-
-# Python 2.6 required for modern exception syntax
-if sys.hexversion < 0x02060000:
-  print >> sys.stderr, "Python 2.6 or newer is required."
-  sys.exit(1)
-
-# Keywords used in JSON files.
-JSON_KEYWORD_SETTINGS = 'settings'
-JSON_KEYWORD_SETTINGS_AB_SUFFIXES = 'ab_suffixes'
-JSON_KEYWORD_SETTINGS_DISK_SIZE = 'disk_size'
-JSON_KEYWORD_SETTINGS_DISK_ALIGNMENT = 'disk_alignment'
-JSON_KEYWORD_SETTINGS_DISK_GUID = 'disk_guid'
-JSON_KEYWORD_SETTINGS_PARTITIONS_OFFSET_BEGIN = 'partitions_offset_begin'
-JSON_KEYWORD_PARTITIONS = 'partitions'
-JSON_KEYWORD_PARTITIONS_LABEL = 'label'
-JSON_KEYWORD_PARTITIONS_OFFSET = 'offset'
-JSON_KEYWORD_PARTITIONS_SIZE = 'size'
-JSON_KEYWORD_PARTITIONS_GROW = 'grow'
-JSON_KEYWORD_PARTITIONS_GUID = 'guid'
-JSON_KEYWORD_PARTITIONS_TYPE_GUID = 'type_guid'
-JSON_KEYWORD_PARTITIONS_FLAGS = 'flags'
-JSON_KEYWORD_PARTITIONS_PERSIST = 'persist'
-JSON_KEYWORD_PARTITIONS_IGNORE = 'ignore'
-JSON_KEYWORD_PARTITIONS_AB = 'ab'
-JSON_KEYWORD_PARTITIONS_AB_EXPANDED = 'ab_expanded'
-JSON_KEYWORD_PARTITIONS_POSITION = 'position'
-JSON_KEYWORD_AUTO = 'auto'
-
-# Possible values for the --type option of the query_partition
-# sub-command.
-QUERY_PARTITION_TYPES = ['size',
-                         'offset',
-                         'guid',
-                         'type_guid',
-                         'flags',
-                         'persist']
-
-BPT_VERSION_MAJOR = 1
-BPT_VERSION_MINOR = 0
-
-DISK_SECTOR_SIZE = 512
-
-GPT_NUM_LBAS = 33
-
-GPT_MIN_PART_NUM = 1
-GPT_MAX_PART_NUM = 128
-
-KNOWN_TYPE_GUIDS = {
-    'brillo_boot': 'bb499290-b57e-49f6-bf41-190386693794',
-    'brillo_bootloader': '4892aeb3-a45f-4c5f-875f-da3303c0795c',
-    'brillo_system': '0f2778c4-5cc1-4300-8670-6c88b7e57ed6',
-    'brillo_odm': 'e99d84d7-2c1b-44cf-8c58-effae2dc2558',
-    'brillo_oem': 'aa3434b2-ddc3-4065-8b1a-18e99ea15cb7',
-    'brillo_userdata': '0bb7e6ed-4424-49c0-9372-7fbab465ab4c',
-    'brillo_misc': '6b2378b0-0fbc-4aa9-a4f6-4d6e17281c47',
-    'brillo_vbmeta': 'b598858a-5fe3-418e-b8c4-824b41f4adfc',
-    'brillo_vendor_specific': '314f99d5-b2bf-4883-8d03-e2f2ce507d6a',
-    'linux_fs': '0fc63daf-8483-4772-8e79-3d69d8477de4',
-    'ms_basic_data': 'ebd0a0a2-b9e5-4433-87c0-68b6b72699c7',
-    'efi_system': 'c12a7328-f81f-11d2-ba4b-00a0c93ec93b'
-}
-
-
-def RoundToMultiple(number, size, round_down=False):
-  """Rounds a number up (or down) to nearest multiple of another number.
-
-  Args:
-    number: The number to round up.
-    size: The multiple to round up to.
-    round_down: If True, the number will be rounded down.
-
-  Returns:
-    If |number| is a multiple of |size|, returns |number|, otherwise
-    returns |number| + |size| - |remainder| (if |round_down| is False) or
-    |number| - |remainder| (if |round_down| is True). Always returns
-    an integer.
-  """
-  remainder = number % size
-  if remainder == 0:
-    return int(number)
-  if round_down:
-    return int(number - remainder)
-  return int(number + size - remainder)
-
-
-def ParseNumber(arg):
-  """Number parser.
-
-  If |arg| is an integer, that value is returned. Otherwise int(arg, 0)
-  is returned.
-
-  This function is suitable for use in the |type| parameter of
-  |ArgumentParser|'s add_argument() function. An improvement to just
-  using type=int is that this function supports numbers in other
-  bases, e.g. "0x1234".
-
-  Arguments:
-    arg: Argument (int or string) to parse.
-
-  Returns:
-    The parsed value, as an integer.
-
-  Raises:
-    ValueError: If the argument could not be parsed.
-  """
-  if isinstance(arg, numbers.Integral):
-    return arg
-  return int(arg, 0)
-
-
-def ParseGuid(arg):
-  """Parser for RFC 4122 GUIDs.
-
-  Arguments:
-    arg: The argument, as a string.
-
-  Returns:
-    UUID in hyphenated format.
-
-  Raises:
-    ValueError: If the given string cannot be parsed.
-  """
-  return str(uuid.UUID(arg))
-
-
-def ParseSize(arg):
-  """Parser for size strings with decimal and binary unit support.
-
-  This supports both integers and strings.
-
-  Arguments:
-    arg: The string to parse.
-
-  Returns:
-    The parsed size in bytes as an integer.
-
-  Raises:
-    ValueError: If the given string cannot be parsed.
-  """
-  if isinstance(arg, numbers.Integral):
-    return arg
-
-  ws_index = arg.find(' ')
-  if ws_index != -1:
-    num = float(arg[0:ws_index])
-    factor = 1
-    if arg.endswith('KiB'):
-      factor = 1024
-    elif arg.endswith('MiB'):
-      factor = 1024*1024
-    elif arg.endswith('GiB'):
-      factor = 1024*1024*1024
-    elif arg.endswith('TiB'):
-      factor = 1024*1024*1024*1024
-    elif arg.endswith('PiB'):
-      factor = 1024*1024*1024*1024*1024
-    elif arg.endswith('kB'):
-      factor = 1000
-    elif arg.endswith('MB'):
-      factor = 1000*1000
-    elif arg.endswith('GB'):
-      factor = 1000*1000*1000
-    elif arg.endswith('TB'):
-      factor = 1000*1000*1000*1000
-    elif arg.endswith('PB'):
-      factor = 1000*1000*1000*1000*1000
-    else:
-      raise ValueError('Cannot parse string "{}"'.format(arg))
-    value = num*factor
-    # If the resulting value isn't an integer, round up.
-    if not value.is_integer():
-      value = int(math.ceil(value))
-  else:
-    value = int(arg, 0)
-  return value
-
-
-class ImageChunk(object):
-  """Data structure used for representing chunks in Android sparse files.
-
-  Attributes:
-    chunk_type: One of TYPE_RAW, TYPE_FILL, or TYPE_DONT_CARE.
-    chunk_offset: Offset in the sparse file where this chunk begins.
-    output_offset: Offset in de-sparsified file where output begins.
-    output_size: Number of bytes in output.
-    input_offset: Offset in sparse file for data if TYPE_RAW otherwise None.
-    fill_data: Blob with data to fill if TYPE_FILL otherwise None.
-  """
-
-  FORMAT = '<2H2I'
-  TYPE_RAW = 0xcac1
-  TYPE_FILL = 0xcac2
-  TYPE_DONT_CARE = 0xcac3
-  TYPE_CRC32 = 0xcac4
-
-  def __init__(self, chunk_type, chunk_offset, output_offset, output_size,
-               input_offset, fill_data):
-    """Initializes an ImageChunk object.
-
-    Arguments:
-      chunk_type: One of TYPE_RAW, TYPE_FILL, or TYPE_DONT_CARE.
-      chunk_offset: Offset in the sparse file where this chunk begins.
-      output_offset: Offset in de-sparsified file.
-      output_size: Number of bytes in output.
-      input_offset: Offset in sparse file if TYPE_RAW otherwise None.
-      fill_data: Blob with data to fill if TYPE_FILL otherwise None.
-
-    Raises:
-      ValueError: If data is not well-formed.
-    """
-    self.chunk_type = chunk_type
-    self.chunk_offset = chunk_offset
-    self.output_offset = output_offset
-    self.output_size = output_size
-    self.input_offset = input_offset
-    self.fill_data = fill_data
-    # Check invariants.
-    if self.chunk_type == self.TYPE_RAW:
-      if self.fill_data is not None:
-        raise ValueError('RAW chunk cannot have fill_data set.')
-      if not self.input_offset:
-        raise ValueError('RAW chunk must have input_offset set.')
-    elif self.chunk_type == self.TYPE_FILL:
-      if self.fill_data is None:
-        raise ValueError('FILL chunk must have fill_data set.')
-      if self.input_offset:
-        raise ValueError('FILL chunk cannot have input_offset set.')
-    elif self.chunk_type == self.TYPE_DONT_CARE:
-      if self.fill_data is not None:
-        raise ValueError('DONT_CARE chunk cannot have fill_data set.')
-      if self.input_offset:
-        raise ValueError('DONT_CARE chunk cannot have input_offset set.')
-    else:
-      raise ValueError('Invalid chunk type')
-
-
-class ImageHandler(object):
-  """Abstraction for image I/O with support for Android sparse images.
-
-  This class provides an interface for working with image files that
-  may be using the Android Sparse Image format. When an instance is
-  constructed, we test whether it's an Android sparse file. If so,
-  operations will be on the sparse file by interpreting the sparse
-  format, otherwise they will be directly on the file. Either way the
-  operations do the same.
-
-  For reading, this interface mimics a file object - it has seek(),
-  tell(), and read() methods. For writing, only truncation
-  (truncate()) and appending is supported (append_raw(),
-  append_fill(), and append_dont_care()). Additionally, data can only
-  be written in units of the block size.
-
-  Attributes:
-    is_sparse: Whether the file being operated on is sparse.
-    block_size: The block size, typically 4096.
-    image_size: The size of the unsparsified file.
-
-  """
-  # See system/core/libsparse/sparse_format.h for details.
-  MAGIC = 0xed26ff3a
-  HEADER_FORMAT = '<I4H4I'
-
-  # These are formats and offset of just the |total_chunks| and
-  # |total_blocks| fields.
-  NUM_CHUNKS_AND_BLOCKS_FORMAT = '<II'
-  NUM_CHUNKS_AND_BLOCKS_OFFSET = 16
-
-  def __init__(self, image_filename):
-    """Initializes an image handler.
-
-    Arguments:
-      image_filename: The name of the file to operate on.
-
-    Raises:
-      ValueError: If data in the file is invalid.
-    """
-    self._image_filename = image_filename
-    self._read_header()
-
-  def _read_header(self):
-    """Initializes internal data structures used for reading file.
-
-    This may be called multiple times and is typically called after
-    modifying the file (e.g. appending, truncation).
-
-    Raises:
-      ValueError: If data in the file is invalid.
-    """
-    self.is_sparse = False
-    self.block_size = 4096
-    self._file_pos = 0
-    self._image = open(self._image_filename, 'r+b')
-    self._image.seek(0, os.SEEK_END)
-    self.image_size = self._image.tell()
-
-    self._image.seek(0, os.SEEK_SET)
-    header_bin = self._image.read(struct.calcsize(self.HEADER_FORMAT))
-    if len(header_bin) < struct.calcsize(self.HEADER_FORMAT):
-      # Not a sparse image, our job here is done.
-      return
-    (magic, major_version, minor_version, file_hdr_sz, chunk_hdr_sz,
-     block_size, self._num_total_blocks, self._num_total_chunks,
-     _) = struct.unpack(self.HEADER_FORMAT, header_bin)
-    if magic != self.MAGIC:
-      # Not a sparse image, our job here is done.
-      return
-    if not (major_version == 1 and minor_version == 0):
-      raise ValueError('Encountered sparse image format version {}.{} but '
-                       'only 1.0 is supported'.format(major_version,
-                                                      minor_version))
-    if file_hdr_sz != struct.calcsize(self.HEADER_FORMAT):
-      raise ValueError('Unexpected file_hdr_sz value {}.'.
-                       format(file_hdr_sz))
-    if chunk_hdr_sz != struct.calcsize(ImageChunk.FORMAT):
-      raise ValueError('Unexpected chunk_hdr_sz value {}.'.
-                       format(chunk_hdr_sz))
-
-    self.block_size = block_size
-
-    # Build an list of chunks by parsing the file.
-    self._chunks = []
-
-    # Find the smallest offset where only "Don't care" chunks
-    # follow. This will be the size of the content in the sparse
-    # image.
-    offset = 0
-    output_offset = 0
-    for _ in xrange(1, self._num_total_chunks + 1):
-      chunk_offset = self._image.tell()
-
-      header_bin = self._image.read(struct.calcsize(ImageChunk.FORMAT))
-      (chunk_type, _, chunk_sz, total_sz) = struct.unpack(ImageChunk.FORMAT,
-                                                          header_bin)
-      data_sz = total_sz - struct.calcsize(ImageChunk.FORMAT)
-
-      if chunk_type == ImageChunk.TYPE_RAW:
-        if data_sz != (chunk_sz * self.block_size):
-          raise ValueError('Raw chunk input size ({}) does not match output '
-                           'size ({})'.
-                           format(data_sz, chunk_sz*self.block_size))
-        self._chunks.append(ImageChunk(ImageChunk.TYPE_RAW,
-                                       chunk_offset,
-                                       output_offset,
-                                       chunk_sz*self.block_size,
-                                       self._image.tell(),
-                                       None))
-        self._image.read(data_sz)
-
-      elif chunk_type == ImageChunk.TYPE_FILL:
-        if data_sz != 4:
-          raise ValueError('Fill chunk should have 4 bytes of fill, but this '
-                           'has {}'.format(data_sz))
-        fill_data = self._image.read(4)
-        self._chunks.append(ImageChunk(ImageChunk.TYPE_FILL,
-                                       chunk_offset,
-                                       output_offset,
-                                       chunk_sz*self.block_size,
-                                       None,
-                                       fill_data))
-      elif chunk_type == ImageChunk.TYPE_DONT_CARE:
-        if data_sz != 0:
-          raise ValueError('Don\'t care chunk input size is non-zero ({})'.
-                           format(data_sz))
-        self._chunks.append(ImageChunk(ImageChunk.TYPE_DONT_CARE,
-                                       chunk_offset,
-                                       output_offset,
-                                       chunk_sz*self.block_size,
-                                       None,
-                                       None))
-      elif chunk_type == ImageChunk.TYPE_CRC32:
-        if data_sz != 4:
-          raise ValueError('CRC32 chunk should have 4 bytes of CRC, but '
-                           'this has {}'.format(data_sz))
-        self._image.read(4)
-      else:
-        raise ValueError('Unknown chunk type {}'.format(chunk_type))
-
-      offset += chunk_sz
-      output_offset += chunk_sz * self.block_size
-
-    # Record where sparse data end.
-    self._sparse_end = self._image.tell()
-
-    # Now that we've traversed all chunks, sanity check.
-    if self._num_total_blocks != offset:
-      raise ValueError('The header said we should have {} output blocks, '
-                       'but we saw {}'.format(self._num_total_blocks, offset))
-    junk_len = len(self._image.read())
-    if junk_len > 0:
-      raise ValueError('There were {} bytes of extra data at the end of the '
-                       'file.'.format(junk_len))
-
-    # Assign |image_size|.
-    self.image_size = output_offset
-
-    # This is used when bisecting in read() to find the initial slice.
-    self._chunk_output_offsets = [i.output_offset for i in self._chunks]
-
-    self.is_sparse = True
-
-  def _update_chunks_and_blocks(self):
-    """Helper function to update the image header.
-
-    The the |total_chunks| and |total_blocks| fields in the header
-    will be set to value of the |_num_total_blocks| and
-    |_num_total_chunks| attributes.
-
-    """
-    self._image.seek(self.NUM_CHUNKS_AND_BLOCKS_OFFSET, os.SEEK_SET)
-    self._image.write(struct.pack(self.NUM_CHUNKS_AND_BLOCKS_FORMAT,
-                                  self._num_total_blocks,
-                                  self._num_total_chunks))
-
-  def append_dont_care(self, num_bytes):
-    """Appends a DONT_CARE chunk to the sparse file.
-
-    The given number of bytes must be a multiple of the block size.
-
-    Arguments:
-      num_bytes: Size in number of bytes of the DONT_CARE chunk.
-    """
-    assert num_bytes % self.block_size == 0
-
-    if not self.is_sparse:
-      self._image.seek(0, os.SEEK_END)
-      # This is more efficient that writing NUL bytes since it'll add
-      # a hole on file systems that support sparse files (native
-      # sparse, not Android sparse).
-      self._image.truncate(self._image.tell() + num_bytes)
-      self._read_header()
-      return
-
-    self._num_total_chunks += 1
-    self._num_total_blocks += num_bytes / self.block_size
-    self._update_chunks_and_blocks()
-
-    self._image.seek(self._sparse_end, os.SEEK_SET)
-    self._image.write(struct.pack(ImageChunk.FORMAT,
-                                  ImageChunk.TYPE_DONT_CARE,
-                                  0,  # Reserved
-                                  num_bytes / self.block_size,
-                                  struct.calcsize(ImageChunk.FORMAT)))
-    self._read_header()
-
-  def append_raw(self, data):
-    """Appends a RAW chunk to the sparse file.
-
-    The length of the given data must be a multiple of the block size.
-
-    Arguments:
-      data: Data to append.
-    """
-    assert len(data) % self.block_size == 0
-
-    if not self.is_sparse:
-      self._image.seek(0, os.SEEK_END)
-      self._image.write(data)
-      self._read_header()
-      return
-
-    self._num_total_chunks += 1
-    self._num_total_blocks += len(data) / self.block_size
-    self._update_chunks_and_blocks()
-
-    self._image.seek(self._sparse_end, os.SEEK_SET)
-    self._image.write(struct.pack(ImageChunk.FORMAT,
-                                  ImageChunk.TYPE_RAW,
-                                  0,  # Reserved
-                                  len(data) / self.block_size,
-                                  len(data) +
-                                  struct.calcsize(ImageChunk.FORMAT)))
-    self._image.write(data)
-    self._read_header()
-
-  def append_fill(self, fill_data, size):
-    """Appends a fill chunk to the sparse file.
-
-    The total length of the fill data must be a multiple of the block size.
-
-    Arguments:
-      fill_data: Fill data to append - must be four bytes.
-      size: Number of chunk - must be a multiple of four and the block size.
-    """
-    assert len(fill_data) == 4
-    assert size % 4 == 0
-    assert size % self.block_size == 0
-
-    if not self.is_sparse:
-      self._image.seek(0, os.SEEK_END)
-      self._image.write(fill_data * (size/4))
-      self._read_header()
-      return
-
-    self._num_total_chunks += 1
-    self._num_total_blocks += size / self.block_size
-    self._update_chunks_and_blocks()
-
-    self._image.seek(self._sparse_end, os.SEEK_SET)
-    self._image.write(struct.pack(ImageChunk.FORMAT,
-                                  ImageChunk.TYPE_FILL,
-                                  0,  # Reserved
-                                  size / self.block_size,
-                                  4 + struct.calcsize(ImageChunk.FORMAT)))
-    self._image.write(fill_data)
-    self._read_header()
-
-  def seek(self, offset):
-    """Sets the cursor position for reading from unsparsified file.
-
-    Arguments:
-      offset: Offset to seek to from the beginning of the file.
-    """
-    self._file_pos = offset
-
-  def read(self, size):
-    """Reads data from the unsparsified file.
-
-    This method may return fewer than |size| bytes of data if the end
-    of the file was encountered.
-
-    The file cursor for reading is advanced by the number of bytes
-    read.
-
-    Arguments:
-      size: Number of bytes to read.
-
-    Returns:
-      The data.
-
-    """
-    if not self.is_sparse:
-      self._image.seek(self._file_pos)
-      data = self._image.read(size)
-      self._file_pos += len(data)
-      return data
-
-    # Iterate over all chunks.
-    chunk_idx = bisect.bisect_right(self._chunk_output_offsets,
-                                    self._file_pos) - 1
-    data = bytearray()
-    to_go = size
-    while to_go > 0:
-      chunk = self._chunks[chunk_idx]
-      chunk_pos_offset = self._file_pos - chunk.output_offset
-      chunk_pos_to_go = min(chunk.output_size - chunk_pos_offset, to_go)
-
-      if chunk.chunk_type == ImageChunk.TYPE_RAW:
-        self._image.seek(chunk.input_offset + chunk_pos_offset)
-        data.extend(self._image.read(chunk_pos_to_go))
-      elif chunk.chunk_type == ImageChunk.TYPE_FILL:
-        all_data = chunk.fill_data*(chunk_pos_to_go/len(chunk.fill_data) + 2)
-        offset_mod = chunk_pos_offset % len(chunk.fill_data)
-        data.extend(all_data[offset_mod:(offset_mod + chunk_pos_to_go)])
-      else:
-        assert chunk.chunk_type == ImageChunk.TYPE_DONT_CARE
-        data.extend('\0' * chunk_pos_to_go)
-
-      to_go -= chunk_pos_to_go
-      self._file_pos += chunk_pos_to_go
-      chunk_idx += 1
-      # Generate partial read in case of EOF.
-      if chunk_idx >= len(self._chunks):
-        break
-
-    return data
-
-  def tell(self):
-    """Returns the file cursor position for reading from unsparsified file.
-
-    Returns:
-      The file cursor position for reading.
-    """
-    return self._file_pos
-
-  def truncate(self, size):
-    """Truncates the unsparsified file.
-
-    Arguments:
-      size: Desired size of unsparsified file.
-
-    Raises:
-      ValueError: If desired size isn't a multiple of the block size.
-    """
-    if not self.is_sparse:
-      self._image.truncate(size)
-      self._read_header()
-      return
-
-    if size % self.block_size != 0:
-      raise ValueError('Cannot truncate to a size which is not a multiple '
-                       'of the block size')
-
-    if size == self.image_size:
-      # Trivial where there's nothing to do.
-      return
-    elif size < self.image_size:
-      chunk_idx = bisect.bisect_right(self._chunk_output_offsets, size) - 1
-      chunk = self._chunks[chunk_idx]
-      if chunk.output_offset != size:
-        # Truncation in the middle of a trunk - need to keep the chunk
-        # and modify it.
-        chunk_idx_for_update = chunk_idx + 1
-        num_to_keep = size - chunk.output_offset
-        assert num_to_keep % self.block_size == 0
-        if chunk.chunk_type == ImageChunk.TYPE_RAW:
-          truncate_at = (chunk.chunk_offset +
-                         struct.calcsize(ImageChunk.FORMAT) + num_to_keep)
-          data_sz = num_to_keep
-        elif chunk.chunk_type == ImageChunk.TYPE_FILL:
-          truncate_at = (chunk.chunk_offset +
-                         struct.calcsize(ImageChunk.FORMAT) + 4)
-          data_sz = 4
-        else:
-          assert chunk.chunk_type == ImageChunk.TYPE_DONT_CARE
-          truncate_at = chunk.chunk_offset + struct.calcsize(ImageChunk.FORMAT)
-          data_sz = 0
-        chunk_sz = num_to_keep/self.block_size
-        total_sz = data_sz + struct.calcsize(ImageChunk.FORMAT)
-        self._image.seek(chunk.chunk_offset)
-        self._image.write(struct.pack(ImageChunk.FORMAT,
-                                      chunk.chunk_type,
-                                      0,  # Reserved
-                                      chunk_sz,
-                                      total_sz))
-        chunk.output_size = num_to_keep
-      else:
-        # Truncation at trunk boundary.
-        truncate_at = chunk.chunk_offset
-        chunk_idx_for_update = chunk_idx
-
-      self._num_total_chunks = chunk_idx_for_update
-      self._num_total_blocks = 0
-      for i in range(0, chunk_idx_for_update):
-        self._num_total_blocks += self._chunks[i].output_size / self.block_size
-      self._update_chunks_and_blocks()
-      self._image.truncate(truncate_at)
-
-      # We've modified the file so re-read all data.
-      self._read_header()
-    else:
-      # Truncating to grow - just add a DONT_CARE section.
-      self.append_dont_care(size - self.image_size)
-
-
-class GuidGenerator(object):
-  """An interface for obtaining strings that are GUIDs.
-
-  To facilitate unit testing, this abstraction is used instead of the
-  directly using the uuid module.
-  """
-
-  def dispense_guid(self, partition_number):
-    """Dispenses a GUID.
-
-    Arguments:
-      partition_number: The partition number or 0 if requesting a GUID
-                        for the whole disk.
-
-    Returns:
-      A RFC 4122 compliant GUID, as a string.
-    """
-    return str(uuid.uuid4())
-
-
-class Partition(object):
-  """Object representing a partition.
-
-  Attributes:
-    label: The partition label.
-    offset: Offset of the partition on the disk, or None.
-    size: Size of the partition or None if not specified.
-    grow: True if partition has been requested to use all remaining space.
-    guid: Instance GUID (RFC 4122 compliant) as a string or None or 'auto'
-          if it should be automatically generated.
-    type_guid: Type GUID (RFC 4122 compliant) as a string or a known type
-               from the |KNOWN_TYPE_GUIDS| map.
-    flags: GUID flags.
-    persist: If true, sets bit 0 of flags indicating that this partition should
-             not be deleted by the bootloader.
-    ab: If True, the partition is an A/B partition.
-    ab_expanded: If True, the A/B partitions have been generated.
-    ignore: If True, the partition should not be included in the final output.
-    position: The requested position of the partition or 0 if it doesn't matter.
-  """
-
-  def __init__(self):
-    """Initializer method."""
-    self.label = ''
-    self.offset = None
-    self.size = None
-    self.grow = False
-    self.guid = None
-    self.type_guid = None
-    self.flags = 0
-    self.persist = False
-    self.ab = False
-    self.ab_expanded = False
-    self.ignore = False
-    self.position = 0
-
-  def add_info(self, pobj):
-    """Add information to partition.
-
-    Arguments:
-      pobj: A JSON object with information about the partition.
-    """
-    self.label = pobj[JSON_KEYWORD_PARTITIONS_LABEL]
-    value = pobj.get(JSON_KEYWORD_PARTITIONS_OFFSET)
-    if value is not None:
-      self.offset = ParseSize(value)
-    value = pobj.get(JSON_KEYWORD_PARTITIONS_SIZE)
-    if value is not None:
-      self.size = ParseSize(value)
-    value = pobj.get(JSON_KEYWORD_PARTITIONS_GROW)
-    if value is not None:
-      self.grow = value
-    value = pobj.get(JSON_KEYWORD_PARTITIONS_AB)
-    if value is not None:
-      self.ab = value
-    value = pobj.get(JSON_KEYWORD_PARTITIONS_AB_EXPANDED)
-    if value is not None:
-      self.ab_expanded = value
-    value = pobj.get(JSON_KEYWORD_PARTITIONS_GUID)
-    if value is not None:
-      self.guid = value
-    value = pobj.get(JSON_KEYWORD_PARTITIONS_IGNORE)
-    if value is not None:
-      self.ignore = value
-    value = pobj.get(JSON_KEYWORD_PARTITIONS_TYPE_GUID)
-    if value is not None:
-      self.type_guid = str.lower(str(value))
-      if self.type_guid in KNOWN_TYPE_GUIDS:
-        self.type_guid = KNOWN_TYPE_GUIDS[self.type_guid]
-    value = pobj.get(JSON_KEYWORD_PARTITIONS_FLAGS)
-    if value is not None:
-      self.flags = ParseNumber(value)
-    value = pobj.get(JSON_KEYWORD_PARTITIONS_PERSIST)
-    if value is not None:
-      self.persist = value
-      if value:
-        self.flags = self.flags | 0x1
-    value = pobj.get(JSON_KEYWORD_PARTITIONS_POSITION)
-    if value is not None:
-      self.position = ParseNumber(value)
-
-  def expand_guid(self, guid_generator, partition_number):
-    """Assign instance GUID and type GUID if required.
-
-    Arguments:
-      guid_generator: A GuidGenerator object.
-      partition_number: The partition number, starting from 1.
-    """
-    if not self.guid or self.guid == JSON_KEYWORD_AUTO:
-      self.guid = guid_generator.dispense_guid(partition_number)
-    if not self.type_guid:
-      self.type_guid = KNOWN_TYPE_GUIDS['brillo_vendor_specific']
-
-  def validate(self):
-    """Sanity checks data in object."""
-
-    try:
-      _ = uuid.UUID(str(self.guid))
-    except ValueError:
-      raise ValueError('The string "{}" is not a valid GPT instance GUID on '
-                       'partition with label "{}".'.format(
-                           str(self.guid), self.label))
-
-    try:
-      _ = uuid.UUID(str(self.type_guid))
-    except ValueError:
-      raise ValueError('The string "{}" is not a valid GPT type GUID on '
-                       'partition with label "{}".'.format(
-                           str(self.type_guid), self.label))
-
-    if not self.size:
-      if not self.grow:
-        raise ValueError('Size can only be unset if "grow" is True.')
-
-  def cmp(self, other):
-    """Comparison method."""
-    self_position = self.position
-    if self_position == 0:
-      self_position = GPT_MAX_PART_NUM
-    other_position = other.position
-    if other_position == 0:
-      other_position = GPT_MAX_PART_NUM
-    return cmp(self_position, other_position)
-
-
-class Settings(object):
-  """An object for holding settings.
-
-  Attributes:
-    ab_suffixes: A list of A/B suffixes to use.
-    disk_size: An integer with the disk size in bytes.
-    partitions_offset_begin: An integer with the disk partitions
-                             offset begin size in bytes.
-    disk_alignment: The alignment to use for partitions.
-    disk_guid: The GUID to use for the disk or None or 'auto' if
-               automatically generated.
-  """
-
-  def __init__(self):
-    """Initializer with defaults."""
-    self.ab_suffixes = ['_a', '_b']
-    self.disk_size = None
-    self.partitions_offset_begin = 0
-    self.disk_alignment = 4096
-    self.disk_guid = JSON_KEYWORD_AUTO
-
-
-class BptError(Exception):
-  """Application-specific errors.
-
-  These errors represent issues for which a stack-trace should not be
-  presented.
-
-  Attributes:
-    message: Error message.
-  """
-
-  def __init__(self, message):
-    Exception.__init__(self, message)
-
-
-class BptParsingError(BptError):
-  """Represents an error with an input file.
-
-  Attributes:
-    message: Error message.
-    filename: Name of the file that caused an error.
-  """
-
-  def __init__(self, filename, message):
-    self.filename = filename
-    BptError.__init__(self, message)
-
-
-class Bpt(object):
-  """Business logic for bpttool command-line tool."""
-
-  def _read_json(self, input_files, ab_collapse=True):
-    """Parses a stack of JSON files into suitable data structures.
-
-    The order of files matters as later files can modify partitions
-    declared in earlier files.
-
-    Arguments:
-      input_files: An ordered list of open files.
-      ab_collapse: If True, collapse A/B partitions.
-
-    Returns:
-      A tuple where the first element is a list of Partition objects
-      and the second element is a Settings object.
-
-    Raises:
-      BptParsingError: If an input file has an error.
-    """
-    partitions = []
-    settings = Settings()
-
-    # Read all input file and merge partitions and settings.
-    for f in input_files:
-      try:
-        obj = json.loads(f.read())
-      except ValueError as e:
-        # Unfortunately we can't easily get the line number where the
-        # error occurred.
-        raise BptParsingError(f.name, e.message)
-
-      sobj = obj.get(JSON_KEYWORD_SETTINGS)
-      if sobj:
-        ab_suffixes = sobj.get(JSON_KEYWORD_SETTINGS_AB_SUFFIXES)
-        if ab_suffixes:
-          settings.ab_suffixes = ab_suffixes
-        disk_size = sobj.get(JSON_KEYWORD_SETTINGS_DISK_SIZE)
-        if disk_size:
-          settings.disk_size = ParseSize(disk_size)
-        partitions_offset_begin = sobj.get(
-                JSON_KEYWORD_SETTINGS_PARTITIONS_OFFSET_BEGIN)
-        if partitions_offset_begin:
-          settings.partitions_offset_begin = ParseSize(partitions_offset_begin)
-        disk_alignment = sobj.get(JSON_KEYWORD_SETTINGS_DISK_ALIGNMENT)
-        if disk_alignment:
-          settings.disk_alignment = ParseSize(disk_alignment)
-        disk_guid = sobj.get(JSON_KEYWORD_SETTINGS_DISK_GUID)
-        if disk_guid:
-          settings.disk_guid = disk_guid
-
-      pobjs = obj.get(JSON_KEYWORD_PARTITIONS)
-      if pobjs:
-        for pobj in pobjs:
-          if ab_collapse and pobj.get(JSON_KEYWORD_PARTITIONS_AB_EXPANDED):
-            # If we encounter an expanded partition, unexpand it. This
-            # is to make it possible to use output-JSON (from this tool)
-            # and stack it with an input-JSON file that e.g. specifies
-            # size='256 GiB' for the 'system' partition.
-            label = pobj[JSON_KEYWORD_PARTITIONS_LABEL]
-            if label.endswith(settings.ab_suffixes[0]):
-              # Modify first A/B copy so it doesn't have the trailing suffix.
-              new_len = len(label) - len(settings.ab_suffixes[0])
-              pobj[JSON_KEYWORD_PARTITIONS_LABEL] = label[0:new_len]
-              pobj[JSON_KEYWORD_PARTITIONS_AB_EXPANDED] = False
-              pobj[JSON_KEYWORD_PARTITIONS_GUID] = JSON_KEYWORD_AUTO
-            else:
-              # Skip other A/B copies.
-              continue
-          # Find or create a partition.
-          p = None
-          for candidate in partitions:
-            if candidate.label == pobj[JSON_KEYWORD_PARTITIONS_LABEL]:
-              p = candidate
-              break
-          if not p:
-            p = Partition()
-            partitions.append(p)
-          p.add_info(pobj)
-
-    return partitions, settings
-
-  def _generate_json(self, partitions, settings):
-    """Generate a string with JSON representing partitions and settings.
-
-    Arguments:
-      partitions: A list of Partition objects.
-      settings: A Settings object.
-
-    Returns:
-      A JSON string.
-    """
-    suffixes_str = '['
-    for n in range(0, len(settings.ab_suffixes)):
-      if n != 0:
-        suffixes_str += ', '
-      suffixes_str += '"{}"'.format(settings.ab_suffixes[n])
-    suffixes_str += ']'
-
-    ret = ('{{\n'
-           '  "' + JSON_KEYWORD_SETTINGS + '": {{\n'
-           '    "' + JSON_KEYWORD_SETTINGS_AB_SUFFIXES + '": {},\n'
-           '    "' + JSON_KEYWORD_SETTINGS_PARTITIONS_OFFSET_BEGIN + '": {},\n'
-           '    "' + JSON_KEYWORD_SETTINGS_DISK_SIZE + '": {},\n'
-           '    "' + JSON_KEYWORD_SETTINGS_DISK_ALIGNMENT + '": {},\n'
-           '    "' + JSON_KEYWORD_SETTINGS_DISK_GUID + '": "{}"\n'
-           '  }},\n'
-           '  "' + JSON_KEYWORD_PARTITIONS + '": [\n').format(
-               suffixes_str,
-               settings.partitions_offset_begin,
-               settings.disk_size,
-               settings.disk_alignment,
-               settings.disk_guid)
-
-    for n in range(0, len(partitions)):
-      p = partitions[n]
-      ret += ('    {{\n'
-              '      "' + JSON_KEYWORD_PARTITIONS_LABEL + '": "{}",\n'
-              '      "' + JSON_KEYWORD_PARTITIONS_OFFSET + '": {},\n'
-              '      "' + JSON_KEYWORD_PARTITIONS_SIZE + '": {},\n'
-              '      "' + JSON_KEYWORD_PARTITIONS_GROW + '": {},\n'
-              '      "' + JSON_KEYWORD_PARTITIONS_GUID + '": "{}",\n'
-              '      "' + JSON_KEYWORD_PARTITIONS_TYPE_GUID + '": "{}",\n'
-              '      "' + JSON_KEYWORD_PARTITIONS_FLAGS + '": "{:#018x}",\n'
-              '      "' + JSON_KEYWORD_PARTITIONS_PERSIST + '": {},\n'
-              '      "' + JSON_KEYWORD_PARTITIONS_IGNORE + '": {},\n'
-              '      "' + JSON_KEYWORD_PARTITIONS_AB + '": {},\n'
-              '      "' + JSON_KEYWORD_PARTITIONS_AB_EXPANDED + '": {},\n'
-              '      "' + JSON_KEYWORD_PARTITIONS_POSITION + '": {}\n'
-              '    }}{}\n').format(p.label,
-                                   p.offset,
-                                   p.size,
-                                   'true' if p.grow else 'false',
-                                   p.guid,
-                                   p.type_guid,
-                                   p.flags,
-                                   'true' if p.persist else 'false',
-                                   'true' if p.ignore else 'false',
-                                   'true' if p.ab else 'false',
-                                   'true' if p.ab_expanded else 'false',
-                                   p.position,
-                                   '' if n == len(partitions) - 1 else ',')
-    ret += ('  ]\n'
-            '}\n')
-    return ret
-
-  def _lba_to_chs(self, lba):
-    """Converts LBA to CHS.
-
-    Arguments:
-      lba: The sector number to convert.
-
-    Returns:
-      An array containing the CHS encoded the way it's expected in a
-      MBR partition table.
-    """
-    # See https://en.wikipedia.org/wiki/Cylinder-head-sector
-    num_heads = 255
-    num_sectors = 63
-    # If LBA isn't going to fit in CHS, return maximum CHS values.
-    max_lba = 255*num_heads*num_sectors
-    if lba > max_lba:
-      return [255, 255, 255]
-    c = lba / (num_heads*num_sectors)
-    h = (lba / num_sectors) % num_heads
-    s = lba % num_sectors
-    return [h, (((c>>8) & 0x03)<<6) | (s & 0x3f), c & 0xff]
-
-  def _generate_protective_mbr(self, settings):
-    """Generate Protective MBR.
-
-    Arguments:
-      settings: A Settings object.
-
-    Returns:
-      A string with the binary protective MBR (512 bytes).
-    """
-    # See https://en.wikipedia.org/wiki/Master_boot_record for MBR layout.
-    #
-    # The first partition starts at offset 446 (0x1be).
-    lba_start = 1
-    lba_end = settings.disk_size/DISK_SECTOR_SIZE - 1
-    start_chs = self._lba_to_chs(lba_start)
-    end_chs = self._lba_to_chs(lba_end)
-    pmbr = struct.pack('<446s'     # Bootloader code
-                       'B'         # Status.
-                       'BBB'       # CHS start.
-                       'B'         # Partition type.
-                       'BBB'       # CHS end.
-                       'I'         # LBA of partition start.
-                       'I'         # Number of sectors in partition.
-                       '48x'       # Padding to get to offset 510 (0x1fe).
-                       'BB',       # Boot signature.
-                       '\xfa\xeb\xfe', # cli ; jmp $ (x86)
-                       0x00,
-                       start_chs[0], start_chs[1], start_chs[2],
-                       0xee,       # MBR Partition Type: GPT protective MBR.
-                       end_chs[0], end_chs[1], end_chs[2],
-                       1,          # LBA start
-                       lba_end,
-                       0x55, 0xaa)
-    return pmbr
-
-  def _generate_gpt(self, partitions, settings, primary=True):
-    """Generate GUID Partition Table.
-
-    Arguments:
-      partitions: A list of Partition objects.
-      settings: A Settings object.
-      primary: True to generate primary GPT, False to generate secondary.
-
-    Returns:
-      A string with the binary GUID Partition Table (33*512 bytes).
-    """
-    # See https://en.wikipedia.org/wiki/Master_boot_record for MBR layout.
-    #
-    # The first partition starts at offset 446 (0x1be).
-
-    disk_num_lbas = settings.disk_size/DISK_SECTOR_SIZE
-    if primary:
-      current_lba = 1
-      other_lba = disk_num_lbas - 1
-      partitions_lba = 2
-    else:
-      current_lba = disk_num_lbas - 1
-      other_lba = 1
-      partitions_lba = disk_num_lbas - GPT_NUM_LBAS
-    first_usable_lba = GPT_NUM_LBAS + 1
-    last_usable_lba = disk_num_lbas - GPT_NUM_LBAS - 1
-
-    part_array = []
-    for p in partitions:
-      part_array.append(struct.pack(
-          '<16s'    # Partition type GUID.
-          '16s'     # Partition instance GUID.
-          'QQ'      # First and last LBA.
-          'Q'       # Flags.
-          '72s',    # Name (36 UTF-16LE code units).
-          uuid.UUID(p.type_guid).get_bytes_le(),
-          uuid.UUID(p.guid).get_bytes_le(),
-          p.offset/DISK_SECTOR_SIZE,
-          (p.offset + p.size)/DISK_SECTOR_SIZE - 1,
-          p.flags,
-          p.label.encode(encoding='utf-16le')))
-
-    part_array.append(((128 - len(partitions))*128) * '\0')
-    part_array_str = ''.join(part_array)
-
-    partitions_crc32 = zlib.crc32(part_array_str) % (1<<32)
-
-    header_crc32 = 0
-    while True:
-      header = struct.pack(
-          '<8s'    # Signature.
-          '4B'     # Version.
-          'I'      # Header size.
-          'I'      # CRC32 (must be zero during calculation).
-          'I'      # Reserved (must be zero).
-          'QQ'     # Current and Other LBA.
-          'QQ'     # First and last usable LBA.
-          '16s'    # Disk GUID.
-          'Q'      # Starting LBA of array of partitions.
-          'I'      # Number of partitions.
-          'I'      # Partition entry size, in bytes.
-          'I'      # CRC32 of partition array
-          '420x',  # Padding to get to 512 bytes.
-          'EFI PART',
-          0x00, 0x00, 0x01, 0x00,
-          92,
-          header_crc32,
-          0x00000000,
-          current_lba, other_lba,
-          first_usable_lba, last_usable_lba,
-          uuid.UUID(settings.disk_guid).get_bytes_le(),
-          partitions_lba,
-          128,
-          128,
-          partitions_crc32)
-      if header_crc32 != 0:
-        break
-      header_crc32 = zlib.crc32(header[0:92]) % (1<<32)
-
-    if primary:
-      return header + part_array_str
-    else:
-      return part_array_str + header
-
-  def _generate_gpt_bin(self, partitions, settings):
-    """Generate a bytearray representing partitions and settings.
-
-    The blob will have three partition tables, laid out one after
-    another: 1) Protective MBR (512 bytes); 2) Primary GPT (33*512
-    bytes); and 3) Secondary GPT (33*512 bytes).
-
-    The total size will be 34,304 bytes.
-
-    Arguments:
-      partitions: A list of Partition objects.
-      settings: A Settings object.
-
-    Returns:
-      A bytearray() object.
-    """
-    protective_mbr = self._generate_protective_mbr(settings)
-    primary_gpt = self._generate_gpt(partitions, settings)
-    secondary_gpt = self._generate_gpt(partitions, settings, primary=False)
-    ret = protective_mbr + primary_gpt + secondary_gpt
-    return ret
-
-  def _validate_disk_partitions(self, partitions, disk_size):
-    """Check that a list of partitions have assigned offsets and fits on a
-       disk of a given size.
-
-    This function checks partition offsets and sizes to see if they may fit on
-    a disk image.
-
-    Arguments:
-      partitions: A list of Partition objects.
-      settings: Integer size of disk image.
-
-    Raises:
-      BptError: If checked condition is not satisfied.
-    """
-    for p in partitions:
-      if not p.offset or p.offset < (GPT_NUM_LBAS + 1)*DISK_SECTOR_SIZE:
-        raise BptError('Partition with label "{}" has no offset.'
-                       .format(p.label))
-      if not p.size or p.size < 0:
-        raise BptError('Partition with label "{}" has no size.'
-                        .format(p.label))
-      if (p.offset + p.size) > (disk_size - GPT_NUM_LBAS*DISK_SECTOR_SIZE):
-        raise BptError('Partition with label "{}" exceeds the disk '
-                       'image size.'.format(p.label))
-
-  def make_table(self,
-                 inputs,
-                 ab_suffixes=None,
-                 partitions_offset_begin=None,
-                 disk_size=None,
-                 disk_alignment=None,
-                 disk_guid=None,
-                 guid_generator=None):
-    """Implementation of the 'make_table' command.
-
-    This function takes a list of input partition definition files,
-    flattens them, expands A/B partitions, grows partitions, and lays
-    out partitions according to alignment constraints.
-
-    Arguments:
-      inputs: List of JSON files to parse.
-      ab_suffixes: List of the A/B suffixes (as a comma-separated string)
-                   to use or None to not override.
-      partitions_offset_begin: Size of disk partitions offset
-                               begin or None to not override.
-      disk_size: Size of disk or None to not override.
-      disk_alignment: Disk alignment or None to not override.
-      disk_guid: Disk GUID as a string or None to not override.
-      guid_generator: A GuidGenerator or None to use the default.
-
-    Returns:
-      A tuple where the first argument is a JSON string for the resulting
-      partitions and the second argument is the binary partition tables.
-
-    Raises:
-      BptParsingError: If an input file has an error.
-      BptError: If another application-specific error occurs
-    """
-    partitions, settings = self._read_json(inputs)
-
-    # Command-line arguments override anything specified in input
-    # files.
-    if disk_size:
-      settings.disk_size = int(math.ceil(disk_size))
-    if disk_alignment:
-      settings.disk_alignment = int(disk_alignment)
-    if partitions_offset_begin:
-      settings.partitions_offset_begin = int(partitions_offset_begin)
-    if ab_suffixes:
-      settings.ab_suffixes = ab_suffixes.split(',')
-    if disk_guid:
-      settings.disk_guid = disk_guid
-
-    if not guid_generator:
-      guid_generator = GuidGenerator()
-
-    # We need to know the disk size. Also round it down to ensure it's
-    # a multiple of the sector size.
-    if not settings.disk_size:
-      raise BptError('Disk size not specified. Use --disk_size option '
-                     'or specify it in an input file.\n')
-    settings.disk_size = RoundToMultiple(settings.disk_size,
-                                         DISK_SECTOR_SIZE,
-                                         round_down=True)
-
-    # Alignment must be divisible by disk sector size.
-    if settings.disk_alignment % DISK_SECTOR_SIZE != 0:
-      raise BptError(
-          'Disk alignment size of {} is not divisible by {}.\n'.format(
-              settings.disk_alignment, DISK_SECTOR_SIZE))
-
-    if settings.partitions_offset_begin != 0:
-      # Disk partitions offset begin size must be
-      # divisible by disk sector size.
-      if settings.partitions_offset_begin % settings.disk_alignment != 0:
-        raise BptError(
-            'Disk Partitions offset begin size of {} '
-            'is not divisible by {}.\n'.format(
-                settings.partitions_offset_begin, settings.disk_alignment))
-      settings.partitions_offset_begin = max(settings.partitions_offset_begin,
-                                           DISK_SECTOR_SIZE*(1 + GPT_NUM_LBAS))
-      settings.partitions_offset_begin = RoundToMultiple(
-          settings.partitions_offset_begin, settings.disk_alignment)
-
-    # Expand A/B partitions and skip ignored partitions.
-    expanded_partitions = []
-    for p in partitions:
-      if p.ignore:
-        continue
-      if p.ab and not p.ab_expanded:
-        p.ab_expanded = True
-        for suffix in settings.ab_suffixes:
-          new_p = copy.deepcopy(p)
-          new_p.label += suffix
-          expanded_partitions.append(new_p)
-      else:
-        expanded_partitions.append(p)
-    partitions = expanded_partitions
-
-    # Expand Disk GUID if needed.
-    if not settings.disk_guid or settings.disk_guid == JSON_KEYWORD_AUTO:
-      settings.disk_guid = guid_generator.dispense_guid(0)
-
-    # Sort according to 'position' attribute.
-    partitions = sorted(partitions, cmp=lambda x, y: x.cmp(y))
-
-    # Automatically generate GUIDs if the GUID is unset or set to
-    # 'auto'. Also validate the rest of the fields.
-    part_no = 1
-    for p in partitions:
-      p.expand_guid(guid_generator, part_no)
-      p.validate()
-      part_no += 1
-
-    # Idenfify partition to grow and lay out partitions, ignoring the
-    # one to grow. This way we can figure out how much space is left.
-    #
-    # Right now we only support a single 'grow' partition but we could
-    # support more in the future by splitting up the available bytes
-    # between them.
-    grow_part = None
-    # offset minimal size: DISK_SECTOR_SIZE*(1 + GPT_NUM_LBAS)
-    offset = max(settings.partitions_offset_begin,
-                 DISK_SECTOR_SIZE*(1 + GPT_NUM_LBAS))
-    for p in partitions:
-      if p.grow:
-        if grow_part:
-          raise BptError('Only a single partition can be automatically '
-                         'grown.\n')
-        grow_part = p
-      else:
-        # Ensure size is a multiple of DISK_SECTOR_SIZE by rounding up
-        # (user may specify it as e.g. "1.5 GB" which is not divisible
-        # by 512).
-        p.size = RoundToMultiple(p.size, DISK_SECTOR_SIZE)
-        # Align offset to disk alignment.
-        offset = RoundToMultiple(offset, settings.disk_alignment)
-        offset += p.size
-
-    # After laying out (respecting alignment) all non-grow
-    # partitions, check that the given disk size is big enough.
-    if offset > settings.disk_size - DISK_SECTOR_SIZE*GPT_NUM_LBAS:
-      raise BptError('Disk size of {} bytes is too small for partitions '
-                     'totaling {} bytes.\n'.format(
-                         settings.disk_size, offset))
-
-    # If we have a grow partition, it'll starts at the next
-    # available alignment offset and we can calculate its size as
-    # follows.
-    if grow_part:
-      offset = RoundToMultiple(offset, settings.disk_alignment)
-      grow_part.size = RoundToMultiple(
-          settings.disk_size - DISK_SECTOR_SIZE*GPT_NUM_LBAS - offset,
-          settings.disk_alignment,
-          round_down=True)
-      if grow_part.size < DISK_SECTOR_SIZE:
-        raise BptError('Not enough space for partition "{}" to be '
-                       'automatically grown.\n'.format(grow_part.label))
-
-    # Now we can assign partition start offsets for all partitions,
-    # including the grow partition.
-    # offset minimal size: DISK_SECTOR_SIZE*(1 + GPT_NUM_LBAS)
-    offset = max(settings.partitions_offset_begin,
-                 DISK_SECTOR_SIZE*(1 + GPT_NUM_LBAS))
-    for p in partitions:
-      # Align offset.
-      offset = RoundToMultiple(offset, settings.disk_alignment)
-      p.offset = offset
-      offset += p.size
-    assert offset <= settings.disk_size - DISK_SECTOR_SIZE*GPT_NUM_LBAS
-
-    json_str = self._generate_json(partitions, settings)
-
-    gpt_bin = self._generate_gpt_bin(partitions, settings)
-
-    return json_str, gpt_bin
-
-  def make_disk_image(self, output, bpt, images, allow_empty_partitions=False):
-    """Implementation of the 'make_disk_image' command.
-
-    This function takes in a list of partitions images and a bpt file
-    for the purpose of creating a raw disk image with a protective MBR,
-    primary and secondary GPT, and content for each partition as specified.
-
-    Arguments:
-      output: Output file where disk image is to be written to.
-      bpt: BPT JSON file to parse.
-      images: List of partition image paths to be combined (as specified by
-              bpt).  Each element is of the form.
-              'PARTITION_NAME:/PATH/TO/PARTITION_IMAGE'
-      allow_empty_partitions: If True, partitions defined in |bpt| need not to
-                              be present in |images|. Otherwise an exception is
-                              thrown if a partition is referenced in |bpt| but
-                              not in |images|.
-
-    Raises:
-      BptParsingError: If an image file has an error.
-      BptError: If another application-specific error occurs.
-    """
-    # Generate partition list and settings.
-    partitions, settings = self._read_json([bpt], ab_collapse=False)
-
-    # Validated partition sizes and offsets.
-    self._validate_disk_partitions(partitions, settings.disk_size)
-
-    # Sort according to 'offset' attribute.
-    partitions = sorted(partitions, cmp=lambda x, y: cmp(x.offset, y.offset))
-
-    # Create necessary tables.
-    protective_mbr = self._generate_protective_mbr(settings)
-    primary_gpt = self._generate_gpt(partitions, settings)
-    secondary_gpt = self._generate_gpt(partitions, settings, primary=False)
-
-    # Start at 0 offset for mbr and primary gpt.
-    output.seek(0)
-    output.write(protective_mbr)
-    output.write(primary_gpt)
-
-    # Create mapping of partition name to partition image file.
-    image_file_names = {}
-    try:
-      for name_path in images:
-        name, path = name_path.split(":")
-        image_file_names[name] = path
-    except ValueError as e:
-      raise BptParsingError(name_path, 'Bad image argument {}.'.format(
-                            images[i]))
-
-    # Read image and insert in correct offset.
-    for p in partitions:
-      if p.label not in image_file_names:
-        if allow_empty_partitions:
-          continue
-        else:
-          raise BptParsingError(bpt.name, 'No content specified for partition'
-                                ' with label {}'.format(p.label))
-
-      input_image = ImageHandler(image_file_names[p.label])
-      output.seek(p.offset)
-      partition_blob = input_image.read(p.size)
-      output.write(partition_blob)
-
-    # Put secondary GPT and end of disk.
-    output.seek(settings.disk_size - len(secondary_gpt))
-    output.write(secondary_gpt)
-
-  def query_partition(self, input_file, part_label, query_type, ab_collapse):
-    """Implementation of the 'query_partition' command.
-
-    This reads the partition definition file given by |input_file| and
-    returns information of type |query_type| for the partition with
-    label |part_label|.
-
-    Arguments:
-      input_file: A JSON file to parse.
-      part_label: Label of partition to query information about.
-      query_type: The information to query, see |QUERY_PARTITION_TYPES|.
-      ab_collapse: If True, collapse A/B partitions.
-
-    Returns:
-      The requested information as a string or None if there is no
-      partition with the given label.
-
-    Raises:
-      BptParsingError: If an input file has an error.
-      BptError: If another application-specific error occurs
-    """
-
-    partitions, _ = self._read_json([input_file], ab_collapse)
-
-    part = None
-    for p in partitions:
-      if p.label == part_label:
-        part = p
-        break
-
-    if not part:
-      return None
-
-    value = part.__dict__.get(query_type)
-    # Print out flags as a hex-value.
-    if query_type == 'flags':
-      return '{:#018x}'.format(value)
-    return str(value)
-
-
-class BptTool(object):
-  """Object for bpttool command-line tool."""
-
-  def __init__(self):
-    """Initializer method."""
-    self.bpt = Bpt()
-
-  def run(self, argv):
-    """Command-line processor.
-
-    Arguments:
-      argv: Pass sys.argv from main.
-    """
-    parser = argparse.ArgumentParser()
-    subparsers = parser.add_subparsers(title='subcommands')
-
-    sub_parser = subparsers.add_parser(
-        'version',
-        help='Prints version of bpttool.')
-    sub_parser.set_defaults(func=self.version)
-
-    sub_parser = subparsers.add_parser(
-        'make_table',
-        help='Lays out partitions and creates partition table.')
-    sub_parser.add_argument('--input',
-                            help='Path to partition definition file.',
-                            type=argparse.FileType('r'),
-                            action='append')
-    sub_parser.add_argument('--ab_suffixes',
-                            help='Set or override A/B suffixes.')
-    sub_parser.add_argument('--partitions_offset_begin',
-                            help='Set or override disk partitions '
-                                 'offset begin size.',
-                            type=ParseSize)
-    sub_parser.add_argument('--disk_size',
-                            help='Set or override disk size.',
-                            type=ParseSize)
-    sub_parser.add_argument('--disk_alignment',
-                            help='Set or override disk alignment.',
-                            type=ParseSize)
-    sub_parser.add_argument('--disk_guid',
-                            help='Set or override disk GUID.',
-                            type=ParseGuid)
-    sub_parser.add_argument('--output_json',
-                            help='JSON output file name.',
-                            type=argparse.FileType('w'))
-    sub_parser.add_argument('--output_gpt',
-                            help='Output file name for MBR/GPT/GPT file.',
-                            type=argparse.FileType('w'))
-    sub_parser.set_defaults(func=self.make_table)
-
-    sub_parser = subparsers.add_parser(
-        'make_disk_image',
-        help='Creates disk image for loaded with partitions.')
-    sub_parser.add_argument('--output',
-                            help='Path to image output.',
-                            type=argparse.FileType('w'),
-                            required=True)
-    sub_parser.add_argument('--input',
-                            help='Path to bpt file input.',
-                            type=argparse.FileType('r'),
-                            required=True)
-    sub_parser.add_argument('--image',
-                            help='Partition name and path to image file.',
-                            metavar='PARTITION_NAME:PATH',
-                            action='append')
-    sub_parser.add_argument('--allow_empty_partitions',
-                            help='Allow skipping partitions in bpt file.',
-                            action='store_true')
-    sub_parser.set_defaults(func=self.make_disk_image)
-
-    sub_parser = subparsers.add_parser(
-        'query_partition',
-        help='Looks up informtion about a partition.')
-    sub_parser.add_argument('--input',
-                            help='Path to partition definition file.',
-                            type=argparse.FileType('r'),
-                            required=True)
-    sub_parser.add_argument('--label',
-                            help='Label of partition to look up.',
-                            required=True)
-    sub_parser.add_argument('--ab_collapse',
-                            help='Collapse A/B partitions.',
-                            action='store_true')
-    sub_parser.add_argument('--type',
-                            help='Type of information to look up.',
-                            choices=QUERY_PARTITION_TYPES,
-                            required=True)
-    sub_parser.set_defaults(func=self.query_partition)
-
-    args = parser.parse_args(argv[1:])
-    args.func(args)
-
-  def version(self, _):
-    """Implements the 'version' sub-command."""
-    print '{}.{}'.format(BPT_VERSION_MAJOR, BPT_VERSION_MINOR)
-
-  def query_partition(self, args):
-    """Implements the 'query_partition' sub-command."""
-    try:
-      result = self.bpt.query_partition(args.input,
-                                        args.label,
-                                        args.type,
-                                        args.ab_collapse)
-    except BptParsingError as e:
-      sys.stderr.write('{}: Error parsing: {}\n'.format(e.filename, e.message))
-      sys.exit(1)
-    except BptError as e:
-      sys.stderr.write('{}\n'.format(e.message))
-      sys.exit(1)
-
-    if not result:
-      sys.stderr.write('No partition with label "{}".\n'.format(args.label))
-      sys.exit(1)
-
-    print result
-
-  def make_table(self, args):
-    """Implements the 'make_table' sub-command."""
-    if not args.input:
-      sys.stderr.write('Option --input is required one or more times.\n')
-      sys.exit(1)
-
-    try:
-      (json_str, gpt_bin) = self.bpt.make_table(args.input, args.ab_suffixes,
-                                                args.partitions_offset_begin,
-                                                args.disk_size,
-                                                args.disk_alignment,
-                                                args.disk_guid)
-    except BptParsingError as e:
-      sys.stderr.write('{}: Error parsing: {}\n'.format(e.filename, e.message))
-      sys.exit(1)
-    except BptError as e:
-      sys.stderr.write('{}\n'.format(e.message))
-      sys.exit(1)
-
-    if args.output_json:
-      args.output_json.write(json_str)
-    if args.output_gpt:
-      args.output_gpt.write(gpt_bin)
-
-  def make_disk_image(self, args):
-    """Implements the 'make_disk_image' sub-command."""
-    if not args.input:
-      sys.stderr.write('Option --input is required.\n')
-      sys.exit(1)
-    if not args.output:
-      sys.stderr.write('Option --ouptut is required.\n')
-      sys.exit(1)
-
-    try:
-      self.bpt.make_disk_image(args.output,
-                               args.input,
-                               args.image,
-                               args.allow_empty_partitions)
-    except BptParsingError as e:
-      sys.stderr.write('{}: Error parsing: {}\n'.format(e.filename, e.message))
-      sys.exit(1)
-    except 'BptError' as e:
-      sys.stderr.write('{}\n'.format(e.message))
-      sys.exit(1)
-
-if __name__ == '__main__':
-  tool = BptTool()
-  tool.run(sys.argv)
diff --git a/test/base.bpt b/test/base.bpt
deleted file mode 100644
index aa1e445..0000000
--- a/test/base.bpt
+++ /dev/null
@@ -1,31 +0,0 @@
-{
-    "partitions": [
-        {
-            "ab": true,
-            "label": "boot",
-            "size": "32 MiB",
-            "guid": "auto",
-            "type_guid": "brillo_boot"
-        },
-        {
-            "ab": true,
-            "label": "system",
-            "size": "512 MiB",
-            "guid": "auto",
-            "type_guid": "brillo_system"
-        },
-        {
-            "ab": true,
-            "label": "odm",
-            "size": "1 GiB",
-            "guid": "auto",
-            "type_guid": "brillo_odm"
-        },
-        {
-            "label": "userdata",
-            "grow": true,
-            "guid": "auto",
-            "type_guid": "brillo_userdata"
-        }
-    ]
-}
diff --git a/test/change_odm_size.bpt b/test/change_odm_size.bpt
deleted file mode 100644
index e55cdcd..0000000
--- a/test/change_odm_size.bpt
+++ /dev/null
@@ -1,8 +0,0 @@
-{
-    "partitions": [
-        {
-            "label": "odm",
-            "size": "2 GiB"
-        }
-    ]
-}
diff --git a/test/change_system_size.bpt b/test/change_system_size.bpt
deleted file mode 100644
index 8d2b4b8..0000000
--- a/test/change_system_size.bpt
+++ /dev/null
@@ -1,8 +0,0 @@
-{
-    "partitions": [
-        {
-            "label": "system",
-            "size": "256 MiB"
-        }
-    ]
-}
diff --git a/test/change_userdata_flags.bpt b/test/change_userdata_flags.bpt
deleted file mode 100644
index 8299f28..0000000
--- a/test/change_userdata_flags.bpt
+++ /dev/null
@@ -1,8 +0,0 @@
-{
-    "partitions": [
-        {
-            "label": "userdata",
-            "flags": "0x0420000000000000"
-        }
-    ]
-}
diff --git a/test/disable_ab.bpt b/test/disable_ab.bpt
deleted file mode 100644
index eb1d97e..0000000
--- a/test/disable_ab.bpt
+++ /dev/null
@@ -1,16 +0,0 @@
-{
-    "partitions": [
-        {
-            "ab": false,
-            "label": "boot"
-        },
-        {
-            "ab": false,
-            "label": "system"
-        },
-        {
-            "ab": false,
-            "label": "odm"
-        }
-    ]
-}
diff --git a/test/expected_json_alignment.bpt b/test/expected_json_alignment.bpt
deleted file mode 100644
index f34b9ed..0000000
--- a/test/expected_json_alignment.bpt
+++ /dev/null
@@ -1,109 +0,0 @@
-{
-  "settings": {
-    "ab_suffixes": ["_a", "_b"],
-    "partitions_offset_begin": 0,
-    "disk_size": 10737418240,
-    "disk_alignment": 1048576,
-    "disk_guid": "01234567-89ab-cdef-0123-000000000000"
-  },
-  "partitions": [
-    {
-      "label": "boot_a",
-      "offset": 1048576,
-      "size": 33554432,
-      "grow": false,
-      "guid": "01234567-89ab-cdef-0123-000000000001",
-      "type_guid": "bb499290-b57e-49f6-bf41-190386693794",
-      "flags": "0x0000000000000000",
-      "persist": false,
-      "ignore": false,
-      "ab": true,
-      "ab_expanded": true,
-      "position": 0
-    },
-    {
-      "label": "boot_b",
-      "offset": 34603008,
-      "size": 33554432,
-      "grow": false,
-      "guid": "01234567-89ab-cdef-0123-000000000002",
-      "type_guid": "bb499290-b57e-49f6-bf41-190386693794",
-      "flags": "0x0000000000000000",
-      "persist": false,
-      "ignore": false,
-      "ab": true,
-      "ab_expanded": true,
-      "position": 0
-    },
-    {
-      "label": "system_a",
-      "offset": 68157440,
-      "size": 536870912,
-      "grow": false,
-      "guid": "01234567-89ab-cdef-0123-000000000003",
-      "type_guid": "0f2778c4-5cc1-4300-8670-6c88b7e57ed6",
-      "flags": "0x0000000000000000",
-      "persist": false,
-      "ignore": false,
-      "ab": true,
-      "ab_expanded": true,
-      "position": 0
-    },
-    {
-      "label": "system_b",
-      "offset": 605028352,
-      "size": 536870912,
-      "grow": false,
-      "guid": "01234567-89ab-cdef-0123-000000000004",
-      "type_guid": "0f2778c4-5cc1-4300-8670-6c88b7e57ed6",
-      "flags": "0x0000000000000000",
-      "persist": false,
-      "ignore": false,
-      "ab": true,
-      "ab_expanded": true,
-      "position": 0
-    },
-    {
-      "label": "odm_a",
-      "offset": 1141899264,
-      "size": 1073741824,
-      "grow": false,
-      "guid": "01234567-89ab-cdef-0123-000000000005",
-      "type_guid": "e99d84d7-2c1b-44cf-8c58-effae2dc2558",
-      "flags": "0x0000000000000000",
-      "persist": false,
-      "ignore": false,
-      "ab": true,
-      "ab_expanded": true,
-      "position": 0
-    },
-    {
-      "label": "odm_b",
-      "offset": 2215641088,
-      "size": 1073741824,
-      "grow": false,
-      "guid": "01234567-89ab-cdef-0123-000000000006",
-      "type_guid": "e99d84d7-2c1b-44cf-8c58-effae2dc2558",
-      "flags": "0x0000000000000000",
-      "persist": false,
-      "ignore": false,
-      "ab": true,
-      "ab_expanded": true,
-      "position": 0
-    },
-    {
-      "label": "userdata",
-      "offset": 3289382912,
-      "size": 7446986752,
-      "grow": true,
-      "guid": "01234567-89ab-cdef-0123-000000000007",
-      "type_guid": "0bb7e6ed-4424-49c0-9372-7fbab465ab4c",
-      "flags": "0x0000000000000000",
-      "persist": false,
-      "ignore": false,
-      "ab": false,
-      "ab_expanded": false,
-      "position": 0
-    }
-  ]
-}
diff --git a/test/expected_json_base.bpt b/test/expected_json_base.bpt
deleted file mode 100644
index 2a135f1..0000000
--- a/test/expected_json_base.bpt
+++ /dev/null
@@ -1,109 +0,0 @@
-{
-  "settings": {
-    "ab_suffixes": ["_a", "_b"],
-    "partitions_offset_begin": 0,
-    "disk_size": 10737418240,
-    "disk_alignment": 4096,
-    "disk_guid": "01234567-89ab-cdef-0123-000000000000"
-  },
-  "partitions": [
-    {
-      "label": "boot_a",
-      "offset": 20480,
-      "size": 33554432,
-      "grow": false,
-      "guid": "01234567-89ab-cdef-0123-000000000001",
-      "type_guid": "bb499290-b57e-49f6-bf41-190386693794",
-      "flags": "0x0000000000000000",
-      "persist": false,
-      "ignore": false,
-      "ab": true,
-      "ab_expanded": true,
-      "position": 0
-    },
-    {
-      "label": "boot_b",
-      "offset": 33574912,
-      "size": 33554432,
-      "grow": false,
-      "guid": "01234567-89ab-cdef-0123-000000000002",
-      "type_guid": "bb499290-b57e-49f6-bf41-190386693794",
-      "flags": "0x0000000000000000",
-      "persist": false,
-      "ignore": false,
-      "ab": true,
-      "ab_expanded": true,
-      "position": 0
-    },
-    {
-      "label": "system_a",
-      "offset": 67129344,
-      "size": 536870912,
-      "grow": false,
-      "guid": "01234567-89ab-cdef-0123-000000000003",
-      "type_guid": "0f2778c4-5cc1-4300-8670-6c88b7e57ed6",
-      "flags": "0x0000000000000000",
-      "persist": false,
-      "ignore": false,
-      "ab": true,
-      "ab_expanded": true,
-      "position": 0
-    },
-    {
-      "label": "system_b",
-      "offset": 604000256,
-      "size": 536870912,
-      "grow": false,
-      "guid": "01234567-89ab-cdef-0123-000000000004",
-      "type_guid": "0f2778c4-5cc1-4300-8670-6c88b7e57ed6",
-      "flags": "0x0000000000000000",
-      "persist": false,
-      "ignore": false,
-      "ab": true,
-      "ab_expanded": true,
-      "position": 0
-    },
-    {
-      "label": "odm_a",
-      "offset": 1140871168,
-      "size": 1073741824,
-      "grow": false,
-      "guid": "01234567-89ab-cdef-0123-000000000005",
-      "type_guid": "e99d84d7-2c1b-44cf-8c58-effae2dc2558",
-      "flags": "0x0000000000000000",
-      "persist": false,
-      "ignore": false,
-      "ab": true,
-      "ab_expanded": true,
-      "position": 0
-    },
-    {
-      "label": "odm_b",
-      "offset": 2214612992,
-      "size": 1073741824,
-      "grow": false,
-      "guid": "01234567-89ab-cdef-0123-000000000006",
-      "type_guid": "e99d84d7-2c1b-44cf-8c58-effae2dc2558",
-      "flags": "0x0000000000000000",
-      "persist": false,
-      "ignore": false,
-      "ab": true,
-      "ab_expanded": true,
-      "position": 0
-    },
-    {
-      "label": "userdata",
-      "offset": 3288354816,
-      "size": 7449042944,
-      "grow": true,
-      "guid": "01234567-89ab-cdef-0123-000000000007",
-      "type_guid": "0bb7e6ed-4424-49c0-9372-7fbab465ab4c",
-      "flags": "0x0000000000000000",
-      "persist": false,
-      "ignore": false,
-      "ab": false,
-      "ab_expanded": false,
-      "position": 0
-    }
-  ]
-}
diff --git a/test/expected_json_disk_guid.bpt b/test/expected_json_disk_guid.bpt
deleted file mode 100644
index 9246ebf..0000000
--- a/test/expected_json_disk_guid.bpt
+++ /dev/null
@@ -1,109 +0,0 @@
-{
-  "settings": {
-    "ab_suffixes": ["_a", "_b"],
-    "partitions_offset_begin": 0,
-    "disk_size": 10737418240,
-    "disk_alignment": 4096,
-    "disk_guid": "01234567-89ab-cdef-0123-00000000002a"
-  },
-  "partitions": [
-    {
-      "label": "boot_a",
-      "offset": 20480,
-      "size": 33554432,
-      "grow": false,
-      "guid": "01234567-89ab-cdef-0123-000000000001",
-      "type_guid": "bb499290-b57e-49f6-bf41-190386693794",
-      "flags": "0x0000000000000000",
-      "persist": false,
-      "ignore": false,
-      "ab": true,
-      "ab_expanded": true,
-      "position": 0
-    },
-    {
-      "label": "boot_b",
-      "offset": 33574912,
-      "size": 33554432,
-      "grow": false,
-      "guid": "01234567-89ab-cdef-0123-000000000002",
-      "type_guid": "bb499290-b57e-49f6-bf41-190386693794",
-      "flags": "0x0000000000000000",
-      "persist": false,
-      "ignore": false,
-      "ab": true,
-      "ab_expanded": true,
-      "position": 0
-    },
-    {
-      "label": "system_a",
-      "offset": 67129344,
-      "size": 536870912,
-      "grow": false,
-      "guid": "01234567-89ab-cdef-0123-000000000003",
-      "type_guid": "0f2778c4-5cc1-4300-8670-6c88b7e57ed6",
-      "flags": "0x0000000000000000",
-      "persist": false,
-      "ignore": false,
-      "ab": true,
-      "ab_expanded": true,
-      "position": 0
-    },
-    {
-      "label": "system_b",
-      "offset": 604000256,
-      "size": 536870912,
-      "grow": false,
-      "guid": "01234567-89ab-cdef-0123-000000000004",
-      "type_guid": "0f2778c4-5cc1-4300-8670-6c88b7e57ed6",
-      "flags": "0x0000000000000000",
-      "persist": false,
-      "ignore": false,
-      "ab": true,
-      "ab_expanded": true,
-      "position": 0
-    },
-    {
-      "label": "odm_a",
-      "offset": 1140871168,
-      "size": 1073741824,
-      "grow": false,
-      "guid": "01234567-89ab-cdef-0123-000000000005",
-      "type_guid": "e99d84d7-2c1b-44cf-8c58-effae2dc2558",
-      "flags": "0x0000000000000000",
-      "persist": false,
-      "ignore": false,
-      "ab": true,
-      "ab_expanded": true,
-      "position": 0
-    },
-    {
-      "label": "odm_b",
-      "offset": 2214612992,
-      "size": 1073741824,
-      "grow": false,
-      "guid": "01234567-89ab-cdef-0123-000000000006",
-      "type_guid": "e99d84d7-2c1b-44cf-8c58-effae2dc2558",
-      "flags": "0x0000000000000000",
-      "persist": false,
-      "ignore": false,
-      "ab": true,
-      "ab_expanded": true,
-      "position": 0
-    },
-    {
-      "label": "userdata",
-      "offset": 3288354816,
-      "size": 7449042944,
-      "grow": true,
-      "guid": "01234567-89ab-cdef-0123-000000000007",
-      "type_guid": "0bb7e6ed-4424-49c0-9372-7fbab465ab4c",
-      "flags": "0x0000000000000000",
-      "persist": false,
-      "ignore": false,
-      "ab": false,
-      "ab_expanded": false,
-      "position": 0
-    }
-  ]
-}
diff --git a/test/expected_json_partitions_offset_begin.bpt b/test/expected_json_partitions_offset_begin.bpt
deleted file mode 100644
index 4964dab..0000000
--- a/test/expected_json_partitions_offset_begin.bpt
+++ /dev/null
@@ -1,109 +0,0 @@
-{
-  "settings": {
-    "ab_suffixes": ["_a", "_b"],
-    "partitions_offset_begin": 1048576,
-    "disk_size": 10737418240,
-    "disk_alignment": 4096,
-    "disk_guid": "01234567-89ab-cdef-0123-000000000000"
-  },
-  "partitions": [
-    {
-      "label": "boot_a",
-      "offset": 1048576,
-      "size": 33554432,
-      "grow": false,
-      "guid": "01234567-89ab-cdef-0123-000000000001",
-      "type_guid": "bb499290-b57e-49f6-bf41-190386693794",
-      "flags": "0x0000000000000000",
-      "persist": false,
-      "ignore": false,
-      "ab": true,
-      "ab_expanded": true,
-      "position": 0
-    },
-    {
-      "label": "boot_b",
-      "offset": 34603008,
-      "size": 33554432,
-      "grow": false,
-      "guid": "01234567-89ab-cdef-0123-000000000002",
-      "type_guid": "bb499290-b57e-49f6-bf41-190386693794",
-      "flags": "0x0000000000000000",
-      "persist": false,
-      "ignore": false,
-      "ab": true,
-      "ab_expanded": true,
-      "position": 0
-    },
-    {
-      "label": "system_a",
-      "offset": 68157440,
-      "size": 536870912,
-      "grow": false,
-      "guid": "01234567-89ab-cdef-0123-000000000003",
-      "type_guid": "0f2778c4-5cc1-4300-8670-6c88b7e57ed6",
-      "flags": "0x0000000000000000",
-      "persist": false,
-      "ignore": false,
-      "ab": true,
-      "ab_expanded": true,
-      "position": 0
-    },
-    {
-      "label": "system_b",
-      "offset": 605028352,
-      "size": 536870912,
-      "grow": false,
-      "guid": "01234567-89ab-cdef-0123-000000000004",
-      "type_guid": "0f2778c4-5cc1-4300-8670-6c88b7e57ed6",
-      "flags": "0x0000000000000000",
-      "persist": false,
-      "ignore": false,
-      "ab": true,
-      "ab_expanded": true,
-      "position": 0
-    },
-    {
-      "label": "odm_a",
-      "offset": 1141899264,
-      "size": 1073741824,
-      "grow": false,
-      "guid": "01234567-89ab-cdef-0123-000000000005",
-      "type_guid": "e99d84d7-2c1b-44cf-8c58-effae2dc2558",
-      "flags": "0x0000000000000000",
-      "persist": false,
-      "ignore": false,
-      "ab": true,
-      "ab_expanded": true,
-      "position": 0
-    },
-    {
-      "label": "odm_b",
-      "offset": 2215641088,
-      "size": 1073741824,
-      "grow": false,
-      "guid": "01234567-89ab-cdef-0123-000000000006",
-      "type_guid": "e99d84d7-2c1b-44cf-8c58-effae2dc2558",
-      "flags": "0x0000000000000000",
-      "persist": false,
-      "ignore": false,
-      "ab": true,
-      "ab_expanded": true,
-      "position": 0
-    },
-    {
-      "label": "userdata",
-      "offset": 3289382912,
-      "size": 7448014848,
-      "grow": true,
-      "guid": "01234567-89ab-cdef-0123-000000000007",
-      "type_guid": "0bb7e6ed-4424-49c0-9372-7fbab465ab4c",
-      "flags": "0x0000000000000000",
-      "persist": false,
-      "ignore": false,
-      "ab": false,
-      "ab_expanded": false,
-      "position": 0
-    }
-  ]
-}
diff --git a/test/expected_json_persist.bpt b/test/expected_json_persist.bpt
deleted file mode 100644
index 0d3b263..0000000
--- a/test/expected_json_persist.bpt
+++ /dev/null
@@ -1,81 +0,0 @@
-{
-  "settings": {
-    "ab_suffixes": ["_a", "_b"],
-    "partitions_offset_begin": 0,
-    "disk_size": 10737418240,
-    "disk_alignment": 4096,
-    "disk_guid": "01234567-89ab-cdef-0123-000000000000"
-  },
-  "partitions": [
-    {
-      "label": "no_persist",
-      "offset": 20480,
-      "size": 134217728,
-      "grow": false,
-      "guid": "01234567-89ab-cdef-0123-000000000001",
-      "type_guid": "bb499290-b57e-49f6-bf41-190386693794",
-      "flags": "0x0000000000000000",
-      "persist": false,
-      "ignore": false,
-      "ab": false,
-      "ab_expanded": false,
-      "position": 1
-    },
-    {
-      "label": "false_persist",
-      "offset": 134238208,
-      "size": 134217728,
-      "grow": false,
-      "guid": "01234567-89ab-cdef-0123-000000000002",
-      "type_guid": "bb499290-b57e-49f6-bf41-190386693794",
-      "flags": "0x0000000000000000",
-      "persist": false,
-      "ignore": false,
-      "ab": false,
-      "ab_expanded": false,
-      "position": 2
-    },
-    {
-      "label": "true_persist",
-      "offset": 268455936,
-      "size": 134217728,
-      "grow": false,
-      "guid": "01234567-89ab-cdef-0123-000000000003",
-      "type_guid": "bb499290-b57e-49f6-bf41-190386693794",
-      "flags": "0x0000000000000001",
-      "persist": true,
-      "ignore": false,
-      "ab": false,
-      "ab_expanded": false,
-      "position": 3
-    },
-    {
-      "label": "false_persist_with_flags",
-      "offset": 402673664,
-      "size": 134217728,
-      "grow": false,
-      "guid": "01234567-89ab-cdef-0123-000000000004",
-      "type_guid": "bb499290-b57e-49f6-bf41-190386693794",
-      "flags": "0x0000000000000010",
-      "persist": false,
-      "ignore": false,
-      "ab": false,
-      "ab_expanded": false,
-      "position": 4
-    },
-    {
-      "label": "true_persist_with_flags",
-      "offset": 536891392,
-      "size": 134217728,
-      "grow": false,
-      "guid": "01234567-89ab-cdef-0123-000000000005",
-      "type_guid": "bb499290-b57e-49f6-bf41-190386693794",
-      "flags": "0x0000000000000011",
-      "persist": true,
-      "ignore": false,
-      "ab": false,
-      "ab_expanded": false,
-      "position": 5
-    }
-  ]
-}
diff --git a/test/expected_json_size.bpt b/test/expected_json_size.bpt
deleted file mode 100644
index d31ab10..0000000
--- a/test/expected_json_size.bpt
+++ /dev/null
@@ -1,109 +0,0 @@
-{
-  "settings": {
-    "ab_suffixes": ["_a", "_b"],
-    "partitions_offset_begin": 0,
-    "disk_size": 21474836480,
-    "disk_alignment": 4096,
-    "disk_guid": "01234567-89ab-cdef-0123-000000000000"
-  },
-  "partitions": [
-    {
-      "label": "boot_a",
-      "offset": 20480,
-      "size": 33554432,
-      "grow": false,
-      "guid": "01234567-89ab-cdef-0123-000000000001",
-      "type_guid": "bb499290-b57e-49f6-bf41-190386693794",
-      "flags": "0x0000000000000000",
-      "persist": false,
-      "ignore": false,
-      "ab": true,
-      "ab_expanded": true,
-      "position": 0
-    },
-    {
-      "label": "boot_b",
-      "offset": 33574912,
-      "size": 33554432,
-      "grow": false,
-      "guid": "01234567-89ab-cdef-0123-000000000002",
-      "type_guid": "bb499290-b57e-49f6-bf41-190386693794",
-      "flags": "0x0000000000000000",
-      "persist": false,
-      "ignore": false,
-      "ab": true,
-      "ab_expanded": true,
-      "position": 0
-    },
-    {
-      "label": "system_a",
-      "offset": 67129344,
-      "size": 536870912,
-      "grow": false,
-      "guid": "01234567-89ab-cdef-0123-000000000003",
-      "type_guid": "0f2778c4-5cc1-4300-8670-6c88b7e57ed6",
-      "flags": "0x0000000000000000",
-      "persist": false,
-      "ignore": false,
-      "ab": true,
-      "ab_expanded": true,
-      "position": 0
-    },
-    {
-      "label": "system_b",
-      "offset": 604000256,
-      "size": 536870912,
-      "grow": false,
-      "guid": "01234567-89ab-cdef-0123-000000000004",
-      "type_guid": "0f2778c4-5cc1-4300-8670-6c88b7e57ed6",
-      "flags": "0x0000000000000000",
-      "persist": false,
-      "ignore": false,
-      "ab": true,
-      "ab_expanded": true,
-      "position": 0
-    },
-    {
-      "label": "odm_a",
-      "offset": 1140871168,
-      "size": 1073741824,
-      "grow": false,
-      "guid": "01234567-89ab-cdef-0123-000000000005",
-      "type_guid": "e99d84d7-2c1b-44cf-8c58-effae2dc2558",
-      "flags": "0x0000000000000000",
-      "persist": false,
-      "ignore": false,
-      "ab": true,
-      "ab_expanded": true,
-      "position": 0
-    },
-    {
-      "label": "odm_b",
-      "offset": 2214612992,
-      "size": 1073741824,
-      "grow": false,
-      "guid": "01234567-89ab-cdef-0123-000000000006",
-      "type_guid": "e99d84d7-2c1b-44cf-8c58-effae2dc2558",
-      "flags": "0x0000000000000000",
-      "persist": false,
-      "ignore": false,
-      "ab": true,
-      "ab_expanded": true,
-      "position": 0
-    },
-    {
-      "label": "userdata",
-      "offset": 3288354816,
-      "size": 18186461184,
-      "grow": true,
-      "guid": "01234567-89ab-cdef-0123-000000000007",
-      "type_guid": "0bb7e6ed-4424-49c0-9372-7fbab465ab4c",
-      "flags": "0x0000000000000000",
-      "persist": false,
-      "ignore": false,
-      "ab": false,
-      "ab_expanded": false,
-      "position": 0
-    }
-  ]
-}
diff --git a/test/expected_json_stacked_change_ab_size.bpt b/test/expected_json_stacked_change_ab_size.bpt
deleted file mode 100644
index 0d16099..0000000
--- a/test/expected_json_stacked_change_ab_size.bpt
+++ /dev/null
@@ -1,109 +0,0 @@
-{
-  "settings": {
-    "ab_suffixes": ["_a", "_b"],
-    "partitions_offset_begin": 0,
-    "disk_size": 10737418240,
-    "disk_alignment": 4096,
-    "disk_guid": "01234567-89ab-cdef-0123-000000000000"
-  },
-  "partitions": [
-    {
-      "label": "boot_a",
-      "offset": 20480,
-      "size": 33554432,
-      "grow": false,
-      "guid": "01234567-89ab-cdef-0123-000000000001",
-      "type_guid": "bb499290-b57e-49f6-bf41-190386693794",
-      "flags": "0x0000000000000000",
-      "persist": false,
-      "ignore": false,
-      "ab": true,
-      "ab_expanded": true,
-      "position": 0
-    },
-    {
-      "label": "boot_b",
-      "offset": 33574912,
-      "size": 33554432,
-      "grow": false,
-      "guid": "01234567-89ab-cdef-0123-000000000002",
-      "type_guid": "bb499290-b57e-49f6-bf41-190386693794",
-      "flags": "0x0000000000000000",
-      "persist": false,
-      "ignore": false,
-      "ab": true,
-      "ab_expanded": true,
-      "position": 0
-    },
-    {
-      "label": "system_a",
-      "offset": 67129344,
-      "size": 268435456,
-      "grow": false,
-      "guid": "01234567-89ab-cdef-0123-000000000003",
-      "type_guid": "0f2778c4-5cc1-4300-8670-6c88b7e57ed6",
-      "flags": "0x0000000000000000",
-      "persist": false,
-      "ignore": false,
-      "ab": true,
-      "ab_expanded": true,
-      "position": 0
-    },
-    {
-      "label": "system_b",
-      "offset": 335564800,
-      "size": 268435456,
-      "grow": false,
-      "guid": "01234567-89ab-cdef-0123-000000000004",
-      "type_guid": "0f2778c4-5cc1-4300-8670-6c88b7e57ed6",
-      "flags": "0x0000000000000000",
-      "persist": false,
-      "ignore": false,
-      "ab": true,
-      "ab_expanded": true,
-      "position": 0
-    },
-    {
-      "label": "odm_a",
-      "offset": 604000256,
-      "size": 1073741824,
-      "grow": false,
-      "guid": "01234567-89ab-cdef-0123-000000000005",
-      "type_guid": "e99d84d7-2c1b-44cf-8c58-effae2dc2558",
-      "flags": "0x0000000000000000",
-      "persist": false,
-      "ignore": false,
-      "ab": true,
-      "ab_expanded": true,
-      "position": 0
-    },
-    {
-      "label": "odm_b",
-      "offset": 1677742080,
-      "size": 1073741824,
-      "grow": false,
-      "guid": "01234567-89ab-cdef-0123-000000000006",
-      "type_guid": "e99d84d7-2c1b-44cf-8c58-effae2dc2558",
-      "flags": "0x0000000000000000",
-      "persist": false,
-      "ignore": false,
-      "ab": true,
-      "ab_expanded": true,
-      "position": 0
-    },
-    {
-      "label": "userdata",
-      "offset": 2751483904,
-      "size": 7985913856,
-      "grow": true,
-      "guid": "01234567-89ab-cdef-0123-000000000007",
-      "type_guid": "0bb7e6ed-4424-49c0-9372-7fbab465ab4c",
-      "flags": "0x0000000000000000",
-      "persist": false,
-      "ignore": false,
-      "ab": false,
-      "ab_expanded": false,
-      "position": 0
-    }
-  ]
-}
diff --git a/test/expected_json_stacked_change_flags.bin b/test/expected_json_stacked_change_flags.bin
deleted file mode 100644
index 2fb6ee3..0000000
--- a/test/expected_json_stacked_change_flags.bin
+++ /dev/null
Binary files differ
diff --git a/test/expected_json_stacked_change_flags.bpt b/test/expected_json_stacked_change_flags.bpt
deleted file mode 100644
index 094e4bb..0000000
--- a/test/expected_json_stacked_change_flags.bpt
+++ /dev/null
@@ -1,109 +0,0 @@
-{
-  "settings": {
-    "ab_suffixes": ["_a", "_b"],
-    "partitions_offset_begin": 0,
-    "disk_size": 10737418240,
-    "disk_alignment": 4096,
-    "disk_guid": "01234567-89ab-cdef-0123-000000000000"
-  },
-  "partitions": [
-    {
-      "label": "boot_a",
-      "offset": 20480,
-      "size": 33554432,
-      "grow": false,
-      "guid": "01234567-89ab-cdef-0123-000000000001",
-      "type_guid": "bb499290-b57e-49f6-bf41-190386693794",
-      "flags": "0x0000000000000000",
-      "persist": false,
-      "ignore": false,
-      "ab": true,
-      "ab_expanded": true,
-      "position": 0
-    },
-    {
-      "label": "boot_b",
-      "offset": 33574912,
-      "size": 33554432,
-      "grow": false,
-      "guid": "01234567-89ab-cdef-0123-000000000002",
-      "type_guid": "bb499290-b57e-49f6-bf41-190386693794",
-      "flags": "0x0000000000000000",
-      "persist": false,
-      "ignore": false,
-      "ab": true,
-      "ab_expanded": true,
-      "position": 0
-    },
-    {
-      "label": "system_a",
-      "offset": 67129344,
-      "size": 536870912,
-      "grow": false,
-      "guid": "01234567-89ab-cdef-0123-000000000003",
-      "type_guid": "0f2778c4-5cc1-4300-8670-6c88b7e57ed6",
-      "flags": "0x0000000000000000",
-      "persist": false,
-      "ignore": false,
-      "ab": true,
-      "ab_expanded": true,
-      "position": 0
-    },
-    {
-      "label": "system_b",
-      "offset": 604000256,
-      "size": 536870912,
-      "grow": false,
-      "guid": "01234567-89ab-cdef-0123-000000000004",
-      "type_guid": "0f2778c4-5cc1-4300-8670-6c88b7e57ed6",
-      "flags": "0x0000000000000000",
-      "persist": false,
-      "ignore": false,
-      "ab": true,
-      "ab_expanded": true,
-      "position": 0
-    },
-    {
-      "label": "odm_a",
-      "offset": 1140871168,
-      "size": 1073741824,
-      "grow": false,
-      "guid": "01234567-89ab-cdef-0123-000000000005",
-      "type_guid": "e99d84d7-2c1b-44cf-8c58-effae2dc2558",
-      "flags": "0x0000000000000000",
-      "persist": false,
-      "ignore": false,
-      "ab": true,
-      "ab_expanded": true,
-      "position": 0
-    },
-    {
-      "label": "odm_b",
-      "offset": 2214612992,
-      "size": 1073741824,
-      "grow": false,
-      "guid": "01234567-89ab-cdef-0123-000000000006",
-      "type_guid": "e99d84d7-2c1b-44cf-8c58-effae2dc2558",
-      "flags": "0x0000000000000000",
-      "persist": false,
-      "ignore": false,
-      "ab": true,
-      "ab_expanded": true,
-      "position": 0
-    },
-    {
-      "label": "userdata",
-      "offset": 3288354816,
-      "size": 7449042944,
-      "grow": true,
-      "guid": "01234567-89ab-cdef-0123-000000000007",
-      "type_guid": "0bb7e6ed-4424-49c0-9372-7fbab465ab4c",
-      "flags": "0x0420000000000000",
-      "persist": false,
-      "ignore": false,
-      "ab": false,
-      "ab_expanded": false,
-      "position": 0
-    }
-  ]
-}
diff --git a/test/expected_json_stacked_disable_ab.bpt b/test/expected_json_stacked_disable_ab.bpt
deleted file mode 100644
index 0558206..0000000
--- a/test/expected_json_stacked_disable_ab.bpt
+++ /dev/null
@@ -1,67 +0,0 @@
-{
-  "settings": {
-    "ab_suffixes": ["_a", "_b"],
-    "partitions_offset_begin": 0,
-    "disk_size": 10737418240,
-    "disk_alignment": 4096,
-    "disk_guid": "01234567-89ab-cdef-0123-000000000000"
-  },
-  "partitions": [
-    {
-      "label": "boot",
-      "offset": 20480,
-      "size": 33554432,
-      "grow": false,
-      "guid": "01234567-89ab-cdef-0123-000000000001",
-      "type_guid": "bb499290-b57e-49f6-bf41-190386693794",
-      "flags": "0x0000000000000000",
-      "persist": false,
-      "ignore": false,
-      "ab": false,
-      "ab_expanded": false,
-      "position": 0
-    },
-    {
-      "label": "system",
-      "offset": 33574912,
-      "size": 536870912,
-      "grow": false,
-      "guid": "01234567-89ab-cdef-0123-000000000002",
-      "type_guid": "0f2778c4-5cc1-4300-8670-6c88b7e57ed6",
-      "flags": "0x0000000000000000",
-      "persist": false,
-      "ignore": false,
-      "ab": false,
-      "ab_expanded": false,
-      "position": 0
-    },
-    {
-      "label": "odm",
-      "offset": 570445824,
-      "size": 1073741824,
-      "grow": false,
-      "guid": "01234567-89ab-cdef-0123-000000000003",
-      "type_guid": "e99d84d7-2c1b-44cf-8c58-effae2dc2558",
-      "flags": "0x0000000000000000",
-      "persist": false,
-      "ignore": false,
-      "ab": false,
-      "ab_expanded": false,
-      "position": 0
-    },
-    {
-      "label": "userdata",
-      "offset": 1644187648,
-      "size": 9093210112,
-      "grow": true,
-      "guid": "01234567-89ab-cdef-0123-000000000004",
-      "type_guid": "0bb7e6ed-4424-49c0-9372-7fbab465ab4c",
-      "flags": "0x0000000000000000",
-      "persist": false,
-      "ignore": false,
-      "ab": false,
-      "ab_expanded": false,
-      "position": 0
-    }
-  ]
-}
diff --git a/test/expected_json_stacked_ignore.bpt b/test/expected_json_stacked_ignore.bpt
deleted file mode 100644
index 9fac092..0000000
--- a/test/expected_json_stacked_ignore.bpt
+++ /dev/null
@@ -1,95 +0,0 @@
-{
-  "settings": {
-    "ab_suffixes": ["_a", "_b"],
-    "partitions_offset_begin": 0,
-    "disk_size": 10737418240,
-    "disk_alignment": 4096,
-    "disk_guid": "01234567-89ab-cdef-0123-000000000000"
-  },
-  "partitions": [
-    {
-      "label": "boot_a",
-      "offset": 20480,
-      "size": 33554432,
-      "grow": false,
-      "guid": "01234567-89ab-cdef-0123-000000000001",
-      "type_guid": "bb499290-b57e-49f6-bf41-190386693794",
-      "flags": "0x0000000000000000",
-      "persist": false,
-      "ignore": false,
-      "ab": true,
-      "ab_expanded": true,
-      "position": 0
-    },
-    {
-      "label": "boot_b",
-      "offset": 33574912,
-      "size": 33554432,
-      "grow": false,
-      "guid": "01234567-89ab-cdef-0123-000000000002",
-      "type_guid": "bb499290-b57e-49f6-bf41-190386693794",
-      "flags": "0x0000000000000000",
-      "persist": false,
-      "ignore": false,
-      "ab": true,
-      "ab_expanded": true,
-      "position": 0
-    },
-    {
-      "label": "system_a",
-      "offset": 67129344,
-      "size": 536870912,
-      "grow": false,
-      "guid": "01234567-89ab-cdef-0123-000000000003",
-      "type_guid": "0f2778c4-5cc1-4300-8670-6c88b7e57ed6",
-      "flags": "0x0000000000000000",
-      "persist": false,
-      "ignore": false,
-      "ab": true,
-      "ab_expanded": true,
-      "position": 0
-    },
-    {
-      "label": "system_b",
-      "offset": 604000256,
-      "size": 536870912,
-      "grow": false,
-      "guid": "01234567-89ab-cdef-0123-000000000004",
-      "type_guid": "0f2778c4-5cc1-4300-8670-6c88b7e57ed6",
-      "flags": "0x0000000000000000",
-      "persist": false,
-      "ignore": false,
-      "ab": true,
-      "ab_expanded": true,
-      "position": 0
-    },
-    {
-      "label": "odm_a",
-      "offset": 1140871168,
-      "size": 1073741824,
-      "grow": false,
-      "guid": "01234567-89ab-cdef-0123-000000000005",
-      "type_guid": "e99d84d7-2c1b-44cf-8c58-effae2dc2558",
-      "flags": "0x0000000000000000",
-      "persist": false,
-      "ignore": false,
-      "ab": true,
-      "ab_expanded": true,
-      "position": 0
-    },
-    {
-      "label": "odm_b",
-      "offset": 2214612992,
-      "size": 1073741824,
-      "grow": false,
-      "guid": "01234567-89ab-cdef-0123-000000000006",
-      "type_guid": "e99d84d7-2c1b-44cf-8c58-effae2dc2558",
-      "flags": "0x0000000000000000",
-      "persist": false,
-      "ignore": false,
-      "ab": true,
-      "ab_expanded": true,
-      "position": 0
-    }
-  ]
-}
diff --git a/test/expected_json_stacked_new_partition.bpt b/test/expected_json_stacked_new_partition.bpt
deleted file mode 100644
index 5553c60..0000000
--- a/test/expected_json_stacked_new_partition.bpt
+++ /dev/null
@@ -1,123 +0,0 @@
-{
-  "settings": {
-    "ab_suffixes": ["_a", "_b"],
-    "partitions_offset_begin": 0,
-    "disk_size": 10737418240,
-    "disk_alignment": 4096,
-    "disk_guid": "01234567-89ab-cdef-0123-000000000000"
-  },
-  "partitions": [
-    {
-      "label": "boot_a",
-      "offset": 20480,
-      "size": 33554432,
-      "grow": false,
-      "guid": "01234567-89ab-cdef-0123-000000000001",
-      "type_guid": "bb499290-b57e-49f6-bf41-190386693794",
-      "flags": "0x0000000000000000",
-      "persist": false,
-      "ignore": false,
-      "ab": true,
-      "ab_expanded": true,
-      "position": 0
-    },
-    {
-      "label": "boot_b",
-      "offset": 33574912,
-      "size": 33554432,
-      "grow": false,
-      "guid": "01234567-89ab-cdef-0123-000000000002",
-      "type_guid": "bb499290-b57e-49f6-bf41-190386693794",
-      "flags": "0x0000000000000000",
-      "persist": false,
-      "ignore": false,
-      "ab": true,
-      "ab_expanded": true,
-      "position": 0
-    },
-    {
-      "label": "system_a",
-      "offset": 67129344,
-      "size": 536870912,
-      "grow": false,
-      "guid": "01234567-89ab-cdef-0123-000000000003",
-      "type_guid": "0f2778c4-5cc1-4300-8670-6c88b7e57ed6",
-      "flags": "0x0000000000000000",
-      "persist": false,
-      "ignore": false,
-      "ab": true,
-      "ab_expanded": true,
-      "position": 0
-    },
-    {
-      "label": "system_b",
-      "offset": 604000256,
-      "size": 536870912,
-      "grow": false,
-      "guid": "01234567-89ab-cdef-0123-000000000004",
-      "type_guid": "0f2778c4-5cc1-4300-8670-6c88b7e57ed6",
-      "flags": "0x0000000000000000",
-      "persist": false,
-      "ignore": false,
-      "ab": true,
-      "ab_expanded": true,
-      "position": 0
-    },
-    {
-      "label": "odm_a",
-      "offset": 1140871168,
-      "size": 1073741824,
-      "grow": false,
-      "guid": "01234567-89ab-cdef-0123-000000000005",
-      "type_guid": "e99d84d7-2c1b-44cf-8c58-effae2dc2558",
-      "flags": "0x0000000000000000",
-      "persist": false,
-      "ignore": false,
-      "ab": true,
-      "ab_expanded": true,
-      "position": 0
-    },
-    {
-      "label": "odm_b",
-      "offset": 2214612992,
-      "size": 1073741824,
-      "grow": false,
-      "guid": "01234567-89ab-cdef-0123-000000000006",
-      "type_guid": "e99d84d7-2c1b-44cf-8c58-effae2dc2558",
-      "flags": "0x0000000000000000",
-      "persist": false,
-      "ignore": false,
-      "ab": true,
-      "ab_expanded": true,
-      "position": 0
-    },
-    {
-      "label": "userdata",
-      "offset": 3288354816,
-      "size": 5949042688,
-      "grow": true,
-      "guid": "01234567-89ab-cdef-0123-000000000007",
-      "type_guid": "0bb7e6ed-4424-49c0-9372-7fbab465ab4c",
-      "flags": "0x0000000000000000",
-      "persist": false,
-      "ignore": false,
-      "ab": false,
-      "ab_expanded": false,
-      "position": 0
-    },
-    {
-      "label": "my_data_partition",
-      "offset": 9237397504,
-      "size": 1500000256,
-      "grow": false,
-      "guid": "01234567-89ab-cdef-0123-000000000008",
-      "type_guid": "ebd0a0a2-b9e5-4433-87c0-68b6b72699c7",
-      "flags": "0x8000000000000000",
-      "persist": false,
-      "ignore": false,
-      "ab": false,
-      "ab_expanded": false,
-      "position": 0
-    }
-  ]
-}
diff --git a/test/expected_json_stacked_new_partition_on_top.bpt b/test/expected_json_stacked_new_partition_on_top.bpt
deleted file mode 100644
index cd1d18e..0000000
--- a/test/expected_json_stacked_new_partition_on_top.bpt
+++ /dev/null
@@ -1,123 +0,0 @@
-{
-  "settings": {
-    "ab_suffixes": ["_a", "_b"],
-    "partitions_offset_begin": 0,
-    "disk_size": 10737418240,
-    "disk_alignment": 4096,
-    "disk_guid": "01234567-89ab-cdef-0123-000000000000"
-  },
-  "partitions": [
-    {
-      "label": "boot_a",
-      "offset": 20480,
-      "size": 33554432,
-      "grow": false,
-      "guid": "01234567-89ab-cdef-0123-000000000001",
-      "type_guid": "bb499290-b57e-49f6-bf41-190386693794",
-      "flags": "0x0000000000000000",
-      "persist": false,
-      "ignore": false,
-      "ab": true,
-      "ab_expanded": true,
-      "position": 0
-    },
-    {
-      "label": "boot_b",
-      "offset": 33574912,
-      "size": 33554432,
-      "grow": false,
-      "guid": "01234567-89ab-cdef-0123-000000000002",
-      "type_guid": "bb499290-b57e-49f6-bf41-190386693794",
-      "flags": "0x0000000000000000",
-      "persist": false,
-      "ignore": false,
-      "ab": true,
-      "ab_expanded": true,
-      "position": 0
-    },
-    {
-      "label": "system_a",
-      "offset": 67129344,
-      "size": 536870912,
-      "grow": false,
-      "guid": "01234567-89ab-cdef-0123-000000000003",
-      "type_guid": "0f2778c4-5cc1-4300-8670-6c88b7e57ed6",
-      "flags": "0x0000000000000000",
-      "persist": false,
-      "ignore": false,
-      "ab": true,
-      "ab_expanded": true,
-      "position": 0
-    },
-    {
-      "label": "system_b",
-      "offset": 604000256,
-      "size": 536870912,
-      "grow": false,
-      "guid": "01234567-89ab-cdef-0123-000000000004",
-      "type_guid": "0f2778c4-5cc1-4300-8670-6c88b7e57ed6",
-      "flags": "0x0000000000000000",
-      "persist": false,
-      "ignore": false,
-      "ab": true,
-      "ab_expanded": true,
-      "position": 0
-    },
-    {
-      "label": "odm_a",
-      "offset": 1140871168,
-      "size": 1073741824,
-      "grow": false,
-      "guid": "01234567-89ab-cdef-0123-000000000005",
-      "type_guid": "e99d84d7-2c1b-44cf-8c58-effae2dc2558",
-      "flags": "0x0000000000000000",
-      "persist": false,
-      "ignore": false,
-      "ab": true,
-      "ab_expanded": true,
-      "position": 0
-    },
-    {
-      "label": "odm_b",
-      "offset": 2214612992,
-      "size": 1073741824,
-      "grow": false,
-      "guid": "01234567-89ab-cdef-0123-000000000006",
-      "type_guid": "e99d84d7-2c1b-44cf-8c58-effae2dc2558",
-      "flags": "0x0000000000000000",
-      "persist": false,
-      "ignore": false,
-      "ab": true,
-      "ab_expanded": true,
-      "position": 0
-    },
-    {
-      "label": "userdata",
-      "offset": 3288354816,
-      "size": 6375301120,
-      "grow": true,
-      "guid": "01234567-89ab-cdef-0123-000000000007",
-      "type_guid": "0bb7e6ed-4424-49c0-9372-7fbab465ab4c",
-      "flags": "0x0000000000000000",
-      "persist": false,
-      "ignore": false,
-      "ab": false,
-      "ab_expanded": false,
-      "position": 0
-    },
-    {
-      "label": "my_partition_on_top_of_json",
-      "offset": 9663655936,
-      "size": 1073741824,
-      "grow": false,
-      "guid": "01234567-89ab-cdef-0123-000000000008",
-      "type_guid": "0fc63daf-8483-4772-8e79-3d69d8477de4",
-      "flags": "0x0000000000000000",
-      "persist": false,
-      "ignore": false,
-      "ab": false,
-      "ab_expanded": false,
-      "position": 0
-    }
-  ]
-}
diff --git a/test/expected_json_stacked_override_settings.bpt b/test/expected_json_stacked_override_settings.bpt
deleted file mode 100644
index ab40651..0000000
--- a/test/expected_json_stacked_override_settings.bpt
+++ /dev/null
@@ -1,109 +0,0 @@
-{
-  "settings": {
-    "ab_suffixes": ["-0", "-1"],
-    "partitions_offset_begin": 0,
-    "disk_size": 16106127360,
-    "disk_alignment": 512,
-    "disk_guid": "01234567-89ab-cdef-0123-000000000000"
-  },
-  "partitions": [
-    {
-      "label": "boot-0",
-      "offset": 17408,
-      "size": 33554432,
-      "grow": false,
-      "guid": "01234567-89ab-cdef-0123-000000000001",
-      "type_guid": "bb499290-b57e-49f6-bf41-190386693794",
-      "flags": "0x0000000000000000",
-      "persist": false,
-      "ignore": false,
-      "ab": true,
-      "ab_expanded": true,
-      "position": 0
-    },
-    {
-      "label": "boot-1",
-      "offset": 33571840,
-      "size": 33554432,
-      "grow": false,
-      "guid": "01234567-89ab-cdef-0123-000000000002",
-      "type_guid": "bb499290-b57e-49f6-bf41-190386693794",
-      "flags": "0x0000000000000000",
-      "persist": false,
-      "ignore": false,
-      "ab": true,
-      "ab_expanded": true,
-      "position": 0
-    },
-    {
-      "label": "system-0",
-      "offset": 67126272,
-      "size": 536870912,
-      "grow": false,
-      "guid": "01234567-89ab-cdef-0123-000000000003",
-      "type_guid": "0f2778c4-5cc1-4300-8670-6c88b7e57ed6",
-      "flags": "0x0000000000000000",
-      "persist": false,
-      "ignore": false,
-      "ab": true,
-      "ab_expanded": true,
-      "position": 0
-    },
-    {
-      "label": "system-1",
-      "offset": 603997184,
-      "size": 536870912,
-      "grow": false,
-      "guid": "01234567-89ab-cdef-0123-000000000004",
-      "type_guid": "0f2778c4-5cc1-4300-8670-6c88b7e57ed6",
-      "flags": "0x0000000000000000",
-      "persist": false,
-      "ignore": false,
-      "ab": true,
-      "ab_expanded": true,
-      "position": 0
-    },
-    {
-      "label": "odm-0",
-      "offset": 1140868096,
-      "size": 1073741824,
-      "grow": false,
-      "guid": "01234567-89ab-cdef-0123-000000000005",
-      "type_guid": "e99d84d7-2c1b-44cf-8c58-effae2dc2558",
-      "flags": "0x0000000000000000",
-      "persist": false,
-      "ignore": false,
-      "ab": true,
-      "ab_expanded": true,
-      "position": 0
-    },
-    {
-      "label": "odm-1",
-      "offset": 2214609920,
-      "size": 1073741824,
-      "grow": false,
-      "guid": "01234567-89ab-cdef-0123-000000000006",
-      "type_guid": "e99d84d7-2c1b-44cf-8c58-effae2dc2558",
-      "flags": "0x0000000000000000",
-      "persist": false,
-      "ignore": false,
-      "ab": true,
-      "ab_expanded": true,
-      "position": 0
-    },
-    {
-      "label": "userdata",
-      "offset": 3288351744,
-      "size": 12817758720,
-      "grow": true,
-      "guid": "01234567-89ab-cdef-0123-000000000007",
-      "type_guid": "0bb7e6ed-4424-49c0-9372-7fbab465ab4c",
-      "flags": "0x0000000000000000",
-      "persist": false,
-      "ignore": false,
-      "ab": false,
-      "ab_expanded": false,
-      "position": 0
-    }
-  ]
-}
diff --git a/test/expected_json_stacked_positions.bpt b/test/expected_json_stacked_positions.bpt
deleted file mode 100644
index 8469bec..0000000
--- a/test/expected_json_stacked_positions.bpt
+++ /dev/null
@@ -1,151 +0,0 @@
-{
-  "settings": {
-    "ab_suffixes": ["_a", "_b"],
-    "partitions_offset_begin": 0,
-    "disk_size": 10737418240,
-    "disk_alignment": 4096,
-    "disk_guid": "01234567-89ab-cdef-0123-000000000000"
-  },
-  "partitions": [
-    {
-      "label": "my_data_1",
-      "offset": 20480,
-      "size": 134217728,
-      "grow": false,
-      "guid": "01234567-89ab-cdef-0123-000000000001",
-      "type_guid": "314f99d5-b2bf-4883-8d03-e2f2ce507d6a",
-      "flags": "0x0000000000000000",
-      "persist": false,
-      "ignore": false,
-      "ab": false,
-      "ab_expanded": false,
-      "position": 1
-    },
-    {
-      "label": "my_data_2",
-      "offset": 134238208,
-      "size": 268435456,
-      "grow": false,
-      "guid": "01234567-89ab-cdef-0123-000000000002",
-      "type_guid": "314f99d5-b2bf-4883-8d03-e2f2ce507d6a",
-      "flags": "0x0000000000000000",
-      "persist": false,
-      "ignore": false,
-      "ab": false,
-      "ab_expanded": false,
-      "position": 2
-    },
-    {
-      "label": "system_a",
-      "offset": 402673664,
-      "size": 536870912,
-      "grow": false,
-      "guid": "01234567-89ab-cdef-0123-000000000003",
-      "type_guid": "0f2778c4-5cc1-4300-8670-6c88b7e57ed6",
-      "flags": "0x0000000000000000",
-      "persist": false,
-      "ignore": false,
-      "ab": true,
-      "ab_expanded": true,
-      "position": 3
-    },
-    {
-      "label": "system_b",
-      "offset": 939544576,
-      "size": 536870912,
-      "grow": false,
-      "guid": "01234567-89ab-cdef-0123-000000000004",
-      "type_guid": "0f2778c4-5cc1-4300-8670-6c88b7e57ed6",
-      "flags": "0x0000000000000000",
-      "persist": false,
-      "ignore": false,
-      "ab": true,
-      "ab_expanded": true,
-      "position": 3
-    },
-    {
-      "label": "my_data_3",
-      "offset": 1476415488,
-      "size": 536870912,
-      "grow": false,
-      "guid": "01234567-89ab-cdef-0123-000000000005",
-      "type_guid": "314f99d5-b2bf-4883-8d03-e2f2ce507d6a",
-      "flags": "0x0000000000000000",
-      "persist": false,
-      "ignore": false,
-      "ab": false,
-      "ab_expanded": false,
-      "position": 4
-    },
-    {
-      "label": "boot_a",
-      "offset": 2013286400,
-      "size": 33554432,
-      "grow": false,
-      "guid": "01234567-89ab-cdef-0123-000000000006",
-      "type_guid": "bb499290-b57e-49f6-bf41-190386693794",
-      "flags": "0x0000000000000000",
-      "persist": false,
-      "ignore": false,
-      "ab": true,
-      "ab_expanded": true,
-      "position": 0
-    },
-    {
-      "label": "boot_b",
-      "offset": 2046840832,
-      "size": 33554432,
-      "grow": false,
-      "guid": "01234567-89ab-cdef-0123-000000000007",
-      "type_guid": "bb499290-b57e-49f6-bf41-190386693794",
-      "flags": "0x0000000000000000",
-      "persist": false,
-      "ignore": false,
-      "ab": true,
-      "ab_expanded": true,
-      "position": 0
-    },
-    {
-      "label": "odm_a",
-      "offset": 2080395264,
-      "size": 1073741824,
-      "grow": false,
-      "guid": "01234567-89ab-cdef-0123-000000000008",
-      "type_guid": "e99d84d7-2c1b-44cf-8c58-effae2dc2558",
-      "flags": "0x0000000000000000",
-      "persist": false,
-      "ignore": false,
-      "ab": true,
-      "ab_expanded": true,
-      "position": 0
-    },
-    {
-      "label": "odm_b",
-      "offset": 3154137088,
-      "size": 1073741824,
-      "grow": false,
-      "guid": "01234567-89ab-cdef-0123-000000000009",
-      "type_guid": "e99d84d7-2c1b-44cf-8c58-effae2dc2558",
-      "flags": "0x0000000000000000",
-      "persist": false,
-      "ignore": false,
-      "ab": true,
-      "ab_expanded": true,
-      "position": 0
-    },
-    {
-      "label": "userdata",
-      "offset": 4227878912,
-      "size": 6509518848,
-      "grow": true,
-      "guid": "01234567-89ab-cdef-0123-00000000000a",
-      "type_guid": "0bb7e6ed-4424-49c0-9372-7fbab465ab4c",
-      "flags": "0x0000000000000000",
-      "persist": false,
-      "ignore": false,
-      "ab": false,
-      "ab_expanded": false,
-      "position": 0
-    }
-  ]
-}
diff --git a/test/expected_json_stacked_size.bpt b/test/expected_json_stacked_size.bpt
deleted file mode 100644
index 43837cc..0000000
--- a/test/expected_json_stacked_size.bpt
+++ /dev/null
@@ -1,109 +0,0 @@
-{
-  "settings": {
-    "ab_suffixes": ["_a", "_b"],
-    "partitions_offset_begin": 0,
-    "disk_size": 10737418240,
-    "disk_alignment": 4096,
-    "disk_guid": "01234567-89ab-cdef-0123-000000000000"
-  },
-  "partitions": [
-    {
-      "label": "boot_a",
-      "offset": 20480,
-      "size": 33554432,
-      "grow": false,
-      "guid": "01234567-89ab-cdef-0123-000000000001",
-      "type_guid": "bb499290-b57e-49f6-bf41-190386693794",
-      "flags": "0x0000000000000000",
-      "persist": false,
-      "ignore": false,
-      "ab": true,
-      "ab_expanded": true,
-      "position": 0
-    },
-    {
-      "label": "boot_b",
-      "offset": 33574912,
-      "size": 33554432,
-      "grow": false,
-      "guid": "01234567-89ab-cdef-0123-000000000002",
-      "type_guid": "bb499290-b57e-49f6-bf41-190386693794",
-      "flags": "0x0000000000000000",
-      "persist": false,
-      "ignore": false,
-      "ab": true,
-      "ab_expanded": true,
-      "position": 0
-    },
-    {
-      "label": "system_a",
-      "offset": 67129344,
-      "size": 536870912,
-      "grow": false,
-      "guid": "01234567-89ab-cdef-0123-000000000003",
-      "type_guid": "0f2778c4-5cc1-4300-8670-6c88b7e57ed6",
-      "flags": "0x0000000000000000",
-      "persist": false,
-      "ignore": false,
-      "ab": true,
-      "ab_expanded": true,
-      "position": 0
-    },
-    {
-      "label": "system_b",
-      "offset": 604000256,
-      "size": 536870912,
-      "grow": false,
-      "guid": "01234567-89ab-cdef-0123-000000000004",
-      "type_guid": "0f2778c4-5cc1-4300-8670-6c88b7e57ed6",
-      "flags": "0x0000000000000000",
-      "persist": false,
-      "ignore": false,
-      "ab": true,
-      "ab_expanded": true,
-      "position": 0
-    },
-    {
-      "label": "odm_a",
-      "offset": 1140871168,
-      "size": 2147483648,
-      "grow": false,
-      "guid": "01234567-89ab-cdef-0123-000000000005",
-      "type_guid": "e99d84d7-2c1b-44cf-8c58-effae2dc2558",
-      "flags": "0x0000000000000000",
-      "persist": false,
-      "ignore": false,
-      "ab": true,
-      "ab_expanded": true,
-      "position": 0
-    },
-    {
-      "label": "odm_b",
-      "offset": 3288354816,
-      "size": 2147483648,
-      "grow": false,
-      "guid": "01234567-89ab-cdef-0123-000000000006",
-      "type_guid": "e99d84d7-2c1b-44cf-8c58-effae2dc2558",
-      "flags": "0x0000000000000000",
-      "persist": false,
-      "ignore": false,
-      "ab": true,
-      "ab_expanded": true,
-      "position": 0
-    },
-    {
-      "label": "userdata",
-      "offset": 5435838464,
-      "size": 5301559296,
-      "grow": true,
-      "guid": "01234567-89ab-cdef-0123-000000000007",
-      "type_guid": "0bb7e6ed-4424-49c0-9372-7fbab465ab4c",
-      "flags": "0x0000000000000000",
-      "persist": false,
-      "ignore": false,
-      "ab": false,
-      "ab_expanded": false,
-      "position": 0
-    }
-  ]
-}
diff --git a/test/expected_json_suffixes.bpt b/test/expected_json_suffixes.bpt
deleted file mode 100644
index ec50d5d..0000000
--- a/test/expected_json_suffixes.bpt
+++ /dev/null
@@ -1,109 +0,0 @@
-{
-  "settings": {
-    "ab_suffixes": ["-A", "-B"],
-    "partitions_offset_begin": 0,
-    "disk_size": 10737418240,
-    "disk_alignment": 4096,
-    "disk_guid": "01234567-89ab-cdef-0123-000000000000"
-  },
-  "partitions": [
-    {
-      "label": "boot-A",
-      "offset": 20480,
-      "size": 33554432,
-      "grow": false,
-      "guid": "01234567-89ab-cdef-0123-000000000001",
-      "type_guid": "bb499290-b57e-49f6-bf41-190386693794",
-      "flags": "0x0000000000000000",
-      "persist": false,
-      "ignore": false,
-      "ab": true,
-      "ab_expanded": true,
-      "position": 0
-    },
-    {
-      "label": "boot-B",
-      "offset": 33574912,
-      "size": 33554432,
-      "grow": false,
-      "guid": "01234567-89ab-cdef-0123-000000000002",
-      "type_guid": "bb499290-b57e-49f6-bf41-190386693794",
-      "flags": "0x0000000000000000",
-      "persist": false,
-      "ignore": false,
-      "ab": true,
-      "ab_expanded": true,
-      "position": 0
-    },
-    {
-      "label": "system-A",
-      "offset": 67129344,
-      "size": 536870912,
-      "grow": false,
-      "guid": "01234567-89ab-cdef-0123-000000000003",
-      "type_guid": "0f2778c4-5cc1-4300-8670-6c88b7e57ed6",
-      "flags": "0x0000000000000000",
-      "persist": false,
-      "ignore": false,
-      "ab": true,
-      "ab_expanded": true,
-      "position": 0
-    },
-    {
-      "label": "system-B",
-      "offset": 604000256,
-      "size": 536870912,
-      "grow": false,
-      "guid": "01234567-89ab-cdef-0123-000000000004",
-      "type_guid": "0f2778c4-5cc1-4300-8670-6c88b7e57ed6",
-      "flags": "0x0000000000000000",
-      "persist": false,
-      "ignore": false,
-      "ab": true,
-      "ab_expanded": true,
-      "position": 0
-    },
-    {
-      "label": "odm-A",
-      "offset": 1140871168,
-      "size": 1073741824,
-      "grow": false,
-      "guid": "01234567-89ab-cdef-0123-000000000005",
-      "type_guid": "e99d84d7-2c1b-44cf-8c58-effae2dc2558",
-      "flags": "0x0000000000000000",
-      "persist": false,
-      "ignore": false,
-      "ab": true,
-      "ab_expanded": true,
-      "position": 0
-    },
-    {
-      "label": "odm-B",
-      "offset": 2214612992,
-      "size": 1073741824,
-      "grow": false,
-      "guid": "01234567-89ab-cdef-0123-000000000006",
-      "type_guid": "e99d84d7-2c1b-44cf-8c58-effae2dc2558",
-      "flags": "0x0000000000000000",
-      "persist": false,
-      "ignore": false,
-      "ab": true,
-      "ab_expanded": true,
-      "position": 0
-    },
-    {
-      "label": "userdata",
-      "offset": 3288354816,
-      "size": 7449042944,
-      "grow": true,
-      "guid": "01234567-89ab-cdef-0123-000000000007",
-      "type_guid": "0bb7e6ed-4424-49c0-9372-7fbab465ab4c",
-      "flags": "0x0000000000000000",
-      "persist": false,
-      "ignore": false,
-      "ab": false,
-      "ab_expanded": false,
-      "position": 0
-    }
-  ]
-}
diff --git a/test/file_with_syntax_errors.bpt b/test/file_with_syntax_errors.bpt
deleted file mode 100644
index 52e826c..0000000
--- a/test/file_with_syntax_errors.bpt
+++ /dev/null
@@ -1,31 +0,0 @@
-{
-    "partitions": [
-        {
-            "ab": true,
-            "label": "boot",
-            "size": "32 MiB",
-            "guid": "auto",
-            "type_guid": "brillo_boot",
-        },
-        {
-            "ab": true,
-            "label": "system",
-            "size": "512 MiB",
-            "guid": "auto",
-            "type_guid": "brillo_system"
-        },
-        {
-            "ab": true,
-            "label": "odm",
-            "size": "1 GiB",
-            "guid": "auto",
-            "type_guid": "brillo_odm"
-        },
-        {
-            "label": "userdata",
-            "grow": true,
-            "guid": "auto",
-            "type_guid": "brillo_userdata"
-        }
-    ]
-}
diff --git a/test/ignore_userdata.bpt b/test/ignore_userdata.bpt
deleted file mode 100644
index 1b35403..0000000
--- a/test/ignore_userdata.bpt
+++ /dev/null
@@ -1,8 +0,0 @@
-{
-    "partitions": [
-        {
-            "label": "userdata",
-            "ignore": true
-        }
-    ]
-}
diff --git a/test/new_partition.bpt b/test/new_partition.bpt
deleted file mode 100644
index 684cd2d..0000000
--- a/test/new_partition.bpt
+++ /dev/null
@@ -1,11 +0,0 @@
-{
-    "partitions": [
-        {
-            "label": "my_data_partition",
-            "size": "1.5 GB",
-            "guid": "auto",
-            "type_guid": "ebd0a0a2-b9e5-4433-87c0-68b6b72699c7",
-            "flags": "0x8000000000000000"
-        }
-    ]
-}
diff --git a/test/new_partition_on_top.bpt b/test/new_partition_on_top.bpt
deleted file mode 100644
index b6dd67a..0000000
--- a/test/new_partition_on_top.bpt
+++ /dev/null
@@ -1,10 +0,0 @@
-{
-    "partitions": [
-        {
-            "label": "my_partition_on_top_of_json",
-            "size": "1 GiB",
-            "guid": "auto",
-            "type_guid": "linux_fs"
-        }
-    ]
-}
diff --git a/test/override_settings.bpt b/test/override_settings.bpt
deleted file mode 100644
index e3a487b..0000000
--- a/test/override_settings.bpt
+++ /dev/null
@@ -1,7 +0,0 @@
-{
-  "settings": {
-    "ab_suffixes": ["-0", "-1"],
-    "disk_size": 16106127360,
-    "disk_alignment": 512
-  }
-}
diff --git a/test/pattern_partition_exceed_size.bpt b/test/pattern_partition_exceed_size.bpt
deleted file mode 100644
index c1c54c0..0000000
--- a/test/pattern_partition_exceed_size.bpt
+++ /dev/null
@@ -1,11 +0,0 @@
-{
-    "settings": {
-        "disk_size": "50 MiB"
-    },
-    "partitions": [
-        {
-            "label": "delta",
-            "size": "20 MiB"
-        }
-    ]
-}
diff --git a/test/pattern_partition_multi.bpt b/test/pattern_partition_multi.bpt
deleted file mode 100644
index 56cd061..0000000
--- a/test/pattern_partition_multi.bpt
+++ /dev/null
@@ -1,15 +0,0 @@
-{
-    "settings": {
-        "disk_size": "80 MiB"
-    },
-    "partitions": [
-        {
-            "label": "alpha",
-            "size": "10 MiB"
-        },
-        {
-            "label": "beta",
-            "size": "50 MiB"
-        }
-    ]
-}
diff --git a/test/pattern_partition_single.bpt b/test/pattern_partition_single.bpt
deleted file mode 100644
index 036de77..0000000
--- a/test/pattern_partition_single.bpt
+++ /dev/null
@@ -1,11 +0,0 @@
-{
-    "settings": {
-        "disk_size": "40 GiB"
-    },
-    "partitions": [
-        {
-            "label": "charlie",
-            "size": "10 MiB"
-        }
-    ]
-}
diff --git a/test/persist.bpt b/test/persist.bpt
deleted file mode 100644
index 06f39d4..0000000
--- a/test/persist.bpt
+++ /dev/null
@@ -1,40 +0,0 @@
-{
-    "partitions": [
-        {
-            "label": "no_persist",
-            "size": "128 MiB",
-            "position": 1,
-            "type_guid": "brillo_boot"
-        },
-        {
-            "label": "false_persist",
-            "size": "128 MiB",
-            "position": 2,
-            "persist": false,
-            "type_guid": "brillo_boot"
-        },
-        {
-            "label": "true_persist",
-            "size": "128 MiB",
-            "position": 3,
-            "persist": true,
-            "type_guid": "brillo_boot"
-        },
-        {
-            "label": "false_persist_with_flags",
-            "size": "128 MiB",
-            "flags": "0x10",
-            "persist": false,
-            "position": 4,
-            "type_guid": "brillo_boot"
-        },
-        {
-            "label": "true_persist_with_flags",
-            "size": "128 MiB",
-            "flags": "0x10",
-            "persist": true,
-            "position": 5,
-            "type_guid": "brillo_boot"
-        }
-    ]
-}
diff --git a/test/positions.bpt b/test/positions.bpt
deleted file mode 100644
index 97b48a7..0000000
--- a/test/positions.bpt
+++ /dev/null
@@ -1,23 +0,0 @@
-{
-    "partitions": [
-        {
-            "label": "my_data_1",
-            "size": "128 MiB",
-            "position": 1
-        },
-        {
-            "label": "my_data_2",
-            "size": "256 MiB",
-            "position": 2
-        },
-        {
-            "label": "system",
-            "position": 3
-        },
-        {
-            "label": "my_data_3",
-            "size": "512 MiB",
-            "position": 4
-        }
-    ]
-}
diff --git a/test/test_file.bin b/test/test_file.bin
deleted file mode 100644
index a3fc7fb..0000000
--- a/test/test_file.bin
+++ /dev/null
Binary files differ
diff --git a/test/test_file.bin.sparse b/test/test_file.bin.sparse
deleted file mode 100644
index c4962b2..0000000
--- a/test/test_file.bin.sparse
+++ /dev/null
Binary files differ
diff --git a/test/test_sparse_image.bpt b/test/test_sparse_image.bpt
deleted file mode 100644
index 872f5a6..0000000
--- a/test/test_sparse_image.bpt
+++ /dev/null
@@ -1,11 +0,0 @@
-{
-  "settings": {
-    "disk_size": "128 KiB"
-  },
-  "partitions": [
-    {
-      "label": "sparse_data",
-      "grow": true
-    }
-  ]
-}