
Merge tag 'dm-pull-24jul19-take3' of https://gitlab.denx.de/u-boot/custodians/u-boot-dm

Minor driver-model fixes and tweaks
A few device-tree fixes
Binman support for extracting files from an image
Tom Rini 4 years ago
commit f9b65c76b4
87 changed files with 8158 additions and 923 deletions
   1. +1 -1      .gitlab-ci.yml
   2. +2 -1      .travis.yml
   3. +2 -2      Makefile
   4. +15 -12    common/fdt_support.c
   5. +2 -0      drivers/clk/clk-uclass.c
   6. +12 -5     drivers/core/device.c
   7. +1 -1      drivers/core/ofnode.c
   8. +4 -0      drivers/timer/timer-uclass.c
   9. +2 -2      fs/cbfs/cbfs.c
  10. +14 -2     include/cbfs.h
  11. +1 -1      include/dm/read.h
  12. +1 -1      include/dm/uclass.h
  13. +6 -3      test/run
  14. +3 -0      tools/Makefile
  15. +136 -18   tools/binman/README
  16. +273 -9    tools/binman/README.entries
  17. +61 -30    tools/binman/binman.py
  18. +0 -464    tools/binman/bsection.py
  19. +887 -0    tools/binman/cbfs_util.py
  20. +625 -0    tools/binman/cbfs_util_test.py
  21. +66 -33    tools/binman/cmdline.py
  22. +175 -37   tools/binman/control.py
  23. +174 -0    tools/binman/elf.py
  24. +41 -0     tools/binman/elf_test.py
  25. +169 -18   tools/binman/entry.py
  26. +16 -16    tools/binman/entry_test.py
  27. +0 -0      tools/binman/etype/__init__.py
  28. +11 -3     tools/binman/etype/_testing.py
  29. +31 -28    tools/binman/etype/blob.py
  30. +5 -5      tools/binman/etype/blob_dtb.py
  31. +263 -0    tools/binman/etype/cbfs.py
  32. +130 -0    tools/binman/etype/fdtmap.py
  33. +1 -2      tools/binman/etype/files.py
  34. +2 -2      tools/binman/etype/fmap.py
  35. +99 -0     tools/binman/etype/image_header.py
  36. +12 -4     tools/binman/etype/intel_descriptor.py
  37. +100 -0    tools/binman/etype/intel_ifwi.py
  38. +2 -0      tools/binman/etype/intel_me.py
  39. +397 -42   tools/binman/etype/section.py
  40. +19 -4     tools/binman/etype/text.py
  41. +1 -1      tools/binman/etype/u_boot_spl_elf.py
  42. +24 -0     tools/binman/etype/u_boot_tpl_elf.py
  43. +4 -4      tools/binman/etype/u_boot_with_ucode_ptr.py
  44. +974 -65   tools/binman/ftest.py
  45. +232 -83   tools/binman/image.py
  46. +7 -11     tools/binman/image_test.py
  47. +25 -1     tools/binman/state.py
  48. +5 -0      tools/binman/test/066_text.dts
  49. +2 -0      tools/binman/test/096_elf.dts
  50. +20 -0     tools/binman/test/102_cbfs_raw.dts
  51. +21 -0     tools/binman/test/103_cbfs_raw_ppc.dts
  52. +19 -0     tools/binman/test/104_cbfs_stage.dts
  53. +26 -0     tools/binman/test/105_cbfs_raw_compress.dts
  54. +15 -0     tools/binman/test/106_cbfs_bad_arch.dts
  55. +13 -0     tools/binman/test/107_cbfs_no_size.dts
  56. +17 -0     tools/binman/test/108_cbfs_no_contents.dts
  57. +18 -0     tools/binman/test/109_cbfs_bad_compress.dts
  58. +24 -0     tools/binman/test/110_cbfs_name.dts
  59. +29 -0     tools/binman/test/111_x86-rom-ifwi.dts
  60. +28 -0     tools/binman/test/112_x86-rom-ifwi-nodesc.dts
  61. +29 -0     tools/binman/test/113_x86-rom-ifwi-nodata.dts
  62. +26 -0     tools/binman/test/114_cbfs_offset.dts
  63. +13 -0     tools/binman/test/115_fdtmap.dts
  64. +17 -0     tools/binman/test/116_fdtmap_hdr.dts
  65. +19 -0     tools/binman/test/117_fdtmap_hdr_start.dts
  66. +19 -0     tools/binman/test/118_fdtmap_hdr_pos.dts
  67. +16 -0     tools/binman/test/119_fdtmap_hdr_missing.dts
  68. +16 -0     tools/binman/test/120_hdr_no_location.dts
  69. +20 -0     tools/binman/test/121_entry_expand.dts
  70. +21 -0     tools/binman/test/122_entry_expand_twice.dts
  71. +22 -0     tools/binman/test/123_entry_expand_section.dts
  72. +14 -0     tools/binman/test/124_compress_dtb.dts
  73. +21 -0     tools/binman/test/125_cbfs_update.dts
  74. +17 -0     tools/binman/test/126_cbfs_bad_type.dts
  75. +33 -0     tools/binman/test/127_list.dts
  76. +36 -0     tools/binman/test/128_decode_image.dts
  77. +33 -0     tools/binman/test/129_decode_image_nohdr.dts
  78. +36 -0     tools/binman/test/130_list_fdtmap.dts
  79. +28 -0     tools/binman/test/131_pack_align_section.dts
  80. BIN        tools/binman/test/fitimage.bin.gz
  81. BIN        tools/binman/test/ifwi.bin.gz
  82. +2 -2      tools/buildman/README
  83. +2304 -0   tools/ifwitool.c
  84. +2 -2      tools/patman/command.py
  85. +4 -2      tools/patman/test_util.py
  86. +136 -5    tools/patman/tools.py
  87. +9 -1      tools/patman/tout.py

+ 1 - 1
.gitlab-ci.yml

@@ -176,7 +176,7 @@ Run binman and dtoc testsuite:
       ./tools/buildman/buildman -P sandbox_spl && 
      export PYTHONPATH="${UBOOT_TRAVIS_BUILD_DIR}/scripts/dtc/pylibfdt";
      export PATH="${UBOOT_TRAVIS_BUILD_DIR}/scripts/dtc:${PATH}";
-     ./tools/binman/binman -t &&
+     ./tools/binman/binman --toolpath ${UBOOT_TRAVIS_BUILD_DIR}/tools test &&
      ./tools/dtoc/dtoc -t
 
 # Test sandbox with test.py

+ 2 - 1
.travis.yml

@@ -32,6 +32,7 @@ addons:
     - device-tree-compiler
     - lzop
     - liblz4-tool
+    - lzma-alone
     - libisl15
     - clang-7
     - srecord
@@ -146,7 +147,7 @@ script:
    if [[ -n "${TEST_PY_TOOLS}" ]]; then
      PYTHONPATH="${UBOOT_TRAVIS_BUILD_DIR}/scripts/dtc/pylibfdt"
      PATH="${UBOOT_TRAVIS_BUILD_DIR}/scripts/dtc:${PATH}"
-     ./tools/binman/binman -t &&
+     ./tools/binman/binman --toolpath ${UBOOT_TRAVIS_BUILD_DIR}/tools test &&
      ./tools/patman/patman --test &&
      ./tools/buildman/buildman -t &&
      PYTHONPATH="${UBOOT_TRAVIS_BUILD_DIR}/scripts/dtc/pylibfdt"

+ 2 - 2
Makefile

@@ -1196,9 +1196,9 @@ u-boot.ldr:	u-boot
 # ---------------------------------------------------------------------------
 # Use 'make BINMAN_DEBUG=1' to enable debugging
 quiet_cmd_binman = BINMAN  $@
-cmd_binman = $(srctree)/tools/binman/binman -u -d u-boot.dtb -O . -m \
+cmd_binman = $(srctree)/tools/binman/binman build -u -d u-boot.dtb -O . -m \
 		-I . -I $(srctree) -I $(srctree)/board/$(BOARDDIR) \
-		$(if $(BINMAN_DEBUG),-D) $(BINMAN_$(@F)) $<
+		$(if $(BINMAN_DEBUG),-D) $(BINMAN_$(@F))
 
 OBJCOPYFLAGS_u-boot.ldr.hex := -I binary -O ihex
 

+ 15 - 12
common/fdt_support.c

@@ -671,30 +671,33 @@ int fdt_pci_dma_ranges(void *blob, int phb_off, struct pci_controller *hose) {
 
 		dma_range[0] = 0;
 		if (size >= 0x100000000ull)
-			dma_range[0] |= FDT_PCI_MEM64;
+			dma_range[0] |= cpu_to_fdt32(FDT_PCI_MEM64);
 		else
-			dma_range[0] |= FDT_PCI_MEM32;
+			dma_range[0] |= cpu_to_fdt32(FDT_PCI_MEM32);
 		if (hose->regions[r].flags & PCI_REGION_PREFETCH)
-			dma_range[0] |= FDT_PCI_PREFETCH;
+			dma_range[0] |= cpu_to_fdt32(FDT_PCI_PREFETCH);
 #ifdef CONFIG_SYS_PCI_64BIT
-		dma_range[1] = bus_start >> 32;
+		dma_range[1] = cpu_to_fdt32(bus_start >> 32);
 #else
 		dma_range[1] = 0;
 #endif
-		dma_range[2] = bus_start & 0xffffffff;
+		dma_range[2] = cpu_to_fdt32(bus_start & 0xffffffff);
 
 		if (addrcell == 2) {
-			dma_range[3] = phys_start >> 32;
-			dma_range[4] = phys_start & 0xffffffff;
+			dma_range[3] = cpu_to_fdt32(phys_start >> 32);
+			dma_range[4] = cpu_to_fdt32(phys_start & 0xffffffff);
 		} else {
-			dma_range[3] = phys_start & 0xffffffff;
+			dma_range[3] = cpu_to_fdt32(phys_start & 0xffffffff);
 		}
 
 		if (sizecell == 2) {
-			dma_range[3 + addrcell + 0] = size >> 32;
-			dma_range[3 + addrcell + 1] = size & 0xffffffff;
+			dma_range[3 + addrcell + 0] =
+				cpu_to_fdt32(size >> 32);
+			dma_range[3 + addrcell + 1] =
+				cpu_to_fdt32(size & 0xffffffff);
 		} else {
-			dma_range[3 + addrcell + 0] = size & 0xffffffff;
+			dma_range[3 + addrcell + 0] =
+				cpu_to_fdt32(size & 0xffffffff);
 		}
 
 		dma_range += (3 + addrcell + sizecell);
@@ -1552,7 +1555,7 @@ u64 fdt_get_base_address(const void *fdt, int node)
 
 	prop = fdt_getprop(fdt, node, "reg", &size);
 
-	return prop ? fdt_translate_address(fdt, node, prop) : 0;
+	return prop ? fdt_translate_address(fdt, node, prop) : OF_BAD_ADDR;
 }
 
 /*
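
The conversions above are needed because all values in a flattened device tree are stored big-endian, so each cell written into 'dma-ranges' must pass through cpu_to_fdt32() on little-endian hosts. A minimal Python sketch of the same conversion (illustrative only, not part of this commit):

    import struct

    def cpu_to_fdt32(val):
        # FDT cells are big-endian 32-bit words regardless of host CPU;
        # '>I' packs big-endian, matching the byte swap the C macro does
        return struct.pack('>I', val & 0xffffffff)

    # Splitting a 64-bit size into two cells, high word first, as the
    # sizecell == 2 branch above does:
    size = 0x100000000
    cells = cpu_to_fdt32(size >> 32) + cpu_to_fdt32(size & 0xffffffff)
    assert cells == b'\x00\x00\x00\x01\x00\x00\x00\x00'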

+ 2 - 0
drivers/clk/clk-uclass.c

@@ -51,6 +51,8 @@ static int clk_of_xlate_default(struct clk *clk,
 	else
 		clk->id = 0;
 
+	clk->data = 0;
+
 	return 0;
 }
 

+ 12 - 5
drivers/core/device.c

@@ -388,7 +388,8 @@ int device_probe(struct udevice *dev)
 	if (dev->parent && device_get_uclass_id(dev) != UCLASS_PINCTRL)
 		pinctrl_select_state(dev, "default");
 
-	if (dev->parent && device_get_uclass_id(dev) != UCLASS_POWER_DOMAIN) {
+	if (CONFIG_IS_ENABLED(POWER_DOMAIN) && dev->parent &&
+	    device_get_uclass_id(dev) != UCLASS_POWER_DOMAIN) {
 		if (!power_domain_get(dev, &pd))
 			power_domain_on(&pd);
 	}
@@ -409,10 +410,16 @@ int device_probe(struct udevice *dev)
 			goto fail;
 	}
 
-	/* Process 'assigned-{clocks/clock-parents/clock-rates}' properties */
-	ret = clk_set_defaults(dev);
-	if (ret)
-		goto fail;
+	/* Only handle devices that have a valid ofnode */
+	if (dev_of_valid(dev)) {
+		/*
+		 * Process 'assigned-{clocks/clock-parents/clock-rates}'
+		 * properties
+		 */
+		ret = clk_set_defaults(dev);
+		if (ret)
+			goto fail;
+	}
 
 	if (drv->probe) {
 		ret = drv->probe(dev);

+ 1 - 1
drivers/core/ofnode.c

@@ -884,5 +884,5 @@ int ofnode_set_enabled(ofnode node, bool value)
 	if (value)
 		return ofnode_write_string(node, "status", "okay");
 	else
-		return ofnode_write_string(node, "status", "disable");
+		return ofnode_write_string(node, "status", "disabled");
 }

+ 4 - 0
drivers/timer/timer-uclass.c

@@ -48,6 +48,10 @@ static int timer_pre_probe(struct udevice *dev)
 	int err;
 	ulong ret;
 
+	/* It is possible that a timer device has a null ofnode */
+	if (!dev_of_valid(dev))
+		return 0;
+
 	err = clk_get_by_index(dev, 0, &timer_clk);
 	if (!err) {
 		ret = clk_get_rate(&timer_clk);

+ 2 - 2
fs/cbfs/cbfs.c

@@ -55,7 +55,7 @@ static void swap_file_header(struct cbfs_fileheader *dest,
 	memcpy(&dest->magic, &src->magic, sizeof(dest->magic));
 	dest->len = be32_to_cpu(src->len);
 	dest->type = be32_to_cpu(src->type);
-	dest->checksum = be32_to_cpu(src->checksum);
+	dest->attributes_offset = be32_to_cpu(src->attributes_offset);
 	dest->offset = be32_to_cpu(src->offset);
 }
 
@@ -108,7 +108,7 @@ static int file_cbfs_next_file(u8 *start, u32 size, u32 align,
 		newNode->name = (char *)fileHeader +
 				sizeof(struct cbfs_fileheader);
 		newNode->name_length = name_len;
-		newNode->checksum = header.checksum;
+		newNode->attributes_offset = header.attributes_offset;
 
 		step = header.len;
 		if (step % align)

+ 14 - 2
include/cbfs.h

@@ -40,6 +40,17 @@ enum cbfs_filetype {
 	CBFS_TYPE_CMOS_LAYOUT = 0x01aa
 };
 
+enum {
+	CBFS_HEADER_MAGIC	= 0x4f524243,
+};
+
+/**
+ * struct cbfs_header - header at the start of a CBFS region
+ *
+ * All fields use big-endian format.
+ *
+ * @magic: Magic number (CBFS_HEADER_MAGIC)
+ */
 struct cbfs_header {
 	u32 magic;
 	u32 version;
@@ -54,7 +65,8 @@ struct cbfs_fileheader {
 	u8 magic[8];
 	u32 len;
 	u32 type;
-	u32 checksum;
+	/* offset to struct cbfs_file_attribute or 0 */
+	u32 attributes_offset;
 	u32 offset;
 } __packed;
 
@@ -65,7 +77,7 @@ struct cbfs_cachenode {
 	u32 data_length;
 	char *name;
 	u32 name_length;
-	u32 checksum;
+	u32 attributes_offset;
 } __packed;
 
 extern enum cbfs_result file_cbfs_result;
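
The field previously labelled 'checksum' actually holds the offset of the file attributes in the CBFS format, hence the rename in the two hunks above. A minimal Python sketch of decoding this header, following the layout and big-endian byte order stated in the struct comments (the 'LARCHIVE' magic is CBFS's standard file magic, not shown in this diff):

    import struct

    # Mirrors struct cbfs_fileheader: 8-byte magic then four big-endian
    # 32-bit fields (len, type, attributes_offset, offset)
    CBFS_FILE_FORMAT = '>8sIIII'
    CBFS_FILE_LEN = struct.calcsize(CBFS_FILE_FORMAT)  # 24 bytes

    def parse_file_header(data):
        """Decode one CBFS file header from raw bytes."""
        magic, flen, ftype, attr_offset, offset = struct.unpack_from(
            CBFS_FILE_FORMAT, data)
        if magic != b'LARCHIVE':
            raise ValueError('Bad CBFS file magic %r' % magic)
        return flen, ftype, attr_offset, offset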

+ 1 - 1
include/dm/read.h

@@ -227,7 +227,7 @@ fdt_addr_t dev_read_addr_size(struct udevice *dev, const char *propname,
 /**
  * dev_read_name() - get the name of a device's node
  *
- * @node: valid node to look up
+ * @dev: Device to read from
  * @return name of node
  */
 const char *dev_read_name(struct udevice *dev);

+ 1 - 1
include/dm/uclass.h

@@ -297,7 +297,7 @@ int uclass_first_device_err(enum uclass_id id, struct udevice **devp);
  *
  * The device returned is probed if necessary, and ready for use
  *
- * This function is useful to start iterating through a list of devices which
+ * This function is useful to iterate through a list of devices which
  * are functioning correctly and can be probed.
  *
  * @devp: On entry, pointer to device to lookup. On exit, returns pointer

+ 6 - 3
test/run

@@ -33,12 +33,14 @@ run_test "sandbox_flattree" ./test/py/test.py --bd sandbox_flattree --build \
 	-k test_ut
 
 # Set up a path to dtc (device-tree compiler) and libfdt.py, a library it
-# provides and which is built by the sandbox_spl config.
+# provides and which is built by the sandbox_spl config. Also set up the path
+# to tools built by the build.
 DTC_DIR=build-sandbox_spl/scripts/dtc
 export PYTHONPATH=${DTC_DIR}/pylibfdt
 export DTC=${DTC_DIR}/dtc
+TOOLS_DIR=build-sandbox_spl/tools
 
-run_test "binman" ./tools/binman/binman -t
+run_test "binman" ./tools/binman/binman --toolpath ${TOOLS_DIR} test
 run_test "patman" ./tools/patman/patman --test
 
 [ "$1" == "quick" ] && skip=--skip-net-tests
@@ -49,7 +51,8 @@ run_test "dtoc" ./tools/dtoc/dtoc -t
 # This needs you to set up Python test coverage tools.
 # To enable Python test coverage on Debian-type distributions (e.g. Ubuntu):
 #   $ sudo apt-get install python-pytest python-coverage
-run_test "binman code coverage" ./tools/binman/binman -T
+export PATH=$PATH:${TOOLS_DIR}
+run_test "binman code coverage" ./tools/binman/binman test -T
 run_test "dtoc code coverage" ./tools/dtoc/dtoc -T
 run_test "fdt code coverage" ./tools/dtoc/test_fdt -T
 

+ 3 - 0
tools/Makefile

@@ -175,6 +175,9 @@ HOSTCFLAGS_mkexynosspl.o := -pedantic
 ifdtool-objs := $(LIBFDT_OBJS) ifdtool.o
 hostprogs-$(CONFIG_X86) += ifdtool
 
+ifwitool-objs := ifwitool.o
+hostprogs-$(CONFIG_X86)$(CONFIG_SANDBOX) += ifwitool
+
 hostprogs-$(CONFIG_MX23) += mxsboot
 hostprogs-$(CONFIG_MX28) += mxsboot
 HOSTCFLAGS_mxsboot.o := -pedantic

+ 136 - 18
tools/binman/README

@@ -36,10 +36,9 @@ suitable padding and alignment. It provides a way to process binaries before
 they are included, by adding a Python plug-in. The device tree is available
 to U-Boot at run-time so that the images can be interpreted.
 
-Binman does not yet update the device tree with the final location of
-everything when it is done. A simple C structure could be generated for
-constrained environments like SPL (using dtoc) but this is also not
-implemented.
+Binman can update the device tree with the final location of everything when it
+is done. Entry positions can be provided to U-Boot SPL as run-time symbols,
+avoiding device-tree code overhead.
 
 Binman can also support incorporating filesystems in the image if required.
 For example x86 platforms may use CBFS in some cases.
@@ -181,9 +180,14 @@ the configuration of the Intel-format descriptor.
 Running binman
 --------------
 
+First install prerequisites, e.g.
+
+	sudo apt-get install python-pyelftools python3-pyelftools lzma-alone \
+		liblz4-tool
+
 Type:
 
-	binman -b <board_name>
+	binman build -b <board_name>
 
 to build an image for a board. The board name is the same name used when
 configuring U-Boot (e.g. for sandbox_defconfig the board name is 'sandbox').
@@ -191,7 +195,7 @@ Binman assumes that the input files for the build are in ../b/<board_name>.
 
 Or you can specify this explicitly:
 
-	binman -I <build_path>
+	binman build -I <build_path>
 
 where <build_path> is the build directory containing the output of the U-Boot
 build.
@@ -335,6 +339,10 @@ expand-size:
 	limited by the size of the image/section and the position of the next
 	entry.
 
+compress:
+	Sets the compression algorithm to use (for blobs only). See the entry
+	documentation for details.
+
 The attributes supported for images and sections are described below. Several
 are similar to those for entries.
 
@@ -479,7 +487,92 @@ Entry Documentation
 For details on the various entry types supported by binman and how to use them,
 see README.entries. This is generated from the source code using:
 
-	binman -E >tools/binman/README.entries
+	binman entry-docs >tools/binman/README.entries
+
+
+Listing images
+--------------
+
+It is possible to list the entries in an existing firmware image created by
+binman, provided that there is an 'fdtmap' entry in the image. For example:
+
+    $ binman ls -i image.bin
+    Name              Image-pos  Size  Entry-type    Offset  Uncomp-size
+    ----------------------------------------------------------------------
+    main-section                  c00  section            0
+      u-boot                  0     4  u-boot             0
+      section                     5fc  section            4
+        cbfs                100   400  cbfs               0
+          u-boot            138     4  u-boot            38
+          u-boot-dtb        180   108  u-boot-dtb        80          3b5
+        u-boot-dtb          500   1ff  u-boot-dtb       400          3b5
+      fdtmap                6fc   381  fdtmap           6fc
+      image-header          bf8     8  image-header     bf8
+
+This shows the hierarchy of the image, the position, size and type of each
+entry, the offset of each entry within its parent and the uncompressed size if
+the entry is compressed.
+
+It is also possible to list just some files in an image, e.g.
+
+    $ binman ls -i image.bin section/cbfs
+    Name              Image-pos  Size  Entry-type  Offset  Uncomp-size
+    --------------------------------------------------------------------
+        cbfs                100   400  cbfs             0
+          u-boot            138     4  u-boot          38
+          u-boot-dtb        180   108  u-boot-dtb      80          3b5
+
+or with wildcards:
+
+    $ binman ls -i image.bin "*cb*" "*head*"
+    Name              Image-pos  Size  Entry-type    Offset  Uncomp-size
+    ----------------------------------------------------------------------
+        cbfs                100   400  cbfs               0
+          u-boot            138     4  u-boot            38
+          u-boot-dtb        180   108  u-boot-dtb        80          3b5
+      image-header          bf8     8  image-header     bf8
+
+
+Extracting files from images
+----------------------------
+
+You can extract files from an existing firmware image created by binman,
+provided that there is an 'fdtmap' entry in the image. For example:
+
+    $ binman extract -i image.bin section/cbfs/u-boot
+
+which will write the uncompressed contents of that entry to the file 'u-boot' in
+the current directory. You can also extract to a particular file, in this case
+u-boot.bin:
+
+    $ binman extract -i image.bin section/cbfs/u-boot -f u-boot.bin
+
+It is possible to extract all files into a destination directory, which will
+put files in subdirectories matching the entry hierarchy:
+
+    $ binman extract -i image.bin -O outdir
+
+or just a selection:
+
+    $ binman extract -i image.bin "*u-boot*" -O outdir
+
+
+Logging
+-------
+
+Binman normally operates silently unless there is an error, in which case it
+just displays the error. The -D/--debug option can be used to create a full
+backtrace when errors occur.
+
+Internally binman logs some output while it is running. This can be displayed
+by increasing the -v/--verbosity from the default of 1:
+
+   0: silent
+   1: warnings only
+   2: notices (important messages)
+   3: info about major operations
+   4: detailed information about each operation
+   5: debug (all output)
 
 
 Hashing Entries
@@ -558,7 +651,8 @@ tree. This sets the correct 'offset' and 'size' values, for example.
 The default implementation does nothing. This can be overridden to adjust the
 contents of an entry in some way. For example, it would be possible to create
 an entry containing a hash of the contents of some other entries. At this
-stage the offset and size of entries should not be adjusted.
+stage the offset and size of entries should not be adjusted unless absolutely
+necessary, since it requires a repack (going back to PackEntries()).
 
 10. WriteSymbols() - write the value of symbols into the U-Boot SPL binary.
 See 'Access to binman entry offsets at run time' below for a description of
@@ -634,20 +728,27 @@ the image definition, binman calculates the final values and writes these to
 the device tree. These can be used by U-Boot at run-time to find the location
 of each entry.
 
+Alternatively, an FDT map entry can be used to add a special FDT containing
+just the information about the image. This is preceded by a magic string so can
+be located anywhere in the image. An image header (typically at the start or end
+of the image) can be used to point to the FDT map. See fdtmap and image-header
+entries for more information.
+
 
 Compression
 -----------
 
 Binman supports compression for 'blob' entries (those of type 'blob' and
-derivatives). To enable this for an entry, add a 'compression' property:
+derivatives). To enable this for an entry, add a 'compress' property:
 
     blob {
         filename = "datafile";
-        compression = "lz4";
+        compress = "lz4";
     };
 
 The entry will then contain the compressed data, using the 'lz4' compression
-algorithm. Currently this is the only one that is supported.
+algorithm. Currently this is the only one that is supported. The uncompressed
+size is written to the node in an 'uncomp-size' property, if -u is used.
 
 
 
@@ -691,15 +792,25 @@ Not all properties can be provided this way. Only some entries support it,
 typically for filenames.
 
 
+External tools
+--------------
+
+Binman can make use of external command-line tools to handle processing of
+entry contents or to generate entry contents. These tools are executed using
+the 'tools' module's Run() method. The tools generally must exist on the PATH,
+but the --toolpath option can be used to specify additional search paths to
+use. This option can be specified multiple times to add more than one path.
+
+
 Code coverage
 -------------
 
 Binman is a critical tool and is designed to be very testable. Entry
-implementations target 100% test coverage. Run 'binman -T' to check this.
+implementations target 100% test coverage. Run 'binman test -T' to check this.
 
 To enable Python test coverage on Debian-type distributions (e.g. Ubuntu):
 
-   $ sudo apt-get install python-coverage python-pytest
+   $ sudo apt-get install python-coverage python3-coverage python-pytest
 
 
 Concurrent tests
@@ -716,6 +827,14 @@ Use '-P 1' to disable this. It is automatically disabled when code coverage is
 being used (-T) since they are incompatible.
 
 
+Debugging tests
+---------------
+
+Sometimes when debugging tests it is useful to keep the input and output
+directories so they can be examined later. Use -X or --test-preserve-dirs for
+this.
+
+
 Advanced Features / Technical docs
 ----------------------------------
 
@@ -788,13 +907,12 @@ Some ideas:
 - Use of-platdata to make the information available to code that is unable
   to use device tree (such as a very small SPL image)
 - Allow easy building of images by specifying just the board name
-- Produce a full Python binding for libfdt (for upstream). This is nearing
-    completion but some work remains
-- Add an option to decode an image into the constituent binaries
 - Support building an image for a board (-b) more completely, with a
   configurable build directory
-- Consider making binman work with buildman, although if it is used in the
-  Makefile, this will be automatic
+- Support updating binaries in an image (with no size change / repacking)
+- Support updating binaries in an image (with repacking)
+- Support adding FITs to an image
+- Support for ARM Trusted Firmware (ATF)
 
 --
 Simon Glass <sjg@chromium.org>
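
To make the fdtmap mechanism described above concrete, here is a rough sketch of locating an fdtmap by scanning for its magic string (the _FDTMAP_ marker comes from the fdtmap entry docs in README.entries below; this is illustrative, not binman's actual implementation):

    FDTMAP_MAGIC = b'_FDTMAP_'

    def find_fdtmap(image_data):
        """Return the offset of the FDT inside an fdtmap entry, if any."""
        pos = image_data.find(FDTMAP_MAGIC)
        if pos == -1:
            raise ValueError('No fdtmap found in image')
        # The magic is followed by 8 unused bytes, then the FDT itself
        return pos + len(FDTMAP_MAGIC) + 8

    with open('image.bin', 'rb') as fd:
        fdt_offset = find_fdtmap(fd.read())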

+ 273 - 9
tools/binman/README.entries

@@ -60,6 +60,158 @@ See cros_ec_rw for an example of this.
 
 
 
+Entry: cbfs: Entry containing a Coreboot Filesystem (CBFS)
+----------------------------------------------------------
+
+A CBFS provides a way to group files together. It has a simple directory
+structure and allows the position of individual files to be set, since it is
+designed to support execute-in-place in an x86 SPI-flash device. Where XIP
+is not used, it supports compression and storing ELF files.
+
+CBFS is used by coreboot as its way of organising SPI-flash contents.
+
+The contents of the CBFS are defined by subnodes of the cbfs entry, e.g.:
+
+    cbfs {
+        size = <0x100000>;
+        u-boot {
+            cbfs-type = "raw";
+        };
+        u-boot-dtb {
+            cbfs-type = "raw";
+        };
+    };
+
+This creates a CBFS 1MB in size with two files in it: u-boot.bin and u-boot.dtb.
+Note that the size is required since binman does not support calculating it.
+The contents of each entry is just what binman would normally provide if it
+were not a CBFS node. A blob type can be used to import arbitrary files as
+with the second subnode below:
+
+    cbfs {
+        size = <0x100000>;
+        u-boot {
+            cbfs-name = "BOOT";
+            cbfs-type = "raw";
+        };
+
+        dtb {
+            type = "blob";
+            filename = "u-boot.dtb";
+            cbfs-type = "raw";
+            cbfs-compress = "lz4";
+            cbfs-offset = <0x100000>;
+        };
+    };
+
+This creates a CBFS 1MB in size with u-boot.bin (named "BOOT") and
+u-boot.dtb (named "dtb", the latter compressed with the lz4 algorithm).
+
+
+Properties supported in the top-level CBFS node:
+
+cbfs-arch:
+    Defaults to "x86", but you can specify the architecture if needed.
+
+
+Properties supported in the CBFS entry subnodes:
+
+cbfs-name:
+    This is the name of the file created in CBFS. It defaults to the entry
+    name (which is the node name), but you can override it with this
+    property.
+
+cbfs-type:
+    This is the CBFS file type. The following are supported:
+
+    raw:
+        This is a 'raw' file, although compression is supported. It can be
+        used to store any file in CBFS.
+
+    stage:
+        This is an ELF file that has been loaded (i.e. mapped to memory), so
+        appears in the CBFS as a flat binary. The input file must be an ELF
+        image, for example this puts "u-boot" (the ELF image) into a 'stage'
+        entry:
+
+            cbfs {
+                size = <0x100000>;
+                u-boot-elf {
+                    cbfs-name = "BOOT";
+                    cbfs-type = "stage";
+                };
+            };
+
+        You can use your own ELF file with something like:
+
+            cbfs {
+                size = <0x100000>;
+                something {
+                    type = "blob";
+                    filename = "cbfs-stage.elf";
+                    cbfs-type = "stage";
+                };
+            };
+
+        As mentioned, the file is converted to a flat binary, so it is
+        equivalent to adding "u-boot.bin", for example, but with the load and
+        start addresses specified by the ELF. At present there is no option
+        to add a flat binary with a load/start address, similar to the
+        'add-flat-binary' option in cbfstool.
+
+cbfs-offset:
+    This is the offset of the file's data within the CBFS. It is used to
+    specify where the file should be placed in cases where a fixed position
+    is needed. Typical uses are for code which is not relocatable and must
+    execute in-place from a particular address. This works because SPI flash
+    is generally mapped into memory on x86 devices. The file header is
+    placed before this offset so that the data start lines up exactly with
+    the chosen offset. If this property is not provided, then the file is
+    placed in the next available spot.
+
+The current implementation supports only a subset of CBFS features. It does
+not support other file types (e.g. payload), adding multiple files (like the
+'files' entry with a pattern supported by binman), putting files at a
+particular offset in the CBFS and a few other things.
+
+Of course binman can create images containing multiple CBFSs, simply by
+defining these in the binman config:
+
+
+    binman {
+        size = <0x800000>;
+        cbfs {
+            offset = <0x100000>;
+            size = <0x100000>;
+            u-boot {
+                cbfs-type = "raw";
+            };
+            u-boot-dtb {
+                cbfs-type = "raw";
+            };
+        };
+
+        cbfs2 {
+            offset = <0x700000>;
+            size = <0x100000>;
+            u-boot {
+                cbfs-type = "raw";
+            };
+            u-boot-dtb {
+                cbfs-type = "raw";
+            };
+            image {
+                type = "blob";
+                filename = "image.jpg";
+            };
+        };
+    };
+
+This creates an 8MB image with two CBFSs, one at offset 1MB, one at 7MB,
+both of size 1MB.
+
+
+
 Entry: cros-ec-rw: A blob entry which contains a Chromium OS read-write EC image
 --------------------------------------------------------------------------------
 
@@ -71,6 +223,44 @@ updating the EC on startup via software sync.
 
 
 
+Entry: fdtmap: An entry which contains an FDT map
+-------------------------------------------------
+
+Properties / Entry arguments:
+    None
+
+An FDT map is just a header followed by an FDT containing a list of all the
+entries in the image.
+
+The header is the string _FDTMAP_ followed by 8 unused bytes.
+
+When used, this entry will be populated with an FDT map which reflects the
+entries in the current image. Hierarchy is preserved, and all offsets and
+sizes are included.
+
+Note that the -u option must be provided to ensure that binman updates the
+FDT with the position of each entry.
+
+Example output for a simple image with U-Boot and an FDT map:
+
+/ {
+    size = <0x00000112>;
+    image-pos = <0x00000000>;
+    offset = <0x00000000>;
+    u-boot {
+        size = <0x00000004>;
+        image-pos = <0x00000000>;
+        offset = <0x00000000>;
+    };
+    fdtmap {
+        size = <0x0000010e>;
+        image-pos = <0x00000004>;
+        offset = <0x00000004>;
+    };
+};
+
+
+
 Entry: files: Entry containing a set of files
 ---------------------------------------------
 
@@ -141,6 +331,25 @@ README.chromium for how to obtain the required keys and tools.
 
 
 
+Entry: image-header: An entry which contains a pointer to the FDT map
+---------------------------------------------------------------------
+
+Properties / Entry arguments:
+    location: Location of header ("start" or "end" of image). This is
+        optional. If omitted then the entry must have an offset property.
+
+This adds an 8-byte entry to the start or end of the image, pointing to the
+location of the FDT map. The format is a magic number followed by an offset
+from the start or end of the image, in two's-complement format.
+
+This entry must be in the top-level part of the image.
+
+NOTE: If the location is at the start/end, you will probably need to specify
+sort-by-offset for the image, unless you actually put the image header
+first/last in the entry list.
+
+
+
 Entry: intel-cmc: Entry containing an Intel Chipset Micro Code (CMC) file
 -------------------------------------------------------------------------
 
@@ -192,6 +401,34 @@ See README.x86 for information about x86 binary blobs.
 
 
 
+Entry: intel-ifwi: Entry containing an Intel Integrated Firmware Image (IFWI) file
+----------------------------------------------------------------------------------
+
+Properties / Entry arguments:
+    - filename: Filename of file to read into entry. This is either the
+        IFWI file itself, or a file that can be converted into one using a
+        tool
+    - convert-fit: If present this indicates that the ifwitool should be
+        used to convert the provided file into a IFWI.
+
+This file contains code and data used by the SoC that is required to make
+it work. It includes U-Boot TPL, microcode, things related to the CSE
+(Converged Security Engine, the microcontroller that loads all the firmware)
+and other items beyond the wit of man.
+
+A typical filename is 'ifwi.bin' for an IFWI file, or 'fitimage.bin' for a
+file that will be converted to an IFWI.
+
+The position of this entry is generally set by the intel-descriptor entry.
+
+The contents of the IFWI are specified by the subnodes of the IFWI node.
+Each subnode describes an entry which is placed into the IFWI with a given
+sub-partition (and optional entry name).
+
+See README.x86 for information about x86 binary blobs.
+
+
+
 Entry: intel-me: Entry containing an Intel Management Engine (ME) file
 ----------------------------------------------------------------------
 
@@ -206,6 +443,8 @@ does not directly execute code in the ME binary.
 
 A typical filename is 'me.bin'.
 
+The position of this entry is generally set by the intel-descriptor entry.
+
 See README.x86 for information about x86 binary blobs.
 
 
@@ -282,16 +521,21 @@ Entry: section: Entry that contains other entries
 -------------------------------------------------
 
 Properties / Entry arguments: (see binman README for more information)
-    - size: Size of section in bytes
-    - align-size: Align size to a particular power of two
-    - pad-before: Add padding before the entry
-    - pad-after: Add padding after the entry
-    - pad-byte: Pad byte to use when padding
-    - sort-by-offset: Reorder the entries by offset
-    - end-at-4gb: Used to build an x86 ROM which ends at 4GB (2^32)
-    - name-prefix: Adds a prefix to the name of every entry in the section
+    pad-byte: Pad byte to use when padding
+    sort-by-offset: True if entries should be sorted by offset, False if
+        they must be in-order in the device tree description
+    end-at-4gb: Used to build an x86 ROM which ends at 4GB (2^32)
+    skip-at-start: Number of bytes before the first entry starts. These
+        effectively adjust the starting offset of entries. For example,
+        if this is 16, then the first entry would start at 16. An entry
+        with offset = 20 would in fact be written at offset 4 in the image
+        file, since the first 16 bytes are skipped when writing.
+    name-prefix: Adds a prefix to the name of every entry in the section
         when writing out the map
 
+Since a section is also an entry, it inherits all the properties of entries
+too.
+
 A section is an entry which can contain other entries, thus allowing
 hierarchical images to be created. See 'Sections and hierarchical images'
 in the binman README for more information.
@@ -310,6 +554,8 @@ Properties / Entry arguments:
         that contains the string to place in the entry
     <xxx> (actual name is the value of text-label): contains the string to
         place in the entry.
+    <text>: The text to place in the entry (overrides the above mechanism).
+        This is useful when the text is constant.
 
 Example node:
 
@@ -332,6 +578,13 @@ It is also possible to put the string directly in the node:
         message = "a message directly in the node"
     };
 
+or just:
+
+    text {
+        size = <8>;
+        text = "some text directly in the node"
+    };
+
 The text is not itself nul-terminated. This can be achieved, if required,
 by setting the size of the entry to something larger than the text.
 
@@ -485,7 +738,7 @@ Entry: u-boot-spl-elf: U-Boot SPL ELF image
 -------------------------------------------
 
 Properties / Entry arguments:
-    - filename: Filename of SPL u-boot (default 'spl/u-boot')
+    - filename: Filename of SPL u-boot (default 'spl/u-boot-spl')
 
 This is the U-Boot SPL ELF image. It does not include a device tree but can
 be relocated to any address for execution.
@@ -563,6 +816,17 @@ process.
 
 
 
+Entry: u-boot-tpl-elf: U-Boot TPL ELF image
+-------------------------------------------
+
+Properties / Entry arguments:
+    - filename: Filename of TPL u-boot (default 'tpl/u-boot-tpl')
+
+This is the U-Boot TPL ELF image. It does not include a device tree but can
+be relocated to any address for execution.
+
+
+
 Entry: u-boot-tpl-with-ucode-ptr: U-Boot TPL with embedded microcode pointer
 ----------------------------------------------------------------------------
 
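
The 'stage' CBFS type documented earlier in this file converts an ELF into a flat binary. A rough sketch of that flattening using pyelftools (one of the new prerequisites; illustrative only, not the code this series adds):

    from elftools.elf.elffile import ELFFile

    def flatten_elf(fname):
        """Concatenate PT_LOAD segments into a flat binary.

        Returns the binary plus its load address and entry point.
        """
        with open(fname, 'rb') as fd:
            elf = ELFFile(fd)
            segs = [s for s in elf.iter_segments() if s['p_type'] == 'PT_LOAD']
            base = min(s['p_paddr'] for s in segs)
            end = max(s['p_paddr'] + s['p_filesz'] for s in segs)
            data = bytearray(end - base)
            for seg in segs:
                offset = seg['p_paddr'] - base
                data[offset:offset + seg['p_filesz']] = seg.data()
            return bytes(data), base, elf.header['e_entry']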

+ 61 - 30
tools/binman/binman.py

@@ -11,23 +11,32 @@
 
 from __future__ import print_function
 
+from distutils.sysconfig import get_python_lib
 import glob
 import multiprocessing
 import os
+import site
 import sys
 import traceback
 import unittest
 
-# Bring in the patman and dtoc libraries
+# Bring in the patman and dtoc libraries (but don't override the first path
+# in PYTHONPATH)
 our_path = os.path.dirname(os.path.realpath(__file__))
 for dirname in ['../patman', '../dtoc', '..', '../concurrencytest']:
-    sys.path.insert(0, os.path.join(our_path, dirname))
+    sys.path.insert(2, os.path.join(our_path, dirname))
 
 # Bring in the libfdt module
-sys.path.insert(0, 'scripts/dtc/pylibfdt')
-sys.path.insert(0, os.path.join(our_path,
+sys.path.insert(2, 'scripts/dtc/pylibfdt')
+sys.path.insert(2, os.path.join(our_path,
                 '../../build-sandbox_spl/scripts/dtc/pylibfdt'))
 
+# When running under python-coverage on Ubuntu 16.04, the dist-packages
+# directories are dropped from the python path. Add them in so that we can find
+# the elffile module. We could use site.getsitepackages() here but unfortunately
+# that is not available in a virtualenv.
+sys.path.append(get_python_lib())
+
 import cmdline
 import command
 use_concurrent = True
@@ -38,15 +47,23 @@ except:
 import control
 import test_util
 
-def RunTests(debug, processes, args):
+def RunTests(debug, verbosity, processes, test_preserve_dirs, args, toolpath):
     """Run the functional tests and any embedded doctests
 
     Args:
         debug: True to enable debugging, which shows a full stack trace on error
-        args: List of positional args provided to binman. This can hold a test
-            name to execute (as in 'binman -t testSections', for example)
+        verbosity: Verbosity level to use
+        test_preserve_dirs: True to preserve the input directory used by tests
+            so that it can be examined afterwards (only useful for debugging
+            tests). If a single test is selected (in args[0]) it also preserves
+            the output directory for this test. Both directories are displayed
+            on the command line.
         processes: Number of processes to use to run tests (None=same as #CPUs)
+        args: List of positional args provided to binman. This can hold a test
+            name to execute (as in 'binman test testSections', for example)
+        toolpath: List of paths to use for tools
     """
+    import cbfs_util_test
     import elf_test
     import entry_test
     import fdt_test
@@ -63,8 +80,11 @@ def RunTests(debug, processes, args):
     sys.argv = [sys.argv[0]]
     if debug:
         sys.argv.append('-D')
-    if debug:
-        sys.argv.append('-D')
+    if verbosity:
+        sys.argv.append('-v%d' % verbosity)
+    if toolpath:
+        for path in toolpath:
+            sys.argv += ['--toolpath', path]
 
    # Run the entry tests first, since these need to be the first to import the
     # 'entry' module.
@@ -72,7 +92,14 @@ def RunTests(debug, processes, args):
     suite = unittest.TestSuite()
     loader = unittest.TestLoader()
     for module in (entry_test.TestEntry, ftest.TestFunctional, fdt_test.TestFdt,
-                   elf_test.TestElf, image_test.TestImage):
+                   elf_test.TestElf, image_test.TestImage,
+                   cbfs_util_test.TestCbfs):
+        # Tell the test module about our arguments, if it is interested
+        if hasattr(module, 'setup_test_args'):
+            setup_test_args = getattr(module, 'setup_test_args')
+            setup_test_args(preserve_indir=test_preserve_dirs,
+                preserve_outdirs=test_preserve_dirs and test_name is not None,
+                toolpath=toolpath, verbosity=verbosity)
         if test_name:
             try:
                 suite.addTests(loader.loadTestsFromName(test_name, module))
@@ -104,9 +131,14 @@ def RunTests(debug, processes, args):
         print(test.id(), err)
     for test, err in result.failures:
         print(err, result.failures)
+    if result.skipped:
+        print('%d binman test%s SKIPPED:' %
+              (len(result.skipped), 's' if len(result.skipped) > 1 else ''))
+        for skip_info in result.skipped:
+            print('%s: %s' % (skip_info[0], skip_info[1]))
     if result.errors or result.failures:
-      print('binman tests FAILED')
-      return 1
+        print('binman tests FAILED')
+        return 1
     return 0
 
 def GetEntryModules(include_testing=True):
@@ -127,38 +159,36 @@ def RunTestCoverage():
                    for item in glob_list if '_testing' not in item])
     test_util.RunTestCoverage('tools/binman/binman.py', None,
             ['*test*', '*binman.py', 'tools/patman/*', 'tools/dtoc/*'],
-            options.build_dir, all_set)
+            args.build_dir, all_set)
 
-def RunBinman(options, args):
+def RunBinman(args):
     """Main entry point to binman once arguments are parsed
 
     Args:
-        options: Command-line options
-        args: Non-option arguments
+        args: Command line arguments Namespace object
     """
     ret_code = 0
 
-    # For testing: This enables full exception traces.
-    #options.debug = True
-
-    if not options.debug:
+    if not args.debug:
         sys.tracebacklimit = 0
 
-    if options.test:
-        ret_code = RunTests(options.debug, options.processes, args[1:])
-
-    elif options.test_coverage:
-        RunTestCoverage()
+    if args.cmd == 'test':
+        if args.test_coverage:
+            RunTestCoverage()
+        else:
+            ret_code = RunTests(args.debug, args.verbosity, args.processes,
+                                args.test_preserve_dirs, args.tests,
+                                args.toolpath)
 
-    elif options.entry_docs:
+    elif args.cmd == 'entry-docs':
         control.WriteEntryDocs(GetEntryModules())
 
     else:
         try:
-            ret_code = control.Binman(options, args)
+            ret_code = control.Binman(args)
         except Exception as e:
             print('binman: %s' % e)
-            if options.debug:
+            if args.debug:
                 print()
                 traceback.print_exc()
             ret_code = 1
@@ -166,6 +196,7 @@ def RunBinman(options, args):
 
 
 if __name__ == "__main__":
-    (options, args) = cmdline.ParseArgs(sys.argv)
-    ret_code = RunBinman(options, args)
+    args = cmdline.ParseArgs(sys.argv[1:])
+
+    ret_code = RunBinman(args)
     sys.exit(ret_code)
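
The restructuring above replaces binman's single-letter mode flags with subcommands. The forms used elsewhere in this series are:

    binman build -b <board_name>    (was the default mode)
    binman test [-T] [testName]     (was -t / -T)
    binman entry-docs               (was -E)
    binman ls -i image.bin
    binman extract -i image.bin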

+ 0 - 464
tools/binman/bsection.py

@@ -1,464 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0+
-# Copyright (c) 2018 Google, Inc
-# Written by Simon Glass <sjg@chromium.org>
-#
-# Base class for sections (collections of entries)
-#
-
-from __future__ import print_function
-
-from collections import OrderedDict
-import sys
-
-import fdt_util
-import re
-import state
-import tools
-
-class Section(object):
-    """A section which contains multiple entries
-
-    A section represents a collection of entries. There must be one or more
-    sections in an image. Sections are used to group entries together.
-
-    Attributes:
-        _node: Node object that contains the section definition in device tree
-        _parent_section: Parent Section object which created this Section
-        _size: Section size in bytes, or None if not known yet
-        _align_size: Section size alignment, or None
-        _pad_before: Number of bytes before the first entry starts. This
-            effectively changes the place where entry offset 0 starts
-        _pad_after: Number of bytes after the last entry ends. The last
-            entry will finish on or before this boundary
-        _pad_byte: Byte to use to pad the section where there is no entry
-        _sort: True if entries should be sorted by offset, False if they
-            must be in-order in the device tree description
-        _skip_at_start: Number of bytes before the first entry starts. These
-            effectively adjust the starting offset of entries. For example,
-            if _pad_before is 16, then the first entry would start at 16.
-            An entry with offset = 20 would in fact be written at offset 4
-            in the image file.
-        _end_4gb: Indicates that the section ends at the 4GB boundary. This is
-            used for x86 images, which want to use offsets such that a memory
-            address (like 0xff800000) is the first entry offset. This causes
-            _skip_at_start to be set to the starting memory address.
-        _name_prefix: Prefix to add to the name of all entries within this
-            section
-        _entries: OrderedDict() of entries
-    """
-    def __init__(self, name, parent_section, node, image, test=False):
-        global entry
-        global Entry
-        import entry
-        from entry import Entry
-
-        self._parent_section = parent_section
-        self._name = name
-        self._node = node
-        self._image = image
-        self._offset = None
-        self._size = None
-        self._align_size = None
-        self._pad_before = 0
-        self._pad_after = 0
-        self._pad_byte = 0
-        self._sort = False
-        self._skip_at_start = None
-        self._end_4gb = False
-        self._name_prefix = ''
-        self._entries = OrderedDict()
-        self._image_pos = None
-        if not test:
-            self._ReadNode()
-            self._ReadEntries()
-
-    def _ReadNode(self):
-        """Read properties from the section node"""
-        self._offset = fdt_util.GetInt(self._node, 'offset')
-        self._size = fdt_util.GetInt(self._node, 'size')
-        self._align_size = fdt_util.GetInt(self._node, 'align-size')
-        if tools.NotPowerOfTwo(self._align_size):
-            self._Raise("Alignment size %s must be a power of two" %
-                        self._align_size)
-        self._pad_before = fdt_util.GetInt(self._node, 'pad-before', 0)
-        self._pad_after = fdt_util.GetInt(self._node, 'pad-after', 0)
-        self._pad_byte = fdt_util.GetInt(self._node, 'pad-byte', 0)
-        self._sort = fdt_util.GetBool(self._node, 'sort-by-offset')
-        self._end_4gb = fdt_util.GetBool(self._node, 'end-at-4gb')
-        self._skip_at_start = fdt_util.GetInt(self._node, 'skip-at-start')
-        if self._end_4gb:
-            if not self._size:
-                self._Raise("Section size must be provided when using end-at-4gb")
-            if self._skip_at_start is not None:
-                self._Raise("Provide either 'end-at-4gb' or 'skip-at-start'")
-            else:
-                self._skip_at_start = 0x100000000 - self._size
-        else:
-            if self._skip_at_start is None:
-                self._skip_at_start = 0
-        self._name_prefix = fdt_util.GetString(self._node, 'name-prefix')
-
-    def _ReadEntries(self):
-        for node in self._node.subnodes:
-            if node.name == 'hash':
-                continue
-            entry = Entry.Create(self, node)
-            entry.SetPrefix(self._name_prefix)
-            self._entries[node.name] = entry
-
-    def GetFdtSet(self):
-        """Get the set of device tree files used by this image"""
-        fdt_set = set()
-        for entry in self._entries.values():
-            fdt_set.update(entry.GetFdtSet())
-        return fdt_set
-
-    def SetOffset(self, offset):
-        self._offset = offset
-
-    def ExpandEntries(self):
-        for entry in self._entries.values():
-            entry.ExpandEntries()
-
-    def AddMissingProperties(self):
-        """Add new properties to the device tree as needed for this entry"""
-        for prop in ['offset', 'size', 'image-pos']:
-            if not prop in self._node.props:
-                state.AddZeroProp(self._node, prop)
-        state.CheckAddHashProp(self._node)
-        for entry in self._entries.values():
-            entry.AddMissingProperties()
-
-    def SetCalculatedProperties(self):
-        state.SetInt(self._node, 'offset', self._offset or 0)
-        state.SetInt(self._node, 'size', self._size)
-        image_pos = self._image_pos
-        if self._parent_section:
-            image_pos -= self._parent_section.GetRootSkipAtStart()
-        state.SetInt(self._node, 'image-pos', image_pos)
-        for entry in self._entries.values():
-            entry.SetCalculatedProperties()
-
-    def ProcessFdt(self, fdt):
-        todo = self._entries.values()
-        for passnum in range(3):
-            next_todo = []
-            for entry in todo:
-                if not entry.ProcessFdt(fdt):
-                    next_todo.append(entry)
-            todo = next_todo
-            if not todo:
-                break
-        if todo:
-            self._Raise('Internal error: Could not complete processing of Fdt: '
-                        'remaining %s' % todo)
-        return True
-
-    def CheckSize(self):
-        """Check that the section contents does not exceed its size, etc."""
-        contents_size = 0
-        for entry in self._entries.values():
-            contents_size = max(contents_size, entry.offset + entry.size)
-
-        contents_size -= self._skip_at_start
-
-        size = self._size
-        if not size:
-            size = self._pad_before + contents_size + self._pad_after
-            size = tools.Align(size, self._align_size)
-
-        if self._size and contents_size > self._size:
-            self._Raise("contents size %#x (%d) exceeds section size %#x (%d)" %
-                       (contents_size, contents_size, self._size, self._size))
-        if not self._size:
-            self._size = size
-        if self._size != tools.Align(self._size, self._align_size):
-            self._Raise("Size %#x (%d) does not match align-size %#x (%d)" %
-                  (self._size, self._size, self._align_size, self._align_size))
-        return size
-
-    def _Raise(self, msg):
-        """Raises an error for this section
-
-        Args:
-            msg: Error message to use in the raise string
-        Raises:
-            ValueError()
-        """
-        raise ValueError("Section '%s': %s" % (self._node.path, msg))
-
-    def GetPath(self):
-        """Get the path of an image (in the FDT)
-
-        Returns:
-            Full path of the node for this image
-        """
-        return self._node.path
-
-    def FindEntryType(self, etype):
-        """Find an entry type in the section
-
-        Args:
-            etype: Entry type to find
-        Returns:
-            entry matching that type, or None if not found
-        """
-        for entry in self._entries.values():
-            if entry.etype == etype:
-                return entry
-        return None
-
-    def GetEntryContents(self):
-        """Call ObtainContents() for each entry
-
-        This calls each entry's ObtainContents() a few times until they all
-        return True. We stop calling an entry's function once it returns
-        True. This allows the contents of one entry to depend on another.
-
-        After 3 rounds we give up since it's likely an error.
-        """
-        todo = self._entries.values()
-        for passnum in range(3):
-            next_todo = []
-            for entry in todo:
-                if not entry.ObtainContents():
-                    next_todo.append(entry)
-            todo = next_todo
-            if not todo:
-                break
-        if todo:
-            self._Raise('Internal error: Could not complete processing of '
-                        'contents: remaining %s' % todo)
-        return True
-
-    def _SetEntryOffsetSize(self, name, offset, size):
-        """Set the offset and size of an entry
-
-        Args:
-            name: Entry name to update
-            offset: New offset
-            size: New size
-        """
-        entry = self._entries.get(name)
-        if not entry:
-            self._Raise("Unable to set offset/size for unknown entry '%s'" %
-                        name)
-        entry.SetOffsetSize(self._skip_at_start + offset, size)
-
-    def GetEntryOffsets(self):
-        """Handle entries that want to set the offset/size of other entries
-
-        This calls each entry's GetOffsets() method. If it returns a list
-        of entries to update, it updates them.
-        """
-        for entry in self._entries.values():
-            offset_dict = entry.GetOffsets()
-            for name, info in offset_dict.items():
-                self._SetEntryOffsetSize(name, *info)
-
-    def PackEntries(self):
-        """Pack all entries into the section"""
-        offset = self._skip_at_start
-        for entry in self._entries.values():
-            offset = entry.Pack(offset)
-        self._size = self.CheckSize()
-
-    def _SortEntries(self):
-        """Sort entries by offset"""
-        entries = sorted(self._entries.values(), key=lambda entry: entry.offset)
-        self._entries.clear()
-        for entry in entries:
-            self._entries[entry._node.name] = entry
-
-    def _ExpandEntries(self):
-        """Expand any entries that are permitted to"""
-        exp_entry = None
-        for entry in self._entries.values():
-            if exp_entry:
-                exp_entry.ExpandToLimit(entry.offset)
-                exp_entry = None
-            if entry.expand_size:
-                exp_entry = entry
-        if exp_entry:
-            exp_entry.ExpandToLimit(self._size)
-
-    def CheckEntries(self):
-        """Check that entries do not overlap or extend outside the section
-
-        This also sorts entries, if needed and expands
-        """
-        if self._sort:
-            self._SortEntries()
-        self._ExpandEntries()
-        offset = 0
-        prev_name = 'None'
-        for entry in self._entries.values():
-            entry.CheckOffset()
-            if (entry.offset < self._skip_at_start or
-                entry.offset + entry.size > self._skip_at_start + self._size):
-                entry.Raise("Offset %#x (%d) is outside the section starting "
-                            "at %#x (%d)" %
-                            (entry.offset, entry.offset, self._skip_at_start,
-                             self._skip_at_start))
-            if entry.offset < offset:
-                entry.Raise("Offset %#x (%d) overlaps with previous entry '%s' "
-                            "ending at %#x (%d)" %
-                            (entry.offset, entry.offset, prev_name, offset, offset))
-            offset = entry.offset + entry.size
-            prev_name = entry.GetPath()
-
-    def SetImagePos(self, image_pos):
-        self._image_pos = image_pos
-        for entry in self._entries.values():
-            entry.SetImagePos(image_pos)
-
-    def ProcessEntryContents(self):
-        """Call the ProcessContents() method for each entry
-
-        This is intended to adjust the contents as needed by the entry type.
-        """
-        for entry in self._entries.values():
-            entry.ProcessContents()
-
-    def WriteSymbols(self):
-        """Write symbol values into binary files for access at run time"""
-        for entry in self._entries.values():
-            entry.WriteSymbols(self)
-
-    def BuildSection(self, fd, base_offset):
-        """Write the section to a file"""
-        fd.seek(base_offset)
-        fd.write(self.GetData())
-
-    def GetData(self):
-        """Get the contents of the section"""
-        section_data = tools.GetBytes(self._pad_byte, self._size)
-
-        for entry in self._entries.values():
-            data = entry.GetData()
-            base = self._pad_before + entry.offset - self._skip_at_start
-            section_data = (section_data[:base] + data +
-                            section_data[base + len(data):])
-        return section_data
-
-    def LookupSymbol(self, sym_name, optional, msg):
-        """Look up a symbol in an ELF file
-
-        Looks up a symbol in an ELF file. Only entry types which come from an
-        ELF image can be used by this function.
-
-        At present the only entry property supported is offset.
-
-        Args:
-            sym_name: Symbol name in the ELF file to look up in the format
-                _binman_<entry>_prop_<property> where <entry> is the name of
-                the entry and <property> is the property to find (e.g.
-                _binman_u_boot_prop_offset). As a special case, you can append
-                _any to <entry> to have it search for any matching entry. E.g.
-                _binman_u_boot_any_prop_offset will match entries called u-boot,
-                u-boot-img and u-boot-nodtb)
-            optional: True if the symbol is optional. If False this function
-                will raise if the symbol is not found
-            msg: Message to display if an error occurs
-
-        Returns:
-            Value that should be assigned to that symbol, or None if it was
-                optional and not found
-
-        Raises:
-            ValueError if the symbol is invalid or not found, or references a
-                property which is not supported
-        """
-        m = re.match(r'^_binman_(\w+)_prop_(\w+)$', sym_name)
-        if not m:
-            raise ValueError("%s: Symbol '%s' has invalid format" %
-                             (msg, sym_name))
-        entry_name, prop_name = m.groups()
-        entry_name = entry_name.replace('_', '-')
-        entry = self._entries.get(entry_name)
-        if not entry:
-            if entry_name.endswith('-any'):
-                root = entry_name[:-4]
-                for name in self._entries:
-                    if name.startswith(root):
-                        rest = name[len(root):]
-                        if rest in ['', '-img', '-nodtb']:
-                            entry = self._entries[name]
-        if not entry:
-            err = ("%s: Entry '%s' not found in list (%s)" %
-                   (msg, entry_name, ','.join(self._entries.keys())))
-            if optional:
-                print('Warning: %s' % err, file=sys.stderr)
-                return None
-            raise ValueError(err)
-        if prop_name == 'offset':
-            return entry.offset
-        elif prop_name == 'image_pos':
-            return entry.image_pos
-        else:
-            raise ValueError("%s: No such property '%s'" % (msg, prop_name))
-
-    def GetEntries(self):
-        """Get the number of entries in a section
-
-        Returns:
-            Number of entries in a section
-        """
-        return self._entries
-
-    def GetSize(self):
-        """Get the size of a section in bytes
-
-        This is only meaningful if the section has a pre-defined size, or the
-        entries within it have been packed, so that the size has been
-        calculated.
-
-        Returns:
-            Section size in bytes
-        """
-        return self._size
-
-    def WriteMap(self, fd, indent):
-        """Write a map of the section to a .map file
-
-        Args:
-            fd: File to write the map to
-            indent: Current indent level (number of levels of nesting)
-        """
-        Entry.WriteMapLine(fd, indent, self._name, self._offset or 0,
-                           self._size, self._image_pos)
-        for entry in self._entries.values():
-            entry.WriteMap(fd, indent + 1)
-
-    def GetContentsByPhandle(self, phandle, source_entry):
-        """Get the data contents of an entry specified by a phandle
-
-        This uses a phandle to look up a node and find the entry associated
-        with it. Then it returns the contents of that entry.
-
-        Args:
-            phandle: Phandle to look up (integer)
-            source_entry: Entry containing that phandle (used for error
-                reporting)
-
-        Returns:
-            data from associated entry (as a string), or None if not found
-        """
-        node = self._node.GetFdt().LookupPhandle(phandle)
-        if not node:
-            source_entry.Raise("Cannot find node for phandle %d" % phandle)
-        for entry in self._entries.values():
-            if entry._node == node:
-                return entry.GetData()
-        source_entry.Raise("Cannot find entry for node '%s'" % node.name)
-
-    def ExpandSize(self, size):
-        if size != self._size:
-            self._size = size
-
-    def GetRootSkipAtStart(self):
-        if self._parent_section:
-            return self._parent_section.GetRootSkipAtStart()
-        return self._skip_at_start
-
-    def GetImageSize(self):
-        return self._image._size
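For reference, the symbol naming scheme documented in LookupSymbol() above can be sketched as a small standalone snippet (decode_binman_symbol is a hypothetical helper used only for illustration, not part of the patch):

    import re

    def decode_binman_symbol(sym_name):
        # Split a _binman_<entry>_prop_<property> symbol into its two parts
        m = re.match(r'^_binman_(\w+)_prop_(\w+)$', sym_name)
        if not m:
            raise ValueError("Symbol '%s' has invalid format" % sym_name)
        entry_name, prop_name = m.groups()
        # ELF symbols cannot contain '-', so '_' stands in for it
        return entry_name.replace('_', '-'), prop_name

    print(decode_binman_symbol('_binman_u_boot_any_prop_offset'))
    # ('u-boot-any', 'offset') - the '-any' suffix then matches u-boot,
    # u-boot-img or u-boot-nodtb, as described above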

+ 887 - 0
tools/binman/cbfs_util.py

@@ -0,0 +1,887 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright 2019 Google LLC
+# Written by Simon Glass <sjg@chromium.org>
+
+"""Support for coreboot's CBFS format
+
+CBFS supports a header followed by a number of files, generally targeted at SPI
+flash.
+
+The format is somewhat defined by documentation in the coreboot tree although
+it is necessary to rely on the C structures and source code (mostly cbfstool)
+to fully understand it.
+
+Currently supported: raw and stage types with compression, padding empty areas
+    with empty files, fixed-offset files
+"""
+
+from __future__ import print_function
+
+from collections import OrderedDict
+import io
+import struct
+import sys
+
+import command
+import elf
+import tools
+
+# Set to True to enable printing output while working
+DEBUG = False
+
+# Set to True to enable output from running cbfstool for debugging
+VERBOSE = False
+
+# The master header, at the start of the CBFS
+HEADER_FORMAT      = '>IIIIIIII'
+HEADER_LEN         = 0x20
+HEADER_MAGIC       = 0x4f524243
+HEADER_VERSION1    = 0x31313131
+HEADER_VERSION2    = 0x31313132
+
+# The file header, at the start of each file in the CBFS
+FILE_HEADER_FORMAT = b'>8sIIII'
+FILE_HEADER_LEN    = 0x18
+FILE_MAGIC         = b'LARCHIVE'
+FILENAME_ALIGN     = 16  # Filename lengths are aligned to this
+
+# A stage header containing information about 'stage' files
+# Yes this is correct: this header is in little-endian format
+STAGE_FORMAT       = '<IQQII'
+STAGE_LEN          = 0x1c
+
+# An attribute describing the compression used in a file
+ATTR_COMPRESSION_FORMAT = '>IIII'
+ATTR_COMPRESSION_LEN = 0x10
+
+# Attribute tags
+# Depending on how the header was initialised, it may be backed with 0x00 or
+# 0xff. Support both.
+FILE_ATTR_TAG_UNUSED        = 0
+FILE_ATTR_TAG_UNUSED2       = 0xffffffff
+FILE_ATTR_TAG_COMPRESSION   = 0x42435a4c
+FILE_ATTR_TAG_HASH          = 0x68736148
+FILE_ATTR_TAG_POSITION      = 0x42435350  # PSCB
+FILE_ATTR_TAG_ALIGNMENT     = 0x42434c41  # ALCB
+FILE_ATTR_TAG_PADDING       = 0x47444150  # PDNG
+
+# This is 'the size of bootblock reserved in firmware image (cbfs.txt)'
+# Not much more info is available, but we set it to 4, due to this comment in
+# cbfstool.c:
+# This causes 4 bytes to be left out at the end of the image, for two reasons:
+# 1. The cbfs master header pointer resides there
+# 2. Some cbfs implementations assume that an image that resides below 4GB has
+#    a bootblock and get confused when the end of the image is at 4GB == 0.
+MIN_BOOTBLOCK_SIZE     = 4
+
+# Files start aligned to this boundary in the CBFS
+ENTRY_ALIGN    = 0x40
+
+# CBFSs must declare an architecture since much of the logic is designed with
+# x86 in mind. The effect of setting this value is not well documented, but in
+# general x86 is used and this makes use of a boot block and an image that ends
+# at the end of 32-bit address space.
+ARCHITECTURE_UNKNOWN  = 0xffffffff
+ARCHITECTURE_X86      = 0x00000001
+ARCHITECTURE_ARM      = 0x00000010
+ARCHITECTURE_AARCH64  = 0x0000aa64
+ARCHITECTURE_MIPS     = 0x00000100
+ARCHITECTURE_RISCV    = 0xc001d0de
+ARCHITECTURE_PPC64    = 0x407570ff
+
+ARCH_NAMES = {
+    ARCHITECTURE_UNKNOWN  : 'unknown',
+    ARCHITECTURE_X86      : 'x86',
+    ARCHITECTURE_ARM      : 'arm',
+    ARCHITECTURE_AARCH64  : 'arm64',
+    ARCHITECTURE_MIPS     : 'mips',
+    ARCHITECTURE_RISCV    : 'riscv',
+    ARCHITECTURE_PPC64    : 'ppc64',
+    }
+
+# File types. Only supported ones are included here
+TYPE_CBFSHEADER     = 0x02   # Master header, HEADER_FORMAT
+TYPE_STAGE          = 0x10   # Stage, holding an executable, see STAGE_FORMAT
+TYPE_RAW            = 0x50   # Raw file, possibly compressed
+TYPE_EMPTY          = 0xffffffff     # Empty data
+
+# Compression types
+COMPRESS_NONE, COMPRESS_LZMA, COMPRESS_LZ4 = range(3)
+
+COMPRESS_NAMES = {
+    COMPRESS_NONE : 'none',
+    COMPRESS_LZMA : 'lzma',
+    COMPRESS_LZ4  : 'lz4',
+    }
+
+def find_arch(find_name):
+    """Look up an architecture name
+
+    Args:
+        find_name: Architecture name to find
+
+    Returns:
+        ARCHITECTURE_... value or None if not found
+    """
+    for arch, name in ARCH_NAMES.items():
+        if name == find_name:
+            return arch
+    return None
+
+def find_compress(find_name):
+    """Look up a compression algorithm name
+
+    Args:
+        find_name: Compression algorithm name to find
+
+    Returns:
+        COMPRESS_... value or None if not found
+    """
+    for compress, name in COMPRESS_NAMES.items():
+        if name == find_name:
+            return compress
+    return None
+
+def compress_name(compress):
+    """Look up the name of a compression algorithm
+
+    Args:
+        compress: Compression algorithm number to find (COMPRESS_...)
+
+    Returns:
+        Compression algorithm name (string)
+
+    Raises:
+        KeyError if the algorithm number is invalid
+    """
+    return COMPRESS_NAMES[compress]
+
+def align_int(val, align):
+    """Align a value up to the given alignment
+
+    Args:
+        val: Integer value to align
+        align: Integer alignment value (e.g. 4 to align to 4-byte boundary)
+
+    Returns:
+        integer value aligned to the required boundary, rounding up if necessary
+    """
+    return ((val + align - 1) // align) * align
+
+def align_int_down(val, align):
+    """Align a value down to the given alignment
+
+    Args:
+        val: Integer value to align
+        align: Integer alignment value (e.g. 4 to align to 4-byte boundary)
+
+    Returns:
+        integer value aligned to the required boundary, rounding down if
+            necessary
+    """
+    return (val // align) * align
+
+def _pack_string(instr):
+    """Pack a string to the required aligned size by adding padding
+
+    Args:
+        instr: String to process
+
+    Returns:
+        String with required padding (at least one 0x00 byte) at the end
+    """
+    val = tools.ToBytes(instr)
+    pad_len = align_int(len(val) + 1, FILENAME_ALIGN)
+    return val + tools.GetBytes(0, pad_len - len(val))
+
+
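As a worked illustration of the alignment rules above, this standalone sketch reproduces the filename-padding behaviour without depending on the tools module (demo_pack_string is a hypothetical stand-in for _pack_string):

    def demo_pack_string(instr, align=16):
        # Pad to the next multiple of 'align', always leaving room for at
        # least one NUL terminator, as _pack_string() does via align_int()
        val = instr.encode('utf-8')
        pad_len = ((len(val) + 1 + align - 1) // align) * align
        return val + b'\0' * (pad_len - len(val))

    assert len(demo_pack_string('u-boot')) == 16            # 6 chars fit in one unit
    assert len(demo_pack_string('16-characters xx')) == 32  # 16 chars need a second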
+class CbfsFile(object):
+    """Class to represent a single CBFS file
+
+    This is used to hold the information about a file, including its contents.
+    Use the get_data_and_offset() method to obtain the raw output for writing to
+    CBFS.
+
+    Properties:
+        name: Name of file
+        offset: Offset of file data from start of file header
+        cbfs_offset: Offset of file data in bytes from start of CBFS, or None
+            to place this file anywhere
+        data: Contents of file, uncompressed
+        data_len: Length of (possibly compressed) data in bytes
+        ftype: File type (TYPE_...)
+        compress: Compression type (COMPRESS_...)
+        memlen: Length of data in memory, i.e. the uncompressed length; None if
+            no compression algorithm is selected
+        load: Load address in memory if known, else None
+        entry: Entry address in memory if known, else None. This is where
+            execution starts after the file is loaded
+        base_address: Base address to use for 'stage' files
+        erase_byte: Erase byte to use for padding between the file header and
+            contents (used for empty files)
+        size: Size of the file in bytes (used for empty files)
+    """
+    def __init__(self, name, ftype, data, cbfs_offset, compress=COMPRESS_NONE):
+        self.name = name
+        self.offset = None
+        self.cbfs_offset = cbfs_offset
+        self.data = data
+        self.ftype = ftype
+        self.compress = compress
+        self.memlen = None
+        self.load = None
+        self.entry = None
+        self.base_address = None
+        self.data_len = len(data)
+        self.erase_byte = None
+        self.size = None
+
+    def decompress(self):
+        """Handle decompressing data if necessary"""
+        indata = self.data
+        if self.compress == COMPRESS_LZ4:
+            data = tools.Decompress(indata, 'lz4')
+        elif self.compress == COMPRESS_LZMA:
+            data = tools.Decompress(indata, 'lzma')
+        else:
+            data = indata
+        self.memlen = len(data)
+        self.data = data
+        self.data_len = len(indata)
+
+    @classmethod
+    def stage(cls, base_address, name, data, cbfs_offset):
+        """Create a new stage file
+
+        Args:
+            base_address: Int base address for memory-mapping of ELF file
+            name: String file name to put in CBFS (does not need to correspond
+                to the name that the file originally came from)
+            data: Contents of file
+            cbfs_offset: Offset of file data in bytes from start of CBFS, or
+                None to place this file anywhere
+
+        Returns:
+            CbfsFile object containing the file information
+        """
+        cfile = CbfsFile(name, TYPE_STAGE, data, cbfs_offset)
+        cfile.base_address = base_address
+        return cfile
+
+    @classmethod
+    def raw(cls, name, data, cbfs_offset, compress):
+        """Create a new raw file
+
+        Args:
+            name: String file name to put in CBFS (does not need to correspond
+                to the name that the file originally came from)
+            data: Contents of file
+            cbfs_offset: Offset of file data in bytes from start of CBFS, or
+                None to place this file anywhere
+            compress: Compression algorithm to use (COMPRESS_...)
+
+        Returns:
+            CbfsFile object containing the file information
+        """
+        return CbfsFile(name, TYPE_RAW, data, cbfs_offset, compress)
+
+    @classmethod
+    def empty(cls, space_to_use, erase_byte):
+        """Create a new empty file of a given size
+
+        Args:
+            space_to_use: Size of available space, which must be at least as
+                large as the alignment size for this CBFS
+            erase_byte: Byte to use for contents of file (repeated through the
+                whole file)
+
+        Returns:
+            CbfsFile object containing the file information
+        """
+        cfile = CbfsFile('', TYPE_EMPTY, b'', None)
+        cfile.size = space_to_use - FILE_HEADER_LEN - FILENAME_ALIGN
+        cfile.erase_byte = erase_byte
+        return cfile
+
+    def calc_start_offset(self):
+        """Check if this file needs to start at a particular offset in CBFS
+
+        Returns:
+            None if the file can be placed anywhere, or
+            the largest offset where the file could start (integer)
+        """
+        if self.cbfs_offset is None:
+            return None
+        return self.cbfs_offset - self.get_header_len()
+
+    def get_header_len(self):
+        """Get the length of headers required for a file
+
+        This is the minimum length required before the actual data for this file
+        could start. It might start later if there is padding.
+
+        Returns:
+            Total length of all non-data fields, in bytes
+        """
+        name = _pack_string(self.name)
+        hdr_len = len(name) + FILE_HEADER_LEN
+        if self.ftype == TYPE_STAGE:
+            pass
+        elif self.ftype == TYPE_RAW:
+            hdr_len += ATTR_COMPRESSION_LEN
+        elif self.ftype == TYPE_EMPTY:
+            pass
+        else:
+            raise ValueError('Unknown file type %#x\n' % self.ftype)
+        return hdr_len
+
+    def get_data_and_offset(self, offset=None, pad_byte=None):
+        """Obtain the contents of the file, in CBFS format and the offset of
+        the data within the file
+
+        Returns:
+            tuple:
+                bytes representing the contents of this file, packed and aligned
+                    for directly inserting into the final CBFS output
+                offset to the file data from the start of the returned data.
+        """
+        name = _pack_string(self.name)
+        hdr_len = len(name) + FILE_HEADER_LEN
+        attr_pos = 0
+        content = b''
+        attr = b''
+        pad = b''
+        data = self.data
+        if self.ftype == TYPE_STAGE:
+            elf_data = elf.DecodeElf(data, self.base_address)
+            content = struct.pack(STAGE_FORMAT, self.compress,
+                                  elf_data.entry, elf_data.load,
+                                  len(elf_data.data), elf_data.memsize)
+            data = elf_data.data
+        elif self.ftype == TYPE_RAW:
+            orig_data = data
+            if self.compress == COMPRESS_LZ4:
+                data = tools.Compress(orig_data, 'lz4')
+            elif self.compress == COMPRESS_LZMA:
+                data = tools.Compress(orig_data, 'lzma')
+            self.memlen = len(orig_data)
+            self.data_len = len(data)
+            attr = struct.pack(ATTR_COMPRESSION_FORMAT,
+                               FILE_ATTR_TAG_COMPRESSION, ATTR_COMPRESSION_LEN,
+                               self.compress, self.memlen)
+        elif self.ftype == TYPE_EMPTY:
+            data = tools.GetBytes(self.erase_byte, self.size)
+        else:
+            raise ValueError('Unknown type %#x when writing\n' % self.ftype)
+        if attr:
+            attr_pos = hdr_len
+            hdr_len += len(attr)
+        if self.cbfs_offset is not None:
+            pad_len = self.cbfs_offset - offset - hdr_len
+            if pad_len < 0:  # pragma: no cover
+                # Test coverage of this is not available since this should never
+                # happen. It indicates that get_header_len() provided an
+                # incorrect value (too small) so that we decided that we could
+                # put this file at the requested place, but in fact a previous
+                # file extends far enough into the CBFS that this is not
+                # possible.
+                raise ValueError("Internal error: CBFS file '%s': Requested offset %#x but current output position is %#x" %
+                                 (self.name, self.cbfs_offset, offset))
+            pad = tools.GetBytes(pad_byte, pad_len)
+            hdr_len += pad_len
+
+        # The size field in the file header covers the content (e.g. the
+        # stage header) plus the file data
+        size = len(content) + len(data)
+        hdr = struct.pack(FILE_HEADER_FORMAT, FILE_MAGIC, size,
+                          self.ftype, attr_pos, hdr_len)
+
+        # Do a sanity check of the get_header_len() function, to ensure that it
+        # stays in lockstep with this function
+        expected_len = self.get_header_len()
+        actual_len = len(hdr + name + attr)
+        if expected_len != actual_len:  # pragma: no cover
+            # Test coverage of this is not available since this should never
+            # happen. It probably indicates that get_header_len() is broken.
+            raise ValueError("Internal error: CBFS file '%s': Expected headers of %#x bytes, got %#d" %
+                             (self.name, expected_len, actual_len))
+        return hdr + name + attr + pad + content + data, hdr_len
+
+
+class CbfsWriter(object):
+    """Class to handle writing a Coreboot File System (CBFS)
+
+    Usage is something like:
+
+        cbw = CbfsWriter(size)
+        cbw.add_file_raw('u-boot', tools.ReadFile('u-boot.bin'))
+        ...
+        data, cbfs_offset = cbw.get_data_and_offset()
+
+    Attributes:
+        _master_name: Name of the file containing the master header
+        _size: Size of the filesystem, in bytes
+        _files: Ordered dict of files in the CBFS, each a CbfsFile, keyed by
+            name
+        _arch: Architecture of the CBFS (ARCHITECTURE_...)
+        _bootblock_size: Size of the bootblock, typically at the end of the CBFS
+        _erase_byte: Byte to use for empty space in the CBFS
+        _align: Alignment to use for files, typically ENTRY_ALIGN
+        _base_address: Boot block offset in bytes from the start of CBFS.
+            Typically this is located at the top of the CBFS. It is 0 when
+            there is no boot block
+        _header_offset: Offset of master header in bytes from start of CBFS
+        _contents_offset: Offset of first file header
+        _hdr_at_start: True if the master header is at the start of the CBFS,
+            instead of the end as normal for x86
+        _add_fileheader: True to add a fileheader around the master header
+    """
+    def __init__(self, size, arch=ARCHITECTURE_X86):
+        """Set up a new CBFS
+
+        This sets up all properties to default values. Files can be added using
+        add_file_raw(), etc.
+
+        Args:
+            size: Size of CBFS in bytes
+            arch: Architecture to declare for CBFS
+        """
+        self._master_name = 'cbfs master header'
+        self._size = size
+        self._files = OrderedDict()
+        self._arch = arch
+        self._bootblock_size = 0
+        self._erase_byte = 0xff
+        self._align = ENTRY_ALIGN
+        self._add_fileheader = False
+        if self._arch == ARCHITECTURE_X86:
+            # Allow 4 bytes for the header pointer. That holds the
+            # two's-complement negative offset of the master header in bytes
+            # measured from one byte past the end of the CBFS
+            self._base_address = self._size - max(self._bootblock_size,
+                                                  MIN_BOOTBLOCK_SIZE)
+            self._header_offset = self._base_address - HEADER_LEN
+            self._contents_offset = 0
+            self._hdr_at_start = False
+        else:
+            # For non-x86, different rules apply
+            self._base_address = 0
+            self._header_offset = align_int(self._base_address +
+                                            self._bootblock_size, 4)
+            self._contents_offset = align_int(self._header_offset +
+                                              FILE_HEADER_LEN +
+                                              self._bootblock_size, self._align)
+            self._hdr_at_start = True
+
+    def _skip_to(self, fd, offset):
+        """Write out pad bytes until a given offset
+
+        Args:
+            fd: File object to write to
+            offset: Offset to write to
+        """
+        if fd.tell() > offset:
+            raise ValueError('No space for data before offset %#x (current offset %#x)' %
+                             (offset, fd.tell()))
+        fd.write(tools.GetBytes(self._erase_byte, offset - fd.tell()))
+
+    def _pad_to(self, fd, offset):
+        """Write out pad bytes and/or an empty file until a given offset
+
+        Args:
+            fd: File object to write to
+            offset: Offset to write to
+        """
+        self._align_to(fd, self._align)
+        upto = fd.tell()
+        if upto > offset:
+            raise ValueError('No space for data before pad offset %#x (current offset %#x)' %
+                             (offset, upto))
+        todo = align_int_down(offset - upto, self._align)
+        if todo:
+            cbf = CbfsFile.empty(todo, self._erase_byte)
+            fd.write(cbf.get_data_and_offset()[0])
+        self._skip_to(fd, offset)
+
+    def _align_to(self, fd, align):
+        """Write out pad bytes until a given alignment is reached
+
+        This only aligns if the resulting output would not reach the end of the
+        CBFS, since we want to leave the last 4 bytes for the master-header
+        pointer.
+
+        Args:
+            fd: File object to write to
+            align: Alignment to require (e.g. 4 means pad to next 4-byte
+                boundary)
+        """
+        offset = align_int(fd.tell(), align)
+        if offset < self._size:
+            self._skip_to(fd, offset)
+
+    def add_file_stage(self, name, data, cbfs_offset=None):
+        """Add a new stage file to the CBFS
+
+        Args:
+            name: String file name to put in CBFS (does not need to correspond
+                to the name that the file originally came from)
+            data: Contents of file
+            cbfs_offset: Offset of this file's data within the CBFS, in bytes,
+                or None to place this file anywhere
+
+        Returns:
+            CbfsFile object created
+        """
+        cfile = CbfsFile.stage(self._base_address, name, data, cbfs_offset)
+        self._files[name] = cfile
+        return cfile
+
+    def add_file_raw(self, name, data, cbfs_offset=None,
+                     compress=COMPRESS_NONE):
+        """Create a new raw file
+
+        Args:
+            name: String file name to put in CBFS (does not need to correspond
+                to the name that the file originally came from)
+            data: Contents of file
+            cbfs_offset: Offset of this file's data within the CBFS, in bytes,
+                or None to place this file anywhere
+            compress: Compression algorithm to use (COMPRESS_...)
+
+        Returns:
+            CbfsFile object created
+        """
+        cfile = CbfsFile.raw(name, data, cbfs_offset, compress)
+        self._files[name] = cfile
+        return cfile
+
+    def _write_header(self, fd, add_fileheader):
+        """Write out the master header to a CBFS
+
+        Args:
+            fd: File object
+            add_fileheader: True to place the master header in a file header
+                record
+        """
+        if fd.tell() > self._header_offset:
+            raise ValueError('No space for header at offset %#x (current offset %#x)' %
+                             (self._header_offset, fd.tell()))
+        if not add_fileheader:
+            self._pad_to(fd, self._header_offset)
+        hdr = struct.pack(HEADER_FORMAT, HEADER_MAGIC, HEADER_VERSION2,
+                          self._size, self._bootblock_size, self._align,
+                          self._contents_offset, self._arch, 0xffffffff)
+        if add_fileheader:
+            name = _pack_string(self._master_name)
+            fd.write(struct.pack(FILE_HEADER_FORMAT, FILE_MAGIC, len(hdr),
+                                 TYPE_CBFSHEADER, 0,
+                                 FILE_HEADER_LEN + len(name)))
+            fd.write(name)
+            self._header_offset = fd.tell()
+            fd.write(hdr)
+            self._align_to(fd, self._align)
+        else:
+            fd.write(hdr)
+
+    def get_data(self):
+        """Obtain the full contents of the CBFS
+
+        This builds the CBFS with headers and all required files.
+
+        Returns:
+            'bytes' type containing the data
+        """
+        fd = io.BytesIO()
+
+        # The header can go at the start in some cases
+        if self._hdr_at_start:
+            self._write_header(fd, add_fileheader=self._add_fileheader)
+        self._skip_to(fd, self._contents_offset)
+
+        # Write out each file
+        for cbf in self._files.values():
+            # Place the file at its requested place, if any
+            offset = cbf.calc_start_offset()
+            if offset is not None:
+                self._pad_to(fd, align_int_down(offset, self._align))
+            pos = fd.tell()
+            data, data_offset = cbf.get_data_and_offset(pos, self._erase_byte)
+            fd.write(data)
+            self._align_to(fd, self._align)
+            cbf.calced_cbfs_offset = pos + data_offset
+        if not self._hdr_at_start:
+            self._write_header(fd, add_fileheader=self._add_fileheader)
+
+        # Pad to the end and write a pointer to the CBFS master header
+        self._pad_to(fd, self._base_address or self._size - 4)
+        rel_offset = self._header_offset - self._size
+        fd.write(struct.pack('<I', rel_offset & 0xffffffff))
+
+        return fd.getvalue()
+
+
+class CbfsReader(object):
+    """Class to handle reading a Coreboot File System (CBFS)
+
+    Usage is something like:
+        cbfs = cbfs_util.CbfsReader(data)
+        cfile = cbfs.files['u-boot']
+        self.WriteFile('u-boot.bin', cfile.data)
+
+    Attributes:
+        files: Ordered dict of CbfsFile objects, keyed by file name
+        align: Alignment to use for files, typically ENTRY_ALIGN
+        stage_base_address: Base address to use when mapping ELF files into the
+            CBFS for TYPE_STAGE files. If this is larger than the code address
+            of the ELF file, then data at the start of the ELF file will not
+            appear in the CBFS. Currently there are no tests for behaviour as
+            documentation is sparse
+        magic: Integer magic number from master header (HEADER_MAGIC)
+        version: Version number of CBFS (HEADER_VERSION2)
+        rom_size: Size of CBFS
+        boot_block_size: Size of boot block
+        cbfs_offset: Offset of the first file in bytes from start of CBFS
+        arch: Architecture of CBFS file (ARCHITECTURE_...)
+    """
+    def __init__(self, data, read=True):
+        self.align = ENTRY_ALIGN
+        self.arch = None
+        self.boot_block_size = None
+        self.cbfs_offset = None
+        self.files = OrderedDict()
+        self.magic = None
+        self.rom_size = None
+        self.stage_base_address = 0
+        self.version = None
+        self.data = data
+        if read:
+            self.read()
+
+    def read(self):
+        """Read all the files in the CBFS and add them to self.files"""
+        with io.BytesIO(self.data) as fd:
+            # First, get the master header
+            if not self._find_and_read_header(fd, len(self.data)):
+                raise ValueError('Cannot find master header')
+            fd.seek(self.cbfs_offset)
+
+            # Now read in the files one at a time
+            while True:
+                cfile = self._read_next_file(fd)
+                if cfile:
+                    self.files[cfile.name] = cfile
+                elif cfile is False:
+                    break
+
+    def _find_and_read_header(self, fd, size):
+        """Find and read the master header in the CBFS
+
+        This looks at the pointer word at the very end of the CBFS. This is an
+        offset to the header relative to the size of the CBFS, which is assumed
+        to be known. Note that the offset is in *little endian* format.
+
+        Args:
+            fd: File to read from
+            size: Size of file
+
+        Returns:
+            True if header was found, False if not
+        """
+        orig_pos = fd.tell()
+        fd.seek(size - 4)
+        rel_offset, = struct.unpack('<I', fd.read(4))
+        pos = (size + rel_offset) & 0xffffffff
+        fd.seek(pos)
+        found = self._read_header(fd)
+        if not found:
+            print('Relative offset seems wrong, scanning whole image')
+            for pos in range(0, size - HEADER_LEN, 4):
+                fd.seek(pos)
+                found = self._read_header(fd)
+                if found:
+                    break
+        fd.seek(orig_pos)
+        return found
+
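To make the pointer arithmetic concrete, here is a standalone sketch assuming the x86 layout used by CbfsWriter (a 4-byte pointer and a 0x20-byte master header at the top of a hypothetical 0x1000-byte image):

    import struct

    size = 0x1000                      # hypothetical CBFS size
    header_offset = size - 4 - 0x20    # pointer word plus master header
    rel_offset = header_offset - size  # negative, stored two's-complement
    word = struct.pack('<I', rel_offset & 0xffffffff)

    # The reader reverses the calculation, as _find_and_read_header() does
    rel, = struct.unpack('<I', word)
    assert (size + rel) & 0xffffffff == header_offset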
+    def _read_next_file(self, fd):
+        """Read the next file from a CBFS
+
+        Args:
+            fd: File to read from
+
+        Returns:
+            CbfsFile object, if found
+            None if no object found, but data was parsed (e.g. TYPE_CBFSHEADER)
+            False if at end of CBFS and reading should stop
+        """
+        file_pos = fd.tell()
+        data = fd.read(FILE_HEADER_LEN)
+        if len(data) < FILE_HEADER_LEN:
+            print('File header at %x ran out of data' % file_pos)
+            return False
+        magic, size, ftype, attr, offset = struct.unpack(FILE_HEADER_FORMAT,
+                                                         data)
+        if magic != FILE_MAGIC:
+            return False
+        pos = fd.tell()
+        name = self._read_string(fd)
+        if name is None:
+            print('String at %x ran out of data' % pos)
+            return False
+
+        if DEBUG:
+            print('name', name)
+
+        # If there are attribute headers present, read those
+        compress = self._read_attr(fd, file_pos, attr, offset)
+        if compress is None:
+            return False
+
+        # Create the correct CbfsFile object depending on the type
+        cfile = None
+        cbfs_offset = file_pos + offset
+        fd.seek(cbfs_offset, io.SEEK_SET)
+        if ftype == TYPE_CBFSHEADER:
+            self._read_header(fd)
+        elif ftype == TYPE_STAGE:
+            data = fd.read(STAGE_LEN)
+            cfile = CbfsFile.stage(self.stage_base_address, name, b'',
+                                   cbfs_offset)
+            (cfile.compress, cfile.entry, cfile.load, cfile.data_len,
+             cfile.memlen) = struct.unpack(STAGE_FORMAT, data)
+            cfile.data = fd.read(cfile.data_len)
+        elif ftype == TYPE_RAW:
+            data = fd.read(size)
+            cfile = CbfsFile.raw(name, data, cbfs_offset, compress)
+            cfile.decompress()
+            if DEBUG:
+                print('data', data)
+        elif ftype == TYPE_EMPTY:
+            # Just read the data and discard it, since it is only padding
+            fd.read(size)
+            cfile = CbfsFile('', TYPE_EMPTY, b'', cbfs_offset)
+        else:
+            raise ValueError('Unknown type %#x when reading\n' % ftype)
+        if cfile:
+            cfile.offset = offset
+
+        # Move past the padding to the start of a possible next file. If we are
+        # already at an alignment boundary, then there is no padding.
+        pad = (self.align - fd.tell() % self.align) % self.align
+        fd.seek(pad, io.SEEK_CUR)
+        return cfile
+
+    @classmethod
+    def _read_attr(cls, fd, file_pos, attr, offset):
+        """Read attributes from the file
+
+        CBFS files can have attributes, which hold information that cannot
+        fit in the file header. The only attributes currently supported are
+        compression and the unused tag.
+
+        Args:
+            fd: File to read from
+            file_pos: Position of file in fd
+            attr: Offset of attributes, 0 if none
+            offset: Offset of file data (used to indicate the end of the
+                                         attributes)
+
+        Returns:
+            Compression to use for the file (COMPRESS_...)
+        """
+        compress = COMPRESS_NONE
+        if not attr:
+            return compress
+        attr_size = offset - attr
+        fd.seek(file_pos + attr, io.SEEK_SET)
+        while attr_size:
+            pos = fd.tell()
+            hdr = fd.read(8)
+            if len(hdr) < 8:
+                print('Attribute tag at %x ran out of data' % pos)
+                return None
+            atag, alen = struct.unpack(">II", hdr)
+            data = hdr + fd.read(alen - 8)
+            if atag == FILE_ATTR_TAG_COMPRESSION:
+                # We don't currently use this information
+                atag, alen, compress, _decomp_size = struct.unpack(
+                    ATTR_COMPRESSION_FORMAT, data)
+            elif atag == FILE_ATTR_TAG_UNUSED2:
+                break
+            else:
+                print('Unknown attribute tag %x' % atag)
+            attr_size -= len(data)
+        return compress
+
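For illustration, a compression attribute of the kind parsed above can be built with a struct.pack() call matching ATTR_COMPRESSION_FORMAT (the uncompressed length here is illustrative):

    import struct

    attr = struct.pack('>IIII',
                       0x42435a4c,  # FILE_ATTR_TAG_COMPRESSION
                       0x10,        # ATTR_COMPRESSION_LEN (total size)
                       2,           # COMPRESS_LZ4
                       0x1234)      # uncompressed (memory) length
    tag, alen, compress, memlen = struct.unpack('>IIII', attr)
    assert alen == len(attr) == 0x10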
+    def _read_header(self, fd):
+        """Read the master header
+
+        Reads the header and stores the information obtained into the member
+        variables.
+
+        Args:
+            fd: File to read from
+
+        Returns:
+            True if header was read OK, False if it is truncated or has the
+                wrong magic or version
+        """
+        pos = fd.tell()
+        data = fd.read(HEADER_LEN)
+        if len(data) < HEADER_LEN:
+            print('Header at %x ran out of data' % pos)
+            return False
+        (self.magic, self.version, self.rom_size, self.boot_block_size,
+         self.align, self.cbfs_offset, self.arch, _) = struct.unpack(
+             HEADER_FORMAT, data)
+        return self.magic == HEADER_MAGIC and (
+            self.version == HEADER_VERSION1 or
+            self.version == HEADER_VERSION2)
+
+    @classmethod
+    def _read_string(cls, fd):
+        """Read a string from a file
+
+        This reads a string and aligns the data to the next alignment boundary
+
+        Args:
+            fd: File to read from
+
+        Returns:
+            string read ('str' type), decoded from UTF-8, or None if we ran
+                out of data
+        """
+        val = b''
+        while True:
+            data = fd.read(FILENAME_ALIGN)
+            if len(data) < FILENAME_ALIGN:
+                return None
+            pos = data.find(b'\0')
+            if pos == -1:
+                val += data
+            else:
+                val += data[:pos]
+                break
+        return val.decode('utf-8')
+
+
+def cbfstool(fname, *cbfs_args, **kwargs):
+    """Run cbfstool with provided arguments
+
+    If the tool fails then this function prints the tool's stderr output and
+    raises an exception.
+
+    Args:
+        fname: Filename of CBFS
+        *cbfs_args: List of arguments to pass to cbfstool
+        **kwargs: Supports a 'base' keyword argument: base address to pass
+            to cbfstool with -b, or None to omit it
+
+    Returns:
+        CommandResult object containing the results
+    """
+    args = ['cbfstool', fname] + list(cbfs_args)
+    if kwargs.get('base') is not None:
+        args += ['-b', '%#x' % kwargs['base']]
+    result = command.RunPipe([args], capture=not VERBOSE,
+                             capture_stderr=not VERBOSE, raise_on_error=False)
+    if result.return_code:
+        print(result.stderr, file=sys.stderr)
+        raise Exception("Failed to run (error %d): '%s'" %
+                        (result.return_code, ' '.join(args)))
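Taken together, the writer and reader support a simple round trip, sketched here from the usage notes in the class docstrings (this assumes tools/binman is on the Python path; the file name and contents are illustrative):

    import cbfs_util
    from cbfs_util import CbfsWriter

    cbw = CbfsWriter(0x100)              # 256-byte CBFS, default x86 layout
    cbw.add_file_raw('u-boot', b'1234')
    data = cbw.get_data()

    cbr = cbfs_util.CbfsReader(data)
    assert cbr.files['u-boot'].data == b'1234'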

+ 625 - 0
tools/binman/cbfs_util_test.py

@@ -0,0 +1,625 @@
+#!/usr/bin/env python
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright 2019 Google LLC
+# Written by Simon Glass <sjg@chromium.org>
+
+"""Tests for cbfs_util
+
+These create and read various CBFSs and compare the results with expected
+values and with cbfstool
+"""
+
+from __future__ import print_function
+
+import io
+import os
+import shutil
+import struct
+import tempfile
+import unittest
+
+import cbfs_util
+from cbfs_util import CbfsWriter
+import elf
+import test_util
+import tools
+
+U_BOOT_DATA           = b'1234'
+U_BOOT_DTB_DATA       = b'udtb'
+COMPRESS_DATA         = b'compress xxxxxxxxxxxxxxxxxxxxxx data'
+
+
+class TestCbfs(unittest.TestCase):
+    """Test of cbfs_util classes"""
+    #pylint: disable=W0212
+    @classmethod
+    def setUpClass(cls):
+        # Create a temporary directory for test files
+        cls._indir = tempfile.mkdtemp(prefix='cbfs_util.')
+        tools.SetInputDirs([cls._indir])
+
+        # Set up some useful data files
+        TestCbfs._make_input_file('u-boot.bin', U_BOOT_DATA)
+        TestCbfs._make_input_file('u-boot.dtb', U_BOOT_DTB_DATA)
+        TestCbfs._make_input_file('compress', COMPRESS_DATA)
+
+        # Set up a temporary output directory, used by the tools library when
+        # compressing files
+        tools.PrepareOutputDir(None)
+
+        cls.have_cbfstool = True
+        try:
+            tools.Run('which', 'cbfstool')
+        except:
+            cls.have_cbfstool = False
+
+        cls.have_lz4 = True
+        try:
+            tools.Run('lz4', '--no-frame-crc', '-c',
+                      tools.GetInputFilename('u-boot.bin'))
+        except:
+            cls.have_lz4 = False
+
+    @classmethod
+    def tearDownClass(cls):
+        """Remove the temporary input directory and its contents"""
+        if cls._indir:
+            shutil.rmtree(cls._indir)
+        cls._indir = None
+        tools.FinaliseOutputDir()
+
+    @classmethod
+    def _make_input_file(cls, fname, contents):
+        """Create a new test input file, creating directories as needed
+
+        Args:
+            fname: Filename to create
+            contents: File contents to write in to the file
+        Returns:
+            Full pathname of file created
+        """
+        pathname = os.path.join(cls._indir, fname)
+        tools.WriteFile(pathname, contents)
+        return pathname
+
+    def _check_hdr(self, data, size, offset=0, arch=cbfs_util.ARCHITECTURE_X86):
+        """Check that the CBFS has the expected header
+
+        Args:
+            data: Data to check
+            size: Expected ROM size
+            offset: Expected offset to first CBFS file
+            arch: Expected architecture
+
+        Returns:
+            CbfsReader object containing the CBFS
+        """
+        cbfs = cbfs_util.CbfsReader(data)
+        self.assertEqual(cbfs_util.HEADER_MAGIC, cbfs.magic)
+        self.assertEqual(cbfs_util.HEADER_VERSION2, cbfs.version)
+        self.assertEqual(size, cbfs.rom_size)
+        self.assertEqual(0, cbfs.boot_block_size)
+        self.assertEqual(cbfs_util.ENTRY_ALIGN, cbfs.align)
+        self.assertEqual(offset, cbfs.cbfs_offset)
+        self.assertEqual(arch, cbfs.arch)
+        return cbfs
+
+    def _check_uboot(self, cbfs, ftype=cbfs_util.TYPE_RAW, offset=0x38,
+                     data=U_BOOT_DATA, cbfs_offset=None):
+        """Check that the U-Boot file is as expected
+
+        Args:
+            cbfs: CbfsReader object to check
+            ftype: Expected file type
+            offset: Expected offset of file
+            data: Expected data in file
+            cbfs_offset: Expected CBFS offset for file's data
+
+        Returns:
+            CbfsFile object containing the file
+        """
+        self.assertIn('u-boot', cbfs.files)
+        cfile = cbfs.files['u-boot']
+        self.assertEqual('u-boot', cfile.name)
+        self.assertEqual(offset, cfile.offset)
+        if cbfs_offset is not None:
+            self.assertEqual(cbfs_offset, cfile.cbfs_offset)
+        self.assertEqual(data, cfile.data)
+        self.assertEqual(ftype, cfile.ftype)
+        self.assertEqual(cbfs_util.COMPRESS_NONE, cfile.compress)
+        self.assertEqual(len(data), cfile.memlen)
+        return cfile
+
+    def _check_dtb(self, cbfs, offset=0x38, data=U_BOOT_DTB_DATA,
+                   cbfs_offset=None):
+        """Check that the U-Boot dtb file is as expected
+
+        Args:
+            cbfs: CbfsReader object to check
+            offset: Expected offset of file
+            data: Expected data in file
+            cbfs_offset: Expected CBFS offset for file's data
+        """
+        self.assertIn('u-boot-dtb', cbfs.files)
+        cfile = cbfs.files['u-boot-dtb']
+        self.assertEqual('u-boot-dtb', cfile.name)
+        self.assertEqual(offset, cfile.offset)
+        if cbfs_offset is not None:
+            self.assertEqual(cbfs_offset, cfile.cbfs_offset)
+        self.assertEqual(data, cfile.data)
+        self.assertEqual(cbfs_util.TYPE_RAW, cfile.ftype)
+        self.assertEqual(cbfs_util.COMPRESS_NONE, cfile.compress)
+        self.assertEqual(len(data), cfile.memlen)
+
+    def _check_raw(self, data, size, offset=0, arch=cbfs_util.ARCHITECTURE_X86):
+        """Check that two raw files are added as expected
+
+        Args:
+            data: Data to check
+            size: Expected ROM size
+            offset: Expected offset to first CBFS file
+            arch: Expected architecture
+        """
+        cbfs = self._check_hdr(data, size, offset=offset, arch=arch)
+        self._check_uboot(cbfs)
+        self._check_dtb(cbfs)
+
+    def _get_expected_cbfs(self, size, arch='x86', compress=None, base=None):
+        """Get the file created by cbfstool for a particular scenario
+
+        Args:
+            size: Size of the CBFS in bytes
+            arch: Architecture of the CBFS, as a string
+            compress: Compression to use, e.g. cbfs_util.COMPRESS_LZMA
+            base: Base address of file, or None to put it anywhere
+
+        Returns:
+            Resulting CBFS file, or None if cbfstool is not available
+        """
+        if not self.have_cbfstool or not self.have_lz4:
+            return None
+        cbfs_fname = os.path.join(self._indir, 'test.cbfs')
+        cbfs_util.cbfstool(cbfs_fname, 'create', '-m', arch, '-s', '%#x' % size)
+        if base:
+            base = [(1 << 32) - size + b for b in base]
+        cbfs_util.cbfstool(cbfs_fname, 'add', '-n', 'u-boot', '-t', 'raw',
+                           '-c', compress and compress[0] or 'none',
+                           '-f', tools.GetInputFilename(
+                               compress and 'compress' or 'u-boot.bin'),
+                           base=base[0] if base else None)
+        cbfs_util.cbfstool(cbfs_fname, 'add', '-n', 'u-boot-dtb', '-t', 'raw',
+                           '-c', compress and compress[1] or 'none',
+                           '-f', tools.GetInputFilename(
+                               compress and 'compress' or 'u-boot.dtb'),
+                           base=base[1] if base else None)
+        return cbfs_fname
+
+    def _compare_expected_cbfs(self, data, cbfstool_fname):
+        """Compare against what cbfstool creates
+
+        This compares what binman creates with what cbfstool creates for what
+        is purportedly the same thing.
+
+        Args:
+            data: CBFS created by binman
+            cbfstool_fname: CBFS created by cbfstool
+        """
+        if not self.have_cbfstool or not self.have_lz4:
+            return
+        expect = tools.ReadFile(cbfstool_fname)
+        if expect != data:
+            tools.WriteFile('/tmp/expect', expect)
+            tools.WriteFile('/tmp/actual', data)
+            print('diff -y <(xxd -g1 /tmp/expect) <(xxd -g1 /tmp/actual) | colordiff')
+            self.fail('cbfstool produced a different result')
+
+    def test_cbfs_functions(self):
+        """Test global functions of cbfs_util"""
+        self.assertEqual(cbfs_util.ARCHITECTURE_X86, cbfs_util.find_arch('x86'))
+        self.assertIsNone(cbfs_util.find_arch('bad-arch'))
+
+        self.assertEqual(cbfs_util.COMPRESS_LZMA, cbfs_util.find_compress('lzma'))
+        self.assertIsNone(cbfs_util.find_compress('bad-comp'))
+
+    def test_cbfstool_failure(self):
+        """Test failure to run cbfstool"""
+        if not self.have_cbfstool:
+            self.skipTest('No cbfstool available')
+        try:
+            # In verbose mode this test fails since stderr is not captured. Fix
+            # this by turning off verbosity.
+            old_verbose = cbfs_util.VERBOSE
+            cbfs_util.VERBOSE = False
+            with test_util.capture_sys_output() as (_stdout, stderr):
+                with self.assertRaises(Exception) as e:
+                    cbfs_util.cbfstool('missing-file', 'bad-command')
+        finally:
+            cbfs_util.VERBOSE = old_verbose
+        self.assertIn('Unknown command', stderr.getvalue())
+        self.assertIn('Failed to run', str(e.exception))
+
+    def test_cbfs_raw(self):
+        """Test base handling of a Coreboot Filesystem (CBFS)"""
+        size = 0xb0
+        cbw = CbfsWriter(size)
+        cbw.add_file_raw('u-boot', U_BOOT_DATA)
+        cbw.add_file_raw('u-boot-dtb', U_BOOT_DTB_DATA)
+        data = cbw.get_data()
+        self._check_raw(data, size)
+        cbfs_fname = self._get_expected_cbfs(size=size)
+        self._compare_expected_cbfs(data, cbfs_fname)
+
+    def test_cbfs_invalid_file_type(self):
+        """Check handling of an invalid file type when outputiing a CBFS"""
+        size = 0xb0
+        cbw = CbfsWriter(size)
+        cfile = cbw.add_file_raw('u-boot', U_BOOT_DATA)
+
+        # Change the type manually before generating the CBFS, and make sure
+        # that the generator complains
+        cfile.ftype = 0xff
+        with self.assertRaises(ValueError) as e:
+            cbw.get_data()
+        self.assertIn('Unknown type 0xff when writing', str(e.exception))
+
+    def test_cbfs_invalid_file_type_on_read(self):
+        """Check handling of an invalid file type when reading the CBFS"""
+        size = 0xb0
+        cbw = CbfsWriter(size)
+        cbw.add_file_raw('u-boot', U_BOOT_DATA)
+
+        data = cbw.get_data()
+
+        # Read in the first file header
+        cbr = cbfs_util.CbfsReader(data, read=False)
+        with io.BytesIO(data) as fd:
+            self.assertTrue(cbr._find_and_read_header(fd, len(data)))
+            pos = fd.tell()
+            hdr_data = fd.read(cbfs_util.FILE_HEADER_LEN)
+            magic, size, ftype, attr, offset = struct.unpack(
+                cbfs_util.FILE_HEADER_FORMAT, hdr_data)
+
+        # Create a new CBFS with a change to the file type
+        ftype = 0xff
+        newdata = data[:pos]
+        newdata += struct.pack(cbfs_util.FILE_HEADER_FORMAT, magic, size, ftype,
+                               attr, offset)
+        newdata += data[pos + cbfs_util.FILE_HEADER_LEN:]
+
+        # Read in this CBFS and make sure that the reader complains
+        with self.assertRaises(ValueError) as e:
+            cbfs_util.CbfsReader(newdata)
+        self.assertIn('Unknown type 0xff when reading', str(e.exception))
+
+    def test_cbfs_no_space(self):
+        """Check handling of running out of space in the CBFS"""
+        size = 0x60
+        cbw = CbfsWriter(size)
+        cbw.add_file_raw('u-boot', U_BOOT_DATA)
+        with self.assertRaises(ValueError) as e:
+            cbw.get_data()
+        self.assertIn('No space for header', str(e.exception))
+
+    def test_cbfs_no_space_skip(self):
+        """Check handling of running out of space in CBFS with file header"""
+        size = 0x5c
+        cbw = CbfsWriter(size, arch=cbfs_util.ARCHITECTURE_PPC64)
+        cbw._add_fileheader = True
+        cbw.add_file_raw('u-boot', U_BOOT_DATA)
+        with self.assertRaises(ValueError) as e:
+            cbw.get_data()
+        self.assertIn('No space for data before offset', str(e.exception))
+
+    def test_cbfs_no_space_pad(self):
+        """Check handling of running out of space in CBFS with file header"""
+        size = 0x70
+        cbw = CbfsWriter(size)
+        cbw._add_fileheader = True
+        cbw.add_file_raw('u-boot', U_BOOT_DATA)
+        with self.assertRaises(ValueError) as e:
+            cbw.get_data()
+        self.assertIn('No space for data before pad offset', str(e.exception))
+
+    def test_cbfs_bad_header_ptr(self):
+        """Check handling of a bad master-header pointer"""
+        size = 0x70
+        cbw = CbfsWriter(size)
+        cbw.add_file_raw('u-boot', U_BOOT_DATA)
+        data = cbw.get_data()
+
+        # Add one to the pointer to make it invalid
+        newdata = data[:-4] + struct.pack('<I', cbw._header_offset + 1)
+
+        # We should still be able to find the master header by searching
+        with test_util.capture_sys_output() as (stdout, _stderr):
+            cbfs = cbfs_util.CbfsReader(newdata)
+        self.assertIn('Relative offset seems wrong', stdout.getvalue())
+        self.assertIn('u-boot', cbfs.files)
+        self.assertEqual(size, cbfs.rom_size)
+
+    def test_cbfs_bad_header(self):
+        """Check handling of a bad master header"""
+        size = 0x70
+        cbw = CbfsWriter(size)
+        cbw.add_file_raw('u-boot', U_BOOT_DATA)
+        data = cbw.get_data()
+
+        # Drop most of the header and try reading the modified CBFS
+        newdata = data[:cbw._header_offset + 4]
+
+        with test_util.capture_sys_output() as (stdout, _stderr):
+            with self.assertRaises(ValueError) as e:
+                cbfs_util.CbfsReader(newdata)
+        self.assertIn('Relative offset seems wrong', stdout.getvalue())
+        self.assertIn('Cannot find master header', str(e.exception))
+
+    def test_cbfs_bad_file_header(self):
+        """Check handling of a bad file header"""
+        size = 0x70
+        cbw = CbfsWriter(size)
+        cbw.add_file_raw('u-boot', U_BOOT_DATA)
+        data = cbw.get_data()
+
+        # Read in the CBFS master header (only), then stop
+        cbr = cbfs_util.CbfsReader(data, read=False)
+        with io.BytesIO(data) as fd:
+            self.assertTrue(cbr._find_and_read_header(fd, len(data)))
+            pos = fd.tell()
+
+        # Remove all but 4 bytes of the file header, and try to read the file
+        newdata = data[:pos + 4]
+        with test_util.capture_sys_output() as (stdout, _stderr):
+            with io.BytesIO(newdata) as fd:
+                fd.seek(pos)
+                self.assertEqual(False, cbr._read_next_file(fd))
+        self.assertIn('File header at 0 ran out of data', stdout.getvalue())
+
+    def test_cbfs_bad_file_string(self):
+        """Check handling of an incomplete filename string"""
+        size = 0x70
+        cbw = CbfsWriter(size)
+        cbw.add_file_raw('16-characters xx', U_BOOT_DATA)
+        data = cbw.get_data()
+
+        # Read in the CBFS master header (only), then stop
+        cbr = cbfs_util.CbfsReader(data, read=False)
+        with io.BytesIO(data) as fd:
+            self.assertTrue(cbr._find_and_read_header(fd, len(data)))
+            pos = fd.tell()
+
+        # Create a new CBFS with only the first 16 bytes of the file name, then
+        # try to read the file
+        newdata = data[:pos + cbfs_util.FILE_HEADER_LEN + 16]
+        with test_util.capture_sys_output() as (stdout, _stderr):
+            with io.BytesIO(newdata) as fd:
+                fd.seek(pos)
+                self.assertEqual(False, cbr._read_next_file(fd))
+        self.assertIn('String at %x ran out of data' %
+                      cbfs_util.FILE_HEADER_LEN, stdout.getvalue())
+
+    def test_cbfs_debug(self):
+        """Check debug output"""
+        size = 0x70
+        cbw = CbfsWriter(size)
+        cbw.add_file_raw('u-boot', U_BOOT_DATA)
+        data = cbw.get_data()
+
+        try:
+            cbfs_util.DEBUG = True
+            with test_util.capture_sys_output() as (stdout, _stderr):
+                cbfs_util.CbfsReader(data)
+            self.assertEqual('name u-boot\ndata %s\n' % U_BOOT_DATA,
+                             stdout.getvalue())
+        finally:
+            cbfs_util.DEBUG = False
+
+    def test_cbfs_bad_attribute(self):
+        """Check handling of bad attribute tag"""
+        if not self.have_lz4:
+            self.skipTest('lz4 --no-frame-crc not available')
+        size = 0x140
+        cbw = CbfsWriter(size)
+        cbw.add_file_raw('u-boot', COMPRESS_DATA, None,
+                         compress=cbfs_util.COMPRESS_LZ4)
+        data = cbw.get_data()
+
+        # Search the CBFS for the expected compression tag
+        with io.BytesIO(data) as fd:
+            while True:
+                pos = fd.tell()
+                tag, = struct.unpack('>I', fd.read(4))
+                if tag == cbfs_util.FILE_ATTR_TAG_COMPRESSION:
+                    break
+
+        # Create a new CBFS with the tag changed to something invalid
+        newdata = data[:pos] + struct.pack('>I', 0x123) + data[pos + 4:]
+        with test_util.capture_sys_output() as (stdout, _stderr):
+            cbfs_util.CbfsReader(newdata)
+        self.assertEqual('Unknown attribute tag 123\n', stdout.getvalue())
+
+    def test_cbfs_missing_attribute(self):
+        """Check handling of an incomplete attribute tag"""
+        if not self.have_lz4:
+            self.skipTest('lz4 --no-frame-crc not available')
+        size = 0x140
+        cbw = CbfsWriter(size)
+        cbw.add_file_raw('u-boot', COMPRESS_DATA, None,
+                         compress=cbfs_util.COMPRESS_LZ4)
+        data = cbw.get_data()
+
+        # Read in the CBFS master header (only), then stop
+        cbr = cbfs_util.CbfsReader(data, read=False)
+        with io.BytesIO(data) as fd:
+            self.assertTrue(cbr._find_and_read_header(fd, len(data)))
+            pos = fd.tell()
+
+        # Create a new CBFS with only the first 4 bytes of the compression tag,
+        # then try to read the file
+        tag_pos = pos + cbfs_util.FILE_HEADER_LEN + cbfs_util.FILENAME_ALIGN
+        newdata = data[:tag_pos + 4]
+        with test_util.capture_sys_output() as (stdout, _stderr):
+            with io.BytesIO(newdata) as fd:
+                fd.seek(pos)
+                self.assertEqual(False, cbr._read_next_file(fd))
+        self.assertIn('Attribute tag at %x ran out of data' % tag_pos,
+                      stdout.getvalue())
+
+    def test_cbfs_file_master_header(self):
+        """Check handling of a file containing a master header"""
+        size = 0x100
+        cbw = CbfsWriter(size)
+        cbw._add_fileheader = True
+        cbw.add_file_raw('u-boot', U_BOOT_DATA)
+        data = cbw.get_data()
+
+        cbr = cbfs_util.CbfsReader(data)
+        self.assertIn('u-boot', cbr.files)
+        self.assertEqual(size, cbr.rom_size)
+
+    def test_cbfs_arch(self):
+        """Test on non-x86 architecture"""
+        size = 0x100
+        cbw = CbfsWriter(size, arch=cbfs_util.ARCHITECTURE_PPC64)
+        cbw.add_file_raw('u-boot', U_BOOT_DATA)
+        cbw.add_file_raw('u-boot-dtb', U_BOOT_DTB_DATA)
+        data = cbw.get_data()
+        self._check_raw(data, size, offset=0x40,
+                        arch=cbfs_util.ARCHITECTURE_PPC64)
+
+        # Compare against what cbfstool creates
+        cbfs_fname = self._get_expected_cbfs(size=size, arch='ppc64')
+        self._compare_expected_cbfs(data, cbfs_fname)
+
+    def test_cbfs_stage(self):
+        """Tests handling of a Coreboot Filesystem (CBFS)"""
+        if not elf.ELF_TOOLS:
+            self.skipTest('Python elftools not available')
+        elf_fname = os.path.join(self._indir, 'cbfs-stage.elf')
+        elf.MakeElf(elf_fname, U_BOOT_DATA, U_BOOT_DTB_DATA)
+
+        size = 0xb0
+        cbw = CbfsWriter(size)
+        cbw.add_file_stage('u-boot', tools.ReadFile(elf_fname))
+
+        data = cbw.get_data()
+        cbfs = self._check_hdr(data, size)
+        load = 0xfef20000
+        entry = load + 2
+
+        cfile = self._check_uboot(cbfs, cbfs_util.TYPE_STAGE, offset=0x28,
+                                  data=U_BOOT_DATA + U_BOOT_DTB_DATA)
+
+        self.assertEqual(entry, cfile.entry)
+        self.assertEqual(load, cfile.load)
+        self.assertEqual(len(U_BOOT_DATA) + len(U_BOOT_DTB_DATA),
+                         cfile.data_len)
+
+        # Compare against what cbfstool creates
+        if self.have_cbfstool:
+            cbfs_fname = os.path.join(self._indir, 'test.cbfs')
+            cbfs_util.cbfstool(cbfs_fname, 'create', '-m', 'x86', '-s',
+                               '%#x' % size)
+            cbfs_util.cbfstool(cbfs_fname, 'add-stage', '-n', 'u-boot',
+                               '-f', elf_fname)
+            self._compare_expected_cbfs(data, cbfs_fname)
+
+    def test_cbfs_raw_compress(self):
+        """Test base handling of compressing raw files"""
+        if not self.have_lz4:
+            self.skipTest('lz4 --no-frame-crc not available')
+        size = 0x140
+        cbw = CbfsWriter(size)
+        cbw.add_file_raw('u-boot', COMPRESS_DATA, None,
+                         compress=cbfs_util.COMPRESS_LZ4)
+        cbw.add_file_raw('u-boot-dtb', COMPRESS_DATA, None,
+                         compress=cbfs_util.COMPRESS_LZMA)
+        data = cbw.get_data()
+
+        cbfs = self._check_hdr(data, size)
+        self.assertIn('u-boot', cbfs.files)
+        cfile = cbfs.files['u-boot']
+        self.assertEqual(cfile.name, 'u-boot')
+        self.assertEqual(cfile.offset, 56)
+        self.assertEqual(cfile.data, COMPRESS_DATA)
+        self.assertEqual(cfile.ftype, cbfs_util.TYPE_RAW)
+        self.assertEqual(cfile.compress, cbfs_util.COMPRESS_LZ4)
+        self.assertEqual(cfile.memlen, len(COMPRESS_DATA))
+
+        self.assertIn('u-boot-dtb', cbfs.files)
+        cfile = cbfs.files['u-boot-dtb']
+        self.assertEqual(cfile.name, 'u-boot-dtb')
+        self.assertEqual(cfile.offset, 56)
+        self.assertEqual(cfile.data, COMPRESS_DATA)
+        self.assertEqual(cfile.ftype, cbfs_util.TYPE_RAW)
+        self.assertEqual(cfile.compress, cbfs_util.COMPRESS_LZMA)
+        self.assertEqual(cfile.memlen, len(COMPRESS_DATA))
+
+        cbfs_fname = self._get_expected_cbfs(size=size, compress=['lz4', 'lzma'])
+        self._compare_expected_cbfs(data, cbfs_fname)
+
+    def test_cbfs_raw_space(self):
+        """Test files with unused space in the CBFS"""
+        size = 0xf0
+        cbw = CbfsWriter(size)
+        cbw.add_file_raw('u-boot', U_BOOT_DATA)
+        cbw.add_file_raw('u-boot-dtb', U_BOOT_DTB_DATA)
+        data = cbw.get_data()
+        self._check_raw(data, size)
+        cbfs_fname = self._get_expected_cbfs(size=size)
+        self._compare_expected_cbfs(data, cbfs_fname)
+
+    def test_cbfs_offset(self):
+        """Test a CBFS with files at particular offsets"""
+        size = 0x200
+        cbw = CbfsWriter(size)
+        cbw.add_file_raw('u-boot', U_BOOT_DATA, 0x40)
+        cbw.add_file_raw('u-boot-dtb', U_BOOT_DTB_DATA, 0x140)
+
+        data = cbw.get_data()
+        cbfs = self._check_hdr(data, size)
+        self._check_uboot(cbfs, ftype=cbfs_util.TYPE_RAW, offset=0x40,
+                          cbfs_offset=0x40)
+        self._check_dtb(cbfs, offset=0x40, cbfs_offset=0x140)
+
+        cbfs_fname = self._get_expected_cbfs(size=size, base=(0x40, 0x140))
+        self._compare_expected_cbfs(data, cbfs_fname)
+
+    def test_cbfs_invalid_file_type_header(self):
+        """Check handling of an invalid file type when outputting a header"""
+        size = 0xb0
+        cbw = CbfsWriter(size)
+        cfile = cbw.add_file_raw('u-boot', U_BOOT_DATA, 0)
+
+        # Change the type manually before generating the CBFS, and make sure
+        # that the generator complains
+        cfile.ftype = 0xff
+        with self.assertRaises(ValueError) as e:
+            cbw.get_data()
+        self.assertIn('Unknown file type 0xff', str(e.exception))
+
+    def test_cbfs_offset_conflict(self):
+        """Test a CBFS with files that want to overlap"""
+        size = 0x200
+        cbw = CbfsWriter(size)
+        cbw.add_file_raw('u-boot', U_BOOT_DATA, 0x40)
+        cbw.add_file_raw('u-boot-dtb', U_BOOT_DTB_DATA, 0x80)
+
+        with self.assertRaises(ValueError) as e:
+            cbw.get_data()
+        self.assertIn('No space for data before pad offset', str(e.exception))
+
+    def test_cbfs_check_offset(self):
+        """Test that we can discover the offset of a file after writing it"""
+        size = 0xb0
+        cbw = CbfsWriter(size)
+        cbw.add_file_raw('u-boot', U_BOOT_DATA)
+        cbw.add_file_raw('u-boot-dtb', U_BOOT_DTB_DATA)
+        data = cbw.get_data()
+
+        cbfs = cbfs_util.CbfsReader(data)
+        self.assertEqual(0x38, cbfs.files['u-boot'].cbfs_offset)
+        self.assertEqual(0x78, cbfs.files['u-boot-dtb'].cbfs_offset)
+
+
+if __name__ == '__main__':
+    unittest.main()

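For reference, the round trip these tests exercise is simple; a minimal
sketch using the API shown above (the file contents here are made up):

    import cbfs_util
    from cbfs_util import CbfsWriter

    cbw = CbfsWriter(0x100)              # create a 256-byte CBFS
    cbw.add_file_raw('u-boot', b'data')  # add one uncompressed raw file
    image = cbw.get_data()

    cbr = cbfs_util.CbfsReader(image)    # parse it back
    assert cbr.files['u-boot'].data == b'data'
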
+ 66 - 33
tools/binman/cmdline.py

@@ -5,7 +5,7 @@
 # Command-line parser for binman
 #
 
-from optparse import OptionParser
+from argparse import ArgumentParser
 
 def ParseArgs(argv):
     """Parse the binman command-line arguments
@@ -17,50 +17,83 @@ def ParseArgs(argv):
             options provides access to the options (e.g. option.debug)
             args is a list of string arguments
     """
-    parser = OptionParser()
-    parser.add_option('-a', '--entry-arg', type='string', action='append',
+    if '-H' in argv:
+        argv.append('build')
+
+    epilog = '''Binman creates and manipulates images for a board from a set of binaries. Binman is
+controlled by a description in the board device tree.'''
+
+    parser = ArgumentParser(epilog=epilog)
+    parser.add_argument('-B', '--build-dir', type=str, default='b',
+        help='Directory containing the build output')
+    parser.add_argument('-D', '--debug', action='store_true',
+        help='Enable debugging (provides a full traceback on error)')
+    parser.add_argument('-H', '--full-help', action='store_true',
+        default=False, help='Display the README file')
+    parser.add_argument('--toolpath', type=str, action='append',
+        help='Add a path to the directories containing tools')
+    parser.add_argument('-v', '--verbosity', default=1,
+        type=int, help='Control verbosity: 0=silent, 1=warnings, 2=notices, '
+        '3=info, 4=detail, 5=debug')
+
+    subparsers = parser.add_subparsers(dest='cmd')
+
+    build_parser = subparsers.add_parser('build', help='Build firmware image')
+    build_parser.add_argument('-a', '--entry-arg', type=str, action='append',
             help='Set argument value arg=value')
-    parser.add_option('-b', '--board', type='string',
+    build_parser.add_argument('-b', '--board', type=str,
             help='Board name to build')
-    parser.add_option('-B', '--build-dir', type='string', default='b',
-            help='Directory containing the build output')
-    parser.add_option('-d', '--dt', type='string',
+    build_parser.add_argument('-d', '--dt', type=str,
             help='Configuration file (.dtb) to use')
-    parser.add_option('-D', '--debug', action='store_true',
-            help='Enabling debugging (provides a full traceback on error)')
-    parser.add_option('-E', '--entry-docs', action='store_true',
-            help='Write out entry documentation (see README.entries)')
-    parser.add_option('--fake-dtb', action='store_true',
+    build_parser.add_argument('--fake-dtb', action='store_true',
             help='Use fake device tree contents (for testing only)')
-    parser.add_option('-i', '--image', type='string', action='append',
+    build_parser.add_argument('-i', '--image', type=str, action='append',
             help='Image filename to build (if not specified, build all)')
-    parser.add_option('-I', '--indir', action='append',
-            help='Add a path to a directory to use for input files')
-    parser.add_option('-H', '--full-help', action='store_true',
-        default=False, help='Display the README file')
-    parser.add_option('-m', '--map', action='store_true',
+    build_parser.add_argument('-I', '--indir', action='append',
+            help='Add a path to the list of directories to use for input files')
+    build_parser.add_argument('-m', '--map', action='store_true',
         default=False, help='Output a map file for each image')
-    parser.add_option('-O', '--outdir', type='string',
+    build_parser.add_argument('-O', '--outdir', type=str,
         action='store', help='Path to directory to use for intermediate and '
         'output files')
-    parser.add_option('-p', '--preserve', action='store_true',\
+    build_parser.add_argument('-p', '--preserve', action='store_true',
         help='Preserve temporary output directory even if option -O is not '
              'given')
-    parser.add_option('-P', '--processes', type=int,
-                      help='set number of processes to use for running tests')
-    parser.add_option('-t', '--test', action='store_true',
-                    default=False, help='run tests')
-    parser.add_option('-T', '--test-coverage', action='store_true',
-                    default=False, help='run tests and check for 100% coverage')
-    parser.add_option('-u', '--update-fdt', action='store_true',
+    build_parser.add_argument('-u', '--update-fdt', action='store_true',
         default=False, help='Update the binman node with offset/size info')
-    parser.add_option('-v', '--verbosity', default=1,
-        type='int', help='Control verbosity: 0=silent, 1=progress, 3=full, '
-        '4=debug')
 
-    parser.usage += """
+    entry_parser = subparsers.add_parser('entry-docs',
+        help='Write out entry documentation (see README.entries)')
+
+    list_parser = subparsers.add_parser('ls', help='List files in an image')
+    list_parser.add_argument('-i', '--image', type=str, required=True,
+                             help='Image filename to list')
+    list_parser.add_argument('paths', type=str, nargs='*',
+                             help='Paths within file to list (wildcard)')
+
+    extract_parser = subparsers.add_parser('extract',
+                                           help='Extract files from an image')
+    extract_parser.add_argument('-i', '--image', type=str, required=True,
+                                help='Image filename to extract')
+    extract_parser.add_argument('-f', '--filename', type=str,
+                                help='Output filename to write to')
+    extract_parser.add_argument('-O', '--outdir', type=str, default='',
+        help='Path to directory to use for output files')
+    extract_parser.add_argument('paths', type=str, nargs='*',
+                                help='Paths within file to extract (wildcard)')
+    extract_parser.add_argument('-U', '--uncompressed', action='store_true',
+        help='Output raw uncompressed data for compressed entries')
 
-Create images for a board from a set of binaries. It is controlled by a
-description in the board device tree."""
+    test_parser = subparsers.add_parser('test', help='Run tests')
+    test_parser.add_argument('-P', '--processes', type=int,
+        help='set number of processes to use for running tests')
+    test_parser.add_argument('-T', '--test-coverage', action='store_true',
+        default=False, help='run tests and check for 100%% coverage')
+    test_parser.add_argument('-X', '--test-preserve-dirs', action='store_true',
+        help='Preserve and display test-created input directories; also '
+             'preserve the output directory if a single test is run (pass test '
+             'name at the end of the command line)')
+    test_parser.add_argument('tests', nargs='*',
+                             help='Test names to run (omit for all)')
 
     return parser.parse_args(argv)

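As an aside, the switch to argparse above means ParseArgs() now returns a
single Namespace with the subcommand in 'cmd'; a minimal sketch of driving it
from Python (the image filename is hypothetical):

    import cmdline

    args = cmdline.ParseArgs(['extract', '-i', 'image.bin', 'u-boot*'])
    assert args.cmd == 'extract'
    assert args.image == 'image.bin'  # required by the extract subparser
    assert args.paths == ['u-boot*']  # wildcard paths within the image
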
+ 175 - 37
tools/binman/control.py

@@ -12,6 +12,7 @@ import os
 import sys
 import tools
 
+import cbfs_util
 import command
 import elf
 from image import Image
@@ -66,19 +67,120 @@ def WriteEntryDocs(modules, test_missing=None):
     from entry import Entry
     Entry.WriteDocs(modules, test_missing)
 
-def Binman(options, args):
+
+def ListEntries(image_fname, entry_paths):
+    """List the entries in an image
+
+    This decodes the supplied image and displays a table of entries from that
+    image, preceded by a header.
+
+    Args:
+        image_fname: Image filename to process
+        entry_paths: List of wildcarded paths (e.g. ['*dtb*', 'u-boot*',
+                                                     'section/u-boot'])
+    """
+    image = Image.FromFile(image_fname)
+
+    entries, lines, widths = image.GetListEntries(entry_paths)
+
+    num_columns = len(widths)
+    for linenum, line in enumerate(lines):
+        if linenum == 1:
+            # Print header line
+            print('-' * (sum(widths) + num_columns * 2))
+        out = ''
+        for i, item in enumerate(line):
+            width = -widths[i]
+            if item.startswith('>'):
+                width = -width
+                item = item[1:]
+            txt = '%*s  ' % (width, item)
+            out += txt
+        print(out.rstrip())
+
+
+def ReadEntry(image_fname, entry_path, decomp=True):
+    """Extract an entry from an image
+
+    This extracts the data from a particular entry in an image
+
+    Args:
+        image_fname: Image filename to process
+        entry_path: Path to entry to extract
+        decomp: True to return uncompressed data if the data is compressed;
+            False to return the raw data
+
+    Returns:
+        data extracted from the entry
+    """
+    image = Image.FromFile(image_fname)
+    entry = image.FindEntryPath(entry_path)
+    return entry.ReadData(decomp)
+
+
+def ExtractEntries(image_fname, output_fname, outdir, entry_paths,
+                   decomp=True):
+    """Extract the data from one or more entries and write it to files
+
+    Args:
+        image_fname: Image filename to process
+        output_fname: Single output filename to use if extracting one file, None
+            otherwise
+        outdir: Output directory to use (for any number of files), else None
+        entry_paths: List of entry paths to extract
+        decomp: True to decompress the entry data before writing it out
+
+    Returns:
+        List of EntryInfo records that were written
+    """
+    image = Image.FromFile(image_fname)
+
+    # Output an entry to a single file, as a special case
+    if output_fname:
+        if not entry_paths:
+            raise ValueError('Must specify an entry path to write with -f')
+        if len(entry_paths) != 1:
+            raise ValueError('Must specify exactly one entry path to write '
+                             'with -f')
+        entry = image.FindEntryPath(entry_paths[0])
+        data = entry.ReadData(decomp)
+        tools.WriteFile(output_fname, data)
+        tout.Notice("Wrote %#x bytes to file '%s'" % (len(data), output_fname))
+        return
+
+    # Otherwise we will output to a path given by the entry path of each entry.
+    # This means that entries will appear in subdirectories if they are part of
+    # a sub-section.
+    einfos = image.GetListEntries(entry_paths)[0]
+    tout.Notice('%d entries match and will be written' % len(einfos))
+    for einfo in einfos:
+        entry = einfo.entry
+        data = entry.ReadData(decomp)
+        path = entry.GetPath()[1:]
+        fname = os.path.join(outdir, path)
+
+        # If this entry has children, create a directory for it and put its
+        # data in a file called 'root' in that directory
+        if entry.GetEntries():
+            if not os.path.exists(fname):
+                os.makedirs(fname)
+            fname = os.path.join(fname, 'root')
+        tout.Notice("Write entry '%s' to '%s'" % (entry.GetPath(), fname))
+        tools.WriteFile(fname, data)
+    return einfos
+
+
+def Binman(args):
     """The main control code for binman
 
     This assumes that help and test options have already been dealt with. It
     deals with the core task of building images.
 
     Args:
-        options: Command line options object
-        args: Command line arguments (list of strings)
+        args: Command line arguments Namespace object
     """
     global images
 
-    if options.full_help:
+    if args.full_help:
         pager = os.getenv('PAGER')
         if not pager:
             pager = 'more'
@@ -87,18 +189,31 @@ def Binman(options, args):
         command.Run(pager, fname)
         return 0
 
+    if args.cmd == 'ls':
+        ListEntries(args.image, args.paths)
+        return 0
+
+    if args.cmd == 'extract':
+        try:
+            tools.PrepareOutputDir(None)
+            ExtractEntries(args.image, args.filename, args.outdir, args.paths,
+                           not args.uncompressed)
+        finally:
+            tools.FinaliseOutputDir()
+        return 0
+
     # Try to figure out which device tree contains our image description
-    if options.dt:
-        dtb_fname = options.dt
+    if args.dt:
+        dtb_fname = args.dt
     else:
-        board = options.board
+        board = args.board
         if not board:
             raise ValueError('Must provide a board to process (use -b <board>)')
-        board_pathname = os.path.join(options.build_dir, board)
+        board_pathname = os.path.join(args.build_dir, board)
         dtb_fname = os.path.join(board_pathname, 'u-boot.dtb')
-        if not options.indir:
-            options.indir = ['.']
-        options.indir.append(board_pathname)
+        if not args.indir:
+            args.indir = ['.']
+        args.indir.append(board_pathname)
 
     try:
         # Import these here in case libfdt.py is not available, in which case
@@ -106,13 +221,15 @@ def Binman(options, args):
         import fdt
         import fdt_util
 
-        tout.Init(options.verbosity)
-        elf.debug = options.debug
-        state.use_fake_dtb = options.fake_dtb
+        tout.Init(args.verbosity)
+        elf.debug = args.debug
+        cbfs_util.VERBOSE = args.verbosity > 2
+        state.use_fake_dtb = args.fake_dtb
         try:
-            tools.SetInputDirs(options.indir)
-            tools.PrepareOutputDir(options.outdir, options.preserve)
-            state.SetEntryArgs(options.entry_arg)
+            tools.SetInputDirs(args.indir)
+            tools.PrepareOutputDir(args.outdir, args.preserve)
+            tools.SetToolPaths(args.toolpath)
+            state.SetEntryArgs(args.entry_arg)
 
             # Get the device tree ready by compiling it and copying the compiled
             # output into a file in our output directory. Then scan it for use
@@ -129,16 +246,16 @@ def Binman(options, args):
 
             images = _ReadImageDesc(node)
 
-            if options.image:
+            if args.image:
                 skip = []
                 new_images = OrderedDict()
                 for name, image in images.items():
-                    if name in options.image:
+                    if name in args.image:
                         new_images[name] = image
                     else:
                         skip.append(name)
                 images = new_images
-                if skip and options.verbosity >= 2:
+                if skip and args.verbosity >= 2:
                     print('Skipping images: %s' % ', '.join(skip))
 
             state.Prepare(images, dtb)
@@ -152,7 +269,7 @@ def Binman(options, args):
             # entry offsets remain the same.
             for image in images.values():
                 image.ExpandEntries()
-                if options.update_fdt:
+                if args.update_fdt:
                     image.AddMissingProperties()
                 image.ProcessFdt(dtb)
 
@@ -168,24 +285,45 @@ def Binman(options, args):
                 # completed and written, but that does not seem important.
                 image.GetEntryContents()
                 image.GetEntryOffsets()
-                try:
-                    image.PackEntries()
-                    image.CheckSize()
-                    image.CheckEntries()
-                except Exception as e:
-                    if options.map:
-                        fname = image.WriteMap()
-                        print("Wrote map file '%s' to show errors"  % fname)
-                    raise
-                image.SetImagePos()
-                if options.update_fdt:
-                    image.SetCalculatedProperties()
-                    for dtb_item in state.GetFdts():
-                        dtb_item.Sync()
-                image.ProcessEntryContents()
+
+                # We need to pack the entries to figure out where everything
+                # should be placed. This sets the offset/size of each entry.
+                # However, after packing we call ProcessEntryContents() which
+                # may result in an entry changing size. In that case we need to
+                # do another pass. Since the device tree often contains the
+                # final offset/size information we try to make space for this in
+                # AddMissingProperties() above. However, if the device is
+                # compressed we cannot know this compressed size in advance,
+                # since changing an offset from 0x100 to 0x104 (for example) can
+                # alter the compressed size of the device tree. So we need a
+                # third pass for this.
+                passes = 3
+                for pack_pass in range(passes):
+                    try:
+                        image.PackEntries()
+                        image.CheckSize()
+                        image.CheckEntries()
+                    except Exception as e:
+                        if args.map:
+                            fname = image.WriteMap()
+                            print("Wrote map file '%s' to show errors"  % fname)
+                        raise
+                    image.SetImagePos()
+                    if args.update_fdt:
+                        image.SetCalculatedProperties()
+                        for dtb_item in state.GetFdts():
+                            dtb_item.Sync()
+                    sizes_ok = image.ProcessEntryContents()
+                    if sizes_ok:
+                        break
+                    image.ResetForPack()
+                if not sizes_ok:
+                    image.Raise('Entries expanded after packing (tried %s passes)' %
+                                passes)
+
                 image.WriteSymbols()
                 image.BuildImage()
-                if options.map:
+                if args.map:
                     image.WriteMap()
 
             # Write the updated FDTs to our output files

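The inspection entry points added above can also be used directly from
Python; a minimal sketch, assuming an image has already been built (the
filename is hypothetical):

    import control

    control.ListEntries('image.bin', ['*'])          # print a table of entries
    data = control.ReadEntry('image.bin', 'u-boot')  # decompressed entry data
    raw = control.ReadEntry('image.bin', 'u-boot', decomp=False)  # raw data
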
+ 174 - 0
tools/binman/elf.py

@@ -5,19 +5,39 @@
 # Handle various things related to ELF images
 #
 
+from __future__ import print_function
+
 from collections import namedtuple, OrderedDict
 import command
+import io
 import os
 import re
+import shutil
 import struct
+import tempfile
 
 import tools
 
+ELF_TOOLS = True
+try:
+    from elftools.elf.elffile import ELFFile
+    from elftools.elf.sections import SymbolTableSection
+except:  # pragma: no cover
+    ELF_TOOLS = False
+
 # This is enabled from control.py
 debug = False
 
 Symbol = namedtuple('Symbol', ['section', 'address', 'size', 'weak'])
 
+# Information about an ELF file:
+#    data: Extracted program contents of ELF file (this would be loaded by an
+#           ELF loader when reading this file)
+#    load: Load address of code
+#    entry: Entry address of code
+#    memsize: Number of bytes in memory occupied by loading this ELF file
+ElfInfo = namedtuple('ElfInfo', ['data', 'load', 'entry', 'memsize'])
+
 
 def GetSymbols(fname, patterns):
     """Get the symbols from an ELF file
@@ -128,3 +148,157 @@ def LookupAndWriteSymbols(elf_fname, entry, section):
                       (msg, name, offset, value, len(value_bytes)))
             entry.data = (entry.data[:offset] + value_bytes +
                         entry.data[offset + sym.size:])
+
+def MakeElf(elf_fname, text, data):
+    """Make an elf file with the given data in a single section
+
+    The output file has a several section including '.text' and '.data',
+    containing the info provided in arguments.
+
+    Args:
+        elf_fname: Output filename
+        text: Text (code) to put in the file's .text section
+        data: Data to put in the file's .data section
+    """
+    outdir = tempfile.mkdtemp(prefix='binman.elf.')
+    s_file = os.path.join(outdir, 'elf.S')
+
+    # Split the text into two parts so that we can make the entry point two
+    # bytes after the start of the text section
+    text_bytes1 = ['\t.byte\t%#x' % tools.ToByte(byte) for byte in text[:2]]
+    text_bytes2 = ['\t.byte\t%#x' % tools.ToByte(byte) for byte in text[2:]]
+    data_bytes = ['\t.byte\t%#x' % tools.ToByte(byte) for byte in data]
+    with open(s_file, 'w') as fd:
+        print('''/* Auto-generated assembly program to produce an ELF file for testing */
+
+.section .text
+.code32
+.globl _start
+.type _start, @function
+%s
+_start:
+%s
+.ident "comment"
+
+.comm fred,8,4
+
+.section .empty
+.globl _empty
+_empty:
+.byte 1
+
+.globl ernie
+.data
+.type ernie, @object
+.size ernie, 4
+ernie:
+%s
+''' % ('\n'.join(text_bytes1), '\n'.join(text_bytes2), '\n'.join(data_bytes)),
+        file=fd)
+    lds_file = os.path.join(outdir, 'elf.lds')
+
+    # Use a linker script to set the alignment and text address.
+    with open(lds_file, 'w') as fd:
+        print('''/* Auto-generated linker script to produce an ELF file for testing */
+
+PHDRS
+{
+    text PT_LOAD ;
+    data PT_LOAD ;
+    empty PT_LOAD FLAGS ( 6 ) ;
+    note PT_NOTE ;
+}
+
+SECTIONS
+{
+    . = 0xfef20000;
+    ENTRY(_start)
+    .text . : SUBALIGN(0)
+    {
+        *(.text)
+    } :text
+    .data : {
+        *(.data)
+    } :data
+    _bss_start = .;
+    .empty : {
+        *(.empty)
+    } :empty
+    .note : {
+        *(.comment)
+    } :note
+    .bss _bss_start  (OVERLAY) : {
+        *(.bss)
+    }
+}
+''', file=fd)
+    # -static: Avoid requiring any shared libraries
+    # -nostdlib: Don't link with C library
+    # -Wl,--build-id=none: Don't generate a build ID, so that we just get the
+    #   text section at the start
+    # -m32: Build for 32-bit x86
+    # -T...: Specifies the link script, which sets the start address
+    stdout = command.Output('cc', '-static', '-nostdlib', '-Wl,--build-id=none',
+                            '-m32', '-T', lds_file, '-o', elf_fname, s_file)
+    shutil.rmtree(outdir)
+
+def DecodeElf(data, location):
+    """Decode an ELF file and return information about it
+
+    Args:
+        data: Data from ELF file
+        location: Start address of data to return
+
+    Returns:
+        ElfInfo object containing information about the decoded ELF file
+    """
+    file_size = len(data)
+    with io.BytesIO(data) as fd:
+        elf = ELFFile(fd)
+        data_start = 0xffffffff
+        data_end = 0
+        mem_end = 0
+        virt_to_phys = 0
+
+        for i in range(elf.num_segments()):
+            segment = elf.get_segment(i)
+            if segment['p_type'] != 'PT_LOAD' or not segment['p_memsz']:
+                skipped = 1  # To make code-coverage see this line
+                continue
+            start = segment['p_paddr']
+            mend = start + segment['p_memsz']
+            rend = start + segment['p_filesz']
+            data_start = min(data_start, start)
+            data_end = max(data_end, rend)
+            mem_end = max(mem_end, mend)
+            if not virt_to_phys:
+                virt_to_phys = segment['p_paddr'] - segment['p_vaddr']
+
+        output = bytearray(data_end - data_start)
+        for i in range(elf.num_segments()):
+            segment = elf.get_segment(i)
+            if segment['p_type'] != 'PT_LOAD' or not segment['p_memsz']:
+                skipped = 1  # To make code-coverage see this line
+                continue
+            start = segment['p_paddr']
+            offset = 0
+            if start < location:
+                offset = location - start
+                start = location
+            # A legal ELF file can have a program header with non-zero length
+            # but zero-length file size and a non-zero offset which, added
+            # together, are greater than input->size (i.e. the total file size).
+            # So we need to skip the check entirely when p_filesz is zero.
+            # Note: All of this code is commented out since we don't have a test
+            # case for it.
+            size = segment['p_filesz']
+            #if not size:
+                #continue
+            #end = segment['p_offset'] + segment['p_filesz']
+            #if end > file_size:
+                #raise ValueError('Underflow copying out the segment. File has %#x bytes left, segment end is %#x\n',
+                                 #file_size, end)
+            output[start - data_start:start - data_start + size] = (
+                segment.data()[offset:])
+    return ElfInfo(output, data_start, elf.header['e_entry'] + virt_to_phys,
+                   mem_end - data_start)

+ 41 - 0
tools/binman/elf_test.py

@@ -5,9 +5,12 @@
 # Test for the elf module
 
 import os
+import shutil
 import sys
+import tempfile
 import unittest
 
+import command
 import elf
 import test_util
 import tools
@@ -136,6 +139,44 @@ class TestElf(unittest.TestCase):
         elf.debug = False
         self.assertTrue(len(stdout.getvalue()) > 0)
 
+    def testMakeElf(self):
+        """Test for the MakeElf function"""
+        outdir = tempfile.mkdtemp(prefix='elf.')
+        expected_text = b'1234'
+        expected_data = b'wxyz'
+        elf_fname = os.path.join(outdir, 'elf')
+        bin_fname = os.path.join(outdir, 'bin')
+
+        # Make an ELF file and then convert it to a flat binary file. This
+        # should produce the original data.
+        elf.MakeElf(elf_fname, expected_text, expected_data)
+        stdout = command.Output('objcopy', '-O', 'binary', elf_fname, bin_fname)
+        with open(bin_fname, 'rb') as fd:
+            data = fd.read()
+        self.assertEqual(expected_text + expected_data, data)
+        shutil.rmtree(outdir)
+
+    def testDecodeElf(self):
+        """Test for the MakeElf function"""
+        if not elf.ELF_TOOLS:
+            self.skipTest('Python elftools not available')
+        outdir = tempfile.mkdtemp(prefix='elf.')
+        expected_text = b'1234'
+        expected_data = b'wxyz'
+        elf_fname = os.path.join(outdir, 'elf')
+        elf.MakeElf(elf_fname, expected_text, expected_data)
+        data = tools.ReadFile(elf_fname)
+
+        load = 0xfef20000
+        entry = load + 2
+        expected = expected_text + expected_data
+        self.assertEqual(elf.ElfInfo(expected, load, entry, len(expected)),
+                         elf.DecodeElf(data, 0))
+        self.assertEqual(elf.ElfInfo(b'\0\0' + expected[2:],
+                                     load, entry, len(expected)),
+                         elf.DecodeElf(data, load + 2))
+        shutil.rmtree(outdir)
+
 
 if __name__ == '__main__':
     unittest.main()

+ 169 - 18
tools/binman/entry.py

@@ -23,6 +23,7 @@ import sys
 import fdt_util
 import state
 import tools
+import tout
 
 modules = {}
 
@@ -33,6 +34,10 @@ our_path = os.path.dirname(os.path.realpath(__file__))
 # device-tree properties.
 EntryArg = namedtuple('EntryArg', ['name', 'datatype'])
 
+# Information about an entry for use when displaying summaries
+EntryInfo = namedtuple('EntryInfo', ['indent', 'name', 'etype', 'size',
+                                     'image_pos', 'uncomp_size', 'offset',
+                                     'entry'])
 
 class Entry(object):
     """An Entry in the section
@@ -51,6 +56,8 @@ class Entry(object):
         offset: Offset of entry within the section, None if not known yet (in
             which case it will be calculated by Pack())
         size: Entry size in bytes, None if not known
+        uncomp_size: Size of uncompressed data in bytes, if the entry is
+            compressed, else None
         contents_size: Size of contents in bytes, 0 by default
         align: Entry start offset alignment, or None
         align_size: Entry size alignment, or None
@@ -58,6 +65,9 @@ class Entry(object):
         pad_before: Number of pad bytes before the contents, 0 if none
         pad_after: Number of pad bytes after the contents, 0 if none
         data: Contents of entry (string of bytes)
+        compress: Compression algorithm used (e.g. 'lz4'), 'none' if none
+        orig_offset: Original offset value read from node
+        orig_size: Original size value read from node
     """
     def __init__(self, section, etype, node, read_node=True, name_prefix=''):
         self.section = section
@@ -66,6 +76,7 @@ class Entry(object):
         self.name = node and (name_prefix + node.name) or 'none'
         self.offset = None
         self.size = None
+        self.uncomp_size = None
         self.data = None
         self.contents_size = 0
         self.align = None
@@ -76,15 +87,15 @@ class Entry(object):
         self.offset_unset = False
         self.image_pos = None
         self._expand_size = False
+        self.compress = 'none'
         if read_node:
             self.ReadNode()
 
     @staticmethod
-    def Lookup(section, node_path, etype):
+    def Lookup(node_path, etype):
         """Look up the entry class for a node.
 
         Args:
-            section:   Section object containing this node
             node_path: Path name of Node object containing information about
                        the entry to create (used for errors)
             etype:   Entry type to use
@@ -135,7 +146,7 @@ class Entry(object):
         """
         if not etype:
             etype = fdt_util.GetString(node, 'type', node.name)
-        obj = Entry.Lookup(section, node.path, etype)
+        obj = Entry.Lookup(node.path, etype)
 
         # Call its constructor to get the object we want.
         return obj(section, etype, node)
@@ -149,6 +160,14 @@ class Entry(object):
             self.Raise("Please use 'offset' instead of 'pos'")
         self.offset = fdt_util.GetInt(self._node, 'offset')
         self.size = fdt_util.GetInt(self._node, 'size')
+        self.orig_offset = self.offset
+        self.orig_size = self.size
+
+        # These should not be set in input files, but are set in an FDT map,
+        # which is also read by this code.
+        self.image_pos = fdt_util.GetInt(self._node, 'image-pos')
+        self.uncomp_size = fdt_util.GetInt(self._node, 'uncomp-size')
+
         self.align = fdt_util.GetInt(self._node, 'align')
         if tools.NotPowerOfTwo(self.align):
             raise ValueError("Node '%s': Alignment %s must be a power of two" %
@@ -157,8 +176,8 @@ class Entry(object):
         self.pad_after = fdt_util.GetInt(self._node, 'pad-after', 0)
         self.align_size = fdt_util.GetInt(self._node, 'align-size')
         if tools.NotPowerOfTwo(self.align_size):
-            raise ValueError("Node '%s': Alignment size %s must be a power "
-                             "of two" % (self._node.path, self.align_size))
+            self.Raise("Alignment size %s must be a power of two" %
+                       self.align_size)
         self.align_end = fdt_util.GetInt(self._node, 'align-end')
         self.offset_unset = fdt_util.GetBool(self._node, 'offset-unset')
         self.expand_size = fdt_util.GetBool(self._node, 'expand-size')
@@ -188,6 +207,8 @@ class Entry(object):
         for prop in ['offset', 'size', 'image-pos']:
             if not prop in self._node.props:
                 state.AddZeroProp(self._node, prop)
+        if self.compress != 'none':
+            state.AddZeroProp(self._node, 'uncomp-size')
         err = state.CheckAddHashProp(self._node)
         if err:
             self.Raise(err)
@@ -196,8 +217,10 @@ class Entry(object):
         """Set the value of device-tree properties calculated by binman"""
         state.SetInt(self._node, 'offset', self.offset)
         state.SetInt(self._node, 'size', self.size)
-        state.SetInt(self._node, 'image-pos',
-                       self.image_pos - self.section.GetRootSkipAtStart())
+        base = self.section.GetRootSkipAtStart() if self.section else 0
+        state.SetInt(self._node, 'image-pos', self.image_pos - base)
+        if self.uncomp_size is not None:
+            state.SetInt(self._node, 'uncomp-size', self.uncomp_size)
         state.CheckSetHashValue(self._node, self.GetData)
 
     def ProcessFdt(self, fdt):
@@ -229,26 +252,36 @@ class Entry(object):
         This sets both the data and content_size properties
 
         Args:
-            data: Data to set to the contents (string)
+            data: Data to set to the contents (bytes)
         """
         self.data = data
         self.contents_size = len(self.data)
 
     def ProcessContentsUpdate(self, data):
-        """Update the contens of an entry, after the size is fixed
+        """Update the contents of an entry, after the size is fixed
 
-        This checks that the new data is the same size as the old.
+        This checks that the new data is the same size as the old. If the size
+        has changed, this triggers a re-run of the packing algorithm.
 
         Args:
-            data: Data to set to the contents (string)
+            data: Data to set to the contents (bytes)
 
         Raises:
             ValueError if the new data size is not the same as the old
         """
-        if len(data) != self.contents_size:
+        size_ok = True
+        new_size = len(data)
+        if state.AllowEntryExpansion():
+            if new_size > self.contents_size:
+                tout.Debug("Entry '%s' size change from %#x to %#x" % (
+                    self._node.path, self.contents_size, new_size))
+                # self.data will indicate the new size needed
+                size_ok = False
+        elif new_size != self.contents_size:
             self.Raise('Cannot update entry size from %d to %d' %
-                       (len(data), self.contents_size))
+                       (self.contents_size, new_size))
         self.SetContents(data)
+        return size_ok
 
     def ObtainContents(self):
         """Figure out the contents of an entry.
@@ -260,6 +293,11 @@ class Entry(object):
         # No contents by default: subclasses can implement this
         return True
 
+    def ResetForPack(self):
+        """Reset offset/size fields so that packing can be done again"""
+        self.offset = self.orig_offset
+        self.size = self.orig_size
+
     def Pack(self, offset):
         """Figure out how to pack the entry into the section
 
@@ -355,11 +393,34 @@ class Entry(object):
         return self.data
 
     def GetOffsets(self):
+        """Get the offsets for siblings
+
+        Some entry types can contain information about the position or size of
+        other entries. An example of this is the Intel Flash Descriptor, which
+        knows where the Intel Management Engine section should go.
+
+        If this entry knows about the position of other entries, it can specify
+        this by returning values here
+
+        Returns:
+            Dict:
+                key: Entry type
+                value: List containing position and size of the given entry
+                    type. Either can be None if not known
+        """
         return {}
 
-    def SetOffsetSize(self, pos, size):
-        self.offset = pos
-        self.size = size
+    def SetOffsetSize(self, offset, size):
+        """Set the offset and/or size of an entry
+
+        Args:
+            offset: New offset, or None to leave alone
+            size: New size, or None to leave alone
+        """
+        if offset is not None:
+            self.offset = offset
+        if size is not None:
+            self.size = size
 
     def SetImagePos(self, image_pos):
         """Set the position in the image
@@ -370,7 +431,22 @@ class Entry(object):
         self.image_pos = image_pos + self.offset
 
     def ProcessContents(self):
-        pass
+        """Do any post-packing updates of entry contents
+
+        This function should call ProcessContentsUpdate() to update the entry
+        contents, if necessary, returning its return value here.
+
+        Returns:
+            True if the new data size is OK, False if expansion is needed
+
+        Raises:
+            ValueError if the new data size is not the same as the old and
+                state.AllowEntryExpansion() is False
+        """
+        return True
 
     def WriteSymbols(self, section):
         """Write symbol values into binary files for access at run time
@@ -482,7 +558,9 @@ features to produce new behaviours.
             modules.remove('_testing')
         missing = []
         for name in modules:
-            module = Entry.Lookup(name, name, name)
+            if name.startswith('__'):
+                continue
+            module = Entry.Lookup(name, name)
             docs = getattr(module, '__doc__')
             if test_missing == name:
                 docs = None
@@ -529,3 +607,76 @@ features to produce new behaviours.
             # the data grows. This should not fail, but check it to be sure.
             if not self.ObtainContents():
                 self.Raise('Cannot obtain contents when expanding entry')
+
+    def HasSibling(self, name):
+        """Check if there is a sibling of a given name
+
+        Returns:
+            True if there is an entry with this name in the same section,
+                else False
+        """
+        return name in self.section.GetEntries()
+
+    def GetSiblingImagePos(self, name):
+        """Return the image position of the given sibling
+
+        Returns:
+            Image position of sibling, or None if the sibling has no position,
+                or False if there is no such sibling
+        """
+        if not self.HasSibling(name):
+            return False
+        return self.section.GetEntries()[name].image_pos
+
+    @staticmethod
+    def AddEntryInfo(entries, indent, name, etype, size, image_pos,
+                     uncomp_size, offset, entry):
+        """Add a new entry to the entries list
+
+        Args:
+            entries: List (of EntryInfo objects) to add to
+            indent: Current indent level to add to list
+            name: Entry name (string)
+            etype: Entry type (string)
+            size: Entry size in bytes (int)
+            image_pos: Position within image in bytes (int)
+            uncomp_size: Uncompressed size if the entry uses compression, else
+                None
+            offset: Entry offset within parent in bytes (int)
+            entry: Entry object
+        """
+        entries.append(EntryInfo(indent, name, etype, size, image_pos,
+                                 uncomp_size, offset, entry))
+
+    def ListEntries(self, entries, indent):
+        """Add files in this entry to the list of entries
+
+        This can be overridden by subclasses which need different behaviour.
+
+        Args:
+            entries: List (of EntryInfo objects) to add to
+            indent: Current indent level to add to list
+        """
+        self.AddEntryInfo(entries, indent, self.name, self.etype, self.size,
+                          self.image_pos, self.uncomp_size, self.offset, self)
+
+    def ReadData(self, decomp=True):
+        """Read the data for an entry from the image
+
+        This is used when the image has been read in and we want to extract the
+        data for a particular entry from that image.
+
+        Args:
+            decomp: True to decompress any compressed data before returning it;
+                False to return the raw, uncompressed data
+
+        Returns:
+            Entry data (bytes)
+        """
+        # Use True here so that we get an uncompressed section to work from,
+        # although compressed sections are currently not supported
+        data = self.section.ReadData(True)
+        tout.Info('%s: Reading data from offset %#x-%#x, size %#x (avail %#x)' %
+                  (self.GetPath(), self.offset, self.offset + self.size,
+                   self.size, len(data)))
+        return data[self.offset:self.offset + self.size]

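To make the multi-pass packing in control.py concrete: ProcessContentsUpdate()
now returns a size status instead of always raising, and the caller repacks on
failure. A simplified sketch of that contract ('image', 'entry' and 'new_data'
are stand-ins):

    size_ok = entry.ProcessContentsUpdate(new_data)
    if not size_ok:
        # The entry grew and state.AllowEntryExpansion() permitted it, so
        # reset offsets/sizes and pack again to re-place everything
        image.ResetForPack()
        image.PackEntries()
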
+ 16 - 16
tools/binman/entry_test.py

@@ -9,12 +9,11 @@ import os
 import sys
 import unittest
 
+import entry
 import fdt
 import fdt_util
 import tools
 
-entry = None
-
 class TestEntry(unittest.TestCase):
     def setUp(self):
         tools.PrepareOutputDir(None)
@@ -29,16 +28,7 @@ class TestEntry(unittest.TestCase):
         dtb = fdt.FdtScan(fname)
         return dtb.GetNode('/binman/u-boot')
 
-    def test1EntryNoImportLib(self):
-        """Test that we can import Entry subclassess successfully"""
-
-        sys.modules['importlib'] = None
-        global entry
-        import entry
-        entry.Entry.Create(None, self.GetNode(), 'u-boot')
-
-    def test2EntryImportLib(self):
-        del sys.modules['importlib']
+    def _ReloadEntry(self):
         global entry
         if entry:
             if sys.version_info[0] >= 3:
@@ -48,8 +38,21 @@ class TestEntry(unittest.TestCase):
                 reload(entry)
         else:
             import entry
+
+    def test1EntryNoImportLib(self):
+        """Test that we can import Entry subclassess successfully"""
+        sys.modules['importlib'] = None
+        global entry
+        self._ReloadEntry()
+        entry.Entry.Create(None, self.GetNode(), 'u-boot')
+        self.assertFalse(entry.have_importlib)
+
+    def test2EntryImportLib(self):
+        del sys.modules['importlib']
+        global entry
+        self._ReloadEntry()
         entry.Entry.Create(None, self.GetNode(), 'u-boot-spl')
-        del entry
+        self.assertTrue(entry.have_importlib)
 
     def testEntryContents(self):
         """Test the Entry bass class"""
@@ -59,7 +62,6 @@ class TestEntry(unittest.TestCase):
 
     def testUnknownEntry(self):
         """Test that unknown entry types are detected"""
-        import entry
         Node = collections.namedtuple('Node', ['name', 'path'])
         node = Node('invalid-name', 'invalid-path')
         with self.assertRaises(ValueError) as e:
@@ -69,7 +71,6 @@ class TestEntry(unittest.TestCase):
 
     def testUniqueName(self):
         """Test Entry.GetUniqueName"""
-        import entry
         Node = collections.namedtuple('Node', ['name', 'parent'])
         base_node = Node('root', None)
         base_entry = entry.Entry(None, None, base_node, read_node=False)
@@ -80,7 +81,6 @@ class TestEntry(unittest.TestCase):
 
     def testGetDefaultFilename(self):
         """Trivial test for this base class function"""
-        import entry
         base_entry = entry.Entry(None, None, None, read_node=False)
         self.assertIsNone(base_entry.GetDefaultFilename())
 

+ 0 - 0
tools/binman/etype/__init__.py


+ 11 - 3
tools/binman/etype/_testing.py

@@ -50,6 +50,8 @@ class Entry__testing(Entry):
                                                     'bad-update-contents')
         self.return_contents_once = fdt_util.GetBool(self._node,
                                                      'return-contents-once')
+        self.bad_update_contents_twice = fdt_util.GetBool(self._node,
+                                                    'bad-update-contents-twice')
 
         # Set to True when the entry is ready to process the FDT.
         self.process_fdt_ready = False
@@ -71,11 +73,12 @@ class Entry__testing(Entry):
         if self.force_bad_datatype:
             self.GetEntryArgsOrProps([EntryArg('test-bad-datatype-arg', bool)])
         self.return_contents = True
+        self.contents = b'a'
 
     def ObtainContents(self):
         if self.return_unknown_contents or not self.return_contents:
             return False
-        self.data = b'a'
+        self.data = self.contents
         self.contents_size = len(self.data)
         if self.return_contents_once:
             self.return_contents = False
@@ -88,9 +91,14 @@ class Entry__testing(Entry):
 
     def ProcessContents(self):
         if self.bad_update_contents:
-            # Request to update the conents with something larger, to cause a
+            # Request to update the contents with something larger, to cause a
             # failure.
-            self.ProcessContentsUpdate('aa')
+            if self.bad_update_contents_twice:
+                self.contents += b'a'
+            else:
+                self.contents = b'aa'
+            return self.ProcessContentsUpdate(self.contents)
+        return True
 
     def ProcessFdt(self, fdt):
         """Force reprocessing the first time"""

+ 31 - 28
tools/binman/etype/blob.py

@@ -9,6 +9,7 @@ from entry import Entry
 import fdt_util
 import state
 import tools
+import tout
 
 class Entry_blob(Entry):
     """Entry containing an arbitrary binary blob
@@ -33,8 +34,7 @@ class Entry_blob(Entry):
     def __init__(self, section, etype, node):
         Entry.__init__(self, section, etype, node)
         self._filename = fdt_util.GetString(self._node, 'filename', self.etype)
-        self._compress = fdt_util.GetString(self._node, 'compress', 'none')
-        self._uncompressed_size = None
+        self.compress = fdt_util.GetString(self._node, 'compress', 'none')
 
     def ObtainContents(self):
         self._filename = self.GetDefaultFilename()
@@ -42,37 +42,40 @@ class Entry_blob(Entry):
         self.ReadBlobContents()
         return True
 
+    def CompressData(self, indata):
+        """Compress the data if needed, recording the uncompressed size"""
+        if self.compress != 'none':
+            self.uncomp_size = len(indata)
+        data = tools.Compress(indata, self.compress)
+        return data
+
     def ReadBlobContents(self):
-        # We assume the data is small enough to fit into memory. If this
-        # is used for large filesystem image that might not be true.
-        # In that case, Image.BuildImage() could be adjusted to use a
-        # new Entry method which can read in chunks. Then we could copy
-        # the data in chunks and avoid reading it all at once. For now
-        # this seems like an unnecessary complication.
-        data = tools.ReadFile(self._pathname)
-        if self._compress == 'lz4':
-            self._uncompressed_size = len(data)
-            '''
-            import lz4  # Import this only if needed (python-lz4 dependency)
+        """Read blob contents into memory
+
+        This function compresses the data before storing if needed.
 
-            try:
-                data = lz4.frame.compress(data)
-            except AttributeError:
-                data = lz4.compress(data)
-            '''
-            data = tools.Run('lz4', '-c', self._pathname, binary=True)
+        We assume the data is small enough to fit into memory. If this
+        is used for a large filesystem image that might not be true.
+        In that case, Image.BuildImage() could be adjusted to use a
+        new Entry method which can read in chunks. Then we could copy
+        the data in chunks and avoid reading it all at once. For now
+        this seems like an unnecessary complication.
+        """
+        indata = tools.ReadFile(self._pathname)
+        data = self.CompressData(indata)
         self.SetContents(data)
         return True
 
     def GetDefaultFilename(self):
         return self._filename
 
-    def AddMissingProperties(self):
-        Entry.AddMissingProperties(self)
-        if self._compress != 'none':
-            state.AddZeroProp(self._node, 'uncomp-size')
-
-    def SetCalculatedProperties(self):
-        Entry.SetCalculatedProperties(self)
-        if self._uncompressed_size is not None:
-            state.SetInt(self._node, 'uncomp-size', self._uncompressed_size)
+    def ReadData(self, decomp=True):
+        indata = Entry.ReadData(self, decomp)
+        if decomp:
+            data = tools.Decompress(indata, self.compress)
+            if self.uncomp_size:
+                tout.Info("%s: Decompressing data size %#x with algo '%s' to data size %#x" %
+                          (self.GetPath(), len(indata), self.compress,
+                           len(data)))
+        else:
+            data = indata
+        return data

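The compress/decompress paths above are symmetric; a minimal sketch of the
bookkeeping, assuming tools.Compress()/tools.Decompress() support the named
algorithm as used in the code ('entry' and 'indata' are stand-ins):

    data = entry.CompressData(indata)  # records uncomp_size when compressing
    restored = tools.Decompress(data, entry.compress)
    assert restored == indata          # round trip for e.g. 'lz4'
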
+ 5 - 5
tools/binman/etype/blob_dtb.py

@@ -23,11 +23,11 @@ class Entry_blob_dtb(Entry_blob):
     def ObtainContents(self):
         """Get the device-tree from the list held by the 'state' module"""
         self._filename = self.GetDefaultFilename()
-        self._pathname, data = state.GetFdtContents(self._filename)
-        self.SetContents(data)
-        return True
+        self._pathname, _ = state.GetFdtContents(self._filename)
+        return Entry_blob.ReadBlobContents(self)
 
     def ProcessContents(self):
         """Re-read the DTB contents so that we get any calculated properties"""
-        _, data = state.GetFdtContents(self._filename)
-        self.SetContents(data)
+        _, indata = state.GetFdtContents(self._filename)
+        data = self.CompressData(indata)
+        return self.ProcessContentsUpdate(data)

+ 263 - 0
tools/binman/etype/cbfs.py

@@ -0,0 +1,263 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright 2019 Google LLC
+# Written by Simon Glass <sjg@chromium.org>
+#
+# Entry-type module for a Coreboot Filesystem (CBFS)
+#
+
+from collections import OrderedDict
+
+import cbfs_util
+from cbfs_util import CbfsWriter
+from entry import Entry
+import fdt_util
+import state
+
+class Entry_cbfs(Entry):
+    """Entry containing a Coreboot Filesystem (CBFS)
+
+    A CBFS provides a way to collect a group of files together. It has a simple directory
+    structure and allows the position of individual files to be set, since it is
+    designed to support execute-in-place in an x86 SPI-flash device. Where XIP
+    is not used, it supports compression and storing ELF files.
+
+    CBFS is used by coreboot as its way of organising SPI-flash contents.
+
+    The contents of the CBFS are defined by subnodes of the cbfs entry, e.g.:
+
+        cbfs {
+            size = <0x100000>;
+            u-boot {
+                cbfs-type = "raw";
+            };
+            u-boot-dtb {
+                cbfs-type = "raw";
+            };
+        };
+
+    This creates a CBFS 1MB in size with two files in it: u-boot.bin and u-boot.dtb.
+    Note that the size is required since binman does not support calculating it.
+    The contents of each entry are just what binman would normally provide if it
+    were not a CBFS node. A blob type can be used to import arbitrary files as
+    with the second subnode below:
+
+        cbfs {
+            size = <0x100000>;
+            u-boot {
+                cbfs-name = "BOOT";
+                cbfs-type = "raw";
+            };
+
+            dtb {
+                type = "blob";
+                filename = "u-boot.dtb";
+                cbfs-type = "raw";
+                cbfs-compress = "lz4";
+                cbfs-offset = <0x100000>;
+            };
+        };
+
+    This creates a CBFS 1MB in size with u-boot.bin (named "BOOT") and
+    u-boot.dtb (named "dtb") and compressed with the lz4 algorithm.
+
+
+    Properties supported in the top-level CBFS node:
+
+    cbfs-arch:
+        Defaults to "x86", but you can specify the architecture if needed.
+
+
+    Properties supported in the CBFS entry subnodes:
+
+    cbfs-name:
+        This is the name of the file created in CBFS. It defaults to the entry
+        name (which is the node name), but you can override it with this
+        property.
+
+    cbfs-type:
+        This is the CBFS file type. The following are supported:
+
+        raw:
+            This is a 'raw' file, although compression is supported. It can be
+            used to store any file in CBFS.
+
+        stage:
+            This is an ELF file that has been loaded (i.e. mapped to memory), so
+            appears in the CBFS as a flat binary. The input file must be an ELF
+            image, for example this puts "u-boot" (the ELF image) into a 'stage'
+            entry:
+
+                cbfs {
+                    size = <0x100000>;
+                    u-boot-elf {
+                        cbfs-name = "BOOT";
+                        cbfs-type = "stage";
+                    };
+                };
+
+            You can use your own ELF file with something like:
+
+                cbfs {
+                    size = <0x100000>;
+                    something {
+                        type = "blob";
+                        filename = "cbfs-stage.elf";
+                        cbfs-type = "stage";
+                    };
+                };
+
+            As mentioned, the file is converted to a flat binary, so it is
+            equivalent to adding "u-boot.bin", for example, but with the load and
+            start addresses specified by the ELF. At present there is no option
+            to add a flat binary with a load/start address, similar to the
+            'add-flat-binary' option in cbfstool.
+
+    cbfs-offset:
+        This is the offset of the file's data within the CBFS. It is used to
+        specify where the file should be placed in cases where a fixed position
+        is needed. Typical uses are for code which is not relocatable and must
+        execute in-place from a particular address. This works because SPI flash
+        is generally mapped into memory on x86 devices. The file header is
+        placed before this offset so that the data start lines up exactly with
+        the chosen offset. If this property is not provided, then the file is
+        placed in the next available spot.
+
+    The current implementation supports only a subset of CBFS features. It does
+    not support other file types (e.g. payload), adding multiple files (like the
+    'files' entry with a pattern supported by binman) and a few other things.
+
+    Of course binman can create images containing multiple CBFSs, simply by
+    defining these in the binman config:
+
+
+        binman {
+            size = <0x800000>;
+            cbfs {
+                offset = <0x100000>;
+                size = <0x100000>;
+                u-boot {
+                    cbfs-type = "raw";
+                };
+                u-boot-dtb {
+                    cbfs-type = "raw";
+                };
+            };
+
+            cbfs2 {
+                offset = <0x700000>;
+                size = <0x100000>;
+                u-boot {
+                    cbfs-type = "raw";
+                };
+                u-boot-dtb {
+                    cbfs-type = "raw";
+                };
+                image {
+                    type = "blob";
+                    filename = "image.jpg";
+                };
+            };
+        };
+
+    This creates an 8MB image with two CBFSs, one at offset 1MB, one at 7MB,
+    both of size 1MB.
+    """
+    def __init__(self, section, etype, node):
+        Entry.__init__(self, section, etype, node)
+        self._cbfs_arg = fdt_util.GetString(node, 'cbfs-arch', 'x86')
+        self._cbfs_entries = OrderedDict()
+        self._ReadSubnodes()
+
+    def ObtainContents(self):
+        arch = cbfs_util.find_arch(self._cbfs_arg)
+        if arch is None:
+            self.Raise("Invalid architecture '%s'" % self._cbfs_arg)
+        if self.size is None:
+            self.Raise("'cbfs' entry must have a size property")
+        cbfs = CbfsWriter(self.size, arch)
+        for entry in self._cbfs_entries.values():
+            # First get the input data and put it in a file. If not available,
+            # try later.
+            if not entry.ObtainContents():
+                return False
+            data = entry.GetData()
+            cfile = None
+            if entry._type == 'raw':
+                cfile = cbfs.add_file_raw(entry._cbfs_name, data,
+                                          entry._cbfs_offset,
+                                          entry._cbfs_compress)
+            elif entry._type == 'stage':
+                cfile = cbfs.add_file_stage(entry._cbfs_name, data,
+                                            entry._cbfs_offset)
+            else:
+                entry.Raise("Unknown cbfs-type '%s' (use 'raw', 'stage')" %
+                            entry._type)
+            if cfile:
+                entry._cbfs_file = cfile
+        data = cbfs.get_data()
+        self.SetContents(data)
+        return True
+
+    def _ReadSubnodes(self):
+        """Read the subnodes to find out what should go in this IFWI"""
+        for node in self._node.subnodes:
+            entry = Entry.Create(self.section, node)
+            entry._cbfs_name = fdt_util.GetString(node, 'cbfs-name', entry.name)
+            entry._type = fdt_util.GetString(node, 'cbfs-type')
+            compress = fdt_util.GetString(node, 'cbfs-compress', 'none')
+            entry._cbfs_offset = fdt_util.GetInt(node, 'cbfs-offset')
+            entry._cbfs_compress = cbfs_util.find_compress(compress)
+            if entry._cbfs_compress is None:
+                self.Raise("Invalid compression in '%s': '%s'" %
+                           (node.name, compress))
+            self._cbfs_entries[entry._cbfs_name] = entry
+
+    def SetImagePos(self, image_pos):
+        """Override this function to set all the entry properties from CBFS
+
+        We can only do this once image_pos is known
+
+        Args:
+            image_pos: Position of this entry in the image
+        """
+        Entry.SetImagePos(self, image_pos)
+
+        # Now update the entries with info from the CBFS entries
+        for entry in self._cbfs_entries.values():
+            cfile = entry._cbfs_file
+            entry.size = cfile.data_len
+            entry.offset = cfile.calced_cbfs_offset
+            entry.image_pos = self.image_pos + entry.offset
+            if entry._cbfs_compress:
+                entry.uncomp_size = cfile.memlen
+
+    def AddMissingProperties(self):
+        Entry.AddMissingProperties(self)
+        for entry in self._cbfs_entries.values():
+            entry.AddMissingProperties()
+            if entry._cbfs_compress:
+                state.AddZeroProp(entry._node, 'uncomp-size')
+                # Store the 'compress' property, since we don't look at
+                # 'cbfs-compress' in Entry.ReadData()
+                state.AddString(entry._node, 'compress',
+                                cbfs_util.compress_name(entry._cbfs_compress))
+
+    def SetCalculatedProperties(self):
+        """Set the value of device-tree properties calculated by binman"""
+        Entry.SetCalculatedProperties(self)
+        for entry in self._cbfs_entries.values():
+            state.SetInt(entry._node, 'offset', entry.offset)
+            state.SetInt(entry._node, 'size', entry.size)
+            state.SetInt(entry._node, 'image-pos', entry.image_pos)
+            if entry.uncomp_size is not None:
+                state.SetInt(entry._node, 'uncomp-size', entry.uncomp_size)
+
+    def ListEntries(self, entries, indent):
+        """Override this method to list all files in the section"""
+        Entry.ListEntries(self, entries, indent)
+        for entry in self._cbfs_entries.values():
+            entry.ListEntries(entries, indent + 1)
+
+    def GetEntries(self):
+        return self._cbfs_entries
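
The CbfsWriter API used above can also be exercised on its own, which may help
when debugging CBFS output. A minimal sketch, assuming binman's cbfs_util
module is on the path and using only the calls seen in this patch (find_arch(),
find_compress(), add_file_raw() and get_data()); the file name 'out.cbfs' and
the data are illustrative:

    import cbfs_util
    from cbfs_util import CbfsWriter

    # Create a 1MB CBFS for x86 and add one uncompressed 'raw' file
    cbfs = CbfsWriter(0x100000, cbfs_util.find_arch('x86'))
    cbfs.add_file_raw('u-boot', b'u-boot data', None,
                      cbfs_util.find_compress('none'))

    # Write out the finished CBFS
    with open('out.cbfs', 'wb') as fd:
        fd.write(cbfs.get_data())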

+ 130 - 0
tools/binman/etype/fdtmap.py

@@ -0,0 +1,130 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright (c) 2018 Google, Inc
+# Written by Simon Glass <sjg@chromium.org>
+
+"""# Entry-type module for a full map of the firmware image
+
+This handles putting an FDT into the image with just the information about the
+image.
+"""
+
+import libfdt
+
+from entry import Entry
+from fdt import Fdt
+import state
+import tools
+
+FDTMAP_MAGIC   = b'_FDTMAP_'
+FDTMAP_HDR_LEN = 16
+
+def LocateFdtmap(data):
+    """Search an image for an fdt map
+
+    Args:
+        data: Data to search
+
+    Returns:
+        Position of fdt map in data, or None if not found. Note that the
+            position returned is that of the fdtmap header, i.e. before the
+            FDT data
+    """
+    hdr_pos = data.find(FDTMAP_MAGIC)
+    if hdr_pos != -1:
+        hdr = data[hdr_pos:hdr_pos + FDTMAP_HDR_LEN]
+        if len(hdr) == FDTMAP_HDR_LEN:
+            return hdr_pos
+    return None
+
+class Entry_fdtmap(Entry):
+    """An entry which contains an FDT map
+
+    Properties / Entry arguments:
+        None
+
+    An FDT map is just a header followed by an FDT containing a list of all the
+    entries in the image. The root node corresponds to the image node in the
+    original FDT, and an image-name property indicates the image name in that
+    original tree.
+
+    The header is the string _FDTMAP_ followed by 8 unused bytes.
+
+    When used, this entry will be populated with an FDT map which reflects the
+    entries in the current image. Hierarchy is preserved, and all offsets and
+    sizes are included.
+
+    Note that the -u option must be provided to ensure that binman updates the
+    FDT with the position of each entry.
+
+    Example output for a simple image with U-Boot and an FDT map:
+
+    / {
+        size = <0x00000112>;
+        image-pos = <0x00000000>;
+        offset = <0x00000000>;
+        u-boot {
+            size = <0x00000004>;
+            image-pos = <0x00000000>;
+            offset = <0x00000000>;
+        };
+        fdtmap {
+            size = <0x0000010e>;
+            image-pos = <0x00000004>;
+            offset = <0x00000004>;
+        };
+    };
+    """
+    def __init__(self, section, etype, node):
+        Entry.__init__(self, section, etype, node)
+
+    def _GetFdtmap(self):
+        """Build an FDT map from the entries in the current image
+
+        Returns:
+            FDT map binary data
+        """
+        def _AddNode(node):
+            """Add a node to the FDT map"""
+            for pname, prop in node.props.items():
+                fsw.property(pname, prop.bytes)
+            for subnode in node.subnodes:
+                with fsw.add_node(subnode.name):
+                    _AddNode(subnode)
+
+        # Get the FDT data into an Fdt object
+        data = state.GetFdtContents()[1]
+        infdt = Fdt.FromData(data)
+        infdt.Scan()
+
+        # Find the node for the image containing the Fdt-map entry
+        path = self.section.GetPath()
+        node = infdt.GetNode(path)
+        if not node:
+            self.Raise("Internal error: Cannot locate node for path '%s'" %
+                       path)
+
+        # Build a new tree with all nodes and properties starting from that node
+        fsw = libfdt.FdtSw()
+        fsw.finish_reservemap()
+        with fsw.add_node(''):
+            _AddNode(node)
+        fdt = fsw.as_fdt()
+
+        # Pack this new FDT and return its contents
+        fdt.pack()
+        outfdt = Fdt.FromData(fdt.as_bytearray())
+        data = FDTMAP_MAGIC + tools.GetBytes(0, 8) + outfdt.GetContents()
+        return data
+
+    def ObtainContents(self):
+        """Obtain a placeholder for the fdt-map contents"""
+        self.SetContents(self._GetFdtmap())
+        return True
+
+    def ProcessContents(self):
+        """Write an updated version of the FDT map to this entry
+
+        This is necessary since new data may have been written back to it during
+        processing, e.g. the image-pos properties.
+        """
+        return self.ProcessContentsUpdate(self._GetFdtmap())
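
Reading the map back out of a finished image follows directly from the format
described above (the _FDTMAP_ magic, 8 unused bytes, then the FDT itself). A
minimal sketch, assuming binman's module paths are set up and that image_data
holds the full image contents:

    import fdtmap
    from fdt import Fdt

    # Find the fdtmap header, then parse the FDT that follows it
    pos = fdtmap.LocateFdtmap(image_data)
    if pos is not None:
        infdt = Fdt.FromData(image_data[pos + fdtmap.FDTMAP_HDR_LEN:])
        infdt.Scan()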

+ 1 - 2
tools/binman/etype/files.py

@@ -14,7 +14,6 @@ import fdt_util
 import state
 import tools
 
-import bsection
 
 class Entry_files(Entry_section):
     """Entry containing a set of files
@@ -54,4 +53,4 @@ class Entry_files(Entry_section):
             state.AddString(subnode, 'compress', self._compress)
 
         # Read entries again, now that we have some
-        self._section._ReadEntries()
+        self._ReadEntries()

+ 2 - 2
tools/binman/etype/fmap.py

@@ -49,7 +49,7 @@ class Entry_fmap(Entry):
                 areas.append(fmap_util.FmapArea(pos or 0, entry.size or 0,
                                             tools.FromUnicode(entry.name), 0))
 
-        entries = self.section._image.GetEntries()
+        entries = self.section.image.GetEntries()
         areas = []
         for entry in entries.values():
             _AddEntries(areas, entry)
@@ -62,4 +62,4 @@ class Entry_fmap(Entry):
         return True
 
     def ProcessContents(self):
-        self.SetContents(self._GetFmap())
+        return self.ProcessContentsUpdate(self._GetFmap())

+ 99 - 0
tools/binman/etype/image_header.py

@@ -0,0 +1,99 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright (c) 2018 Google, Inc
+# Written by Simon Glass <sjg@chromium.org>
+
+"""Entry-type module for an image header which points to the FDT map
+
+This creates an 8-byte entry with a magic number and the offset of the FDT map
+(which is another entry in the image), relative to the start or end of the
+image.
+"""
+
+import struct
+
+from entry import Entry
+import fdt_util
+
+IMAGE_HEADER_MAGIC = b'BinM'
+IMAGE_HEADER_LEN   = 8
+
+def LocateHeaderOffset(data):
+    """Search an image for an image header
+
+    Args:
+        data: Data to search
+
+    Returns:
+        Offset of image header in the image, or None if not found
+    """
+    hdr_pos = data.find(IMAGE_HEADER_MAGIC)
+    if hdr_pos != -1:
+        size = len(data)
+        hdr = data[hdr_pos:hdr_pos + IMAGE_HEADER_LEN]
+        if len(hdr) == IMAGE_HEADER_LEN:
+            offset = struct.unpack('<I', hdr[4:])[0]
+            if hdr_pos == len(data) - IMAGE_HEADER_LEN:
+                pos = size + offset - (1 << 32)
+            else:
+                pos = offset
+            return pos
+    return None
+
+class Entry_image_header(Entry):
+    """An entry which contains a pointer to the FDT map
+
+    Properties / Entry arguments:
+        location: Location of header ("start" or "end" of image). This is
+            optional. If omitted then the entry must have an offset property.
+
+    This adds an 8-byte entry to the start or end of the image, pointing to the
+    location of the FDT map. The format is a magic number followed by an offset
+    from the start or end of the image, in two's-complement format.
+
+    This entry must be in the top-level part of the image.
+
+    NOTE: If the location is at the start/end, you will probably need to specify
+    sort-by-offset for the image, unless you actually put the image header
+    first/last in the entry list.
+    """
+    def __init__(self, section, etype, node):
+        Entry.__init__(self, section, etype, node)
+        self.location = fdt_util.GetString(self._node, 'location')
+
+    def _GetHeader(self):
+        image_pos = self.GetSiblingImagePos('fdtmap')
+        if image_pos is False:
+            self.Raise("'image_header' section must have an 'fdtmap' sibling")
+        elif image_pos is None:
+            # This will be available when called from ProcessContents(), but not
+            # when called from ObtainContents()
+            offset = 0xffffffff
+        else:
+            image_size = self.section.GetImageSize() or 0
+            base = (0 if self.location != 'end' else image_size)
+            offset = (image_pos - base) & 0xffffffff
+        data = IMAGE_HEADER_MAGIC + struct.pack('<I', offset)
+        return data
+
+    def ObtainContents(self):
+        """Obtain a placeholder for the header contents"""
+        self.SetContents(self._GetHeader())
+        return True
+
+    def Pack(self, offset):
+        """Special pack method to set the offset to start/end of image"""
+        if not self.offset:
+            if self.location not in ['start', 'end']:
+                self.Raise("Invalid location '%s', expected 'start' or 'end'" %
+                           self.location)
+            image_size = self.section.GetImageSize() or 0
+            self.offset = (0 if self.location != 'end' else image_size - 8)
+        return Entry.Pack(self, offset)
+
+    def ProcessContents(self):
+        """Write an updated version of the FDT map to this entry
+
+        This is necessary since image_pos is not available when ObtainContents()
+        is called, since by then the entries have not been packed in the image.
+        """
+        return self.ProcessContentsUpdate(self._GetHeader())
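
Together with the fdtmap entry above, this header allows the map to be found
without scanning the whole image. A minimal sketch, assuming binman's module
paths are set up and that image_data holds the full image contents:

    from etype import fdtmap, image_header

    # LocateHeaderOffset() decodes the stored offset, whether it was recorded
    # relative to the start or to the end of the image
    pos = image_header.LocateHeaderOffset(image_data)
    if pos is not None:
        fdt_data = image_data[pos + fdtmap.FDTMAP_HDR_LEN:]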

+ 12 - 4
tools/binman/etype/intel_descriptor.py

@@ -47,17 +47,25 @@ class Entry_intel_descriptor(Entry_blob):
     def __init__(self, section, etype, node):
         Entry_blob.__init__(self, section, etype, node)
         self._regions = []
+        if self.offset is None:
+            self.offset = self.section.GetStartOffset()
 
     def GetOffsets(self):
         offset = self.data.find(FD_SIGNATURE)
         if offset == -1:
-            self.Raise('Cannot find FD signature')
+            self.Raise('Cannot find Intel Flash Descriptor (FD) signature')
         flvalsig, flmap0, flmap1, flmap2 = struct.unpack('<LLLL',
                                                 self.data[offset:offset + 16])
         frba = ((flmap0 >> 16) & 0xff) << 4
         for i in range(MAX_REGIONS):
             self._regions.append(Region(self.data, frba, i))
 
-        # Set the offset for ME only, for now, since the others are not used
-        return {'intel-me': [self._regions[REGION_ME].base,
-                             self._regions[REGION_ME].size]}
+        # Set the offset for ME (Management Engine) and IFWI (Integrated
+        # Firmware Image), for now, since the others are not used.
+        info = {}
+        if self.HasSibling('intel-me'):
+            info['intel-me'] = [self._regions[REGION_ME].base,
+                                self._regions[REGION_ME].size]
+        if self.HasSibling('intel-ifwi'):
+            info['intel-ifwi'] = [self._regions[REGION_BIOS].base, None]
+        return info
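
The FRBA (Flash Region Base Address) calculation in GetOffsets() is easier to
follow with a concrete value. A quick worked example, using a made-up FLMAP0
word:

    flmap0 = 0x00040003                      # hypothetical FLMAP0 value
    frba = ((flmap0 >> 16) & 0xff) << 4
    assert frba == 0x40                      # region records start at 0x40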

+ 100 - 0
tools/binman/etype/intel_ifwi.py

@@ -0,0 +1,100 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright (c) 2016 Google, Inc
+# Written by Simon Glass <sjg@chromium.org>
+#
+# Entry-type module for Intel Integrated Firmware Image (IFWI) binary blob
+#
+
+from collections import OrderedDict
+
+from entry import Entry
+from blob import Entry_blob
+import fdt_util
+import tools
+
+class Entry_intel_ifwi(Entry_blob):
+    """Entry containing an Intel Integrated Firmware Image (IFWI) file
+
+    Properties / Entry arguments:
+        - filename: Filename of file to read into entry. This is either the
+            IFWI file itself, or a file that can be converted into one using a
+            tool
+        - convert-fit: If present this indicates that the ifwitool should be
+            used to convert the provided file into an IFWI.
+
+    This file contains code and data used by the SoC that is required to make
+    it work. It includes U-Boot TPL, microcode, things related to the CSE
+    (Converged Security Engine, the microcontroller that loads all the firmware)
+    and other items beyond the wit of man.
+
+    A typical filename is 'ifwi.bin' for an IFWI file, or 'fitimage.bin' for a
+    file that will be converted to an IFWI.
+
+    The position of this entry is generally set by the intel-descriptor entry.
+
+    The contents of the IFWI are specified by the subnodes of the IFWI node.
+    Each subnode describes an entry which is placed into the IFWI with a given
+    sub-partition (and optional entry name).
+
+    See README.x86 for information about x86 binary blobs.
+    """
+    def __init__(self, section, etype, node):
+        Entry_blob.__init__(self, section, etype, node)
+        self._convert_fit = fdt_util.GetBool(self._node, 'convert-fit')
+        self._ifwi_entries = OrderedDict()
+        self._ReadSubnodes()
+
+    def ObtainContents(self):
+        """Get the contects for the IFWI
+
+        Unfortunately we cannot create anything from scratch here, as Intel has
+        tools which create precursor binaries with lots of data and settings,
+        and these are not incorporated into binman.
+
+        The first step is to get a file in the IFWI format. This is either
+        supplied directly or is extracted from a fitimage using the 'create'
+        subcommand.
+
+        After that we delete the OBBP sub-partition and add each of the files
+        that we want in the IFWI file, one for each sub-entry of the IFWI node.
+        """
+        self._pathname = tools.GetInputFilename(self._filename)
+
+        # Create the IFWI file if needed
+        if self._convert_fit:
+            inname = self._pathname
+            outname = tools.GetOutputFilename('ifwi.bin')
+            tools.RunIfwiTool(inname, tools.CMD_CREATE, outname)
+            self._filename = 'ifwi.bin'
+            self._pathname = outname
+        else:
+            # Provide a different code path here to ensure we have test coverage
+            inname = self._pathname
+
+        # Delete OBBP if it is there, then add the required new items.
+        tools.RunIfwiTool(inname, tools.CMD_DELETE, subpart='OBBP')
+
+        for entry in self._ifwi_entries.values():
+            # First get the input data and put it in a file
+            if not entry.ObtainContents():
+                return False
+            data = entry.GetData()
+            uniq = self.GetUniqueName()
+            input_fname = tools.GetOutputFilename('input.%s' % uniq)
+            tools.WriteFile(input_fname, data)
+
+            tools.RunIfwiTool(inname,
+                tools.CMD_REPLACE if entry._ifwi_replace else tools.CMD_ADD,
+                input_fname, entry._ifwi_subpart, entry._ifwi_entry_name)
+
+        self.ReadBlobContents()
+        return True
+
+    def _ReadSubnodes(self):
+        """Read the subnodes to find out what should go in this IFWI"""
+        for node in self._node.subnodes:
+            entry = Entry.Create(self.section, node)
+            entry._ifwi_replace = fdt_util.GetBool(node, 'replace')
+            entry._ifwi_subpart = fdt_util.GetString(node, 'ifwi-subpart')
+            entry._ifwi_entry_name = fdt_util.GetString(node, 'ifwi-entry')
+            self._ifwi_entries[entry._ifwi_subpart] = entry
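
The ifwitool invocations above follow a fixed create/delete/add sequence. A
minimal sketch of the same flow, assuming the RunIfwiTool() helper and CMD_*
constants added to tools for this series; the filenames and the 'IBBP'/'IBBL'
sub-partition and entry names are illustrative only:

    import tools

    # Convert a fitimage into an IFWI, drop OBBP, then add one file
    tools.RunIfwiTool('fitimage.bin', tools.CMD_CREATE, 'ifwi.bin')
    tools.RunIfwiTool('ifwi.bin', tools.CMD_DELETE, subpart='OBBP')
    tools.RunIfwiTool('ifwi.bin', tools.CMD_ADD, 'u-boot-tpl.bin',
                      'IBBP', 'IBBL')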

+ 2 - 0
tools/binman/etype/intel_me.py

@@ -22,6 +22,8 @@ class Entry_intel_me(Entry_blob):
 
     A typical filename is 'me.bin'.
 
+    The position of this entry is generally set by the intel-descriptor entry.
+
     See README.x86 for information about x86 binary blobs.
     """
     def __init__(self, section, etype, node):

+ 397 - 42
tools/binman/etype/section.py

@@ -1,59 +1,155 @@
 # SPDX-License-Identifier:      GPL-2.0+
 # Copyright (c) 2018 Google, Inc
 # Written by Simon Glass <sjg@chromium.org>
-#
-# Entry-type module for sections, which are entries which can contain other
-# entries.
-#
+
+"""Entry-type module for sections (groups of entries)
+
+Sections are entries which can contain other entries. This allows hierarchical
+images to be created.
+"""
+
+from __future__ import print_function
+
+from collections import OrderedDict
+import re
+import sys
 
 from entry import Entry
 import fdt_util
 import tools
 
-import bsection
 
 class Entry_section(Entry):
     """Entry that contains other entries
 
     Properties / Entry arguments: (see binman README for more information)
-        - size: Size of section in bytes
-        - align-size: Align size to a particular power of two
-        - pad-before: Add padding before the entry
-        - pad-after: Add padding after the entry
-        - pad-byte: Pad byte to use when padding
-        - sort-by-offset: Reorder the entries by offset
-        - end-at-4gb: Used to build an x86 ROM which ends at 4GB (2^32)
-        - name-prefix: Adds a prefix to the name of every entry in the section
+        pad-byte: Pad byte to use when padding
+        sort-by-offset: True if entries should be sorted by offset, False if
+            they must be in-order in the device tree description
+        end-at-4gb: Used to build an x86 ROM which ends at 4GB (2^32)
+        skip-at-start: Number of bytes before the first entry starts. These
+            effectively adjust the starting offset of entries. For example,
+            if this is 16, then the first entry would start at 16. An entry
+            with offset = 20 would in fact be written at offset 4 in the image
+            file, since the first 16 bytes are skipped when writing.
+        name-prefix: Adds a prefix to the name of every entry in the section
             when writing out the map
 
+    Since a section is also an entry, it inherits all the properties of entries
+    too.
+
     A section is an entry which can contain other entries, thus allowing
     hierarchical images to be created. See 'Sections and hierarchical images'
     in the binman README for more information.
     """
-    def __init__(self, section, etype, node):
-        Entry.__init__(self, section, etype, node)
-        self._section = bsection.Section(node.name, section, node,
-                                         section._image)
+    def __init__(self, section, etype, node, test=False):
+        if not test:
+            Entry.__init__(self, section, etype, node)
+        if section:
+            self.image = section.image
+        self._entries = OrderedDict()
+        self._pad_byte = 0
+        self._sort = False
+        self._skip_at_start = None
+        self._end_4gb = False
+        if not test:
+            self._ReadNode()
+            self._ReadEntries()
+
+    def _Raise(self, msg):
+        """Raises an error for this section
+
+        Args:
+            msg: Error message to use in the raise string
+        Raises:
+            ValueError()
+        """
+        raise ValueError("Section '%s': %s" % (self._node.path, msg))
+
+    def _ReadNode(self):
+        """Read properties from the image node"""
+        self._pad_byte = fdt_util.GetInt(self._node, 'pad-byte', 0)
+        self._sort = fdt_util.GetBool(self._node, 'sort-by-offset')
+        self._end_4gb = fdt_util.GetBool(self._node, 'end-at-4gb')
+        self._skip_at_start = fdt_util.GetInt(self._node, 'skip-at-start')
+        if self._end_4gb:
+            if not self.size:
+                self.Raise("Section size must be provided when using end-at-4gb")
+            if self._skip_at_start is not None:
+                self.Raise("Provide either 'end-at-4gb' or 'skip-at-start'")
+            else:
+                self._skip_at_start = 0x100000000 - self.size
+        else:
+            if self._skip_at_start is None:
+                self._skip_at_start = 0
+        self._name_prefix = fdt_util.GetString(self._node, 'name-prefix')
+        filename = fdt_util.GetString(self._node, 'filename')
+        if filename:
+            self._filename = filename
+
+    def _ReadEntries(self):
+        for node in self._node.subnodes:
+            if node.name == 'hash':
+                continue
+            entry = Entry.Create(self, node)
+            entry.SetPrefix(self._name_prefix)
+            self._entries[node.name] = entry
 
     def GetFdtSet(self):
-        return self._section.GetFdtSet()
+        fdt_set = set()
+        for entry in self._entries.values():
+            fdt_set.update(entry.GetFdtSet())
+        return fdt_set
 
     def ProcessFdt(self, fdt):
-        return self._section.ProcessFdt(fdt)
+        """Allow entries to adjust the device tree
+
+        Some entries need to adjust the device tree for their purposes. This
+        may involve adding or deleting properties.
+        """
+        todo = self._entries.values()
+        for passnum in range(3):
+            next_todo = []
+            for entry in todo:
+                if not entry.ProcessFdt(fdt):
+                    next_todo.append(entry)
+            todo = next_todo
+            if not todo:
+                break
+        if todo:
+            self.Raise('Internal error: Could not complete processing of Fdt: remaining %s' %
+                       todo)
+        return True
 
     def ExpandEntries(self):
+        """Expand out any entries which have calculated sub-entries
+
+        Some entries are expanded out at runtime, e.g. 'files', which produces
+        a section containing a list of files. Process these entries so that
+        this information is added to the device tree.
+        """
         Entry.ExpandEntries(self)
-        self._section.ExpandEntries()
+        for entry in self._entries.values():
+            entry.ExpandEntries()
 
     def AddMissingProperties(self):
+        """Add new properties to the device tree as needed for this entry"""
         Entry.AddMissingProperties(self)
-        self._section.AddMissingProperties()
+        for entry in self._entries.values():
+            entry.AddMissingProperties()
 
     def ObtainContents(self):
-        return self._section.GetEntryContents()
+        return self.GetEntryContents()
 
     def GetData(self):
-        return self._section.GetData()
+        section_data = tools.GetBytes(self._pad_byte, self.size)
+
+        for entry in self._entries.values():
+            data = entry.GetData()
+            base = self.pad_before + entry.offset - self._skip_at_start
+            section_data = (section_data[:base] + data +
+                            section_data[base + len(data):])
+        return section_data
 
     def GetOffsets(self):
         """Handle entries that want to set the offset/size of other entries
@@ -61,35 +157,94 @@ class Entry_section(Entry):
         This calls each entry's GetOffsets() method. If it returns a list
         of entries to update, it updates them.
         """
-        self._section.GetEntryOffsets()
+        self.GetEntryOffsets()
         return {}
 
+    def ResetForPack(self):
+        """Reset offset/size fields so that packing can be done again"""
+        Entry.ResetForPack(self)
+        for entry in self._entries.values():
+            entry.ResetForPack()
+
     def Pack(self, offset):
         """Pack all entries into the section"""
-        self._section.PackEntries()
-        if self._section._offset is None:
-            self._section.SetOffset(offset)
-        self.size = self._section.GetSize()
-        return super(Entry_section, self).Pack(offset)
+        self._PackEntries()
+        return Entry.Pack(self, offset)
 
-    def SetImagePos(self, image_pos):
-        Entry.SetImagePos(self, image_pos)
-        self._section.SetImagePos(image_pos + self.offset)
+    def _PackEntries(self):
+        """Pack all entries into the image"""
+        offset = self._skip_at_start
+        for entry in self._entries.values():
+            offset = entry.Pack(offset)
+        self.size = self.CheckSize()
+
+    def _ExpandEntries(self):
+        """Expand any entries that are permitted to"""
+        exp_entry = None
+        for entry in self._entries.values():
+            if exp_entry:
+                exp_entry.ExpandToLimit(entry.offset)
+                exp_entry = None
+            if entry.expand_size:
+                exp_entry = entry
+        if exp_entry:
+            exp_entry.ExpandToLimit(self.size)
+
+    def _SortEntries(self):
+        """Sort entries by offset"""
+        entries = sorted(self._entries.values(), key=lambda entry: entry.offset)
+        self._entries.clear()
+        for entry in entries:
+            self._entries[entry._node.name] = entry
+
+    def CheckEntries(self):
+        """Check that entries do not overlap or extend outside the image"""
+        if self._sort:
+            self._SortEntries()
+        self._ExpandEntries()
+        offset = 0
+        prev_name = 'None'
+        for entry in self._entries.values():
+            entry.CheckOffset()
+            if (entry.offset < self._skip_at_start or
+                    entry.offset + entry.size > self._skip_at_start +
+                    self.size):
+                entry.Raise("Offset %#x (%d) is outside the section starting "
+                            "at %#x (%d)" %
+                            (entry.offset, entry.offset, self._skip_at_start,
+                             self._skip_at_start))
+            if entry.offset < offset:
+                entry.Raise("Offset %#x (%d) overlaps with previous entry '%s' "
+                            "ending at %#x (%d)" %
+                            (entry.offset, entry.offset, prev_name, offset, offset))
+            offset = entry.offset + entry.size
+            prev_name = entry.GetPath()
 
     def WriteSymbols(self, section):
         """Write symbol values into binary files for access at run time"""
-        self._section.WriteSymbols()
+        for entry in self._entries.values():
+            entry.WriteSymbols(self)
 
     def SetCalculatedProperties(self):
         Entry.SetCalculatedProperties(self)
-        self._section.SetCalculatedProperties()
+        for entry in self._entries.values():
+            entry.SetCalculatedProperties()
+
+    def SetImagePos(self, image_pos):
+        Entry.SetImagePos(self, image_pos)
+        for entry in self._entries.values():
+            entry.SetImagePos(image_pos + self.offset)
 
     def ProcessContents(self):
-        self._section.ProcessEntryContents()
-        super(Entry_section, self).ProcessContents()
+        sizes_ok_base = super(Entry_section, self).ProcessContents()
+        sizes_ok = True
+        for entry in self._entries.values():
+            if not entry.ProcessContents():
+                sizes_ok = False
+        return sizes_ok and sizes_ok_base
 
     def CheckOffset(self):
-        self._section.CheckEntries()
+        self.CheckEntries()
 
     def WriteMap(self, fd, indent):
         """Write a map of the section to a .map file
@@ -97,11 +252,211 @@ class Entry_section(Entry):
         Args:
             fd: File to write the map to
         """
-        self._section.WriteMap(fd, indent)
+        Entry.WriteMapLine(fd, indent, self.name, self.offset or 0,
+                           self.size, self.image_pos)
+        for entry in self._entries.values():
+            entry.WriteMap(fd, indent + 1)
 
     def GetEntries(self):
-        return self._section.GetEntries()
+        return self._entries
+
+    def GetContentsByPhandle(self, phandle, source_entry):
+        """Get the data contents of an entry specified by a phandle
+
+        This uses a phandle to look up a node and find the entry
+        associated with it. Then it returns the contents of that entry.
+
+        Args:
+            phandle: Phandle to look up (integer)
+            source_entry: Entry containing that phandle (used for error
+                reporting)
+
+        Returns:
+            data from associated entry (as a string), or None if not found
+        """
+        node = self._node.GetFdt().LookupPhandle(phandle)
+        if not node:
+            source_entry.Raise("Cannot find node for phandle %d" % phandle)
+        for entry in self._entries.values():
+            if entry._node == node:
+                return entry.GetData()
+        source_entry.Raise("Cannot find entry for node '%s'" % node.name)
+
+    def LookupSymbol(self, sym_name, optional, msg):
+        """Look up a symbol in an ELF file
+
+        Looks up a symbol in an ELF file. Only entry types which come from an
+        ELF image can be used by this function.
+
+        At present the only entry property supported is offset.
+
+        Args:
+            sym_name: Symbol name in the ELF file to look up in the format
+                _binman_<entry>_prop_<property> where <entry> is the name of
+                the entry and <property> is the property to find (e.g.
+                _binman_u_boot_prop_offset). As a special case, you can append
+                _any to <entry> to have it search for any matching entry. E.g.
+                _binman_u_boot_any_prop_offset will match entries called u-boot,
+                u-boot-img and u-boot-nodtb.
+            optional: True if the symbol is optional. If False this function
+                will raise if the symbol is not found
+            msg: Message to display if an error occurs
+
+        Returns:
+            Value that should be assigned to that symbol, or None if it was
+                optional and not found
+
+        Raises:
+            ValueError if the symbol is invalid or not found, or references a
+                property which is not supported
+        """
+        m = re.match(r'^_binman_(\w+)_prop_(\w+)$', sym_name)
+        if not m:
+            raise ValueError("%s: Symbol '%s' has invalid format" %
+                             (msg, sym_name))
+        entry_name, prop_name = m.groups()
+        entry_name = entry_name.replace('_', '-')
+        entry = self._entries.get(entry_name)
+        if not entry:
+            if entry_name.endswith('-any'):
+                root = entry_name[:-4]
+                for name in self._entries:
+                    if name.startswith(root):
+                        rest = name[len(root):]
+                        if rest in ['', '-img', '-nodtb']:
+                            entry = self._entries[name]
+        if not entry:
+            err = ("%s: Entry '%s' not found in list (%s)" %
+                   (msg, entry_name, ','.join(self._entries.keys())))
+            if optional:
+                print('Warning: %s' % err, file=sys.stderr)
+                return None
+            raise ValueError(err)
+        if prop_name == 'offset':
+            return entry.offset
+        elif prop_name == 'image_pos':
+            return entry.image_pos
+        else:
+            raise ValueError("%s: No such property '%s'" % (msg, prop_name))
+
+    def GetRootSkipAtStart(self):
+        """Get the skip-at-start value for the top-level section
+
+        This is used to find out the starting offset for the root section that
+        contains this section. If this is a top-level section then it returns
+        the skip-at-start offset for this section.
+
+        This is used to get the absolute position of a section within the image.
+
+        Returns:
+            Integer skip-at-start value for the root section containing this
+                section
+        """
+        if self.section:
+            return self.section.GetRootSkipAtStart()
+        return self._skip_at_start
+
+    def GetStartOffset(self):
+        """Get the start offset for this section
+
+        Returns:
+            The first available offset in this section (typically 0)
+        """
+        return self._skip_at_start
+
+    def GetImageSize(self):
+        """Get the size of the image containing this section
+
+        Returns:
+            Image size as an integer number of bytes, which may be None if the
+                image size is dynamic and its sections have not yet been packed
+        """
+        return self.image.size
+
+    def FindEntryType(self, etype):
+        """Find an entry type in the section
+
+        Args:
+            etype: Entry type to find
+        Returns:
+            entry matching that type, or None if not found
+        """
+        for entry in self._entries.values():
+            if entry.etype == etype:
+                return entry
+        return None
+
+    def GetEntryContents(self):
+        """Call ObtainContents() for the section
+        """
+        todo = self._entries.values()
+        for passnum in range(3):
+            next_todo = []
+            for entry in todo:
+                if not entry.ObtainContents():
+                    next_todo.append(entry)
+            todo = next_todo
+            if not todo:
+                break
+        if todo:
+            self.Raise('Internal error: Could not complete processing of contents: remaining %s' %
+                       todo)
+        return True
+
+    def _SetEntryOffsetSize(self, name, offset, size):
+        """Set the offset and size of an entry
+
+        Args:
+            name: Entry name to update
+            offset: New offset, or None to leave alone
+            size: New size, or None to leave alone
+        """
+        entry = self._entries.get(name)
+        if not entry:
+            self._Raise("Unable to set offset/size for unknown entry '%s'" %
+                        name)
+        entry.SetOffsetSize(self._skip_at_start + offset if offset is not None
+                            else None, size)
+
+    def GetEntryOffsets(self):
+        """Handle entries that want to set the offset/size of other entries
+
+        This calls each entry's GetOffsets() method. If it returns a list
+        of entries to update, it updates them.
+        """
+        for entry in self._entries.values():
+            offset_dict = entry.GetOffsets()
+            for name, info in offset_dict.items():
+                self._SetEntryOffsetSize(name, *info)
+
+    def CheckSize(self):
+        """Check that the image contents does not exceed its size, etc."""
+        contents_size = 0
+        for entry in self._entries.values():
+            contents_size = max(contents_size, entry.offset + entry.size)
+
+        contents_size -= self._skip_at_start
+
+        size = self.size
+        if not size:
+            size = self.pad_before + contents_size + self.pad_after
+            size = tools.Align(size, self.align_size)
+
+        if self.size and contents_size > self.size:
+            self._Raise("contents size %#x (%d) exceeds section size %#x (%d)" %
+                        (contents_size, contents_size, self.size, self.size))
+        if not self.size:
+            self.size = size
+        if self.size != tools.Align(self.size, self.align_size):
+            self._Raise("Size %#x (%d) does not match align-size %#x (%d)" %
+                        (self.size, self.size, self.align_size,
+                         self.align_size))
+        return size
 
-    def ExpandToLimit(self, limit):
-        super(Entry_section, self).ExpandToLimit(limit)
-        self._section.ExpandSize(self.size)
+    def ListEntries(self, entries, indent):
+        """List the files in the section"""
+        Entry.AddEntryInfo(entries, indent, self.name, 'section', self.size,
+                           self.image_pos, None, self.offset, self)
+        for entry in self._entries.values():
+            entry.ListEntries(entries, indent + 1)
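
The symbol-name convention handled by LookupSymbol() can be checked in
isolation with the same regular expression the method uses. A minimal sketch:

    import re

    # Format is _binman_<entry>_prop_<property>, with '_' standing in for '-'
    m = re.match(r'^_binman_(\w+)_prop_(\w+)$',
                 '_binman_u_boot_spl_prop_offset')
    entry_name, prop_name = m.groups()
    assert entry_name.replace('_', '-') == 'u-boot-spl'
    assert prop_name == 'offset'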

+ 19 - 4
tools/binman/etype/text.py

@@ -22,6 +22,8 @@ class Entry_text(Entry):
             that contains the string to place in the entry
         <xxx> (actual name is the value of text-label): contains the string to
             place in the entry.
+        <text>: The text to place in the entry (overrides the above mechanism).
+            This is useful when the text is constant.
 
     Example node:
 
@@ -44,15 +46,28 @@ class Entry_text(Entry):
             message = "a message directly in the node"
         };
 
+    or just:
+
+        text {
+            size = <8>;
+            text = "some text directly in the node"
+        };
+
     The text is not itself nul-terminated. This can be achieved, if required,
     by setting the size of the entry to something larger than the text.
     """
     def __init__(self, section, etype, node):
         Entry.__init__(self, section, etype, node)
-        label, = self.GetEntryArgsOrProps([EntryArg('text-label', str)])
-        self.text_label = tools.ToStr(label) if type(label) != str else label
-        value, = self.GetEntryArgsOrProps([EntryArg(self.text_label, str)])
-        value = tools.ToBytes(value) if value is not None else value
+        value = fdt_util.GetString(self._node, 'text')
+        if value:
+            value = tools.ToBytes(value)
+        else:
+            label, = self.GetEntryArgsOrProps([EntryArg('text-label', str)])
+            self.text_label = label
+            if self.text_label:
+                value, = self.GetEntryArgsOrProps([EntryArg(self.text_label,
+                                                            str)])
+                value = tools.ToBytes(value) if value is not None else value
         self.value = value
 
     def ObtainContents(self):

+ 1 - 1
tools/binman/etype/u_boot_spl_elf.py

@@ -12,7 +12,7 @@ class Entry_u_boot_spl_elf(Entry_blob):
     """U-Boot SPL ELF image
 
     Properties / Entry arguments:
-        - filename: Filename of SPL u-boot (default 'spl/u-boot')
+        - filename: Filename of SPL u-boot (default 'spl/u-boot-spl')
 
     This is the U-Boot SPL ELF image. It does not include a device tree but can
     be relocated to any address for execution.

+ 24 - 0
tools/binman/etype/u_boot_tpl_elf.py

@@ -0,0 +1,24 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright (c) 2018 Google, Inc
+# Written by Simon Glass <sjg@chromium.org>
+#
+# Entry-type module for U-Boot TPL ELF image
+#
+
+from entry import Entry
+from blob import Entry_blob
+
+class Entry_u_boot_tpl_elf(Entry_blob):
+    """U-Boot TPL ELF image
+
+    Properties / Entry arguments:
+        - filename: Filename of TPL u-boot (default 'tpl/u-boot-tpl')
+
+    This is the U-Boot TPL ELF image. It does not include a device tree but can
+    be relocated to any address for execution.
+    """
+    def __init__(self, section, etype, node):
+        Entry_blob.__init__(self, section, etype, node)
+
+    def GetDefaultFilename(self):
+        return 'tpl/u-boot-tpl'

+ 4 - 4
tools/binman/etype/u_boot_with_ucode_ptr.py

@@ -49,7 +49,7 @@ class Entry_u_boot_with_ucode_ptr(Entry_blob):
     def ProcessContents(self):
         # If the image does not need microcode, there is nothing to do
         if not self.target_offset:
-            return
+            return True
 
         # Get the offset of the microcode
         ucode_entry = self.section.FindEntryType('u-boot-ucode')
@@ -91,6 +91,6 @@ class Entry_u_boot_with_ucode_ptr(Entry_blob):
         # Write the microcode offset and size into the entry
         offset_and_size = struct.pack('<2L', offset, size)
         self.target_offset -= self.image_pos
-        self.ProcessContentsUpdate(self.data[:self.target_offset] +
-                                   offset_and_size +
-                                   self.data[self.target_offset + 8:])
+        return self.ProcessContentsUpdate(self.data[:self.target_offset] +
+                                          offset_and_size +
+                                          self.data[self.target_offset + 8:])

+ 974 - 65
tools/binman/ftest.py

@@ -6,6 +6,8 @@
 #
 #    python -m unittest ftest.TestFunctional.testHelp
 
+from __future__ import print_function
+
 import hashlib
 from optparse import OptionParser
 import os
@@ -16,14 +18,19 @@ import tempfile
 import unittest
 
 import binman
+import cbfs_util
 import cmdline
 import command
 import control
 import elf
 import fdt
+from etype import fdtmap
+from etype import image_header
 import fdt_util
 import fmap_util
 import test_util
+import gzip
+from image import Image
 import state
 import tools
 import tout
@@ -59,9 +66,11 @@ BMPBLK_DATA           = b'bmp'
 VBLOCK_DATA           = b'vblk'
 FILES_DATA            = (b"sorry I'm late\nOh, don't bother apologising, I'm " +
                          b"sorry you're alive\n")
-COMPRESS_DATA         = b'data to compress'
+COMPRESS_DATA         = b'compress xxxxxxxxxxxxxxxxxxxxxx data'
 REFCODE_DATA          = b'refcode'
 
+EXTRACT_DTB_SIZE = 0x3c9
+
 
 class TestFunctional(unittest.TestCase):
     """Functional tests for binman
@@ -131,13 +140,46 @@ class TestFunctional(unittest.TestCase):
 
         TestFunctional._MakeInputFile('compress', COMPRESS_DATA)
 
+        # Travis-CI may have an old lz4
+        self.have_lz4 = True
+        try:
+            tools.Run('lz4', '--no-frame-crc', '-c',
+                      os.path.join(self._indir, 'u-boot.bin'))
+        except Exception:
+            self.have_lz4 = False
+
     @classmethod
     def tearDownClass(self):
         """Remove the temporary input directory and its contents"""
-        if self._indir:
-            shutil.rmtree(self._indir)
+        if self.preserve_indir:
+            print('Preserving input dir: %s' % self._indir)
+        else:
+            if self._indir:
+                shutil.rmtree(self._indir)
         self._indir = None
 
+    @classmethod
+    def setup_test_args(cls, preserve_indir=False, preserve_outdirs=False,
+                        toolpath=None, verbosity=None):
+        """Accept arguments controlling test execution
+
+        Args:
+            preserve_indir: Preserve the shared input directory used by all
+                tests in this class.
+            preserve_outdirs: Preserve the output directories used by tests.
+                Each test has its own, so this is normally only useful when
+                running a single test.
+            toolpath: List of paths to use for tools
+            verbosity: Verbosity level to use while running the tests
+        """
+        cls.preserve_indir = preserve_indir
+        cls.preserve_outdirs = preserve_outdirs
+        cls.toolpath = toolpath
+        cls.verbosity = verbosity
+
+    def _CheckLz4(self):
+        if not self.have_lz4:
+            self.skipTest('lz4 --no-frame-crc not available')
+
     def setUp(self):
         # Enable this to turn on debugging output
         # tout.Init(tout.DEBUG)
@@ -145,7 +187,10 @@ class TestFunctional(unittest.TestCase):
 
     def tearDown(self):
         """Remove the temporary output directory"""
-        tools._FinaliseForTest()
+        if self.preserve_outdirs:
+            print('Preserving output dir: %s' % tools.outdir)
+        else:
+            tools._FinaliseForTest()
 
     @classmethod
     def _ResetDtbs(self):
@@ -167,7 +212,7 @@ class TestFunctional(unittest.TestCase):
                             result.stdout + result.stderr))
         return result
 
-    def _DoBinman(self, *args):
+    def _DoBinman(self, *argv):
         """Run binman using directly (in the same process)
 
         Args:
@@ -175,16 +220,14 @@ class TestFunctional(unittest.TestCase):
         Returns:
             Return value (0 for success)
         """
-        args = list(args)
-        if '-D' in sys.argv:
-            args = args + ['-D']
-        (options, args) = cmdline.ParseArgs(args)
-        options.pager = 'binman-invalid-pager'
-        options.build_dir = self._indir
+        argv = list(argv)
+        args = cmdline.ParseArgs(argv)
+        args.pager = 'binman-invalid-pager'
+        args.build_dir = self._indir
 
         # For testing, you can force an increase in verbosity here
-        # options.verbosity = tout.DEBUG
-        return control.Binman(options, args)
+        # args.verbosity = tout.DEBUG
+        return control.Binman(args)
 
     def _DoTestFile(self, fname, debug=False, map=False, update_dtb=False,
                     entry_args=None, images=None, use_real_dtb=False,
@@ -202,17 +245,23 @@ class TestFunctional(unittest.TestCase):
                 value: value of that arg
             images: List of image names to build
         """
-        args = ['-p', '-I', self._indir, '-d', self.TestFile(fname)]
+        args = []
         if debug:
             args.append('-D')
+        if verbosity is not None:
+            args.append('-v%d' % verbosity)
+        elif self.verbosity:
+            args.append('-v%d' % self.verbosity)
+        if self.toolpath:
+            for path in self.toolpath:
+                args += ['--toolpath', path]
+        args += ['build', '-p', '-I', self._indir, '-d', self.TestFile(fname)]
         if map:
             args.append('-m')
         if update_dtb:
-            args.append('-up')
+            args.append('-u')
         if not use_real_dtb:
             args.append('--fake-dtb')
-        if verbosity is not None:
-            args.append('-v%d' % verbosity)
         if entry_args:
             for arg, value in entry_args.items():
                 args.append('-a%s=%s' % (arg, value))
@@ -323,6 +372,17 @@ class TestFunctional(unittest.TestCase):
             if reset_dtbs and use_real_dtb:
                 self._ResetDtbs()
 
+    def _DoReadFileRealDtb(self, fname):
+        """Run binman with a real .dtb file and return the resulting data
+
+        Args:
+            fname: DT source filename to use (e.g. 082_fdt_update_all.dts)
+
+        Returns:
+            Resulting image contents
+        """
+        return self._DoReadFileDtb(fname, use_real_dtb=True, update_dtb=True)[0]
+
     def _DoReadFile(self, fname, use_real_dtb=False):
         """Helper function which discards the device-tree binary
 
@@ -419,16 +479,16 @@ class TestFunctional(unittest.TestCase):
         """
         return struct.unpack('>L', dtb[4:8])[0]
 
-    def _GetPropTree(self, dtb, prop_names):
+    def _GetPropTree(self, dtb, prop_names, prefix='/binman/'):
         def AddNode(node, path):
             if node.name != '/':
                 path += '/' + node.name
+            for prop in node.props.values():
+                if prop.name in prop_names:
+                    prop_path = path + ':' + prop.name
+                    tree[prop_path[len(prefix):]] = fdt_util.fdt32_to_cpu(
+                        prop.value)
             for subnode in node.subnodes:
-                for prop in subnode.props.values():
-                    if prop.name in prop_names:
-                        prop_path = path + '/' + subnode.name + ':' + prop.name
-                        tree[prop_path[len('/binman/'):]] = fdt_util.fdt32_to_cpu(
-                            prop.value)
                 AddNode(subnode, path)
 
         tree = {}
@@ -470,20 +530,20 @@ class TestFunctional(unittest.TestCase):
         """Test that we can run it with a specific board"""
         self._SetupDtb('005_simple.dts', 'sandbox/u-boot.dtb')
         TestFunctional._MakeInputFile('sandbox/u-boot.bin', U_BOOT_DATA)
-        result = self._DoBinman('-b', 'sandbox')
+        result = self._DoBinman('build', '-b', 'sandbox')
         self.assertEqual(0, result)
 
     def testNeedBoard(self):
         """Test that we get an error when no board ius supplied"""
         with self.assertRaises(ValueError) as e:
-            result = self._DoBinman()
+            result = self._DoBinman('build')
         self.assertIn("Must provide a board to process (use -b <board>)",
                 str(e.exception))
 
     def testMissingDt(self):
         """Test that an invalid device-tree file generates an error"""
         with self.assertRaises(Exception) as e:
-            self._RunBinman('-d', 'missing_file')
+            self._RunBinman('build', '-d', 'missing_file')
         # We get one error from libfdt, and a different one from fdtget.
         self.AssertInList(["Couldn't open blob from 'missing_file'",
                            'No such file or directory'], str(e.exception))
@@ -495,26 +555,26 @@ class TestFunctional(unittest.TestCase):
         will come from the device-tree compiler (dtc).
         """
         with self.assertRaises(Exception) as e:
-            self._RunBinman('-d', self.TestFile('001_invalid.dts'))
+            self._RunBinman('build', '-d', self.TestFile('001_invalid.dts'))
         self.assertIn("FATAL ERROR: Unable to parse input tree",
                 str(e.exception))
 
     def testMissingNode(self):
         """Test that a device tree without a 'binman' node generates an error"""
         with self.assertRaises(Exception) as e:
-            self._DoBinman('-d', self.TestFile('002_missing_node.dts'))
+            self._DoBinman('build', '-d', self.TestFile('002_missing_node.dts'))
         self.assertIn("does not have a 'binman' node", str(e.exception))
 
     def testEmpty(self):
         """Test that an empty binman node works OK (i.e. does nothing)"""
-        result = self._RunBinman('-d', self.TestFile('003_empty.dts'))
+        result = self._RunBinman('build', '-d', self.TestFile('003_empty.dts'))
         self.assertEqual(0, len(result.stderr))
         self.assertEqual(0, result.return_code)
 
     def testInvalidEntry(self):
         """Test that an invalid entry is flagged"""
         with self.assertRaises(Exception) as e:
-            result = self._RunBinman('-d',
+            result = self._RunBinman('build', '-d',
                                      self.TestFile('004_invalid_entry.dts'))
         self.assertIn("Unknown entry type 'not-a-valid-type' in node "
                 "'/binman/not-a-valid-type'", str(e.exception))
@@ -526,7 +586,7 @@ class TestFunctional(unittest.TestCase):
 
     def testSimpleDebug(self):
         """Test a simple binman run with debugging enabled"""
-        data = self._DoTestFile('005_simple.dts', debug=True)
+        self._DoTestFile('005_simple.dts', debug=True)
 
     def testDual(self):
         """Test that we can handle creating two images
@@ -537,7 +597,7 @@ class TestFunctional(unittest.TestCase):
         self.assertEqual(0, retcode)
 
         image = control.images['image1']
-        self.assertEqual(len(U_BOOT_DATA), image._size)
+        self.assertEqual(len(U_BOOT_DATA), image.size)
         fname = tools.GetOutputFilename('image1.bin')
         self.assertTrue(os.path.exists(fname))
         with open(fname, 'rb') as fd:
@@ -545,7 +605,7 @@ class TestFunctional(unittest.TestCase):
             self.assertEqual(U_BOOT_DATA, data)
 
         image = control.images['image2']
-        self.assertEqual(3 + len(U_BOOT_DATA) + 5, image._size)
+        self.assertEqual(3 + len(U_BOOT_DATA) + 5, image.size)
         fname = tools.GetOutputFilename('image2.bin')
         self.assertTrue(os.path.exists(fname))
         with open(fname, 'rb') as fd:
@@ -601,7 +661,7 @@ class TestFunctional(unittest.TestCase):
         self.assertEqual(61, entry.offset)
         self.assertEqual(len(U_BOOT_DATA), entry.size)
 
-        self.assertEqual(65, image._size)
+        self.assertEqual(65, image.size)
 
     def testPackExtra(self):
         """Test that extra packing feature works as expected"""
@@ -645,7 +705,7 @@ class TestFunctional(unittest.TestCase):
         self.assertEqual(64, entry.size)
 
         self.CheckNoGaps(entries)
-        self.assertEqual(128, image._size)
+        self.assertEqual(128, image.size)
 
     def testPackAlignPowerOf2(self):
         """Test that invalid entry alignment is detected"""
@@ -703,7 +763,7 @@ class TestFunctional(unittest.TestCase):
         self.assertEqual(0, retcode)
         self.assertIn('image', control.images)
         image = control.images['image']
-        self.assertEqual(7, image._size)
+        self.assertEqual(7, image.size)
 
     def testPackImageSizeAlign(self):
         """Test that image size alignemnt works as expected"""
@@ -711,7 +771,7 @@ class TestFunctional(unittest.TestCase):
         self.assertEqual(0, retcode)
         self.assertIn('image', control.images)
         image = control.images['image']
-        self.assertEqual(16, image._size)
+        self.assertEqual(16, image.size)
 
     def testPackInvalidImageAlign(self):
         """Test that invalid image alignment is detected"""
@@ -724,7 +784,7 @@ class TestFunctional(unittest.TestCase):
         """Test that invalid image alignment is detected"""
         with self.assertRaises(ValueError) as e:
             self._DoTestFile('020_pack_inv_image_align_power2.dts')
-        self.assertIn("Section '/binman': Alignment size 131 must be a power of "
+        self.assertIn("Image '/binman': Alignment size 131 must be a power of "
                       "two", str(e.exception))
 
     def testImagePadByte(self):
@@ -775,7 +835,7 @@ class TestFunctional(unittest.TestCase):
         """Test that the end-at-4gb property requires a size property"""
         with self.assertRaises(ValueError) as e:
             self._DoTestFile('027_pack_4gb_no_size.dts')
-        self.assertIn("Section '/binman': Section size must be provided when "
+        self.assertIn("Image '/binman': Section size must be provided when "
                       "using end-at-4gb", str(e.exception))
 
     def test4gbAndSkipAtStartTogether(self):
@@ -783,7 +843,7 @@ class TestFunctional(unittest.TestCase):
         together"""
         with self.assertRaises(ValueError) as e:
             self._DoTestFile('80_4gb_and_skip_at_start_together.dts')
-        self.assertIn("Section '/binman': Provide either 'end-at-4gb' or "
+        self.assertIn("Image '/binman': Provide either 'end-at-4gb' or "
                       "'skip-at-start'", str(e.exception))
 
     def testPackX86RomOutside(self):
@@ -806,8 +866,8 @@ class TestFunctional(unittest.TestCase):
         TestFunctional._MakeInputFile('descriptor.bin', b'')
         with self.assertRaises(ValueError) as e:
             self._DoTestFile('031_x86-rom-me.dts')
-        self.assertIn("Node '/binman/intel-descriptor': Cannot find FD "
-                      "signature", str(e.exception))
+        self.assertIn("Node '/binman/intel-descriptor': Cannot find Intel Flash Descriptor (FD) signature",
+                      str(e.exception))
 
     def testPackX86RomBadDesc(self):
         """Test that the Intel requires a descriptor entry"""
@@ -820,6 +880,9 @@ class TestFunctional(unittest.TestCase):
     def testPackX86RomMe(self):
         """Test that an x86 ROM with an ME region can be created"""
         data = self._DoReadFile('031_x86-rom-me.dts')
+        expected_desc = tools.ReadFile(self.TestFile('descriptor.bin'))
+        if data[:0x1000] != expected_desc:
+            self.fail('Expected descriptor binary at start of image')
         self.assertEqual(ME_DATA, data[0x1000:0x1000 + len(ME_DATA)])
 
     def testPackVga(self):
@@ -1156,16 +1219,20 @@ class TestFunctional(unittest.TestCase):
         """Test that obtaining the contents works as expected"""
         with self.assertRaises(ValueError) as e:
             self._DoReadFile('057_unknown_contents.dts', True)
-        self.assertIn("Section '/binman': Internal error: Could not complete "
+        self.assertIn("Image '/binman': Internal error: Could not complete "
                 "processing of contents: remaining [<_testing.Entry__testing ",
                 str(e.exception))
 
     def testBadChangeSize(self):
         """Test that trying to change the size of an entry fails"""
-        with self.assertRaises(ValueError) as e:
-            self._DoReadFile('059_change_size.dts', True)
-        self.assertIn("Node '/binman/_testing': Cannot update entry size from "
-                      '2 to 1', str(e.exception))
+        try:
+            state.SetAllowEntryExpansion(False)
+            with self.assertRaises(ValueError) as e:
+                self._DoReadFile('059_change_size.dts', True)
+            self.assertIn("Node '/binman/_testing': Cannot update entry size from 1 to 2",
+                          str(e.exception))
+        finally:
+            state.SetAllowEntryExpansion(True)
 
     def testUpdateFdt(self):
         """Test that we can update the device tree with offset/size info"""
@@ -1242,7 +1309,8 @@ class TestFunctional(unittest.TestCase):
 
     def testEntryArgsInvalidFormat(self):
         """Test that an invalid entry-argument format is detected"""
-        args = ['-d', self.TestFile('064_entry_args_required.dts'), '-ano-value']
+        args = ['build', '-d', self.TestFile('064_entry_args_required.dts'),
+                '-ano-value']
         with self.assertRaises(ValueError) as e:
             self._DoBinman(*args)
         self.assertIn("Invalid entry arguemnt 'no-value'", str(e.exception))
@@ -1286,7 +1354,7 @@ class TestFunctional(unittest.TestCase):
         expected = (tools.ToBytes(TEXT_DATA) +
                     tools.GetBytes(0, 8 - len(TEXT_DATA)) +
                     tools.ToBytes(TEXT_DATA2) + tools.ToBytes(TEXT_DATA3) +
-                    b'some text')
+                    b'some text' + b'more text')
         self.assertEqual(expected, data)
 
     def testEntryDocs(self):
@@ -1471,7 +1539,7 @@ class TestFunctional(unittest.TestCase):
         expected = 'Skipping images: image1'
 
         # We should only get the expected message in verbose mode
-        for verbosity in (None, 2):
+        for verbosity in (0, 2):
             with test_util.capture_sys_output() as (stdout, stderr):
                 retcode = self._DoTestFile('006_dual_image.dts',
                                            verbosity=verbosity,
@@ -1487,8 +1555,7 @@ class TestFunctional(unittest.TestCase):
 
     def testUpdateFdtAll(self):
         """Test that all device trees are updated with offset/size info"""
-        data, _, _, _ = self._DoReadFileDtb('082_fdt_update_all.dts',
-                                            use_real_dtb=True, update_dtb=True)
+        data = self._DoReadFileRealDtb('082_fdt_update_all.dts')
 
         base_expected = {
             'section:image-pos': 0,
@@ -1560,19 +1627,11 @@ class TestFunctional(unittest.TestCase):
             self._ResetDtbs()
 
     def _decompress(self, data):
-        out = os.path.join(self._indir, 'lz4.tmp')
-        with open(out, 'wb') as fd:
-            fd.write(data)
-        return tools.Run('lz4', '-dc', out, binary=True)
-        '''
-        try:
-            orig = lz4.frame.decompress(data)
-        except AttributeError:
-            orig = lz4.decompress(data)
-        '''
+        return tools.Decompress(data, 'lz4')
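
The helper now delegates to tools.Decompress() instead of running lz4 by
hand. A minimal sketch of such a CLI-backed helper, assuming an lz4 binary
on PATH (the real tools.Decompress() may differ in detail):

    import subprocess
    import tempfile

    def decompress_lz4(data):
        # Write the compressed bytes to a temporary file and let the lz4
        # CLI decompress them to stdout ('-d' decompress, '-c' to stdout)
        with tempfile.NamedTemporaryFile(suffix='.lz4') as tmp:
            tmp.write(data)
            tmp.flush()
            return subprocess.check_output(['lz4', '-dc', tmp.name])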
 
     def testCompress(self):
         """Test compression of blobs"""
+        self._CheckLz4()
         data, _, _, out_dtb_fname = self._DoReadFileDtb('083_compress.dts',
                                             use_real_dtb=True, update_dtb=True)
         dtb = fdt.Fdt(out_dtb_fname)
@@ -1594,12 +1653,13 @@ class TestFunctional(unittest.TestCase):
 
     def testFilesCompress(self):
         """Test bringing in multiple files and compressing them"""
+        self._CheckLz4()
         data = self._DoReadFile('085_files_compress.dts')
 
         image = control.images['image']
         entries = image.GetEntries()
         files = entries['files']
-        entries = files._section._entries
+        entries = files._entries
 
         orig = b''
         for i in range(1, 3):
@@ -1754,11 +1814,13 @@ class TestFunctional(unittest.TestCase):
     def testElf(self):
         """Basic test of ELF entries"""
         self._SetupSplElf()
+        with open(self.TestFile('bss_data'), 'rb') as fd:
+            TestFunctional._MakeInputFile('tpl/u-boot-tpl', fd.read())
         with open(self.TestFile('bss_data'), 'rb') as fd:
             TestFunctional._MakeInputFile('-boot', fd.read())
         data = self._DoReadFile('096_elf.dts')
 
-    def testElfStripg(self):
+    def testElfStrip(self):
         """Basic test of ELF entries"""
         self._SetupSplElf()
         with open(self.TestFile('bss_data'), 'rb') as fd:
@@ -1784,7 +1846,7 @@ class TestFunctional(unittest.TestCase):
 <none>     00000003  00000004  u-boot-align
 ''', map_data)
 
-    def testPacRefCode(self):
+    def testPackRefCode(self):
         """Test that an image with an Intel Reference code binary works"""
         data = self._DoReadFile('100_intel_refcode.dts')
         self.assertEqual(REFCODE_DATA, data[:len(REFCODE_DATA)])
@@ -1810,6 +1872,853 @@ class TestFunctional(unittest.TestCase):
                          tools.GetBytes(0x26, 4) + U_BOOT_DATA +
                              tools.GetBytes(0x26, 8))
 
+    def testCbfsRaw(self):
+        """Test base handling of a Coreboot Filesystem (CBFS)
+
+        The exact contents of the CBFS is verified by similar tests in
+        cbfs_util_test.py. The tests here merely check that the files added to
+        the CBFS can be found in the final image.
+        """
+        data = self._DoReadFile('102_cbfs_raw.dts')
+        size = 0xb0
+
+        cbfs = cbfs_util.CbfsReader(data)
+        self.assertEqual(size, cbfs.rom_size)
+
+        self.assertIn('u-boot-dtb', cbfs.files)
+        cfile = cbfs.files['u-boot-dtb']
+        self.assertEqual(U_BOOT_DTB_DATA, cfile.data)
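
A hedged usage sketch of the CbfsReader interface these tests rely on; only
the attributes exercised here (rom_size, files and each file's data) are
assumed, with 'data' holding a CBFS ROM as in the test above:

    # Sketch only: list the files cbfs_util.CbfsReader found in a ROM
    cbfs = cbfs_util.CbfsReader(data)
    print('ROM size: %#x' % cbfs.rom_size)
    for name, cfile in cbfs.files.items():
        print('%-12s size=%#x' % (name, len(cfile.data)))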
+
+    def testCbfsArch(self):
+        """Test on non-x86 architecture"""
+        data = self._DoReadFile('103_cbfs_raw_ppc.dts')
+        size = 0x100
+
+        cbfs = cbfs_util.CbfsReader(data)
+        self.assertEqual(size, cbfs.rom_size)
+
+        self.assertIn('u-boot-dtb', cbfs.files)
+        cfile = cbfs.files['u-boot-dtb']
+        self.assertEqual(U_BOOT_DTB_DATA, cfile.data)
+
+    def testCbfsStage(self):
+        """Tests handling of a Coreboot Filesystem (CBFS)"""
+        if not elf.ELF_TOOLS:
+            self.skipTest('Python elftools not available')
+        elf_fname = os.path.join(self._indir, 'cbfs-stage.elf')
+        elf.MakeElf(elf_fname, U_BOOT_DATA, U_BOOT_DTB_DATA)
+        size = 0xb0
+
+        data = self._DoReadFile('104_cbfs_stage.dts')
+        cbfs = cbfs_util.CbfsReader(data)
+        self.assertEqual(size, cbfs.rom_size)
+
+        self.assertIn('u-boot', cbfs.files)
+        cfile = cbfs.files['u-boot']
+        self.assertEqual(U_BOOT_DATA + U_BOOT_DTB_DATA, cfile.data)
+
+    def testCbfsRawCompress(self):
+        """Test handling of compressing raw files"""
+        self._CheckLz4()
+        data = self._DoReadFile('105_cbfs_raw_compress.dts')
+        size = 0x140
+
+        cbfs = cbfs_util.CbfsReader(data)
+        self.assertIn('u-boot', cbfs.files)
+        cfile = cbfs.files['u-boot']
+        self.assertEqual(COMPRESS_DATA, cfile.data)
+
+    def testCbfsBadArch(self):
+        """Test handling of a bad architecture"""
+        with self.assertRaises(ValueError) as e:
+            self._DoReadFile('106_cbfs_bad_arch.dts')
+        self.assertIn("Invalid architecture 'bad-arch'", str(e.exception))
+
+    def testCbfsNoSize(self):
+        """Test handling of a missing size property"""
+        with self.assertRaises(ValueError) as e:
+            self._DoReadFile('107_cbfs_no_size.dts')
+        self.assertIn('entry must have a size property', str(e.exception))
+
+    def testCbfsNoContents(self):
+        """Test handling of a CBFS entry which does not provide contents"""
+        with self.assertRaises(ValueError) as e:
+            self._DoReadFile('108_cbfs_no_contents.dts')
+        self.assertIn('Could not complete processing of contents',
+                      str(e.exception))
+
+    def testCbfsBadCompress(self):
+        """Test handling of a bad architecture"""
+        with self.assertRaises(ValueError) as e:
+            self._DoReadFile('109_cbfs_bad_compress.dts')
+        self.assertIn("Invalid compression in 'u-boot': 'invalid-algo'",
+                      str(e.exception))
+
+    def testCbfsNamedEntries(self):
+        """Test handling of named entries"""
+        data = self._DoReadFile('110_cbfs_name.dts')
+
+        cbfs = cbfs_util.CbfsReader(data)
+        self.assertIn('FRED', cbfs.files)
+        cfile1 = cbfs.files['FRED']
+        self.assertEqual(U_BOOT_DATA, cfile1.data)
+
+        self.assertIn('hello', cbfs.files)
+        cfile2 = cbfs.files['hello']
+        self.assertEqual(U_BOOT_DTB_DATA, cfile2.data)
+
+    def _SetupIfwi(self, fname):
+        """Set up to run an IFWI test
+
+        Args:
+            fname: Filename of input file to provide (fitimage.bin or ifwi.bin)
+        """
+        self._SetupSplElf()
+
+        # Intel Integrated Firmware Image (IFWI) file
+        with gzip.open(self.TestFile('%s.gz' % fname), 'rb') as fd:
+            data = fd.read()
+        TestFunctional._MakeInputFile(fname, data)
+
+    def _CheckIfwi(self, data):
+        """Check that an image with an IFWI contains the correct output
+
+        Args:
+            data: Contents of output file
+        """
+        expected_desc = tools.ReadFile(self.TestFile('descriptor.bin'))
+        if data[:0x1000] != expected_desc:
+            self.fail('Expected descriptor binary at start of image')
+
+        # We expect to find the TPL in subpart IBBP, entry IBBL
+        image_fname = tools.GetOutputFilename('image.bin')
+        tpl_fname = tools.GetOutputFilename('tpl.out')
+        tools.RunIfwiTool(image_fname, tools.CMD_EXTRACT, fname=tpl_fname,
+                          subpart='IBBP', entry_name='IBBL')
+
+        tpl_data = tools.ReadFile(tpl_fname)
+        self.assertEqual(tpl_data[:len(U_BOOT_TPL_DATA)], U_BOOT_TPL_DATA)
+
+    def testPackX86RomIfwi(self):
+        """Test that an x86 ROM with Integrated Firmware Image can be created"""
+        self._SetupIfwi('fitimage.bin')
+        data = self._DoReadFile('111_x86-rom-ifwi.dts')
+        self._CheckIfwi(data)
+
+    def testPackX86RomIfwiNoDesc(self):
+        """Test that an x86 ROM with IFWI can be created from an ifwi.bin file"""
+        self._SetupIfwi('ifwi.bin')
+        data = self._DoReadFile('112_x86-rom-ifwi-nodesc.dts')
+        self._CheckIfwi(data)
+
+    def testPackX86RomIfwiNoData(self):
+        """Test that an x86 ROM with IFWI handles missing data"""
+        self._SetupIfwi('ifwi.bin')
+        with self.assertRaises(ValueError) as e:
+            data = self._DoReadFile('113_x86-rom-ifwi-nodata.dts')
+        self.assertIn('Could not complete processing of contents',
+                      str(e.exception))
+
+    def testCbfsOffset(self):
+        """Test a CBFS with files at particular offsets
+
+        Like all CBFS tests, this is just checking the logic that calls
+        cbfs_util. See cbfs_util_test for the full tests (e.g. test_cbfs_offset()).
+        """
+        data = self._DoReadFile('114_cbfs_offset.dts')
+        size = 0x200
+
+        cbfs = cbfs_util.CbfsReader(data)
+        self.assertEqual(size, cbfs.rom_size)
+
+        self.assertIn('u-boot', cbfs.files)
+        cfile = cbfs.files['u-boot']
+        self.assertEqual(U_BOOT_DATA, cfile.data)
+        self.assertEqual(0x40, cfile.cbfs_offset)
+
+        self.assertIn('u-boot-dtb', cbfs.files)
+        cfile2 = cbfs.files['u-boot-dtb']
+        self.assertEqual(U_BOOT_DTB_DATA, cfile2.data)
+        self.assertEqual(0x140, cfile2.cbfs_offset)
+
+    def testFdtmap(self):
+        """Test an FDT map can be inserted in the image"""
+        data = self.data = self._DoReadFileRealDtb('115_fdtmap.dts')
+        fdtmap_data = data[len(U_BOOT_DATA):]
+        magic = fdtmap_data[:8]
+        self.assertEqual(b'_FDTMAP_', magic)
+        self.assertEqual(tools.GetBytes(0, 8), fdtmap_data[8:16])
+
+        fdt_data = fdtmap_data[16:]
+        dtb = fdt.Fdt.FromData(fdt_data)
+        dtb.Scan()
+        props = self._GetPropTree(dtb, ['offset', 'size', 'image-pos'],
+                                  prefix='/')
+        self.assertEqual({
+            'image-pos': 0,
+            'offset': 0,
+            'u-boot:offset': 0,
+            'u-boot:size': len(U_BOOT_DATA),
+            'u-boot:image-pos': 0,
+            'fdtmap:image-pos': 4,
+            'fdtmap:offset': 4,
+            'fdtmap:size': len(fdtmap_data),
+            'size': len(data),
+        }, props)
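
The layout checked above is an 8-byte '_FDTMAP_' magic, 8 reserved zero
bytes, then a device-tree blob. A minimal sketch of pulling such a map
apart, using the same fdt module as the test:

    def parse_fdtmap(blob):
        # Sketch: split an fdtmap blob into its magic check and its DTB
        if blob[:8] != b'_FDTMAP_':
            raise ValueError('Bad fdtmap magic')
        dtb = fdt.Fdt.FromData(blob[16:])
        dtb.Scan()
        return dtb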
+
+    def testFdtmapNoMatch(self):
+        """Check handling of an FDT map when the section cannot be found"""
+        self.data = self._DoReadFileRealDtb('115_fdtmap.dts')
+
+        # Mangle the section name, which should cause a mismatch between the
+        # correct FDT path and the one expected by the section
+        image = control.images['image']
+        image._node.path += '-suffix'
+        entries = image.GetEntries()
+        fdtmap = entries['fdtmap']
+        with self.assertRaises(ValueError) as e:
+            fdtmap._GetFdtmap()
+        self.assertIn("Cannot locate node for path '/binman-suffix'",
+                      str(e.exception))
+
+    def testFdtmapHeader(self):
+        """Test an FDT map and image header can be inserted in the image"""
+        data = self.data = self._DoReadFileRealDtb('116_fdtmap_hdr.dts')
+        fdtmap_pos = len(U_BOOT_DATA)
+        fdtmap_data = data[fdtmap_pos:]
+        fdt_data = fdtmap_data[16:]
+        dtb = fdt.Fdt.FromData(fdt_data)
+        fdt_size = dtb.GetFdtObj().totalsize()
+        hdr_data = data[-8:]
+        self.assertEqual(b'BinM', hdr_data[:4])
+        offset = struct.unpack('<I', hdr_data[4:])[0] & 0xffffffff
+        self.assertEqual(fdtmap_pos - 0x400, offset - (1 << 32))
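
The arithmetic in the final assertion is worth spelling out: with the header
at the end of the (here 0x400-byte) image, the stored 32-bit value is the
fdtmap position relative to the image end, wrapped to unsigned. A hedged
sketch of recovering the position from such a trailer:

    import struct

    def fdtmap_pos_from_trailer(data):
        # Decode the 8-byte trailer checked in testFdtmapHeader()
        magic, offset = struct.unpack('<4sI', data[-8:])
        assert magic == b'BinM'
        # offset holds (fdtmap_pos - image_size) as an unsigned value
        return len(data) + offset - (1 << 32)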
+
+    def testFdtmapHeaderStart(self):
+        """Test an image header can be inserted at the image start"""
+        data = self.data = self._DoReadFileRealDtb('117_fdtmap_hdr_start.dts')
+        fdtmap_pos = 0x100 + len(U_BOOT_DATA)
+        hdr_data = data[:8]
+        self.assertEqual(b'BinM', hdr_data[:4])
+        offset = struct.unpack('<I', hdr_data[4:])[0]
+        self.assertEqual(fdtmap_pos, offset)
+
+    def testFdtmapHeaderPos(self):
+        """Test an image header can be inserted at a chosen position"""
+        data = self.data = self._DoReadFileRealDtb('118_fdtmap_hdr_pos.dts')
+        fdtmap_pos = 0x100 + len(U_BOOT_DATA)
+        hdr_data = data[0x80:0x88]
+        self.assertEqual(b'BinM', hdr_data[:4])
+        offset = struct.unpack('<I', hdr_data[4:])[0]
+        self.assertEqual(fdtmap_pos, offset)
+
+    def testHeaderMissingFdtmap(self):
+        """Test an image header requires an fdtmap"""
+        with self.assertRaises(ValueError) as e:
+            self.data = self._DoReadFileRealDtb('119_fdtmap_hdr_missing.dts')
+        self.assertIn("'image_header' section must have an 'fdtmap' sibling",
+                      str(e.exception))
+
+    def testHeaderNoLocation(self):
+        """Test an image header with a no specified location is detected"""
+        with self.assertRaises(ValueError) as e:
+            self.data = self._DoReadFileRealDtb('120_hdr_no_location.dts')
+        self.assertIn("Invalid location 'None', expected 'start' or 'end'",
+                      str(e.exception))
+
+    def testEntryExpand(self):
+        """Test expanding an entry after it is packed"""
+        data = self._DoReadFile('121_entry_expand.dts')
+        self.assertEqual(b'aa', data[:2])
+        self.assertEqual(U_BOOT_DATA, data[2:2 + len(U_BOOT_DATA)])
+        self.assertEqual(b'aa', data[-2:])
+
+    def testEntryExpandBad(self):
+        """Test expanding an entry after it is packed, twice"""
+        with self.assertRaises(ValueError) as e:
+            self._DoReadFile('122_entry_expand_twice.dts')
+        self.assertIn("Image '/binman': Entries expanded after packing",
+                      str(e.exception))
+
+    def testEntryExpandSection(self):
+        """Test expanding an entry within a section after it is packed"""
+        data = self._DoReadFile('123_entry_expand_section.dts')
+        self.assertEqual(b'aa', data[:2])
+        self.assertEqual(U_BOOT_DATA, data[2:2 + len(U_BOOT_DATA)])
+        self.assertEqual(b'aa', data[-2:])
+
+    def testCompressDtb(self):
+        """Test that compress of device-tree files is supported"""
+        self._CheckLz4()
+        data = self.data = self._DoReadFileRealDtb('124_compress_dtb.dts')
+        self.assertEqual(U_BOOT_DATA, data[:len(U_BOOT_DATA)])
+        comp_data = data[len(U_BOOT_DATA):]
+        orig = self._decompress(comp_data)
+        dtb = fdt.Fdt.FromData(orig)
+        dtb.Scan()
+        props = self._GetPropTree(dtb, ['size', 'uncomp-size'])
+        expected = {
+            'u-boot:size': len(U_BOOT_DATA),
+            'u-boot-dtb:uncomp-size': len(orig),
+            'u-boot-dtb:size': len(comp_data),
+            'size': len(data),
+            }
+        self.assertEqual(expected, props)
+
+    def testCbfsUpdateFdt(self):
+        """Test that we can update the device tree with CBFS offset/size info"""
+        self._CheckLz4()
+        data, _, _, out_dtb_fname = self._DoReadFileDtb('125_cbfs_update.dts',
+                                                        update_dtb=True)
+        dtb = fdt.Fdt(out_dtb_fname)
+        dtb.Scan()
+        props = self._GetPropTree(dtb, ['offset', 'size', 'image-pos',
+                                        'uncomp-size'])
+        del props['cbfs/u-boot:size']
+        self.assertEqual({
+            'offset': 0,
+            'size': len(data),
+            'image-pos': 0,
+            'cbfs:offset': 0,
+            'cbfs:size': len(data),
+            'cbfs:image-pos': 0,
+            'cbfs/u-boot:offset': 0x38,
+            'cbfs/u-boot:uncomp-size': len(U_BOOT_DATA),
+            'cbfs/u-boot:image-pos': 0x38,
+            'cbfs/u-boot-dtb:offset': 0xb8,
+            'cbfs/u-boot-dtb:size': len(U_BOOT_DATA),
+            'cbfs/u-boot-dtb:image-pos': 0xb8,
+            }, props)
+
+    def testCbfsBadType(self):
+        """Test an image header with a no specified location is detected"""
+        with self.assertRaises(ValueError) as e:
+            self._DoReadFile('126_cbfs_bad_type.dts')
+        self.assertIn("Unknown cbfs-type 'badtype'", str(e.exception))
+
+    def testList(self):
+        """Test listing the files in an image"""
+        self._CheckLz4()
+        data = self._DoReadFile('127_list.dts')
+        image = control.images['image']
+        entries = image.BuildEntryList()
+        self.assertEqual(7, len(entries))
+
+        ent = entries[0]
+        self.assertEqual(0, ent.indent)
+        self.assertEqual('main-section', ent.name)
+        self.assertEqual('section', ent.etype)
+        self.assertEqual(len(data), ent.size)
+        self.assertEqual(0, ent.image_pos)
+        self.assertEqual(None, ent.uncomp_size)
+        self.assertEqual(0, ent.offset)
+
+        ent = entries[1]
+        self.assertEqual(1, ent.indent)
+        self.assertEqual('u-boot', ent.name)
+        self.assertEqual('u-boot', ent.etype)
+        self.assertEqual(len(U_BOOT_DATA), ent.size)
+        self.assertEqual(0, ent.image_pos)
+        self.assertEqual(None, ent.uncomp_size)
+        self.assertEqual(0, ent.offset)
+
+        ent = entries[2]
+        self.assertEqual(1, ent.indent)
+        self.assertEqual('section', ent.name)
+        self.assertEqual('section', ent.etype)
+        section_size = ent.size
+        self.assertEqual(0x100, ent.image_pos)
+        self.assertEqual(None, ent.uncomp_size)
+        self.assertEqual(0x100, ent.offset)
+
+        ent = entries[3]
+        self.assertEqual(2, ent.indent)
+        self.assertEqual('cbfs', ent.name)
+        self.assertEqual('cbfs', ent.etype)
+        self.assertEqual(0x400, ent.size)
+        self.assertEqual(0x100, ent.image_pos)
+        self.assertEqual(None, ent.uncomp_size)
+        self.assertEqual(0, ent.offset)
+
+        ent = entries[4]
+        self.assertEqual(3, ent.indent)
+        self.assertEqual('u-boot', ent.name)
+        self.assertEqual('u-boot', ent.etype)
+        self.assertEqual(len(U_BOOT_DATA), ent.size)
+        self.assertEqual(0x138, ent.image_pos)
+        self.assertEqual(None, ent.uncomp_size)
+        self.assertEqual(0x38, ent.offset)
+
+        ent = entries[5]
+        self.assertEqual(3, ent.indent)
+        self.assertEqual('u-boot-dtb', ent.name)
+        self.assertEqual('text', ent.etype)
+        self.assertGreater(len(COMPRESS_DATA), ent.size)
+        self.assertEqual(0x178, ent.image_pos)
+        self.assertEqual(len(COMPRESS_DATA), ent.uncomp_size)
+        self.assertEqual(0x78, ent.offset)
+
+        ent = entries[6]
+        self.assertEqual(2, ent.indent)
+        self.assertEqual('u-boot-dtb', ent.name)
+        self.assertEqual('u-boot-dtb', ent.etype)
+        self.assertEqual(0x500, ent.image_pos)
+        self.assertEqual(len(U_BOOT_DTB_DATA), ent.uncomp_size)
+        dtb_size = ent.size
+        # Compressing this data expands it since headers are added
+        self.assertGreater(dtb_size, len(U_BOOT_DTB_DATA))
+        self.assertEqual(0x400, ent.offset)
+
+        self.assertEqual(len(data), 0x100 + section_size)
+        self.assertEqual(section_size, 0x400 + dtb_size)
+
+    def testFindFdtmap(self):
+        """Test locating an FDT map in an image"""
+        self._CheckLz4()
+        data = self.data = self._DoReadFileRealDtb('128_decode_image.dts')
+        image = control.images['image']
+        entries = image.GetEntries()
+        entry = entries['fdtmap']
+        self.assertEqual(entry.image_pos, fdtmap.LocateFdtmap(data))
+
+    def testFindFdtmapMissing(self):
+        """Test failing to locate an FDP map"""
+        data = self._DoReadFile('005_simple.dts')
+        self.assertEqual(None, fdtmap.LocateFdtmap(data))
+
+    def testFindImageHeader(self):
+        """Test locating a image header"""
+        self._CheckLz4()
+        data = self.data = self._DoReadFileRealDtb('128_decode_image.dts')
+        image = control.images['image']
+        entries = image.GetEntries()
+        entry = entries['fdtmap']
+        # The header should point to the FDT map
+        self.assertEqual(entry.image_pos, image_header.LocateHeaderOffset(data))
+
+    def testFindImageHeaderStart(self):
+        """Test locating a image header located at the start of an image"""
+        data = self.data = self._DoReadFileRealDtb('117_fdtmap_hdr_start.dts')
+        image = control.images['image']
+        entries = image.GetEntries()
+        entry = entries['fdtmap']
+        # The header should point to the FDT map
+        self.assertEqual(entry.image_pos, image_header.LocateHeaderOffset(data))
+
+    def testFindImageHeaderMissing(self):
+        """Test failing to locate an image header"""
+        data = self._DoReadFile('005_simple.dts')
+        self.assertEqual(None, image_header.LocateHeaderOffset(data))
+
+    def testReadImage(self):
+        """Test reading an image and accessing its FDT map"""
+        self._CheckLz4()
+        data = self.data = self._DoReadFileRealDtb('128_decode_image.dts')
+        image_fname = tools.GetOutputFilename('image.bin')
+        orig_image = control.images['image']
+        image = Image.FromFile(image_fname)
+        self.assertEqual(orig_image.GetEntries().keys(),
+                         image.GetEntries().keys())
+
+        orig_entry = orig_image.GetEntries()['fdtmap']
+        entry = image.GetEntries()['fdtmap']
+        self.assertEqual(orig_entry.offset, entry.offset)
+        self.assertEqual(orig_entry.size, entry.size)
+        self.assertEqual(orig_entry.image_pos, entry.image_pos)
+
+    def testReadImageNoHeader(self):
+        """Test accessing an image's FDT map without an image header"""
+        self._CheckLz4()
+        data = self._DoReadFileRealDtb('129_decode_image_nohdr.dts')
+        image_fname = tools.GetOutputFilename('image.bin')
+        image = Image.FromFile(image_fname)
+        self.assertTrue(isinstance(image, Image))
+        self.assertEqual('image', image.image_name)
+
+    def testReadImageFail(self):
+        """Test failing to read an image image's FDT map"""
+        self._DoReadFile('005_simple.dts')
+        image_fname = tools.GetOutputFilename('image.bin')
+        with self.assertRaises(ValueError) as e:
+            image = Image.FromFile(image_fname)
+        self.assertIn("Cannot find FDT map in image", str(e.exception))
+
+    def testListCmd(self):
+        """Test listing the files in an image using an Fdtmap"""
+        self._CheckLz4()
+        data = self._DoReadFileRealDtb('130_list_fdtmap.dts')
+
+        # lz4 compression size differs depending on the version
+        image = control.images['image']
+        entries = image.GetEntries()
+        section_size = entries['section'].size
+        fdt_size = entries['section'].GetEntries()['u-boot-dtb'].size
+        fdtmap_offset = entries['fdtmap'].offset
+
+        image_fname = tools.GetOutputFilename('image.bin')
+        with test_util.capture_sys_output() as (stdout, stderr):
+            self._DoBinman('ls', '-i', image_fname)
+        lines = stdout.getvalue().splitlines()
+        expected = [
+'Name              Image-pos  Size  Entry-type    Offset  Uncomp-size',
+'----------------------------------------------------------------------',
+'main-section              0   c00  section            0',
+'  u-boot                  0     4  u-boot             0',
+'  section               100   %x  section          100' % section_size,
+'    cbfs                100   400  cbfs               0',
+'      u-boot            138     4  u-boot            38',
+'      u-boot-dtb        180   10f  u-boot-dtb        80          3c9',
+'    u-boot-dtb          500   %x  u-boot-dtb       400          3c9' % fdt_size,
+'  fdtmap                %x   395  fdtmap           %x' %
+        (fdtmap_offset, fdtmap_offset),
+'  image-header          bf8     8  image-header     bf8',
+            ]
+        self.assertEqual(expected, lines)
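
Outside the test harness the same listing comes from the command line; the
image path here is illustrative:

    binman ls -i image.bin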
+
+    def testListCmdFail(self):
+        """Test failing to list an image"""
+        self._DoReadFile('005_simple.dts')
+        image_fname = tools.GetOutputFilename('image.bin')
+        with self.assertRaises(ValueError) as e:
+            self._DoBinman('ls', '-i', image_fname)
+        self.assertIn("Cannot find FDT map in image", str(e.exception))
+
+    def _RunListCmd(self, paths, expected):
+        """List out entries and check the result
+
+        Args:
+            paths: List of paths to pass to the list command
+            expected: Expected list of filenames to be returned, in order
+        """
+        self._CheckLz4()
+        self._DoReadFileRealDtb('130_list_fdtmap.dts')
+        image_fname = tools.GetOutputFilename('image.bin')
+        image = Image.FromFile(image_fname)
+        lines = image.GetListEntries(paths)[1]
+        files = [line[0].strip() for line in lines[1:]]
+        self.assertEqual(expected, files)
+
+    def testListCmdSection(self):
+        """Test listing the files in a section"""
+        self._RunListCmd(['section'],
+            ['section', 'cbfs', 'u-boot', 'u-boot-dtb', 'u-boot-dtb'])
+
+    def testListCmdFile(self):
+        """Test listing a particular file"""
+        self._RunListCmd(['*u-boot-dtb'], ['u-boot-dtb', 'u-boot-dtb'])
+
+    def testListCmdWildcard(self):
+        """Test listing a wildcarded file"""
+        self._RunListCmd(['*boot*'],
+            ['u-boot', 'u-boot', 'u-boot-dtb', 'u-boot-dtb'])
+
+    def testListCmdWildcardMulti(self):
+        """Test listing a wildcarded file"""
+        self._RunListCmd(['*cb*', '*head*'],
+            ['cbfs', 'u-boot', 'u-boot-dtb', 'image-header'])
+
+    def testListCmdEmpty(self):
+        """Test listing a wildcarded file"""
+        self._RunListCmd(['nothing'], [])
+
+    def testListCmdPath(self):
+        """Test listing the files in a sub-entry of a section"""
+        self._RunListCmd(['section/cbfs'], ['cbfs', 'u-boot', 'u-boot-dtb'])
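
The paths accepted by these list commands are Unix shell-style patterns,
matched with fnmatch rather than regular expressions (see _NameInPaths() in
image.py below):

    import fnmatch

    # Sketch of the matching rule applied to entry paths
    fnmatch.fnmatch('section/u-boot-dtb', '*u-boot-dtb')  # True
    fnmatch.fnmatch('section/cbfs', '*cb*')               # True
    fnmatch.fnmatch('u-boot', 'nothing')                  # False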
+
+    def _RunExtractCmd(self, entry_name, decomp=True):
+        """Extract an entry from an image
+
+        Args:
+            entry_name: Entry name to extract
+            decomp: True to decompress the data if compressed, False to leave
+                it in its raw compressed format
+
+        Returns:
+            data from entry
+        """
+        self._CheckLz4()
+        self._DoReadFileRealDtb('130_list_fdtmap.dts')
+        image_fname = tools.GetOutputFilename('image.bin')
+        return control.ReadEntry(image_fname, entry_name, decomp)
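
control.ReadEntry() is the programmatic twin of the 'binman extract'
command used in testExtractCmd() below; a hedged one-liner, with the image
and entry names illustrative:

    data = control.ReadEntry('image.bin', 'section/cbfs/u-boot', decomp=True)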
+
+    def testExtractSimple(self):
+        """Test extracting a single file"""
+        data = self._RunExtractCmd('u-boot')
+        self.assertEqual(U_BOOT_DATA, data)
+
+    def testExtractSection(self):
+        """Test extracting the files in a section"""
+        data = self._RunExtractCmd('section')
+        cbfs_data = data[:0x400]
+        cbfs = cbfs_util.CbfsReader(cbfs_data)
+        self.assertEqual(['u-boot', 'u-boot-dtb', ''], list(cbfs.files.keys()))
+        dtb_data = data[0x400:]
+        dtb = self._decompress(dtb_data)
+        self.assertEqual(EXTRACT_DTB_SIZE, len(dtb))
+
+    def testExtractCompressed(self):
+        """Test extracting compressed data"""
+        data = self._RunExtractCmd('section/u-boot-dtb')
+        self.assertEqual(EXTRACT_DTB_SIZE, len(data))
+
+    def testExtractRaw(self):
+        """Test extracting compressed data without decompressing it"""
+        data = self._RunExtractCmd('section/u-boot-dtb', decomp=False)
+        dtb = self._decompress(data)
+        self.assertEqual(EXTRACT_DTB_SIZE, len(dtb))
+
+    def testExtractCbfs(self):
+        """Test extracting CBFS data"""
+        data = self._RunExtractCmd('section/cbfs/u-boot')
+        self.assertEqual(U_BOOT_DATA, data)
+
+    def testExtractCbfsCompressed(self):
+        """Test extracting CBFS compressed data"""
+        data = self._RunExtractCmd('section/cbfs/u-boot-dtb')
+        self.assertEqual(EXTRACT_DTB_SIZE, len(data))
+
+    def testExtractCbfsRaw(self):
+        """Test extracting CBFS compressed data without decompressing it"""
+        data = self._RunExtractCmd('section/cbfs/u-boot-dtb', decomp=False)
+        dtb = tools.Decompress(data, 'lzma')
+        self.assertEqual(EXTRACT_DTB_SIZE, len(dtb))
+
+    def testExtractBadEntry(self):
+        """Test extracting a bad section path"""
+        with self.assertRaises(ValueError) as e:
+            self._RunExtractCmd('section/does-not-exist')
+        self.assertIn("Entry 'does-not-exist' not found in '/section'",
+                      str(e.exception))
+
+    def testExtractMissingFile(self):
+        """Test extracting file that does not exist"""
+        with self.assertRaises(IOError) as e:
+            control.ReadEntry('missing-file', 'name')
+
+    def testExtractBadFile(self):
+        """Test extracting an invalid file"""
+        fname = os.path.join(self._indir, 'badfile')
+        tools.WriteFile(fname, b'')
+        with self.assertRaises(ValueError) as e:
+            control.ReadEntry(fname, 'name')
+
+    def testExtractCmd(self):
+        """Test extracting a file fron an image on the command line"""
+        self._CheckLz4()
+        self._DoReadFileRealDtb('130_list_fdtmap.dts')
+        image_fname = tools.GetOutputFilename('image.bin')
+        fname = os.path.join(self._indir, 'output.extract')
+        with test_util.capture_sys_output() as (stdout, stderr):
+            self._DoBinman('extract', '-i', image_fname, 'u-boot', '-f', fname)
+        data = tools.ReadFile(fname)
+        self.assertEqual(U_BOOT_DATA, data)
+
+    def testExtractOneEntry(self):
+        """Test extracting a single entry fron an image """
+        self._CheckLz4()
+        self._DoReadFileRealDtb('130_list_fdtmap.dts')
+        image_fname = tools.GetOutputFilename('image.bin')
+        fname = os.path.join(self._indir, 'output.extract')
+        control.ExtractEntries(image_fname, fname, None, ['u-boot'])
+        data = tools.ReadFile(fname)
+        self.assertEqual(U_BOOT_DATA, data)
+
+    def _CheckExtractOutput(self, decomp):
+        """Helper to test file output with and without decompression
+
+        Args:
+            decomp: True to decompress entry data, False to output it raw
+        """
+        def _CheckPresent(entry_path, expect_data, expect_size=None):
+            """Check and remove expected file
+
+            This checks the data/size of a file and removes the file both from
+            the outfiles set and from the output directory. Once all files are
+            processed, both the set and directory should be empty.
+
+            Args:
+                entry_path: Entry path
+                expect_data: Data to expect in file, or None to skip check
+                expect_size: Size of data to expect in file, or None to skip
+            """
+            path = os.path.join(outdir, entry_path)
+            data = tools.ReadFile(path)
+            os.remove(path)
+            if expect_data:
+                self.assertEqual(expect_data, data)
+            elif expect_size:
+                self.assertEqual(expect_size, len(data))
+            outfiles.remove(path)
+
+        def _CheckDirPresent(name):
+            """Remove expected directory
+
+            This gives an error if the directory does not exist as expected
+
+            Args:
+                name: Name of directory to remove
+            """
+            path = os.path.join(outdir, name)
+            os.rmdir(path)
+
+        self._DoReadFileRealDtb('130_list_fdtmap.dts')
+        image_fname = tools.GetOutputFilename('image.bin')
+        outdir = os.path.join(self._indir, 'extract')
+        einfos = control.ExtractEntries(image_fname, None, outdir, [], decomp)
+
+        # Create a set of all files that were output (should be 9)
+        outfiles = set()
+        for root, dirs, files in os.walk(outdir):
+            outfiles |= set([os.path.join(root, fname) for fname in files])
+        self.assertEqual(9, len(outfiles))
+        self.assertEqual(9, len(einfos))
+
+        image = control.images['image']
+        entries = image.GetEntries()
+
+        # Check the 9 files in various ways
+        section = entries['section']
+        section_entries = section.GetEntries()
+        cbfs_entries = section_entries['cbfs'].GetEntries()
+        _CheckPresent('u-boot', U_BOOT_DATA)
+        _CheckPresent('section/cbfs/u-boot', U_BOOT_DATA)
+        dtb_len = EXTRACT_DTB_SIZE
+        if not decomp:
+            dtb_len = cbfs_entries['u-boot-dtb'].size
+        _CheckPresent('section/cbfs/u-boot-dtb', None, dtb_len)
+        if not decomp:
+            dtb_len = section_entries['u-boot-dtb'].size
+        _CheckPresent('section/u-boot-dtb', None, dtb_len)
+
+        fdtmap = entries['fdtmap']
+        _CheckPresent('fdtmap', fdtmap.data)
+        hdr = entries['image-header']
+        _CheckPresent('image-header', hdr.data)
+
+        _CheckPresent('section/root', section.data)
+        cbfs = section_entries['cbfs']
+        _CheckPresent('section/cbfs/root', cbfs.data)
+        data = tools.ReadFile(image_fname)
+        _CheckPresent('root', data)
+
+        # There should be no files left. Remove all the directories to check.
+        # If there are any files/dirs remaining, one of these checks will fail.
+        self.assertEqual(0, len(outfiles))
+        _CheckDirPresent('section/cbfs')
+        _CheckDirPresent('section')
+        _CheckDirPresent('')
+        self.assertFalse(os.path.exists(outdir))
+
+    def testExtractAllEntries(self):
+        """Test extracting all entries"""
+        self._CheckLz4()
+        self._CheckExtractOutput(decomp=True)
+
+    def testExtractAllEntriesRaw(self):
+        """Test extracting all entries without decompressing them"""
+        self._CheckLz4()
+        self._CheckExtractOutput(decomp=False)
+
+    def testExtractSelectedEntries(self):
+        """Test extracting some entries"""
+        self._CheckLz4()
+        self._DoReadFileRealDtb('130_list_fdtmap.dts')
+        image_fname = tools.GetOutputFilename('image.bin')
+        outdir = os.path.join(self._indir, 'extract')
+        einfos = control.ExtractEntries(image_fname, None, outdir,
+                                        ['*cb*', '*head*'])
+
+        # File output is tested by testExtractAllEntries(), so just check that
+        # the expected entries are selected
+        names = [einfo.name for einfo in einfos]
+        self.assertEqual(names,
+                         ['cbfs', 'u-boot', 'u-boot-dtb', 'image-header'])
+
+    def testExtractNoEntryPaths(self):
+        """Test extracting some entries"""
+        self._CheckLz4()
+        self._DoReadFileRealDtb('130_list_fdtmap.dts')
+        image_fname = tools.GetOutputFilename('image.bin')
+        with self.assertRaises(ValueError) as e:
+            control.ExtractEntries(image_fname, 'fname', None, [])
+        self.assertIn('Must specify an entry path to write with -o',
+                      str(e.exception))
+
+    def testExtractTooManyEntryPaths(self):
+        """Test extracting some entries"""
+        self._CheckLz4()
+        self._DoReadFileRealDtb('130_list_fdtmap.dts')
+        image_fname = tools.GetOutputFilename('image.bin')
+        with self.assertRaises(ValueError) as e:
+            control.ExtractEntries(image_fname, 'fname', None, ['a', 'b'])
+        self.assertIn('Must specify exactly one entry path to write with -o',
+                      str(e.exception))
+
+    def testPackAlignSection(self):
+        """Test that sections can have alignment"""
+        self._DoReadFile('131_pack_align_section.dts')
+
+        self.assertIn('image', control.images)
+        image = control.images['image']
+        entries = image.GetEntries()
+        self.assertEqual(3, len(entries))
+
+        # First u-boot
+        self.assertIn('u-boot', entries)
+        entry = entries['u-boot']
+        self.assertEqual(0, entry.offset)
+        self.assertEqual(0, entry.image_pos)
+        self.assertEqual(len(U_BOOT_DATA), entry.contents_size)
+        self.assertEqual(len(U_BOOT_DATA), entry.size)
+
+        # Section0
+        self.assertIn('section0', entries)
+        section0 = entries['section0']
+        self.assertEqual(0x10, section0.offset)
+        self.assertEqual(0x10, section0.image_pos)
+        self.assertEqual(len(U_BOOT_DATA), section0.size)
+
+        # Second u-boot
+        section_entries = section0.GetEntries()
+        self.assertIn('u-boot', section_entries)
+        entry = section_entries['u-boot']
+        self.assertEqual(0, entry.offset)
+        self.assertEqual(0x10, entry.image_pos)
+        self.assertEqual(len(U_BOOT_DATA), entry.contents_size)
+        self.assertEqual(len(U_BOOT_DATA), entry.size)
+
+        # Section1
+        self.assertIn('section1', entries)
+        section1 = entries['section1']
+        self.assertEqual(0x14, section1.offset)
+        self.assertEqual(0x14, section1.image_pos)
+        self.assertEqual(0x20, section1.size)
+
+        # Second u-boot
+        section_entries = section1.GetEntries()
+        self.assertIn('u-boot', section_entries)
+        entry = section_entries['u-boot']
+        self.assertEqual(0, entry.offset)
+        self.assertEqual(0x14, entry.image_pos)
+        self.assertEqual(len(U_BOOT_DATA), entry.contents_size)
+        self.assertEqual(len(U_BOOT_DATA), entry.size)
+
+        # Section2
+        self.assertIn('section2', section_entries)
+        section2 = section_entries['section2']
+        self.assertEqual(0x4, section2.offset)
+        self.assertEqual(0x18, section2.image_pos)
+        self.assertEqual(4, section2.size)
+
+        # Third u-boot
+        section_entries = section2.GetEntries()
+        self.assertIn('u-boot', section_entries)
+        entry = section_entries['u-boot']
+        self.assertEqual(0, entry.offset)
+        self.assertEqual(0x18, entry.image_pos)
+        self.assertEqual(len(U_BOOT_DATA), entry.contents_size)
+        self.assertEqual(len(U_BOOT_DATA), entry.size)
+
 
 if __name__ == "__main__":
     unittest.main()

+ 232 - 83
tools/binman/image.py

@@ -8,15 +8,21 @@
 from __future__ import print_function
 
 from collections import OrderedDict
+import fnmatch
 from operator import attrgetter
 import re
 import sys
 
+from entry import Entry
+from etype import fdtmap
+from etype import image_header
+from etype import section
+import fdt
 import fdt_util
-import bsection
 import tools
+import tout
 
-class Image:
+class Image(section.Entry_section):
     """A Image, representing an output from binman
 
     An image is comprised of a collection of entries each containing binary
@@ -24,12 +30,8 @@ class Image:
 
     This class implements the various operations needed for images.
 
-    Atrtributes:
-        _node: Node object that contains the image definition in device tree
-        _name: Image name
-        _size: Image size in bytes, or None if not known yet
-        _filename: Output filename for image
-        _sections: Sections present in this image (may be one or more)
+    Attributes:
+        filename: Output filename for image
 
     Args:
         test: True if this is being called from a test of Images. In this case
@@ -37,106 +39,94 @@ class Image:
             we create a section manually.
     """
     def __init__(self, name, node, test=False):
-        self._node = node
-        self._name = name
-        self._size = None
-        self._filename = '%s.bin' % self._name
-        if test:
-            self._section = bsection.Section('main-section', None, self._node,
-                                             self, True)
-        else:
-            self._ReadNode()
-
-    def _ReadNode(self):
-        """Read properties from the image node"""
-        self._size = fdt_util.GetInt(self._node, 'size')
-        filename = fdt_util.GetString(self._node, 'filename')
-        if filename:
-            self._filename = filename
-        self._section = bsection.Section('main-section', None, self._node, self)
-
-    def GetFdtSet(self):
-        """Get the set of device tree files used by this image"""
-        return self._section.GetFdtSet()
-
-    def ExpandEntries(self):
-        """Expand out any entries which have calculated sub-entries
-
-        Some entries are expanded out at runtime, e.g. 'files', which produces
-        a section containing a list of files. Process these entries so that
-        this information is added to the device tree.
-        """
-        self._section.ExpandEntries()
+        self.image = self
+        section.Entry_section.__init__(self, None, 'section', node, test)
+        self.name = 'main-section'
+        self.image_name = name
+        self._filename = '%s.bin' % self.image_name
+        if not test:
+            filename = fdt_util.GetString(self._node, 'filename')
+            if filename:
+                self._filename = filename
 
-    def AddMissingProperties(self):
-        """Add properties that are not present in the device tree
+    @classmethod
+    def FromFile(cls, fname):
+        """Convert an image file into an Image for use in binman
 
-        When binman has completed packing the entries the offset and size of
-        each entry are known. But before this the device tree may not specify
-        these. Add any missing properties, with a dummy value, so that the
-        size of the entry is correct. That way we can insert the correct values
-        later.
-        """
-        self._section.AddMissingProperties()
+        Args:
+            fname: Filename of image file to read
 
-    def ProcessFdt(self, fdt):
-        """Allow entries to adjust the device tree
+        Returns:
+            Image object on success
 
-        Some entries need to adjust the device tree for their purposes. This
-        may involve adding or deleting properties.
+        Raises:
+            ValueError if something goes wrong
         """
-        return self._section.ProcessFdt(fdt)
+        data = tools.ReadFile(fname)
+        size = len(data)
 
-    def GetEntryContents(self):
-        """Call ObtainContents() for the section
-        """
-        self._section.GetEntryContents()
+        # First look for an image header
+        pos = image_header.LocateHeaderOffset(data)
+        if pos is None:
+            # Look for the FDT map
+            pos = fdtmap.LocateFdtmap(data)
+        if pos is None:
+            raise ValueError('Cannot find FDT map in image')
 
-    def GetEntryOffsets(self):
-        """Handle entries that want to set the offset/size of other entries
+        # We don't know the FDT size, so check its header first
+        probe_dtb = fdt.Fdt.FromData(
+            data[pos + fdtmap.FDTMAP_HDR_LEN:pos + 256])
+        dtb_size = probe_dtb.GetFdtObj().totalsize()
+        fdtmap_data = data[pos:pos + dtb_size + fdtmap.FDTMAP_HDR_LEN]
+        dtb = fdt.Fdt.FromData(fdtmap_data[fdtmap.FDTMAP_HDR_LEN:])
+        dtb.Scan()
 
-        This calls each entry's GetOffsets() method. If it returns a list
-        of entries to update, it updates them.
-        """
-        self._section.GetEntryOffsets()
+        # Return an Image with the associated nodes
+        image = Image('image', dtb.GetRoot())
+        image._data = data
+        return image
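
A hedged usage sketch of FromFile(), mirroring testReadImage() and
testList() in ftest.py (the filename is illustrative):

    # Load a previously built image via its embedded fdtmap and walk the
    # entries it describes
    image = Image.FromFile('image.bin')
    for einfo in image.BuildEntryList():
        print('%s%s: %s' % ('  ' * einfo.indent, einfo.name, einfo.etype))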
+
+    def Raise(self, msg):
+        """Convenience function to raise an error referencing an image"""
+        raise ValueError("Image '%s': %s" % (self._node.path, msg))
 
     def PackEntries(self):
         """Pack all entries into the image"""
-        self._section.PackEntries()
-
-    def CheckSize(self):
-        """Check that the image contents does not exceed its size, etc."""
-        self._size = self._section.CheckSize()
-
-    def CheckEntries(self):
-        """Check that entries do not overlap or extend outside the image"""
-        self._section.CheckEntries()
-
-    def SetCalculatedProperties(self):
-        self._section.SetCalculatedProperties()
+        section.Entry_section.Pack(self, 0)
 
     def SetImagePos(self):
-        self._section.SetImagePos(0)
+        # This is the first section in the image, so it starts at 0
+        section.Entry_section.SetImagePos(self, 0)
 
     def ProcessEntryContents(self):
         """Call the ProcessContents() method for each entry
 
         This is intended to adjust the contents as needed by the entry type.
+
+        Returns:
+            True if the new data size is OK, False if expansion is needed
         """
-        self._section.ProcessEntryContents()
+        sizes_ok = True
+        for entry in self._entries.values():
+            if not entry.ProcessContents():
+                sizes_ok = False
+                tout.Debug("Entry '%s' size change" % self._node.path)
+        return sizes_ok
 
     def WriteSymbols(self):
         """Write symbol values into binary files for access at run time"""
-        self._section.WriteSymbols()
+        section.Entry_section.WriteSymbols(self, self)
+
+    def BuildSection(self, fd, base_offset):
+        """Write the section to a file"""
+        fd.seek(base_offset)
+        fd.write(self.GetData())
 
     def BuildImage(self):
         """Write the image to a file"""
         fname = tools.GetOutputFilename(self._filename)
         with open(fname, 'wb') as fd:
-            self._section.BuildSection(fd, 0)
-
-    def GetEntries(self):
-        return self._section.GetEntries()
+            self.BuildSection(fd, 0)
 
     def WriteMap(self):
         """Write a map of the image to a .map file
@@ -144,10 +134,169 @@ class Image:
         Returns:
             Filename of map file written
         """
-        filename = '%s.map' % self._name
+        filename = '%s.map' % self.image_name
         fname = tools.GetOutputFilename(filename)
         with open(fname, 'w') as fd:
             print('%8s  %8s  %8s  %s' % ('ImagePos', 'Offset', 'Size', 'Name'),
                   file=fd)
-            self._section.WriteMap(fd, 0)
+            section.Entry_section.WriteMap(self, fd, 0)
         return fname
+
+    def BuildEntryList(self):
+        """List the files in an image
+
+        Returns:
+            List of entry.EntryInfo objects describing all entries in the image
+        """
+        entries = []
+        self.ListEntries(entries, 0)
+        return entries
+
+    def FindEntryPath(self, entry_path):
+        """Find an entry at a given path in the image
+
+        Args:
+            entry_path: Path to entry (e.g. 'ro-section/u-boot')
+
+        Returns:
+            Entry object corresponding to that path
+
+        Raises:
+            ValueError if no entry found
+        """
+        parts = entry_path.split('/')
+        entries = self.GetEntries()
+        parent = '/'
+        for part in parts:
+            entry = entries.get(part)
+            if not entry:
+                raise ValueError("Entry '%s' not found in '%s'" %
+                                 (part, parent))
+            parent = entry.GetPath()
+            entries = entry.GetEntries()
+        return entry
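
FindEntryPath() consumes one '/'-separated component at a time, so nested
entries are addressed much like filesystem paths; for example (path
illustrative):

    entry = image.FindEntryPath('section/cbfs/u-boot')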
+
+    def ReadData(self, decomp=True):
+        return self._data
+
+    def GetListEntries(self, entry_paths):
+        """List the entries in an image
+
+        This decodes the supplied image and returns a list of entries from that
+        image, preceded by a header.
+
+        Args:
+            entry_paths: List of paths to match (each can have wildcards). Only
+                entries whose names match one of these paths will be printed
+
+        Returns:
+            String error message if something went wrong, otherwise
+            3-Tuple:
+                List of EntryInfo objects
+                List of lines, each
+                    List of text columns, each a string
+                List of widths of each column
+        """
+        def _EntryToStrings(entry):
+            """Convert an entry to a list of strings, one for each column
+
+            Args:
+                entry: EntryInfo object containing information to output
+
+            Returns:
+                List of strings, one for each field in entry
+            """
+            def _AppendHex(val):
+                """Append a hex value, or an empty string if val is None
+
+                Args:
+                    val: Integer value, or None if none
+                """
+                args.append('' if val is None else '>%x' % val)
+
+            args = ['  ' * entry.indent + entry.name]
+            _AppendHex(entry.image_pos)
+            _AppendHex(entry.size)
+            args.append(entry.etype)
+            _AppendHex(entry.offset)
+            _AppendHex(entry.uncomp_size)
+            return args
+
+        def _DoLine(lines, line):
+            """Add a line to the output list
+
+            This adds a line (a list of columns) to the output list. It also updates
+            the widths[] array with the maximum width of each column
+
+            Args:
+                lines: List of lines to add to
+                line: List of strings, one for each column
+            """
+            for i, item in enumerate(line):
+                widths[i] = max(widths[i], len(item))
+            lines.append(line)
+
+        def _NameInPaths(fname, entry_paths):
+            """Check if a filename is in a list of wildcarded paths
+
+            Args:
+                fname: Filename to check
+                entry_paths: List of wildcarded paths (e.g. ['*dtb*', 'u-boot*',
+                                                             'section/u-boot'])
+
+            Returns:
+                True if any wildcard matches the filename (using Unix filename
+                    pattern matching, not regular expressions)
+                False if not
+            """
+            for path in entry_paths:
+                if fnmatch.fnmatch(fname, path):
+                    return True
+            return False
+
+        entries = self.BuildEntryList()
+
+        # This is our list of lines. Each item in the list is a list of strings, one
+        # for each column
+        lines = []
+        HEADER = ['Name', 'Image-pos', 'Size', 'Entry-type', 'Offset',
+                  'Uncomp-size']
+        num_columns = len(HEADER)
+
+        # This records the width of each column, calculated as the maximum width of
+        # all the strings in that column
+        widths = [0] * num_columns
+        _DoLine(lines, HEADER)
+
+        # We won't print anything unless it has at least this indent. So at the
+        # start we will print nothing, unless a path matches (or there are no
+        # entry paths)
+        MAX_INDENT = 100
+        min_indent = MAX_INDENT
+        path_stack = []
+        path = ''
+        indent = 0
+        selected_entries = []
+        for entry in entries:
+            if entry.indent > indent:
+                path_stack.append(path)
+            elif entry.indent < indent:
+                path_stack.pop()
+            if path_stack:
+                path = path_stack[-1] + '/' + entry.name
+            indent = entry.indent
+
+            # If there are entry paths to match and we are not looking at a
+            # sub-entry of a previously matched entry, we need to check the path
+            if entry_paths and indent <= min_indent:
+                if _NameInPaths(path[1:], entry_paths):
+                    # Print this entry and all sub-entries (=higher indent)
+                    min_indent = indent
+                else:
+                    # Don't print this entry, nor any following entries until we get
+                    # a path match
+                    min_indent = MAX_INDENT
+                    continue
+            _DoLine(lines, _EntryToStrings(entry))
+            selected_entries.append(entry)
+        return selected_entries, lines, widths
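
The selection above uses Unix shell wildcards (fnmatch), not regular expressions. A standalone sketch of the same matching rule, with made-up entry names:

    import fnmatch

    # Same rule as _NameInPaths(): shell-style patterns, any match selects
    entry_paths = ['*dtb*', 'section/u-boot']
    for name in ['u-boot', 'u-boot-dtb', 'section/u-boot', 'section/cbfs']:
        ok = any(fnmatch.fnmatch(name, path) for path in entry_paths)
        print('%-16s %s' % (name, 'match' if ok else 'no match'))
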

+ 7 - 11
tools/binman/image_test.py

@@ -12,28 +12,25 @@ from test_util import capture_sys_output
 class TestImage(unittest.TestCase):
     def testInvalidFormat(self):
         image = Image('name', 'node', test=True)
-        section = image._section
         with self.assertRaises(ValueError) as e:
-            section.LookupSymbol('_binman_something_prop_', False, 'msg')
+            image.LookupSymbol('_binman_something_prop_', False, 'msg')
         self.assertIn(
             "msg: Symbol '_binman_something_prop_' has invalid format",
             str(e.exception))
 
     def testMissingSymbol(self):
         image = Image('name', 'node', test=True)
-        section = image._section
-        section._entries = {}
+        image._entries = {}
         with self.assertRaises(ValueError) as e:
-            section.LookupSymbol('_binman_type_prop_pname', False, 'msg')
+            image.LookupSymbol('_binman_type_prop_pname', False, 'msg')
         self.assertIn("msg: Entry 'type' not found in list ()",
                       str(e.exception))
 
     def testMissingSymbolOptional(self):
         image = Image('name', 'node', test=True)
-        section = image._section
-        section._entries = {}
+        image._entries = {}
         with capture_sys_output() as (stdout, stderr):
-            val = section.LookupSymbol('_binman_type_prop_pname', True, 'msg')
+            val = image.LookupSymbol('_binman_type_prop_pname', True, 'msg')
         self.assertEqual(val, None)
         self.assertEqual("Warning: msg: Entry 'type' not found in list ()\n",
                          stderr.getvalue())
@@ -41,8 +38,7 @@ class TestImage(unittest.TestCase):
 
     def testBadProperty(self):
         image = Image('name', 'node', test=True)
-        section = image._section
-        section._entries = {'u-boot': 1}
+        image._entries = {'u-boot': 1}
         with self.assertRaises(ValueError) as e:
-            section.LookupSymbol('_binman_u_boot_prop_bad', False, 'msg')
+            image.LookupSymbol('_binman_u_boot_prop_bad', False, 'msg')
         self.assertIn("msg: No such property 'bad", str(e.exception))

+ 25 - 1
tools/binman/state.py

@@ -31,6 +31,11 @@ fdt_subset = set()
 # The DTB which contains the full image information
 main_dtb = None
 
+# Allow entries to expand after they have been packed. This is detected and
+# forces a re-pack. If not allowed, any attempted expansion causes an error in
+# Entry.ProcessContentsUpdate()
+allow_entry_expansion = True
+
 def GetFdt(fname):
     """Get the Fdt object for a particular device-tree filename
 
@@ -59,7 +64,7 @@ def GetFdtPath(fname):
     """
     return fdt_files[fname]._fname
 
-def GetFdtContents(fname):
+def GetFdtContents(fname='u-boot.dtb'):
     """Looks up the FDT pathname and contents
 
     This is used to obtain the Fdt pathname and contents when needed by an
@@ -250,3 +255,22 @@ def CheckSetHashValue(node, get_data_func):
             data = m.digest()
         for n in GetUpdateNodes(hash_node):
             n.SetData('value', data)
+
+def SetAllowEntryExpansion(allow):
+    """Set whether post-pack expansion of entries is allowed
+
+    Args:
+       allow: True to allow expansion, False to raise an exception
+    """
+    global allow_entry_expansion
+
+    allow_entry_expansion = allow
+
+def AllowEntryExpansion():
+    """Check whether post-pack expansion of entries is allowed
+
+    Returns:
+        True if expansion should be allowed, False if an exception should be
+            raised
+    """
+    return allow_entry_expansion
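
A sketch of how a packing loop might consult this flag; the Entry objects and their methods are hypothetical, but the control flow matches the comment above (expansion after packing either forces a re-pack or is an error):

    import state   # the module above

    def process_entries(entries):
        """Hypothetical repack loop; the entry API here is illustrative"""
        for entry in entries:
            new_size = entry.ProcessContents()      # may grow the entry
            if new_size > entry.size:
                if not state.AllowEntryExpansion():
                    raise ValueError(
                        "Entry '%s' grew from %#x to %#x after packing" %
                        (entry.name, entry.size, new_size))
                return False     # caller should re-pack and try again
        return True
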

+ 5 - 0
tools/binman/test/066_text.dts

@@ -24,5 +24,10 @@
 			text-label = "test-id4";
 			test-id4 = "some text";
 		};
+		/* Put text directly in the node */
+		text5 {
+			type = "text";
+			text = "more text";
+		};
 	};
 };

+ 2 - 0
tools/binman/test/096_elf.dts

@@ -10,5 +10,7 @@
 		};
 		u-boot-spl-elf {
 		};
+		u-boot-tpl-elf {
+		};
 	};
 };

+ 20 - 0
tools/binman/test/102_cbfs_raw.dts

@@ -0,0 +1,20 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/dts-v1/;
+
+/ {
+	#address-cells = <1>;
+	#size-cells = <1>;
+
+	binman {
+		cbfs {
+			size = <0xb0>;
+			u-boot {
+				cbfs-type = "raw";
+			};
+			u-boot-dtb {
+				cbfs-type = "raw";
+			};
+		};
+	};
+};
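
The cbfs node above produces a coreboot filesystem, in which each member starts with a big-endian file header whose magic is 'LARCHIVE'. A minimal scanner for such an image, assuming the classic 24-byte header layout (magic, len, type, attributes offset, data offset); the filename below is illustrative:

    import struct

    FILE_MAGIC = b'LARCHIVE'
    FILE_HDR = struct.Struct('>8sIIII')  # magic, len, type, attr, offset

    def list_cbfs(data):
        """Yield (name, type, size) for each file found in raw CBFS data"""
        pos = 0
        while True:
            pos = data.find(FILE_MAGIC, pos)
            if pos < 0:
                break
            _, size, ftype, _, offset = FILE_HDR.unpack_from(data, pos)
            name = data[pos + FILE_HDR.size:pos + offset].split(b'\0')[0]
            yield name.decode(), ftype, size
            pos += offset + size

    with open('image.cbfs', 'rb') as f:
        for name, ftype, size in list_cbfs(f.read()):
            print('%-16s type=%#x size=%#x' % (name, ftype, size))
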

+ 21 - 0
tools/binman/test/103_cbfs_raw_ppc.dts

@@ -0,0 +1,21 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/dts-v1/;
+
+/ {
+	#address-cells = <1>;
+	#size-cells = <1>;
+
+	binman {
+		cbfs {
+			size = <0x100>;
+			cbfs-arch = "ppc64";
+			u-boot {
+				cbfs-type = "raw";
+			};
+			u-boot-dtb {
+				cbfs-type = "raw";
+			};
+		};
+	};
+};

+ 19 - 0
tools/binman/test/104_cbfs_stage.dts

@@ -0,0 +1,19 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/dts-v1/;
+
+/ {
+	#address-cells = <1>;
+	#size-cells = <1>;
+
+	binman {
+		cbfs {
+			size = <0xb0>;
+			u-boot {
+				type = "blob";
+				filename = "cbfs-stage.elf";
+				cbfs-type = "stage";
+			};
+		};
+	};
+};

+ 26 - 0
tools/binman/test/105_cbfs_raw_compress.dts

@@ -0,0 +1,26 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/dts-v1/;
+
+/ {
+	#address-cells = <1>;
+	#size-cells = <1>;
+
+	binman {
+		cbfs {
+			size = <0x140>;
+			u-boot {
+				type = "text";
+				text = "compress xxxxxxxxxxxxxxxxxxxxxx data";
+				cbfs-type = "raw";
+				cbfs-compress = "lz4";
+			};
+			u-boot-dtb {
+				type = "text";
+				text = "compress xxxxxxxxxxxxxxxxxxxxxx data";
+				cbfs-type = "raw";
+				cbfs-compress = "lzma";
+			};
+		};
+	};
+};

+ 15 - 0
tools/binman/test/106_cbfs_bad_arch.dts

@@ -0,0 +1,15 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/dts-v1/;
+
+/ {
+	#address-cells = <1>;
+	#size-cells = <1>;
+
+	binman {
+		cbfs {
+			size = <0x100>;
+			cbfs-arch = "bad-arch";
+		};
+	};
+};

+ 13 - 0
tools/binman/test/107_cbfs_no_size.dts

@@ -0,0 +1,13 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/dts-v1/;
+
+/ {
+	#address-cells = <1>;
+	#size-cells = <1>;
+
+	binman {
+		cbfs {
+		};
+	};
+};

+ 17 - 0
tools/binman/test/108_cbfs_no_contents.dts

@@ -0,0 +1,17 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/dts-v1/;
+
+/ {
+	#address-cells = <1>;
+	#size-cells = <1>;
+
+	binman {
+		cbfs {
+			size = <0x100>;
+			_testing {
+				return-unknown-contents;
+			};
+		};
+	};
+};

+ 18 - 0
tools/binman/test/109_cbfs_bad_compress.dts

@@ -0,0 +1,18 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/dts-v1/;
+
+/ {
+	#address-cells = <1>;
+	#size-cells = <1>;
+
+	binman {
+		cbfs {
+			size = <0xb0>;
+			u-boot {
+				cbfs-type = "raw";
+				cbfs-compress = "invalid-algo";
+			};
+		};
+	};
+};

+ 24 - 0
tools/binman/test/110_cbfs_name.dts

@@ -0,0 +1,24 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/dts-v1/;
+
+/ {
+	#address-cells = <1>;
+	#size-cells = <1>;
+
+	binman {
+		cbfs {
+			size = <0x100>;
+			u-boot {
+				cbfs-name = "FRED";
+				cbfs-type = "raw";
+			};
+
+			hello {
+				type = "blob";
+				filename = "u-boot.dtb";
+				cbfs-type = "raw";
+			};
+		};
+	};
+};

+ 29 - 0
tools/binman/test/111_x86-rom-ifwi.dts

@@ -0,0 +1,29 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/dts-v1/;
+
+/ {
+	#address-cells = <1>;
+	#size-cells = <1>;
+
+	binman {
+		sort-by-offset;
+		end-at-4gb;
+		size = <0x800000>;
+		intel-descriptor {
+			filename = "descriptor.bin";
+		};
+
+		intel-ifwi {
+			offset-unset;
+			filename = "fitimage.bin";
+			convert-fit;
+
+			u-boot-tpl {
+				replace;
+				ifwi-subpart = "IBBP";
+				ifwi-entry = "IBBL";
+			};
+		};
+	};
+};

+ 28 - 0
tools/binman/test/112_x86-rom-ifwi-nodesc.dts

@@ -0,0 +1,28 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/dts-v1/;
+
+/ {
+	#address-cells = <1>;
+	#size-cells = <1>;
+
+	binman {
+		sort-by-offset;
+		end-at-4gb;
+		size = <0x800000>;
+		intel-descriptor {
+			filename = "descriptor.bin";
+		};
+
+		intel-ifwi {
+			offset-unset;
+			filename = "ifwi.bin";
+
+			u-boot-tpl {
+				replace;
+				ifwi-subpart = "IBBP";
+				ifwi-entry = "IBBL";
+			};
+		};
+	};
+};

+ 29 - 0
tools/binman/test/113_x86-rom-ifwi-nodata.dts

@@ -0,0 +1,29 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/dts-v1/;
+
+/ {
+	#address-cells = <1>;
+	#size-cells = <1>;
+
+	binman {
+		sort-by-offset;
+		end-at-4gb;
+		size = <0x800000>;
+		intel-descriptor {
+			filename = "descriptor.bin";
+		};
+
+		intel-ifwi {
+			offset-unset;
+			filename = "ifwi.bin";
+
+			_testing {
+				return-unknown-contents;
+				replace;
+				ifwi-subpart = "IBBP";
+				ifwi-entry = "IBBL";
+			};
+		};
+	};
+};

+ 26 - 0
tools/binman/test/114_cbfs_offset.dts

@@ -0,0 +1,26 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/dts-v1/;
+
+/ {
+	#address-cells = <1>;
+	#size-cells = <1>;
+
+	binman {
+		sort-by-offset;
+		end-at-4gb;
+		size = <0x200>;
+		cbfs {
+			size = <0x200>;
+			offset = <0xfffffe00>;
+			u-boot {
+				cbfs-offset = <0x40>;
+				cbfs-type = "raw";
+			};
+			u-boot-dtb {
+				cbfs-offset = <0x140>;
+				cbfs-type = "raw";
+			};
+		};
+	};
+};

+ 13 - 0
tools/binman/test/115_fdtmap.dts

@@ -0,0 +1,13 @@
+/dts-v1/;
+
+/ {
+	#address-cells = <1>;
+	#size-cells = <1>;
+
+	binman {
+		u-boot {
+		};
+		fdtmap {
+		};
+	};
+};

+ 17 - 0
tools/binman/test/116_fdtmap_hdr.dts

@@ -0,0 +1,17 @@
+/dts-v1/;
+
+/ {
+	#address-cells = <1>;
+	#size-cells = <1>;
+
+	binman {
+		size = <0x400>;
+		u-boot {
+		};
+		fdtmap {
+		};
+		image-header {
+			location = "end";
+		};
+	};
+};
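
With location = "end", the image-header entry becomes an 8-byte trailer that points back at the fdtmap. A sketch that reads it, assuming the format is a 'BinM' magic followed by a 32-bit little-endian value (how that value is interpreted -- absolute or header-relative -- is left to the image-header etype; the filename is illustrative):

    import struct

    def read_image_header(image_data):
        """Return the fdtmap locator from an end-of-image binman header"""
        magic, value = struct.unpack_from('<4sI', image_data,
                                          len(image_data) - 8)
        if magic != b'BinM':
            raise ValueError('No binman image header found')
        return value

    with open('image.bin', 'rb') as f:
        print('fdtmap locator: %#x' % read_image_header(f.read()))
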

+ 19 - 0
tools/binman/test/117_fdtmap_hdr_start.dts

@@ -0,0 +1,19 @@
+/dts-v1/;
+
+/ {
+	#address-cells = <1>;
+	#size-cells = <1>;
+
+	binman {
+		size = <0x400>;
+		sort-by-offset;
+		u-boot {
+			offset = <0x100>;
+		};
+		fdtmap {
+		};
+		image-header {
+			location = "start";
+		};
+	};
+};

+ 19 - 0
tools/binman/test/118_fdtmap_hdr_pos.dts

@@ -0,0 +1,19 @@
+/dts-v1/;
+
+/ {
+	#address-cells = <1>;
+	#size-cells = <1>;
+
+	binman {
+		size = <0x400>;
+		sort-by-offset;
+		u-boot {
+			offset = <0x100>;
+		};
+		fdtmap {
+		};
+		image-header {
+			offset = <0x80>;
+		};
+	};
+};

+ 16 - 0
tools/binman/test/119_fdtmap_hdr_missing.dts

@@ -0,0 +1,16 @@
+/dts-v1/;
+
+/ {
+	#address-cells = <1>;
+	#size-cells = <1>;
+
+	binman {
+		sort-by-offset;
+		u-boot {
+		};
+		image-header {
+			offset = <0x80>;
+			location = "start";
+		};
+	};
+};

+ 16 - 0
tools/binman/test/120_hdr_no_location.dts

@@ -0,0 +1,16 @@
+/dts-v1/;
+
+/ {
+	#address-cells = <1>;
+	#size-cells = <1>;
+
+	binman {
+		sort-by-offset;
+		u-boot {
+		};
+		fdtmap {
+		};
+		image-header {
+		};
+	};
+};

+ 20 - 0
tools/binman/test/121_entry_expand.dts

@@ -0,0 +1,20 @@
+/dts-v1/;
+
+/ {
+	#address-cells = <1>;
+	#size-cells = <1>;
+
+	binman {
+		_testing {
+			bad-update-contents;
+		};
+
+		u-boot {
+		};
+
+		_testing2 {
+			type = "_testing";
+			bad-update-contents;
+		};
+	};
+};

+ 21 - 0
tools/binman/test/122_entry_expand_twice.dts

@@ -0,0 +1,21 @@
+/dts-v1/;
+
+/ {
+	#address-cells = <1>;
+	#size-cells = <1>;
+
+	binman {
+		_testing {
+			bad-update-contents;
+			bad-update-contents-twice;
+		};
+
+		u-boot {
+		};
+
+		_testing2 {
+			type = "_testing";
+			bad-update-contents;
+		};
+	};
+};

+ 22 - 0
tools/binman/test/123_entry_expand_section.dts

@@ -0,0 +1,22 @@
+/dts-v1/;
+
+/ {
+	#address-cells = <1>;
+	#size-cells = <1>;
+
+	binman {
+		_testing {
+			bad-update-contents;
+		};
+
+		u-boot {
+		};
+
+		section {
+			_testing2 {
+				type = "_testing";
+				bad-update-contents;
+			};
+		};
+	};
+};

+ 14 - 0
tools/binman/test/124_compress_dtb.dts

@@ -0,0 +1,14 @@
+/dts-v1/;
+
+/ {
+	#address-cells = <1>;
+	#size-cells = <1>;
+
+	binman {
+		u-boot {
+		};
+		u-boot-dtb {
+			compress = "lz4";
+		};
+	};
+};

+ 21 - 0
tools/binman/test/125_cbfs_update.dts

@@ -0,0 +1,21 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/dts-v1/;
+
+/ {
+	#address-cells = <1>;
+	#size-cells = <1>;
+
+	binman {
+		cbfs {
+			size = <0x100>;
+			u-boot {
+				cbfs-type = "raw";
+				cbfs-compress = "lz4";
+			};
+			u-boot-dtb {
+				cbfs-type = "raw";
+			};
+		};
+	};
+};

+ 17 - 0
tools/binman/test/126_cbfs_bad_type.dts

@@ -0,0 +1,17 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/dts-v1/;
+
+/ {
+	#address-cells = <1>;
+	#size-cells = <1>;
+
+	binman {
+		cbfs {
+			size = <0x100>;
+			u-boot {
+				cbfs-type = "badtype";
+			};
+		};
+	};
+};

+ 33 - 0
tools/binman/test/127_list.dts

@@ -0,0 +1,33 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/dts-v1/;
+
+/ {
+	#address-cells = <1>;
+	#size-cells = <1>;
+
+	binman {
+		u-boot {
+		};
+		section {
+			align = <0x100>;
+			cbfs {
+				size = <0x400>;
+				u-boot {
+					cbfs-type = "raw";
+					cbfs-offset = <0x38>;
+				};
+				u-boot-dtb {
+					type = "text";
+					text = "compress xxxxxxxxxxxxxxxxxxxxxx data";
+					cbfs-type = "raw";
+					cbfs-compress = "lzma";
+					cbfs-offset = <0x78>;
+				};
+			};
+			u-boot-dtb {
+				compress = "lz4";
+			};
+		};
+	};
+};

+ 36 - 0
tools/binman/test/128_decode_image.dts

@@ -0,0 +1,36 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/dts-v1/;
+
+/ {
+	#address-cells = <1>;
+	#size-cells = <1>;
+
+	binman {
+		size = <0xc00>;
+		u-boot {
+		};
+		section {
+			align = <0x100>;
+			cbfs {
+				size = <0x400>;
+				u-boot {
+					cbfs-type = "raw";
+				};
+				u-boot-dtb {
+					cbfs-type = "raw";
+					cbfs-compress = "lzma";
+					cbfs-offset = <0x80>;
+				};
+			};
+			u-boot-dtb {
+				compress = "lz4";
+			};
+		};
+		fdtmap {
+		};
+		image-header {
+			location = "end";
+		};
+	};
+};

+ 33 - 0
tools/binman/test/129_decode_image_nohdr.dts

@@ -0,0 +1,33 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/dts-v1/;
+
+/ {
+	#address-cells = <1>;
+	#size-cells = <1>;
+
+	binman {
+		size = <0xc00>;
+		u-boot {
+		};
+		section {
+			align = <0x100>;
+			cbfs {
+				size = <0x400>;
+				u-boot {
+					cbfs-type = "raw";
+				};
+				u-boot-dtb {
+					cbfs-type = "raw";
+					cbfs-compress = "lzma";
+					cbfs-offset = <0x80>;
+				};
+			};
+			u-boot-dtb {
+				compress = "lz4";
+			};
+		};
+		fdtmap {
+		};
+	};
+};

+ 36 - 0
tools/binman/test/130_list_fdtmap.dts

@@ -0,0 +1,36 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/dts-v1/;
+
+/ {
+	#address-cells = <1>;
+	#size-cells = <1>;
+
+	binman {
+		size = <0xc00>;
+		u-boot {
+		};
+		section {
+			align = <0x100>;
+			cbfs {
+				size = <0x400>;
+				u-boot {
+					cbfs-type = "raw";
+				};
+				u-boot-dtb {
+					cbfs-type = "raw";
+					cbfs-compress = "lzma";
+					cbfs-offset = <0x80>;
+				};
+			};
+			u-boot-dtb {
+				compress = "lz4";
+			};
+		};
+		fdtmap {
+		};
+		image-header {
+			location = "end";
+		};
+	};
+};

+ 28 - 0
tools/binman/test/131_pack_align_section.dts

@@ -0,0 +1,28 @@
+/dts-v1/;
+
+/ {
+	#address-cells = <1>;
+	#size-cells = <1>;
+
+	binman {
+		u-boot {
+		};
+		section0 {
+			type = "section";
+			align = <0x10>;
+			u-boot {
+			};
+		};
+		section1 {
+			type = "section";
+			align-size = <0x20>;
+			u-boot {
+			};
+			section2 {
+				type = "section";
+				u-boot {
+				};
+			};
+		};
+	};
+};

BIN
tools/binman/test/fitimage.bin.gz


BIN
tools/binman/test/ifwi.bin.gz


+ 2 - 2
tools/buildman/README

@@ -137,7 +137,7 @@ the '&' operator to limit the selection:
 
 You can also use -x to specifically exclude some boards. For example:
 
- buildmand arm -x nvidia,freescale,.*ball$
+  buildman arm -x nvidia,freescale,.*ball$
 
 means to build all arm boards except nvidia, freescale and anything ending
 with 'ball'.
@@ -146,7 +146,7 @@ For building specific boards you can use the --boards option, which takes a
 comma-separated list of board target names and be used multiple times on
 the command line:
 
-   buidman --boards sandbox,snow --boards
+  buildman --boards sandbox,snow --boards
 
 It is convenient to use the -n option to see what will be built based on
 the subset given. Use -v as well to get an actual list of boards.

+ 2304 - 0
tools/ifwitool.c

@@ -0,0 +1,2304 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * ifwitool, CLI utility for Integrated Firmware Image (IFWI) manipulation
+ *
+ * This is taken from the Coreboot project
+ */
+
+#include <assert.h>
+#include <stdbool.h>
+#include <getopt.h>
+#include "os_support.h"
+
+#define __packed		__attribute__((packed))
+#define KiB			1024
+#define ALIGN(x, a)		__ALIGN_MASK((x), (typeof(x))(a) - 1)
+#define __ALIGN_MASK(x, mask)	(((x) + (mask)) & ~(mask))
+#define ARRAY_SIZE(x)		(sizeof(x) / sizeof((x)[0]))
+
+/*
+ * min()/max()/clamp() macros that also do
+ * strict type-checking.. See the
+ * "unnecessary" pointer comparison.
+ */
+#define min(x, y) ({				\
+	typeof(x) _min1 = (x);			\
+	typeof(y) _min2 = (y);			\
+	(void)(&_min1 == &_min2);		\
+	_min1 < _min2 ? _min1 : _min2; })
+
+#define max(x, y) ({				\
+	typeof(x) _max1 = (x);			\
+	typeof(y) _max2 = (y);			\
+	(void)(&_max1 == &_max2);		\
+	_max1 > _max2 ? _max1 : _max2; })
+
+static int verbose = 1;
+
+/* Buffer and file I/O */
+struct buffer {
+	char *name;
+	char *data;
+	size_t offset;
+	size_t size;
+};
+
+#define ERROR(...) { fprintf(stderr, "E: " __VA_ARGS__); }
+#define INFO(...) { if (verbose > 0) fprintf(stderr, "INFO: " __VA_ARGS__); }
+#define DEBUG(...) { if (verbose > 1) fprintf(stderr, "DEBUG: " __VA_ARGS__); }
+
+/*
+ * BPDT is Boot Partition Descriptor Table. It is located at the start of a
+ * logical boot partition(LBP). It stores information about the critical
+ * sub-partitions present within the LBP.
+ *
+ * S-BPDT is Secondary Boot Partition Descriptor Table. It is located after the
+ * critical sub-partitions and contains information about the non-critical
+ * sub-partitions present within the LBP.
+ *
+ * Both tables are identified by BPDT_SIGNATURE stored at the start of the
+ * table.
+ */
+#define BPDT_SIGNATURE				(0x000055AA)
+
+/* Parameters passed in by caller */
+static struct param {
+	const char *file_name;
+	const char *subpart_name;
+	const char *image_name;
+	bool dir_ops;
+	const char *dentry_name;
+} param;
+
+struct bpdt_header {
+	/*
+	 * This is used to identify start of BPDT. It should always be
+	 * BPDT_SIGNATURE.
+	 */
+	uint32_t signature;
+	/* Count of BPDT entries present */
+	uint16_t descriptor_count;
+	/* Version - Currently supported = 1 */
+	uint16_t bpdt_version;
+	/* Unused - Should be 0 */
+	uint32_t xor_redundant_block;
+	/* Version of IFWI build */
+	uint32_t ifwi_version;
+	/* Version of FIT tool used to create IFWI */
+	uint64_t fit_tool_version;
+} __packed;
+#define BPDT_HEADER_SIZE			(sizeof(struct bpdt_header))
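
For reference, a Python sketch of the parsing entry point used later in this file: scan for BPDT_SIGNATURE at 4KiB intervals (as ifwi_parse() does), then unpack the 24-byte header with the field layout of struct bpdt_header, all little-endian:

    import struct

    BPDT_SIGNATURE = 0x000055AA
    # signature, descriptor_count, bpdt_version, xor_redundant_block,
    # ifwi_version, fit_tool_version -- mirrors struct bpdt_header
    BPDT_HEADER = struct.Struct('<IHHIIQ')

    def find_bpdt(data):
        """Return (offset, header fields) of the first BPDT in the image"""
        for offset in range(0, len(data), 4 * 1024):
            if struct.unpack_from('<I', data, offset)[0] == BPDT_SIGNATURE:
                hdr = BPDT_HEADER.unpack_from(data, offset)
                if hdr[2] == 1:        # bpdt_version: only v1 is supported
                    return offset, hdr
        raise ValueError('Image does not contain a BPDT')
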
+
+struct bpdt_entry {
+	/* Type of sub-partition */
+	uint16_t type;
+	/* Attributes of sub-partition */
+	uint16_t flags;
+	/* Offset of sub-partition from beginning of LBP */
+	uint32_t offset;
+	/* Size in bytes of sub-partition */
+	uint32_t size;
+} __packed;
+#define BPDT_ENTRY_SIZE			(sizeof(struct bpdt_entry))
+
+struct bpdt {
+	struct bpdt_header h;
+	/* In practice, this could be an array of 0 to n entries */
+	struct bpdt_entry e[0];
+} __packed;
+
+static inline size_t get_bpdt_size(struct bpdt_header *h)
+{
+	return (sizeof(*h) + BPDT_ENTRY_SIZE * h->descriptor_count);
+}
+
+/* Minimum size in bytes allocated to BPDT in IFWI */
+#define BPDT_MIN_SIZE			((size_t)512)
+
+/* Header to define directory header for sub-partition */
+struct subpart_dir_header {
+	/* Should be SUBPART_DIR_MARKER */
+	uint32_t marker;
+	/* Number of directory entries in the sub-partition */
+	uint32_t num_entries;
+	/* Currently supported - 1 */
+	uint8_t header_version;
+	/* Currently supported - 1 */
+	uint8_t entry_version;
+	/* Length of directory header in bytes */
+	uint8_t header_length;
+	/*
+	 * 2s complement of 8-bit sum from first byte of header to last byte of
+	 * last directory entry.
+	 */
+	uint8_t checksum;
+	/* ASCII short name of sub-partition */
+	uint8_t name[4];
+} __packed;
+#define SUBPART_DIR_HEADER_SIZE			\
+					(sizeof(struct subpart_dir_header))
+#define SUBPART_DIR_MARKER				0x44504324
+#define SUBPART_DIR_HEADER_VERSION_SUPPORTED	1
+#define SUBPART_DIR_ENTRY_VERSION_SUPPORTED	1
+
+/* Structure for each directory entry for sub-partition */
+struct subpart_dir_entry {
+	/* Name of directory entry - Not guaranteed to be NULL-terminated */
+	uint8_t name[12];
+	/* Offset of entry from beginning of sub-partition */
+	uint32_t offset;
+	/* Length in bytes of sub-directory entry */
+	uint32_t length;
+	/* Must be zero */
+	uint32_t rsvd;
+} __packed;
+#define SUBPART_DIR_ENTRY_SIZE			\
+					(sizeof(struct subpart_dir_entry))
+
+struct subpart_dir {
+	struct subpart_dir_header h;
+	/* In practice, this could be an array of 0 to n entries */
+	struct subpart_dir_entry e[0];
+} __packed;
+
+static inline size_t subpart_dir_size(struct subpart_dir_header *h)
+{
+	return (sizeof(*h) + SUBPART_DIR_ENTRY_SIZE * h->num_entries);
+}
+
+struct manifest_header {
+	uint32_t header_type;
+	uint32_t header_length;
+	uint32_t header_version;
+	uint32_t flags;
+	uint32_t vendor;
+	uint32_t date;
+	uint32_t size;
+	uint32_t id;
+	uint32_t rsvd;
+	uint64_t version;
+	uint32_t svn;
+	uint64_t rsvd1;
+	uint8_t rsvd2[64];
+	uint32_t modulus_size;
+	uint32_t exponent_size;
+	uint8_t public_key[256];
+	uint32_t exponent;
+	uint8_t signature[256];
+} __packed;
+
+#define DWORD_SIZE				4
+#define MANIFEST_HDR_SIZE			(sizeof(struct manifest_header))
+#define MANIFEST_ID_MAGIC			(0x324e4d24)
+
+struct module {
+	uint8_t name[12];
+	uint8_t type;
+	uint8_t hash_alg;
+	uint16_t hash_size;
+	uint32_t metadata_size;
+	uint8_t metadata_hash[32];
+} __packed;
+
+#define MODULE_SIZE				(sizeof(struct module))
+
+struct signed_pkg_info_ext {
+	uint32_t ext_type;
+	uint32_t ext_length;
+	uint8_t name[4];
+	uint32_t vcn;
+	uint8_t bitmap[16];
+	uint32_t svn;
+	uint8_t rsvd[16];
+} __packed;
+
+#define SIGNED_PKG_INFO_EXT_TYPE		0x15
+#define SIGNED_PKG_INFO_EXT_SIZE		\
+	(sizeof(struct signed_pkg_info_ext))
+
+/*
+ * Attributes for various IFWI sub-partitions.
+ * LIES_WITHIN_BPDT_4K = Sub-Partition should lie within the same 4K block as
+ * BPDT.
+ * NON_CRITICAL_SUBPART = Sub-Partition entry should be present in S-BPDT.
+ * CONTAINS_DIR = Sub-Partition contains directory.
+ * AUTO_GENERATED = Sub-Partition is generated by the tool.
+ * MANDATORY_BPDT_ENTRY = Even if sub-partition is deleted, BPDT should contain
+ * an entry for it with size 0 and offset 0.
+ */
+enum subpart_attributes {
+	LIES_WITHIN_BPDT_4K = (1 << 0),
+	NON_CRITICAL_SUBPART = (1 << 1),
+	CONTAINS_DIR = (1 << 2),
+	AUTO_GENERATED = (1 << 3),
+	MANDATORY_BPDT_ENTRY = (1 << 4),
+};
+
+/* Type value for various IFWI sub-partitions */
+enum bpdt_entry_type {
+	SMIP_TYPE		= 0,
+	CSE_RBE_TYPE		= 1,
+	CSE_BUP_TYPE		= 2,
+	UCODE_TYPE		= 3,
+	IBB_TYPE		= 4,
+	S_BPDT_TYPE		= 5,
+	OBB_TYPE		= 6,
+	CSE_MAIN_TYPE		= 7,
+	ISH_TYPE		= 8,
+	CSE_IDLM_TYPE		= 9,
+	IFP_OVERRIDE_TYPE	= 10,
+	DEBUG_TOKENS_TYPE	= 11,
+	UFS_PHY_TYPE		= 12,
+	UFS_GPP_TYPE		= 13,
+	PMC_TYPE		= 14,
+	IUNIT_TYPE		= 15,
+	NVM_CONFIG_TYPE	= 16,
+	UEP_TYPE		= 17,
+	UFS_RATE_B_TYPE	= 18,
+	MAX_SUBPARTS		= 19,
+};
+
+/*
+ * There are two order requirements for an IFWI image:
+ * 1. Order in which the sub-partitions lie within the BPDT entries.
+ * 2. Order in which the sub-partitions lie within the image.
+ *
+ * header_order defines #1 i.e. the order in which the sub-partitions should
+ * appear in the BPDT entries. pack_order defines #2 i.e. the order in which
+ * sub-partitions appear in the IFWI image. pack_order controls the offset and
+ * thus sub-partitions would have increasing offsets as we loop over pack_order.
+ */
+const enum bpdt_entry_type bpdt_header_order[MAX_SUBPARTS] = {
+	/* Order of the following entries is mandatory */
+	CSE_IDLM_TYPE,
+	IFP_OVERRIDE_TYPE,
+	S_BPDT_TYPE,
+	CSE_RBE_TYPE,
+	UFS_PHY_TYPE,
+	UFS_GPP_TYPE,
+	/* Order of the following entries is recommended */
+	UEP_TYPE,
+	NVM_CONFIG_TYPE,
+	UFS_RATE_B_TYPE,
+	IBB_TYPE,
+	SMIP_TYPE,
+	PMC_TYPE,
+	CSE_BUP_TYPE,
+	UCODE_TYPE,
+	DEBUG_TOKENS_TYPE,
+	IUNIT_TYPE,
+	CSE_MAIN_TYPE,
+	ISH_TYPE,
+	OBB_TYPE,
+};
+
+const enum bpdt_entry_type bpdt_pack_order[MAX_SUBPARTS] = {
+	/* Order of the following entries is mandatory */
+	UFS_GPP_TYPE,
+	UFS_PHY_TYPE,
+	IFP_OVERRIDE_TYPE,
+	UEP_TYPE,
+	NVM_CONFIG_TYPE,
+	UFS_RATE_B_TYPE,
+	/* Order of the following entries is recommended */
+	IBB_TYPE,
+	SMIP_TYPE,
+	CSE_RBE_TYPE,
+	PMC_TYPE,
+	CSE_BUP_TYPE,
+	UCODE_TYPE,
+	CSE_IDLM_TYPE,
+	DEBUG_TOKENS_TYPE,
+	S_BPDT_TYPE,
+	IUNIT_TYPE,
+	CSE_MAIN_TYPE,
+	ISH_TYPE,
+	OBB_TYPE,
+};
+
+/* Utility functions */
+enum ifwi_ret {
+	COMMAND_ERR = -1,
+	NO_ACTION_REQUIRED = 0,
+	REPACK_REQUIRED = 1,
+};
+
+struct dir_ops {
+	enum ifwi_ret (*dir_add)(int type);
+};
+
+static enum ifwi_ret ibbp_dir_add(int type);
+
+const struct subpart_info {
+	const char *name;
+	const char *readable_name;
+	uint32_t attr;
+	struct dir_ops dir_ops;
+} subparts[MAX_SUBPARTS] = {
+	/* OEM SMIP */
+	[SMIP_TYPE] = {"SMIP", "SMIP", CONTAINS_DIR, {NULL} },
+	/* CSE RBE */
+	[CSE_RBE_TYPE] = {"RBEP", "CSE_RBE", CONTAINS_DIR |
+			  MANDATORY_BPDT_ENTRY, {NULL} },
+	/* CSE BUP */
+	[CSE_BUP_TYPE] = {"FTPR", "CSE_BUP", CONTAINS_DIR |
+			  MANDATORY_BPDT_ENTRY, {NULL} },
+	/* uCode */
+	[UCODE_TYPE] = {"UCOD", "Microcode", CONTAINS_DIR, {NULL} },
+	/* IBB */
+	[IBB_TYPE] = {"IBBP", "Bootblock", CONTAINS_DIR, {ibbp_dir_add} },
+	/* S-BPDT */
+	[S_BPDT_TYPE] = {"S_BPDT", "S-BPDT", AUTO_GENERATED |
+			 MANDATORY_BPDT_ENTRY, {NULL} },
+	/* OBB */
+	[OBB_TYPE] = {"OBBP", "OEM boot block", CONTAINS_DIR |
+		      NON_CRITICAL_SUBPART, {NULL} },
+	/* CSE Main */
+	[CSE_MAIN_TYPE] = {"NFTP", "CSE_MAIN", CONTAINS_DIR |
+			   NON_CRITICAL_SUBPART, {NULL} },
+	/* ISH */
+	[ISH_TYPE] = {"ISHP", "ISH", NON_CRITICAL_SUBPART, {NULL} },
+	/* CSE IDLM */
+	[CSE_IDLM_TYPE] = {"DLMP", "CSE_IDLM", CONTAINS_DIR |
+			   MANDATORY_BPDT_ENTRY, {NULL} },
+	/* IFP Override */
+	[IFP_OVERRIDE_TYPE] = {"IFP_OVERRIDE", "IFP_OVERRIDE",
+			       LIES_WITHIN_BPDT_4K | MANDATORY_BPDT_ENTRY,
+			       {NULL} },
+	/* Debug Tokens */
+	[DEBUG_TOKENS_TYPE] = {"DEBUG_TOKENS", "Debug Tokens", 0, {NULL} },
+	/* UFS Phy Configuration */
+	[UFS_PHY_TYPE] = {"UFS_PHY", "UFS Phy", LIES_WITHIN_BPDT_4K |
+			  MANDATORY_BPDT_ENTRY, {NULL} },
+	/* UFS GPP LUN ID */
+	[UFS_GPP_TYPE] = {"UFS_GPP", "UFS GPP", LIES_WITHIN_BPDT_4K |
+			  MANDATORY_BPDT_ENTRY, {NULL} },
+	/* PMC */
+	[PMC_TYPE] = {"PMCP", "PMC firmware", CONTAINS_DIR, {NULL} },
+	/* IUNIT */
+	[IUNIT_TYPE] = {"IUNP", "IUNIT", NON_CRITICAL_SUBPART, {NULL} },
+	/* NVM Config */
+	[NVM_CONFIG_TYPE] = {"NVM_CONFIG", "NVM Config", 0, {NULL} },
+	/* UEP */
+	[UEP_TYPE] = {"UEP", "UEP", LIES_WITHIN_BPDT_4K | MANDATORY_BPDT_ENTRY,
+		      {NULL} },
+	/* UFS Rate B Config */
+	[UFS_RATE_B_TYPE] = {"UFS_RATE_B", "UFS Rate B Config", 0, {NULL} },
+};
+
+struct ifwi_image {
+	/* Data read from input file */
+	struct buffer input_buff;
+
+	/* BPDT header and entries */
+	struct buffer bpdt;
+	size_t input_ifwi_start_offset;
+	size_t input_ifwi_end_offset;
+
+	/* Subpartition content */
+	struct buffer subpart_buf[MAX_SUBPARTS];
+} ifwi_image;
+
+/* Buffer and file I/O */
+static off_t get_file_size(FILE *f)
+{
+	off_t fsize;
+
+	fseek(f, 0, SEEK_END);
+	fsize = ftell(f);
+	fseek(f, 0, SEEK_SET);
+	return fsize;
+}
+
+static inline void *buffer_get(const struct buffer *b)
+{
+	return b->data;
+}
+
+static inline size_t buffer_size(const struct buffer *b)
+{
+	return b->size;
+}
+
+static inline size_t buffer_offset(const struct buffer *b)
+{
+	return b->offset;
+}
+
+/*
+ * Shrink a buffer toward the beginning of its previous space.
+ * Afterward, buffer_delete() remains the means of cleaning it up
+ */
+static inline void buffer_set_size(struct buffer *b, size_t size)
+{
+	b->size = size;
+}
+
+/* Splice a buffer into another buffer. Note that it's up to the caller to
+ * bounds check the offset and size. The resulting buffer is backed by the same
+ * storage as the original, so although it is valid to buffer_delete() either
+ * one of them, doing so releases both simultaneously
+ */
+static void buffer_splice(struct buffer *dest, const struct buffer *src,
+			  size_t offset, size_t size)
+{
+	dest->name = src->name;
+	dest->data = src->data + offset;
+	dest->offset = src->offset + offset;
+	dest->size = size;
+}
+
+/*
+ * Shrink a buffer toward the end of its previous space.
+ * Afterward, buffer_delete() remains the means of cleaning it up
+ */
+static inline void buffer_seek(struct buffer *b, size_t size)
+{
+	b->offset += size;
+	b->size -= size;
+	b->data += size;
+}
+
+/* Returns the start of the underlying buffer, with the offset undone */
+static inline void *buffer_get_original_backing(const struct buffer *b)
+{
+	if (!b)
+		return NULL;
+	return buffer_get(b) - buffer_offset(b);
+}
+
+int buffer_create(struct buffer *buffer, size_t size, const char *name)
+{
+	buffer->name = strdup(name);
+	buffer->offset = 0;
+	buffer->size = size;
+	buffer->data = (char *)malloc(buffer->size);
+	if (!buffer->data) {
+		fprintf(stderr, "%s: Insufficient memory (0x%zx).\n", __func__,
+			size);
+	}
+
+	return !buffer->data;
+}
+
+int buffer_write_file(struct buffer *buffer, const char *filename)
+{
+	FILE *fp = fopen(filename, "wb");
+
+	if (!fp) {
+		perror(filename);
+		return -1;
+	}
+	assert(buffer && buffer->data);
+	if (fwrite(buffer->data, 1, buffer->size, fp) != buffer->size) {
+		fprintf(stderr, "incomplete write: %s\n", filename);
+		fclose(fp);
+		return -1;
+	}
+	fclose(fp);
+	return 0;
+}
+
+void buffer_delete(struct buffer *buffer)
+{
+	assert(buffer);
+	if (buffer->name) {
+		free(buffer->name);
+		buffer->name = NULL;
+	}
+	if (buffer->data) {
+		free(buffer_get_original_backing(buffer));
+		buffer->data = NULL;
+	}
+	buffer->offset = 0;
+	buffer->size = 0;
+}
+
+int buffer_from_file(struct buffer *buffer, const char *filename)
+{
+	FILE *fp = fopen(filename, "rb");
+
+	if (!fp) {
+		perror(filename);
+		return -1;
+	}
+	buffer->offset = 0;
+	off_t file_size = get_file_size(fp);
+
+	if (file_size < 0) {
+		fprintf(stderr, "could not determine size of %s\n", filename);
+		fclose(fp);
+		return -1;
+	}
+	buffer->size = file_size;
+	buffer->name = strdup(filename);
+	buffer->data = (char *)malloc(buffer->size);
+	assert(buffer->data);
+	if (fread(buffer->data, 1, buffer->size, fp) != buffer->size) {
+		fprintf(stderr, "incomplete read: %s\n", filename);
+		fclose(fp);
+		buffer_delete(buffer);
+		return -1;
+	}
+	fclose(fp);
+	return 0;
+}
+
+static void alloc_buffer(struct buffer *b, size_t s, const char *n)
+{
+	if (buffer_create(b, s, n) == 0)
+		return;
+
+	ERROR("Buffer allocation failure for %s (size = %zx).\n", n, s);
+	exit(-1);
+}
+
+/* Little-Endian functions */
+static inline uint8_t read_ble8(const void *src)
+{
+	const uint8_t *s = src;
+	return *s;
+}
+
+static inline uint8_t read_at_ble8(const void *src, size_t offset)
+{
+	const uint8_t *s = src;
+
+	s += offset;
+	return read_ble8(s);
+}
+
+static inline void write_ble8(void *dest, uint8_t val)
+{
+	*(uint8_t *)dest = val;
+}
+
+static inline void write_at_ble8(void *dest, uint8_t val, size_t offset)
+{
+	uint8_t *d = dest;
+
+	d += offset;
+	write_ble8(d, val);
+}
+
+static inline uint8_t read_at_le8(const void *src, size_t offset)
+{
+	return read_at_ble8(src, offset);
+}
+
+static inline void write_le8(void *dest, uint8_t val)
+{
+	write_ble8(dest, val);
+}
+
+static inline void write_at_le8(void *dest, uint8_t val, size_t offset)
+{
+	write_at_ble8(dest, val, offset);
+}
+
+static inline uint16_t read_le16(const void *src)
+{
+	const uint8_t *s = src;
+
+	return (((uint16_t)s[1]) << 8) | (((uint16_t)s[0]) << 0);
+}
+
+static inline uint16_t read_at_le16(const void *src, size_t offset)
+{
+	const uint8_t *s = src;
+
+	s += offset;
+	return read_le16(s);
+}
+
+static inline void write_le16(void *dest, uint16_t val)
+{
+	write_le8(dest, val >> 0);
+	write_at_le8(dest, val >> 8, sizeof(uint8_t));
+}
+
+static inline void write_at_le16(void *dest, uint16_t val, size_t offset)
+{
+	uint8_t *d = dest;
+
+	d += offset;
+	write_le16(d, val);
+}
+
+static inline uint32_t read_le32(const void *src)
+{
+	const uint8_t *s = src;
+
+	return (((uint32_t)s[3]) << 24) | (((uint32_t)s[2]) << 16) |
+		(((uint32_t)s[1]) << 8) | (((uint32_t)s[0]) << 0);
+}
+
+static inline uint32_t read_at_le32(const void *src, size_t offset)
+{
+	const uint8_t *s = src;
+
+	s += offset;
+	return read_le32(s);
+}
+
+static inline void write_le32(void *dest, uint32_t val)
+{
+	write_le16(dest, val >> 0);
+	write_at_le16(dest, val >> 16, sizeof(uint16_t));
+}
+
+static inline void write_at_le32(void *dest, uint32_t val, size_t offset)
+{
+	uint8_t *d = dest;
+
+	d += offset;
+	write_le32(d, val);
+}
+
+static inline uint64_t read_le64(const void *src)
+{
+	uint64_t val;
+
+	val = read_at_le32(src, sizeof(uint32_t));
+	val <<= 32;
+	val |= read_le32(src);
+	return val;
+}
+
+static inline uint64_t read_at_le64(const void *src, size_t offset)
+{
+	const uint8_t *s = src;
+
+	s += offset;
+	return read_le64(s);
+}
+
+static inline void write_le64(void *dest, uint64_t val)
+{
+	write_le32(dest, val >> 0);
+	write_at_le32(dest, val >> 32, sizeof(uint32_t));
+}
+
+static inline void write_at_le64(void *dest, uint64_t val, size_t offset)
+{
+	uint8_t *d = dest;
+
+	d += offset;
+	write_le64(d, val);
+}
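
These helpers let the tool run on hosts of either endianness. For comparison, the same accesses in Python are one-liners; a sketch of the read_le32()/write_le32() pair:

    def read_le32(data, offset=0):
        # Equivalent of read_at_le32(): four bytes, least-significant first
        return int.from_bytes(data[offset:offset + 4], 'little')

    def write_le32(buf, val, offset=0):
        # Equivalent of write_at_le32() into a mutable buffer
        buf[offset:offset + 4] = val.to_bytes(4, 'little')

    buf = bytearray(8)
    write_le32(buf, 0x000055AA)
    assert read_le32(buf) == 0x000055AA
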
+
+/*
+ * Read header/entry members in little-endian format.
+ * Returns the offset up to which the read was performed.
+ */
+static size_t read_member(void *src, size_t offset, size_t size_bytes,
+			  void *dst)
+{
+	switch (size_bytes) {
+	case 1:
+		*(uint8_t *)dst = read_at_le8(src, offset);
+		break;
+	case 2:
+		*(uint16_t *)dst = read_at_le16(src, offset);
+		break;
+	case 4:
+		*(uint32_t *)dst = read_at_le32(src, offset);
+		break;
+	case 8:
+		*(uint64_t *)dst = read_at_le64(src, offset);
+		break;
+	default:
+		ERROR("Read size not supported %zd\n", size_bytes);
+		exit(-1);
+	}
+
+	return (offset + size_bytes);
+}
+
+/*
+ * Convert to little endian format.
+ * Returns the offset up to which the fixup was performed.
+ */
+static size_t fix_member(void *data, size_t offset, size_t size_bytes)
+{
+	uint8_t *src = (uint8_t *)data + offset;
+
+	switch (size_bytes) {
+	case 1:
+		write_at_le8(data, *(uint8_t *)src, offset);
+		break;
+	case 2:
+		write_at_le16(data, *(uint16_t *)src, offset);
+		break;
+	case 4:
+		write_at_le32(data, *(uint32_t *)src, offset);
+		break;
+	case 8:
+		write_at_le64(data, *(uint64_t *)src, offset);
+		break;
+	default:
+		ERROR("Write size not supported %zd\n", size_bytes);
+		exit(-1);
+	}
+	return (offset + size_bytes);
+}
+
+static void print_subpart_dir(struct subpart_dir *s)
+{
+	if (verbose == 0)
+		return;
+
+	size_t i;
+
+	printf("%-25s 0x%-23.8x\n", "Marker", s->h.marker);
+	printf("%-25s %-25d\n", "Num entries", s->h.num_entries);
+	printf("%-25s %-25d\n", "Header Version", s->h.header_version);
+	printf("%-25s %-25d\n", "Entry Version", s->h.entry_version);
+	printf("%-25s 0x%-23x\n", "Header Length", s->h.header_length);
+	printf("%-25s 0x%-23x\n", "Checksum", s->h.checksum);
+	printf("%-25s ", "Name");
+	for (i = 0; i < sizeof(s->h.name); i++)
+		printf("%c", s->h.name[i]);
+
+	printf("\n");
+
+	printf("%-25s%-25s%-25s%-25s%-25s\n", "Entry #", "Name", "Offset",
+	       "Length", "Rsvd");
+
+	printf("=========================================================================================================================\n");
+
+	for (i = 0; i < s->h.num_entries; i++) {
+		printf("%-25zd%-25.12s0x%-23x0x%-23x0x%-23x\n", i + 1,
+		       s->e[i].name, s->e[i].offset, s->e[i].length,
+		       s->e[i].rsvd);
+	}
+
+	printf("=========================================================================================================================\n");
+}
+
+static void bpdt_print_header(struct bpdt_header *h, const char *name)
+{
+	if (verbose == 0)
+		return;
+
+	printf("%-25s %-25s\n", "Header", name);
+	printf("%-25s 0x%-23.8x\n", "Signature", h->signature);
+	printf("%-25s %-25d\n", "Descriptor count", h->descriptor_count);
+	printf("%-25s %-25d\n", "BPDT Version", h->bpdt_version);
+	printf("%-25s 0x%-23x\n", "XOR checksum", h->xor_redundant_block);
+	printf("%-25s 0x%-23x\n", "IFWI Version", h->ifwi_version);
+	printf("%-25s 0x%-23llx\n", "FIT Tool Version",
+	       (long long)h->fit_tool_version);
+}
+
+static void bpdt_print_entries(struct bpdt_entry *e, size_t count,
+			       const char *name)
+{
+	size_t i;
+
+	if (verbose == 0)
+		return;
+
+	printf("%s entries\n", name);
+
+	printf("%-25s%-25s%-25s%-25s%-25s%-25s%-25s%-25s\n", "Entry #",
+	       "Sub-Partition", "Name", "Type", "Flags", "Offset", "Size",
+	       "File Offset");
+
+	printf("=========================================================================================================================================================================================================\n");
+
+	for (i = 0; i < count; i++) {
+		printf("%-25zd%-25s%-25s%-25d0x%-23.08x0x%-23x0x%-23x0x%-23zx\n",
+		       i + 1, subparts[e[i].type].name,
+		       subparts[e[i].type].readable_name, e[i].type, e[i].flags,
+		       e[i].offset, e[i].size,
+		       e[i].offset + ifwi_image.input_ifwi_start_offset);
+	}
+
+	printf("=========================================================================================================================================================================================================\n");
+}
+
+static void bpdt_validate_header(struct bpdt_header *h, const char *name)
+{
+	assert(h->signature == BPDT_SIGNATURE);
+
+	if (h->bpdt_version != 1) {
+		ERROR("Invalid header : %s\n", name);
+		exit(-1);
+	}
+
+	DEBUG("Validated header : %s\n", name);
+}
+
+static void bpdt_read_header(void *data, struct bpdt_header *h,
+			     const char *name)
+{
+	size_t offset = 0;
+
+	offset = read_member(data, offset, sizeof(h->signature), &h->signature);
+	offset = read_member(data, offset, sizeof(h->descriptor_count),
+			     &h->descriptor_count);
+	offset = read_member(data, offset, sizeof(h->bpdt_version),
+			     &h->bpdt_version);
+	offset = read_member(data, offset, sizeof(h->xor_redundant_block),
+			     &h->xor_redundant_block);
+	offset = read_member(data, offset, sizeof(h->ifwi_version),
+			     &h->ifwi_version);
+	read_member(data, offset, sizeof(h->fit_tool_version),
+		    &h->fit_tool_version);
+
+	bpdt_validate_header(h, name);
+	bpdt_print_header(h, name);
+}
+
+static void bpdt_read_entries(void *data, struct bpdt *bpdt, const char *name)
+{
+	size_t i, offset = 0;
+	struct bpdt_entry *e = &bpdt->e[0];
+	size_t count = bpdt->h.descriptor_count;
+
+	for (i = 0; i < count; i++) {
+		offset = read_member(data, offset, sizeof(e[i].type),
+				     &e[i].type);
+		offset = read_member(data, offset, sizeof(e[i].flags),
+				     &e[i].flags);
+		offset = read_member(data, offset, sizeof(e[i].offset),
+				     &e[i].offset);
+		offset = read_member(data, offset, sizeof(e[i].size),
+				     &e[i].size);
+	}
+
+	bpdt_print_entries(e, count, name);
+}
+
+/*
+ * Given type of sub-partition, identify BPDT entry for it.
+ * Sub-Partition could lie either within BPDT or S-BPDT.
+ */
+static struct bpdt_entry *__find_entry_by_type(struct bpdt_entry *e,
+					       size_t count, int type)
+{
+	size_t i;
+
+	for (i = 0; i < count; i++) {
+		if (e[i].type == type)
+			break;
+	}
+
+	if (i == count)
+		return NULL;
+
+	return &e[i];
+}
+
+static struct bpdt_entry *find_entry_by_type(int type)
+{
+	struct bpdt *b = buffer_get(&ifwi_image.bpdt);
+
+	if (!b)
+		return NULL;
+
+	struct bpdt_entry *curr = __find_entry_by_type(&b->e[0],
+						       b->h.descriptor_count,
+						       type);
+
+	if (curr)
+		return curr;
+
+	b = buffer_get(&ifwi_image.subpart_buf[S_BPDT_TYPE]);
+	if (!b)
+		return NULL;
+
+	return __find_entry_by_type(&b->e[0], b->h.descriptor_count, type);
+}
+
+/*
+ * Find sub-partition type given its name. If the name does not exist, returns
+ * -1.
+ */
+static int find_type_by_name(const char *name)
+{
+	int i;
+
+	for (i = 0; i < MAX_SUBPARTS; i++) {
+		if ((strlen(subparts[i].name) == strlen(name)) &&
+		    (!strcmp(subparts[i].name, name)))
+			break;
+	}
+
+	if (i == MAX_SUBPARTS) {
+		ERROR("Invalid sub-partition name %s.\n", name);
+		return -1;
+	}
+
+	return i;
+}
+
+/*
+ * Read the content of a sub-partition from input file and store it in
+ * ifwi_image.subpart_buf[SUB-PARTITION_TYPE].
+ *
+ * Returns the maximum offset occupied by the sub-partitions.
+ */
+static size_t read_subpart_buf(void *data, size_t size, struct bpdt_entry *e,
+			       size_t count)
+{
+	size_t i, type;
+	struct buffer *buf;
+	size_t max_offset = 0;
+
+	for (i = 0; i < count; i++) {
+		type = e[i].type;
+
+		if (type >= MAX_SUBPARTS) {
+			ERROR("Invalid sub-partition type %zd.\n", type);
+			exit(-1);
+		}
+
+		if (buffer_size(&ifwi_image.subpart_buf[type])) {
+			ERROR("Multiple sub-partitions of type %zd(%s).\n",
+			      type, subparts[type].name);
+			exit(-1);
+		}
+
+		if (e[i].size == 0) {
+			INFO("Dummy sub-partition %zd(%s). Skipping.\n", type,
+			     subparts[type].name);
+			continue;
+		}
+
+		assert((e[i].offset + e[i].size) <= size);
+
+		/*
+		 * Sub-partitions in IFWI image are not in the same order as
+		 * in BPDT entries. BPDT entries are in header_order whereas
+		 * sub-partition offsets in the image are in pack_order.
+		 */
+		if ((e[i].offset + e[i].size) > max_offset)
+			max_offset = e[i].offset + e[i].size;
+
+		/*
+		 * S-BPDT sub-partition contains information about all the
+		 * non-critical sub-partitions. Thus, size of S-BPDT
+		 * sub-partition equals size of S-BPDT plus size of all the
+		 * non-critical sub-partitions. Thus, reading whole of S-BPDT
+		 * here would be redundant as the non-critical partitions are
+		 * read and allocated buffers separately. Also, S-BPDT requires
+		 * special handling for reading header and entries.
+		 */
+		if (type == S_BPDT_TYPE)
+			continue;
+
+		buf = &ifwi_image.subpart_buf[type];
+
+		alloc_buffer(buf, e[i].size, subparts[type].name);
+		memcpy(buffer_get(buf), (uint8_t *)data + e[i].offset,
+		       e[i].size);
+	}
+
+	assert(max_offset);
+	return max_offset;
+}
+
+/*
+ * Allocate buffer for bpdt header, entries and all sub-partition content.
+ * Returns offset in data where BPDT ends.
+ */
+static size_t alloc_bpdt_buffer(void *data, size_t size, size_t offset,
+				struct buffer *b, const char *name)
+{
+	struct bpdt_header bpdt_header;
+
+	assert((offset + BPDT_HEADER_SIZE) < size);
+	bpdt_read_header((uint8_t *)data + offset, &bpdt_header, name);
+
+	/* Buffer to read BPDT header and entries */
+	alloc_buffer(b, get_bpdt_size(&bpdt_header), name);
+
+	struct bpdt *bpdt = buffer_get(b);
+
+	memcpy(&bpdt->h, &bpdt_header, BPDT_HEADER_SIZE);
+
+	/*
+	 * If no entries are present, maximum offset occupied is (offset +
+	 * BPDT_HEADER_SIZE).
+	 */
+	if (bpdt->h.descriptor_count == 0)
+		return (offset + BPDT_HEADER_SIZE);
+
+	/* Read all entries */
+	assert((offset + get_bpdt_size(&bpdt->h)) < size);
+	bpdt_read_entries((uint8_t *)data + offset + BPDT_HEADER_SIZE, bpdt,
+			  name);
+
+	/* Read all sub-partition content in subpart_buf */
+	return read_subpart_buf(data, size, &bpdt->e[0],
+				bpdt->h.descriptor_count);
+}
+
+static void parse_sbpdt(void *data, size_t size)
+{
+	struct bpdt_entry *s;
+
+	s  = find_entry_by_type(S_BPDT_TYPE);
+	if (!s)
+		return;
+
+	assert(size > s->offset);
+
+	alloc_bpdt_buffer(data, size, s->offset,
+			  &ifwi_image.subpart_buf[S_BPDT_TYPE],
+			  "S-BPDT");
+}
+
+static uint8_t calc_checksum(struct subpart_dir *s)
+{
+	size_t size = subpart_dir_size(&s->h);
+	uint8_t *data = (uint8_t *)s;
+	uint8_t checksum = 0;
+	size_t i;
+	uint8_t old_checksum = s->h.checksum;
+
+	s->h.checksum = 0;
+
+	for (i = 0; i < size; i++)
+		checksum += data[i];
+
+	s->h.checksum = old_checksum;
+
+	/* 2s complement */
+	return -checksum;
+}
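
The checksum is defined in the header comment earlier as the two's complement of the 8-bit sum over the directory header and all entries, with the checksum byte itself counted as zero. The same calculation in Python (offset 11 follows from the struct subpart_dir_header layout: marker, num_entries, header_version, entry_version, header_length, then checksum):

    def subpart_dir_checksum(blob, checksum_offset=11):
        """8-bit two's-complement checksum, as in calc_checksum()"""
        total = sum(blob) - blob[checksum_offset]  # sum with checksum zeroed
        return (-total) & 0xff
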
+
+static void validate_subpart_dir(struct subpart_dir *s, const char *name,
+				 bool checksum_check)
+{
+	if (s->h.marker != SUBPART_DIR_MARKER ||
+	    s->h.header_version != SUBPART_DIR_HEADER_VERSION_SUPPORTED ||
+	    s->h.entry_version != SUBPART_DIR_ENTRY_VERSION_SUPPORTED ||
+	    s->h.header_length != SUBPART_DIR_HEADER_SIZE) {
+		ERROR("Invalid subpart_dir for %s.\n", name);
+		exit(-1);
+	}
+
+	if (!checksum_check)
+		return;
+
+	uint8_t checksum = calc_checksum(s);
+
+	if (checksum != s->h.checksum)
+		ERROR("Invalid checksum for %s (Expected=0x%x, Actual=0x%x).\n",
+		      name, checksum, s->h.checksum);
+}
+
+static void validate_subpart_dir_without_checksum(struct subpart_dir *s,
+						  const char *name)
+{
+	validate_subpart_dir(s, name, 0);
+}
+
+static void validate_subpart_dir_with_checksum(struct subpart_dir *s,
+					       const char *name)
+{
+	validate_subpart_dir(s, name, 1);
+}
+
+static void parse_subpart_dir(struct buffer *subpart_dir_buf,
+			      struct buffer *input_buf, const char *name)
+{
+	struct subpart_dir_header hdr;
+	size_t offset = 0;
+	uint8_t *data = buffer_get(input_buf);
+	size_t size = buffer_size(input_buf);
+
+	/* Read Subpart_Dir header */
+	assert(size >= SUBPART_DIR_HEADER_SIZE);
+	offset = read_member(data, offset, sizeof(hdr.marker), &hdr.marker);
+	offset = read_member(data, offset, sizeof(hdr.num_entries),
+			     &hdr.num_entries);
+	offset = read_member(data, offset, sizeof(hdr.header_version),
+			     &hdr.header_version);
+	offset = read_member(data, offset, sizeof(hdr.entry_version),
+			     &hdr.entry_version);
+	offset = read_member(data, offset, sizeof(hdr.header_length),
+			     &hdr.header_length);
+	offset = read_member(data, offset, sizeof(hdr.checksum), &hdr.checksum);
+	memcpy(hdr.name, data + offset, sizeof(hdr.name));
+	offset += sizeof(hdr.name);
+
+	validate_subpart_dir_without_checksum((struct subpart_dir *)&hdr, name);
+
+	assert(size > subpart_dir_size(&hdr));
+	alloc_buffer(subpart_dir_buf, subpart_dir_size(&hdr), "Subpart Dir");
+	memcpy(buffer_get(subpart_dir_buf), &hdr, SUBPART_DIR_HEADER_SIZE);
+
+	/* Read Subpart Dir entries */
+	struct subpart_dir *subpart_dir = buffer_get(subpart_dir_buf);
+	struct subpart_dir_entry *e = &subpart_dir->e[0];
+	uint32_t i;
+
+	for (i = 0; i < hdr.num_entries; i++) {
+		memcpy(e[i].name, data + offset, sizeof(e[i].name));
+		offset += sizeof(e[i].name);
+		offset = read_member(data, offset, sizeof(e[i].offset),
+				     &e[i].offset);
+		offset = read_member(data, offset, sizeof(e[i].length),
+				     &e[i].length);
+		offset = read_member(data, offset, sizeof(e[i].rsvd),
+				     &e[i].rsvd);
+	}
+
+	validate_subpart_dir_with_checksum(subpart_dir, name);
+
+	print_subpart_dir(subpart_dir);
+}
+
+/* Parse input image file to identify different sub-partitions */
+static int ifwi_parse(void)
+{
+	struct buffer *buff = &ifwi_image.input_buff;
+	const char *image_name = param.image_name;
+
+	DEBUG("Parsing IFWI image...\n");
+
+	/* Read input file */
+	if (buffer_from_file(buff, image_name)) {
+		ERROR("Failed to read input file %s.\n", image_name);
+		return -1;
+	}
+
+	INFO("Buffer %p size 0x%zx\n", buff->data, buff->size);
+
+	/* Look for BPDT signature at 4K intervals */
+	size_t offset = 0;
+	void *data = buffer_get(buff);
+
+	while (offset < buffer_size(buff)) {
+		if (read_at_le32(data, offset) == BPDT_SIGNATURE)
+			break;
+		offset += 4 * KiB;
+	}
+
+	if (offset >= buffer_size(buff)) {
+		ERROR("Image does not contain BPDT!!\n");
+		return -1;
+	}
+
+	ifwi_image.input_ifwi_start_offset = offset;
+	INFO("BPDT starts at offset 0x%zx.\n", offset);
+
+	data = (uint8_t *)data + offset;
+	size_t ifwi_size = buffer_size(buff) - offset;
+
+	/* Read BPDT and sub-partitions */
+	uintptr_t end_offset;
+
+	end_offset = ifwi_image.input_ifwi_start_offset +
+		alloc_bpdt_buffer(data, ifwi_size, 0, &ifwi_image.bpdt, "BPDT");
+
+	/* Parse S-BPDT, if any */
+	parse_sbpdt(data, ifwi_size);
+
+	/*
+	 * Store end offset of IFWI. Required for copying any trailing non-IFWI
+	 * part of the image.
+	 * ASSUMPTION: IFWI image always ends on a 4K boundary.
+	 */
+	ifwi_image.input_ifwi_end_offset = ALIGN(end_offset, 4 * KiB);
+	DEBUG("Parsing done.\n");
+
+	return 0;
+}
+
+/*
+ * This function is used by repack to count the number of BPDT and S-BPDT
+ * entries that are present. It frees the current buffers used by the entries
+ * and allocates fresh buffers that can be used for repacking. Returns BPDT
+ * entries which are empty and need to be filled in.
+ */
+static void __bpdt_reset(struct buffer *b, size_t count, size_t size)
+{
+	size_t bpdt_size = BPDT_HEADER_SIZE + count * BPDT_ENTRY_SIZE;
+
+	assert(size >= bpdt_size);
+
+	/*
+	 * If buffer does not have the required size, allocate a fresh buffer.
+	 */
+	if (buffer_size(b) != size) {
+		struct buffer temp;
+
+		alloc_buffer(&temp, size, b->name);
+		memcpy(buffer_get(&temp), buffer_get(b), buffer_size(b));
+		buffer_delete(b);
+		*b = temp;
+	}
+
+	struct bpdt *bpdt = buffer_get(b);
+	uint8_t *ptr = (uint8_t *)&bpdt->e[0];
+	size_t entries_size = BPDT_ENTRY_SIZE * count;
+
+	/* Zero out BPDT entries */
+	memset(ptr, 0, entries_size);
+	/* Fill any pad-space with FF */
+	memset(ptr + entries_size, 0xFF, size - bpdt_size);
+
+	bpdt->h.descriptor_count = count;
+}
+
+static void bpdt_reset(void)
+{
+	size_t i;
+	size_t bpdt_count = 0, sbpdt_count = 0, dummy_bpdt_count = 0;
+
+	/* Count number of BPDT and S-BPDT entries */
+	for (i = 0; i < MAX_SUBPARTS; i++) {
+		if (buffer_size(&ifwi_image.subpart_buf[i]) == 0) {
+			if (subparts[i].attr & MANDATORY_BPDT_ENTRY) {
+				bpdt_count++;
+				dummy_bpdt_count++;
+			}
+			continue;
+		}
+
+		if (subparts[i].attr & NON_CRITICAL_SUBPART)
+			sbpdt_count++;
+		else
+			bpdt_count++;
+	}
+
+	DEBUG("Count: BPDT = %zd, Dummy BPDT = %zd, S-BPDT = %zd\n", bpdt_count,
+	      dummy_bpdt_count, sbpdt_count);
+
+	/* Update BPDT if required */
+	size_t bpdt_size = max(BPDT_MIN_SIZE,
+			       BPDT_HEADER_SIZE + bpdt_count * BPDT_ENTRY_SIZE);
+	__bpdt_reset(&ifwi_image.bpdt, bpdt_count, bpdt_size);
+
+	/* Update S-BPDT if required */
+	bpdt_size = ALIGN(BPDT_HEADER_SIZE + sbpdt_count * BPDT_ENTRY_SIZE,
+			  4 * KiB);
+	__bpdt_reset(&ifwi_image.subpart_buf[S_BPDT_TYPE], sbpdt_count,
+		     bpdt_size);
+}
+
+/* Initialize BPDT entries in header order */
+static void bpdt_entries_init_header_order(void)
+{
+	int i, type;
+	size_t size;
+
+	struct bpdt *bpdt, *sbpdt, *curr;
+	size_t bpdt_curr = 0, sbpdt_curr = 0, *count_ptr;
+
+	bpdt = buffer_get(&ifwi_image.bpdt);
+	sbpdt = buffer_get(&ifwi_image.subpart_buf[S_BPDT_TYPE]);
+
+	for (i = 0; i < MAX_SUBPARTS; i++) {
+		type = bpdt_header_order[i];
+		size = buffer_size(&ifwi_image.subpart_buf[type]);
+
+		if (size == 0 && !(subparts[type].attr & MANDATORY_BPDT_ENTRY))
+			continue;
+
+		if (subparts[type].attr & NON_CRITICAL_SUBPART) {
+			curr = sbpdt;
+			count_ptr = &sbpdt_curr;
+		} else {
+			curr = bpdt;
+			count_ptr = &bpdt_curr;
+		}
+
+		assert(*count_ptr < curr->h.descriptor_count);
+		curr->e[*count_ptr].type = type;
+		curr->e[*count_ptr].flags = 0;
+		curr->e[*count_ptr].offset = 0;
+		curr->e[*count_ptr].size = size;
+
+		(*count_ptr)++;
+	}
+}
+
+static void pad_buffer(struct buffer *b, size_t size)
+{
+	size_t buff_size = buffer_size(b);
+
+	assert(buff_size <= size);
+
+	if (buff_size == size)
+		return;
+
+	struct buffer temp;
+
+	alloc_buffer(&temp, size, b->name);
+	uint8_t *data = buffer_get(&temp);
+
+	memcpy(data, buffer_get(b), buff_size);
+	memset(data + buff_size, 0xFF, size - buff_size);
+
+	*b = temp;
+}
+
+/* Initialize offsets of entries using pack order */
+static void bpdt_entries_init_pack_order(void)
+{
+	int i, type;
+	struct bpdt_entry *curr;
+	size_t curr_offset, curr_end;
+
+	curr_offset = max(BPDT_MIN_SIZE, buffer_size(&ifwi_image.bpdt));
+
+	/*
+	 * There are two types of sub-partitions that need to be handled here:
+	 *   1. Sub-partitions that lie within the same 4K as BPDT
+	 *   2. Sub-partitions that lie outside the 4K of BPDT
+	 *
+	 * For sub-partitions of type # 1, there is no requirement on the start
+	 * or end of the sub-partition. They need to be packed in without any
+	 * holes left in between. If there is any empty space left after the end
+	 * of the last sub-partition in 4K of BPDT, then that space needs to be
+	 * padded with FF bytes, but the size of the last sub-partition remains
+	 * unchanged.
+	 *
+	 * For sub-partitions of type # 2, both the start and end should be a
+	 * multiple of 4K. If not, then it needs to be padded with FF bytes and
+	 * size adjusted such that the sub-partition ends on 4K boundary.
+	 */
+
+	/* #1 Sub-partitions that lie within same 4K as BPDT */
+	struct buffer *last_bpdt_buff = &ifwi_image.bpdt;
+
+	for (i = 0; i < MAX_SUBPARTS; i++) {
+		type = bpdt_pack_order[i];
+		curr = find_entry_by_type(type);
+
+		if (!curr || curr->size == 0)
+			continue;
+
+		if (!(subparts[type].attr & LIES_WITHIN_BPDT_4K))
+			continue;
+
+		curr->offset = curr_offset;
+		curr_offset = curr->offset + curr->size;
+		last_bpdt_buff = &ifwi_image.subpart_buf[type];
+		DEBUG("type=%d, curr_offset=0x%zx, curr->offset=0x%x, curr->size=0x%x, buff_size=0x%zx\n",
+		      type, curr_offset, curr->offset, curr->size,
+		      buffer_size(&ifwi_image.subpart_buf[type]));
+	}
+
+	/* Pad ff bytes if there is any empty space left in BPDT 4K */
+	curr_end = ALIGN(curr_offset, 4 * KiB);
+	pad_buffer(last_bpdt_buff,
+		   buffer_size(last_bpdt_buff) + (curr_end - curr_offset));
+	curr_offset = curr_end;
+
+	/* #2 Sub-partitions that lie outside of BPDT 4K */
+	for (i = 0; i < MAX_SUBPARTS; i++) {
+		type = bpdt_pack_order[i];
+		curr = find_entry_by_type(type);
+
+		if (!curr || curr->size == 0)
+			continue;
+
+		if (subparts[type].attr & LIES_WITHIN_BPDT_4K)
+			continue;
+
+		assert(curr_offset == ALIGN(curr_offset, 4 * KiB));
+		curr->offset = curr_offset;
+		curr_end = ALIGN(curr->offset + curr->size, 4 * KiB);
+		curr->size = curr_end - curr->offset;
+
+		pad_buffer(&ifwi_image.subpart_buf[type], curr->size);
+
+		curr_offset = curr_end;
+		DEBUG("type=%d, curr_offset=0x%zx, curr->offset=0x%x, curr->size=0x%x, buff_size=0x%zx\n",
+		      type, curr_offset, curr->offset, curr->size,
+		      buffer_size(&ifwi_image.subpart_buf[type]));
+	}
+
+	/*
+	 * Update size of S-BPDT to include size of all non-critical
+	 * sub-partitions.
+	 *
+	 * Assumption: S-BPDT always lies at the end of IFWI image.
+	 */
+	curr = find_entry_by_type(S_BPDT_TYPE);
+	assert(curr);
+
+	assert(curr_offset == ALIGN(curr_offset, 4 * KiB));
+	curr->size = curr_offset - curr->offset;
+}
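
A condensed sketch of the placement rule described in the comment above: sub-partitions marked LIES_WITHIN_BPDT_4K are packed back to back straight after the BPDT, while everything else starts on a 4KiB boundary and is padded up to one (the entry list format here is illustrative):

    def align4k(x):
        return (x + 0xfff) & ~0xfff

    def assign_offsets(entries, bpdt_size):
        """entries: list of (name, size, lies_within_bpdt_4k) tuples"""
        offset = max(512, bpdt_size)           # BPDT_MIN_SIZE is 512
        placed = []
        for name, size, in_4k in entries:
            if in_4k:                          # #1: pack with no holes
                placed.append((name, offset, size))
                offset += size
        offset = align4k(offset)               # pad out the BPDT 4K block
        for name, size, in_4k in entries:
            if not in_4k:                      # #2: 4K-aligned and padded
                end = align4k(offset + size)
                placed.append((name, offset, end - offset))
                offset = end
        return placed
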
+
+/* Convert all members of BPDT to little-endian format */
+static void bpdt_fixup_write_buffer(struct buffer *buf)
+{
+	struct bpdt *s = buffer_get(buf);
+
+	struct bpdt_header *h = &s->h;
+	struct bpdt_entry *e = &s->e[0];
+
+	size_t count = h->descriptor_count;
+
+	size_t offset = 0;
+
+	offset = fix_member(&h->signature, offset, sizeof(h->signature));
+	offset = fix_member(&h->descriptor_count, offset,
+			    sizeof(h->descriptor_count));
+	offset = fix_member(&h->bpdt_version, offset, sizeof(h->bpdt_version));
+	offset = fix_member(&h->xor_redundant_block, offset,
+			    sizeof(h->xor_redundant_block));
+	offset = fix_member(&h->ifwi_version, offset, sizeof(h->ifwi_version));
+	offset = fix_member(&h->fit_tool_version, offset,
+			    sizeof(h->fit_tool_version));
+
+	uint32_t i;
+
+	for (i = 0; i < count; i++) {
+		offset = fix_member(&e[i].type, offset, sizeof(e[i].type));
+		offset = fix_member(&e[i].flags, offset, sizeof(e[i].flags));
+		offset = fix_member(&e[i].offset, offset, sizeof(e[i].offset));
+		offset = fix_member(&e[i].size, offset, sizeof(e[i].size));
+	}
+}
+
+/* Write BPDT to output buffer after fixup */
+static void bpdt_write(struct buffer *dst, size_t offset, struct buffer *src)
+{
+	bpdt_fixup_write_buffer(src);
+	memcpy(buffer_get(dst) + offset, buffer_get(src), buffer_size(src));
+}
+
+/*
+ * Follows these steps to re-create the image:
+ * 1. Write any non-IFWI prefix.
+ * 2. Write out BPDT header and entries.
+ * 3. Write sub-partition buffers to respective offsets.
+ * 4. Write any non-IFWI suffix.
+ *
+ * While performing the above steps, make sure that any empty holes are filled
+ * with FF.
+ */
+static void ifwi_write(const char *image_name)
+{
+	struct bpdt_entry *s = find_entry_by_type(S_BPDT_TYPE);
+
+	assert(s);
+
+	size_t ifwi_start, ifwi_end, file_end;
+
+	ifwi_start = ifwi_image.input_ifwi_start_offset;
+	ifwi_end = ifwi_start + ALIGN(s->offset + s->size, 4 * KiB);
+	file_end = ifwi_end + (buffer_size(&ifwi_image.input_buff) -
+			       ifwi_image.input_ifwi_end_offset);
+
+	struct buffer b;
+
+	alloc_buffer(&b, file_end, "Final-IFWI");
+
+	uint8_t *input_data = buffer_get(&ifwi_image.input_buff);
+	uint8_t *output_data = buffer_get(&b);
+
+	DEBUG("ifwi_start:0x%zx, ifwi_end:0x%zx, file_end:0x%zx\n", ifwi_start,
+	      ifwi_end, file_end);
+
+	/* Copy non-IFWI prefix, if any */
+	memcpy(output_data, input_data, ifwi_start);
+
+	DEBUG("Copied non-IFWI prefix (offset=0x0, size=0x%zx).\n", ifwi_start);
+
+	struct buffer ifwi;
+
+	buffer_splice(&ifwi, &b, ifwi_start, ifwi_end - ifwi_start);
+	uint8_t *ifwi_data = buffer_get(&ifwi);
+
+	/* Copy sub-partitions using pack_order */
+	struct bpdt_entry *curr;
+	struct buffer *subpart_buf;
+	int i, type;
+
+	for (i = 0; i < MAX_SUBPARTS; i++) {
+		type = bpdt_pack_order[i];
+
+		if (type == S_BPDT_TYPE)
+			continue;
+
+		curr = find_entry_by_type(type);
+
+		if (!curr || !curr->size)
+			continue;
+
+		subpart_buf = &ifwi_image.subpart_buf[type];
+
+		DEBUG("curr->offset=0x%x, curr->size=0x%x, type=%d, write_size=0x%zx\n",
+		      curr->offset, curr->size, type, buffer_size(subpart_buf));
+
+		assert((curr->offset + buffer_size(subpart_buf)) <=
+		       buffer_size(&ifwi));
+
+		memcpy(ifwi_data + curr->offset, buffer_get(subpart_buf),
+		       buffer_size(subpart_buf));
+	}
+
+	/* Copy non-IFWI suffix, if any */
+	if (ifwi_end != file_end) {
+		memcpy(output_data + ifwi_end,
+		       input_data + ifwi_image.input_ifwi_end_offset,
+		       file_end - ifwi_end);
+		DEBUG("Copied non-IFWI suffix (offset=0x%zx,size=0x%zx).\n",
+		      ifwi_end, file_end - ifwi_end);
+	}
+
+	/*
+	 * Convert BPDT to little-endian format and write it to output buffer.
+	 * S-BPDT is written first and then BPDT.
+	 */
+	bpdt_write(&ifwi, s->offset, &ifwi_image.subpart_buf[S_BPDT_TYPE]);
+	bpdt_write(&ifwi, 0, &ifwi_image.bpdt);
+
+	if (buffer_write_file(&b, image_name)) {
+		ERROR("File write error\n");
+		exit(-1);
+	}
+
+	buffer_delete(&b);
+	printf("Image written successfully to %s.\n", image_name);
+}
+
+/*
+ * Calculate the size and offset of each sub-partition again, since they might
+ * have changed because of an add/delete operation. Also, re-create the BPDT
+ * and S-BPDT entries and write the new IFWI image back to the file.
+ */
+static void ifwi_repack(void)
+{
+	bpdt_reset();
+	bpdt_entries_init_header_order();
+	bpdt_entries_init_pack_order();
+
+	struct bpdt *b = buffer_get(&ifwi_image.bpdt);
+
+	bpdt_print_entries(&b->e[0], b->h.descriptor_count, "BPDT");
+
+	b = buffer_get(&ifwi_image.subpart_buf[S_BPDT_TYPE]);
+	bpdt_print_entries(&b->e[0], b->h.descriptor_count, "S-BPDT");
+
+	DEBUG("Repack done.. writing image.\n");
+	ifwi_write(param.image_name);
+}
+
+static void init_subpart_dir_header(struct subpart_dir_header *hdr,
+				    size_t count, const char *name)
+{
+	memset(hdr, 0, sizeof(*hdr));
+
+	hdr->marker = SUBPART_DIR_MARKER;
+	hdr->num_entries = count;
+	hdr->header_version = SUBPART_DIR_HEADER_VERSION_SUPPORTED;
+	hdr->entry_version = SUBPART_DIR_ENTRY_VERSION_SUPPORTED;
+	hdr->header_length = SUBPART_DIR_HEADER_SIZE;
+	memcpy(hdr->name, name, sizeof(hdr->name));
+}
+
+static size_t init_subpart_dir_entry(struct subpart_dir_entry *e,
+				     struct buffer *b, size_t offset)
+{
+	memset(e, 0, sizeof(*e));
+
+	assert(strlen(b->name) <= sizeof(e->name));
+	strncpy((char *)e->name, (char *)b->name, sizeof(e->name));
+	e->offset = offset;
+	e->length = buffer_size(b);
+
+	return (offset + buffer_size(b));
+}
+
+static void init_manifest_header(struct manifest_header *hdr, size_t size)
+{
+	memset(hdr, 0, sizeof(*hdr));
+
+	hdr->header_type = 0x4;
+	assert((MANIFEST_HDR_SIZE % DWORD_SIZE) == 0);
+	hdr->header_length = MANIFEST_HDR_SIZE / DWORD_SIZE;
+	hdr->header_version = 0x10000;
+	hdr->vendor = 0x8086;
+
+	struct tm *local_time;
+	time_t curr_time;
+	char buffer[11];
+
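+	/* Encode today's date as hex digits, e.g. 2019-07-24 -> 0x20190724 */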
+	curr_time = time(NULL);
+	local_time = localtime(&curr_time);
+	strftime(buffer, sizeof(buffer), "0x%Y%m%d", local_time);
+	hdr->date = strtoul(buffer, NULL, 16);
+
+	assert((size % DWORD_SIZE) == 0);
+	hdr->size = size / DWORD_SIZE;
+	hdr->id = MANIFEST_ID_MAGIC;
+}
+
+static void init_signed_pkg_info_ext(struct signed_pkg_info_ext *ext,
+				     size_t count, const char *name)
+{
+	memset(ext, 0, sizeof(*ext));
+
+	ext->ext_type = SIGNED_PKG_INFO_EXT_TYPE;
+	ext->ext_length = SIGNED_PKG_INFO_EXT_SIZE + count * MODULE_SIZE;
+	memcpy(ext->name, name, sizeof(ext->name));
+}
+
+static void subpart_dir_fixup_write_buffer(struct buffer *buf)
+{
+	struct subpart_dir *s = buffer_get(buf);
+	struct subpart_dir_header *h = &s->h;
+	struct subpart_dir_entry *e = &s->e[0];
+
+	size_t count = h->num_entries;
+	size_t offset = 0;
+
+	offset = fix_member(&h->marker, offset, sizeof(h->marker));
+	offset = fix_member(&h->num_entries, offset, sizeof(h->num_entries));
+	offset = fix_member(&h->header_version, offset,
+			    sizeof(h->header_version));
+	offset = fix_member(&h->entry_version, offset,
+			    sizeof(h->entry_version));
+	offset = fix_member(&h->header_length, offset,
+			    sizeof(h->header_length));
+	offset = fix_member(&h->checksum, offset, sizeof(h->checksum));
+	offset += sizeof(h->name);
+
+	uint32_t i;
+
+	for (i = 0; i < count; i++) {
+		offset += sizeof(e[i].name);
+		offset = fix_member(&e[i].offset, offset, sizeof(e[i].offset));
+		offset = fix_member(&e[i].length, offset, sizeof(e[i].length));
+		offset = fix_member(&e[i].rsvd, offset, sizeof(e[i].rsvd));
+	}
+}
+
+static void create_subpart(struct buffer *dst, struct buffer *info[],
+			   size_t count, const char *name)
+{
+	struct buffer subpart_dir_buff;
+	size_t size = SUBPART_DIR_HEADER_SIZE + count * SUBPART_DIR_ENTRY_SIZE;
+
+	alloc_buffer(&subpart_dir_buff, size, "subpart-dir");
+
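+	/* Directory entries immediately follow the header in memory */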
+	struct subpart_dir_header *h = buffer_get(&subpart_dir_buff);
+	struct subpart_dir_entry *e = (struct subpart_dir_entry *)(h + 1);
+
+	init_subpart_dir_header(h, count, name);
+
+	size_t curr_offset = size;
+	size_t i;
+
+	for (i = 0; i < count; i++) {
+		curr_offset = init_subpart_dir_entry(&e[i], info[i],
+						     curr_offset);
+	}
+
+	alloc_buffer(dst, curr_offset, name);
+	uint8_t *data = buffer_get(dst);
+
+	for (i = 0; i < count; i++) {
+		memcpy(data + e[i].offset, buffer_get(info[i]),
+		       buffer_size(info[i]));
+	}
+
+	h->checksum = calc_checksum(buffer_get(&subpart_dir_buff));
+
+	struct subpart_dir *dir = buffer_get(&subpart_dir_buff);
+
+	print_subpart_dir(dir);
+
+	subpart_dir_fixup_write_buffer(&subpart_dir_buff);
+	memcpy(data, dir, buffer_size(&subpart_dir_buff));
+
+	buffer_delete(&subpart_dir_buff);
+}
+
+static enum ifwi_ret ibbp_dir_add(int type)
+{
+	struct buffer manifest;
+	struct signed_pkg_info_ext *ext;
+	struct buffer ibbl;
+	struct buffer ibb;
+
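+/* Size of the 0xFF-filled placeholder IBB entry created below */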
+#define DUMMY_IBB_SIZE			(4 * KiB)
+
+	assert(type == IBB_TYPE);
+
+	/*
+	 * Entry #1 - IBBP.man
+	 * Contains manifest header and signed pkg info extension.
+	 */
+	size_t size = MANIFEST_HDR_SIZE + SIGNED_PKG_INFO_EXT_SIZE;
+
+	alloc_buffer(&manifest, size, "IBBP.man");
+
+	struct manifest_header *man_hdr = buffer_get(&manifest);
+
+	init_manifest_header(man_hdr, size);
+
+	ext = (struct signed_pkg_info_ext *)(man_hdr + 1);
+
+	init_signed_pkg_info_ext(ext, 0, subparts[type].name);
+
+	/* Entry #2 - IBBL */
+	if (buffer_from_file(&ibbl, param.file_name))
+		return COMMAND_ERR;
+
+	/* Entry #3 - IBB */
+	alloc_buffer(&ibb, DUMMY_IBB_SIZE, "IBB");
+	memset(buffer_get(&ibb), 0xFF, DUMMY_IBB_SIZE);
+
+	/* Create subpartition */
+	struct buffer *info[] = {
+		&manifest, &ibbl, &ibb,
+	};
+	create_subpart(&ifwi_image.subpart_buf[type], &info[0],
+		       ARRAY_SIZE(info), subparts[type].name);
+
+	return REPACK_REQUIRED;
+}
+
+static enum ifwi_ret ifwi_raw_add(int type)
+{
+	if (buffer_from_file(&ifwi_image.subpart_buf[type], param.file_name))
+		return COMMAND_ERR;
+
+	printf("Sub-partition %s(%d) added from file %s.\n", param.subpart_name,
+	       type, param.file_name);
+	return REPACK_REQUIRED;
+}
+
+static enum ifwi_ret ifwi_dir_add(int type)
+{
+	if (!(subparts[type].attr & CONTAINS_DIR) ||
+	    !subparts[type].dir_ops.dir_add) {
+		ERROR("Sub-Partition %s(%d) does not support dir ops.\n",
+		      subparts[type].name, type);
+		return COMMAND_ERR;
+	}
+
+	if (!param.dentry_name) {
+		ERROR("%s: -e option required\n", __func__);
+		return COMMAND_ERR;
+	}
+
+	enum ifwi_ret ret = subparts[type].dir_ops.dir_add(type);
+
+	if (ret != COMMAND_ERR)
+		printf("Sub-partition %s(%d) entry %s added from file %s.\n",
+		       param.subpart_name, type, param.dentry_name,
+		       param.file_name);
+	else
+		ERROR("Sub-partition dir operation failed.\n");
+
+	return ret;
+}
+
+static enum ifwi_ret ifwi_add(void)
+{
+	if (!param.file_name) {
+		ERROR("%s: -f option required\n", __func__);
+		return COMMAND_ERR;
+	}
+
+	if (!param.subpart_name) {
+		ERROR("%s: -n option required\n", __func__);
+		return COMMAND_ERR;
+	}
+
+	int type = find_type_by_name(param.subpart_name);
+
+	if (type == -1)
+		return COMMAND_ERR;
+
+	const struct subpart_info *curr_subpart = &subparts[type];
+
+	if (curr_subpart->attr & AUTO_GENERATED) {
+		ERROR("Cannot add auto-generated sub-partitions.\n");
+		return COMMAND_ERR;
+	}
+
+	if (buffer_size(&ifwi_image.subpart_buf[type])) {
+		ERROR("Image already contains sub-partition %s(%d).\n",
+		      param.subpart_name, type);
+		return COMMAND_ERR;
+	}
+
+	if (param.dir_ops)
+		return ifwi_dir_add(type);
+
+	return ifwi_raw_add(type);
+}
+
+static enum ifwi_ret ifwi_delete(void)
+{
+	if (!param.subpart_name) {
+		ERROR("%s: -n option required\n", __func__);
+		return COMMAND_ERR;
+	}
+
+	int type = find_type_by_name(param.subpart_name);
+
+	if (type == -1)
+		return COMMAND_ERR;
+
+	const struct subpart_info *curr_subpart = &subparts[type];
+
+	if (curr_subpart->attr & AUTO_GENERATED) {
+		ERROR("Cannot delete auto-generated sub-partitions.\n");
+		return COMMAND_ERR;
+	}
+
+	if (buffer_size(&ifwi_image.subpart_buf[type]) == 0) {
+		printf("Image does not contain sub-partition %s(%d).\n",
+		       param.subpart_name, type);
+		return NO_ACTION_REQUIRED;
+	}
+
+	buffer_delete(&ifwi_image.subpart_buf[type]);
+	printf("Sub-Partition %s(%d) deleted.\n", subparts[type].name, type);
+	return REPACK_REQUIRED;
+}
+
+static enum ifwi_ret ifwi_dir_extract(int type)
+{
+	if (!(subparts[type].attr & CONTAINS_DIR)) {
+		ERROR("Sub-Partition %s(%d) does not support dir ops.\n",
+		      subparts[type].name, type);
+		return COMMAND_ERR;
+	}
+
+	if (!param.dentry_name) {
+		ERROR("%s: -e option required.\n", __func__);
+		return COMMAND_ERR;
+	}
+
+	struct buffer subpart_dir_buff;
+
+	parse_subpart_dir(&subpart_dir_buff, &ifwi_image.subpart_buf[type],
+			  subparts[type].name);
+
+	uint32_t i;
+	struct subpart_dir *s = buffer_get(&subpart_dir_buff);
+
+	for (i = 0; i < s->h.num_entries; i++) {
+		if (!strncmp((char *)s->e[i].name, param.dentry_name,
+			     sizeof(s->e[i].name)))
+			break;
+	}
+
+	if (i == s->h.num_entries) {
+		ERROR("Entry %s not found in subpartition for %s.\n",
+		      param.dentry_name, param.subpart_name);
+		exit(-1);
+	}
+
+	struct buffer dst;
+
+	DEBUG("Splicing buffer at 0x%x size 0x%x\n", s->e[i].offset,
+	      s->e[i].length);
+	buffer_splice(&dst, &ifwi_image.subpart_buf[type], s->e[i].offset,
+		      s->e[i].length);
+
+	if (buffer_write_file(&dst, param.file_name))
+		return COMMAND_ERR;
+
+	printf("Sub-Partition %s(%d), entry(%s) stored in %s.\n",
+	       param.subpart_name, type, param.dentry_name, param.file_name);
+
+	return NO_ACTION_REQUIRED;
+}
+
+static enum ifwi_ret ifwi_raw_extract(int type)
+{
+	if (buffer_write_file(&ifwi_image.subpart_buf[type], param.file_name))
+		return COMMAND_ERR;
+
+	printf("Sub-Partition %s(%d) stored in %s.\n", param.subpart_name, type,
+	       param.file_name);
+
+	return NO_ACTION_REQUIRED;
+}
+
+static enum ifwi_ret ifwi_extract(void)
+{
+	if (!param.file_name) {
+		ERROR("%s: -f option required\n", __func__);
+		return COMMAND_ERR;
+	}
+
+	if (!param.subpart_name) {
+		ERROR("%s: -n option required\n", __func__);
+		return COMMAND_ERR;
+	}
+
+	int type = find_type_by_name(param.subpart_name);
+
+	if (type == -1)
+		return COMMAND_ERR;
+
+	if (type == S_BPDT_TYPE) {
+		INFO("Tool does not support raw extract for %s\n",
+		     param.subpart_name);
+		return NO_ACTION_REQUIRED;
+	}
+
+	if (buffer_size(&ifwi_image.subpart_buf[type]) == 0) {
+		ERROR("Image does not contain sub-partition %s(%d).\n",
+		      param.subpart_name, type);
+		return COMMAND_ERR;
+	}
+
+	INFO("Extracting sub-partition %s(%d).\n", param.subpart_name, type);
+	if (param.dir_ops)
+		return ifwi_dir_extract(type);
+
+	return ifwi_raw_extract(type);
+}
+
+static enum ifwi_ret ifwi_print(void)
+{
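+	/* Temporarily raise verbosity for the duration of the print */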
+	verbose += 2;
+
+	struct bpdt *b = buffer_get(&ifwi_image.bpdt);
+
+	bpdt_print_header(&b->h, "BPDT");
+	bpdt_print_entries(&b->e[0], b->h.descriptor_count, "BPDT");
+
+	b = buffer_get(&ifwi_image.subpart_buf[S_BPDT_TYPE]);
+	bpdt_print_header(&b->h, "S-BPDT");
+	bpdt_print_entries(&b->e[0], b->h.descriptor_count, "S-BPDT");
+
+	if (param.dir_ops == 0) {
+		verbose -= 2;
+		return NO_ACTION_REQUIRED;
+	}
+
+	int i;
+	struct buffer subpart_dir_buf;
+
+	for (i = 0; i < MAX_SUBPARTS ; i++) {
+		if (!(subparts[i].attr & CONTAINS_DIR) ||
+		    (buffer_size(&ifwi_image.subpart_buf[i]) == 0))
+			continue;
+
+		parse_subpart_dir(&subpart_dir_buf, &ifwi_image.subpart_buf[i],
+				  subparts[i].name);
+		buffer_delete(&subpart_dir_buf);
+	}
+
+	verbose -= 2;
+
+	return NO_ACTION_REQUIRED;
+}
+
+static enum ifwi_ret ifwi_raw_replace(int type)
+{
+	buffer_delete(&ifwi_image.subpart_buf[type]);
+	return ifwi_raw_add(type);
+}
+
+static enum ifwi_ret ifwi_dir_replace(int type)
+{
+	if (!(subparts[type].attr & CONTAINS_DIR)) {
+		ERROR("Sub-Partition %s(%d) does not support dir ops.\n",
+		      subparts[type].name, type);
+		return COMMAND_ERR;
+	}
+
+	if (!param.dentry_name) {
+		ERROR("%s: -e option required.\n", __func__);
+		return COMMAND_ERR;
+	}
+
+	struct buffer subpart_dir_buf;
+
+	parse_subpart_dir(&subpart_dir_buf, &ifwi_image.subpart_buf[type],
+			  subparts[type].name);
+
+	uint32_t i;
+	struct subpart_dir *s = buffer_get(&subpart_dir_buf);
+
+	for (i = 0; i < s->h.num_entries; i++) {
+		if (!strncmp((char *)s->e[i].name, param.dentry_name,
+			     sizeof(s->e[i].name)))
+			break;
+	}
+
+	if (i == s->h.num_entries) {
+		ERROR("Entry %s not found in subpartition for %s.\n",
+		      param.dentry_name, param.subpart_name);
+		exit(-1);
+	}
+
+	struct buffer b;
+
+	if (buffer_from_file(&b, param.file_name)) {
+		ERROR("Failed to read %s\n", param.file_name);
+		exit(-1);
+	}
+
+	struct buffer dst;
+	size_t dst_size = buffer_size(&ifwi_image.subpart_buf[type]) +
+				      buffer_size(&b) - s->e[i].length;
+	size_t subpart_start = s->e[i].offset;
+	size_t subpart_end = s->e[i].offset + s->e[i].length;
+
+	alloc_buffer(&dst, dst_size, ifwi_image.subpart_buf[type].name);
+
+	uint8_t *src_data = buffer_get(&ifwi_image.subpart_buf[type]);
+	uint8_t *dst_data = buffer_get(&dst);
+	size_t curr_offset = 0;
+
+	/* Copy data before the sub-partition entry */
+	memcpy(dst_data + curr_offset, src_data, subpart_start);
+	curr_offset += subpart_start;
+
+	/* Copy sub-partition entry */
+	memcpy(dst_data + curr_offset, buffer_get(&b), buffer_size(&b));
+	curr_offset += buffer_size(&b);
+
+	/* Copy remaining data */
+	memcpy(dst_data + curr_offset, src_data + subpart_end,
+	       buffer_size(&ifwi_image.subpart_buf[type]) - subpart_end);
+
+	/* Update sub-partition buffer */
+	buffer_delete(&ifwi_image.subpart_buf[type]);
+	ifwi_image.subpart_buf[type] = dst;
+
+	/* Update length of entry in the sub-partition */
+	int delta = buffer_size(&b) - s->e[i].length;
+
+	s->e[i].length = buffer_size(&b);
+	buffer_delete(&b);
+
+	/* Shift entries after the replaced one by the change in length */
+	for (i = i + 1; i < s->h.num_entries; i++)
+		s->e[i].offset += delta;
+
+	/* Re-calculate checksum */
+	s->h.checksum = calc_checksum(s);
+
+	/* Convert members to little-endian */
+	subpart_dir_fixup_write_buffer(&subpart_dir_buf);
+
+	memcpy(dst_data, buffer_get(&subpart_dir_buf),
+	       buffer_size(&subpart_dir_buf));
+
+	buffer_delete(&subpart_dir_buf);
+
+	printf("Sub-partition %s(%d) entry %s replaced from file %s.\n",
+	       param.subpart_name, type, param.dentry_name, param.file_name);
+
+	return REPACK_REQUIRED;
+}
+
+static enum ifwi_ret ifwi_replace(void)
+{
+	if (!param.file_name) {
+		ERROR("%s: -f option required\n", __func__);
+		return COMMAND_ERR;
+	}
+
+	if (!param.subpart_name) {
+		ERROR("%s: -n option required\n", __func__);
+		return COMMAND_ERR;
+	}
+
+	int type = find_type_by_name(param.subpart_name);
+
+	if (type == -1)
+		return COMMAND_ERR;
+
+	const struct subpart_info *curr_subpart = &subparts[type];
+
+	if (curr_subpart->attr & AUTO_GENERATED) {
+		ERROR("Cannot replace auto-generated sub-partitions.\n");
+		return COMMAND_ERR;
+	}
+
+	if (buffer_size(&ifwi_image.subpart_buf[type]) == 0) {
+		ERROR("Image does not contain sub-partition %s(%d).\n",
+		      param.subpart_name, type);
+		return COMMAND_ERR;
+	}
+
+	if (param.dir_ops)
+		return ifwi_dir_replace(type);
+
+	return ifwi_raw_replace(type);
+}
+
+static enum ifwi_ret ifwi_create(void)
+{
+	/*
+	 * Create peels off any non-IFWI content present in the input buffer
+	 * and creates an output file containing only the IFWI.
+	 */
+
+	if (!param.file_name) {
+		ERROR("%s: -f option required\n", __func__);
+		return COMMAND_ERR;
+	}
+
+	/* Peel off any non-IFWI prefix */
+	buffer_seek(&ifwi_image.input_buff,
+		    ifwi_image.input_ifwi_start_offset);
+	/* Peel off any non-IFWI suffix */
+	buffer_set_size(&ifwi_image.input_buff,
+			ifwi_image.input_ifwi_end_offset -
+			ifwi_image.input_ifwi_start_offset);
+
+	/*
+	 * Adjust start and end offset of IFWI now that non-IFWI prefix is gone.
+	 */
+	ifwi_image.input_ifwi_end_offset -= ifwi_image.input_ifwi_start_offset;
+	ifwi_image.input_ifwi_start_offset = 0;
+
+	param.image_name = param.file_name;
+
+	return REPACK_REQUIRED;
+}
+
+struct command {
+	const char *name;
+	const char *optstring;
+	enum ifwi_ret (*function)(void);
+};
+
+static const struct command commands[] = {
+	{"add", "f:n:e:dvh?", ifwi_add},
+	{"create", "f:vh?", ifwi_create},
+	{"delete", "f:n:vh?", ifwi_delete},
+	{"extract", "f:n:e:dvh?", ifwi_extract},
+	{"print", "dh?", ifwi_print},
+	{"replace", "f:n:e:dvh?", ifwi_replace},
+};
+
+static struct option long_options[] = {
+	{"subpart_dentry",  required_argument, 0, 'e'},
+	{"file",	    required_argument, 0, 'f'},
+	{"help",	    required_argument, 0, 'h'},
+	{"name",	    required_argument, 0, 'n'},
+	{"dir_ops",         no_argument,       0, 'd'},
+	{"verbose",	    no_argument,       0, 'v'},
+	{NULL,		    0,                 0,  0 }
+};
+
+static void usage(const char *name)
+{
+	printf("ifwitool: Utility for IFWI manipulation\n\n"
+	       "USAGE:\n"
+	       " %s [-h]\n"
+	       " %s FILE COMMAND [PARAMETERS]\n\n"
+	       "COMMANDs:\n"
+	       " add -f FILE -n NAME [-d -e ENTRY]\n"
+	       " create -f FILE\n"
+	       " delete -n NAME\n"
+	       " extract -f FILE -n NAME [-d -e ENTRY]\n"
+	       " print [-d]\n"
+	       " replace -f FILE -n NAME [-d -e ENTRY]\n"
+	       "OPTIONs:\n"
+	       " -f FILE : File to read/write/create/extract\n"
+	       " -d      : Perform directory operation\n"
+	       " -e ENTRY: Name of directory entry to operate on\n"
+	       " -v      : Verbose level\n"
+	       " -h      : Help message\n"
+	       " -n NAME : Name of sub-partition to operate on\n",
+	       name, name
+	       );
+
+	printf("\nNAME should be one of:\n");
+	int i;
+
+	for (i = 0; i < MAX_SUBPARTS; i++)
+		printf("%s(%s)\n", subparts[i].name, subparts[i].readable_name);
+	printf("\n");
+}
+
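+/*
+ * Example invocation (hypothetical file and entry names):
+ *
+ *   ifwitool image.bin extract -n IBBP -d -e IBBL -f ibbl.bin
+ */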
+int main(int argc, char **argv)
+{
+	if (argc < 3) {
+		usage(argv[0]);
+		return 1;
+	}
+
+	param.image_name = argv[1];
+	char *cmd = argv[2];
+
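+	/* Skip image name and command so getopt_long sees only options */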
+	optind += 2;
+
+	uint32_t i;
+
+	for (i = 0; i < ARRAY_SIZE(commands); i++) {
+		if (strcmp(cmd, commands[i].name) != 0)
+			continue;
+
+		int c;
+
+		while (1) {
+			int option_index;
+
+			c = getopt_long(argc, argv, commands[i].optstring,
+					long_options, &option_index);
+
+			if (c == -1)
+				break;
+
+			/* Filter out illegal long options */
+			if (!strchr(commands[i].optstring, c)) {
+				ERROR("%s: invalid option -- '%c'\n", argv[0],
+				      c);
+				c = '?';
+			}
+
+			switch (c) {
+			case 'n':
+				param.subpart_name = optarg;
+				break;
+			case 'f':
+				param.file_name = optarg;
+				break;
+			case 'd':
+				param.dir_ops = 1;
+				break;
+			case 'e':
+				param.dentry_name = optarg;
+				break;
+			case 'v':
+				verbose++;
+				break;
+			case 'h':
+			case '?':
+				usage(argv[0]);
+				return 1;
+			default:
+				break;
+			}
+		}
+
+		if (ifwi_parse()) {
+			ERROR("%s: ifwi parsing failed\n", argv[0]);
+			return 1;
+		}
+
+		enum ifwi_ret ret = commands[i].function();
+
+		if (ret == COMMAND_ERR) {
+			ERROR("%s: failed execution\n", argv[0]);
+			return 1;
+		}
+
+		if (ret == REPACK_REQUIRED)
+			ifwi_repack();
+
+		return 0;
+	}
+
+	ERROR("%s: invalid command\n", argv[0]);
+	return 1;
+}

+ 2 - 2
tools/patman/command.py

@@ -108,8 +108,8 @@ def RunPipe(pipe_list, infile=None, outfile=None,
     return result
 
 def Output(*cmd, **kwargs):
-    raise_on_error = kwargs.get('raise_on_error', True)
-    return RunPipe([cmd], capture=True, raise_on_error=raise_on_error).stdout
+    kwargs['raise_on_error'] = kwargs.get('raise_on_error', True)
+    return RunPipe([cmd], capture=True, **kwargs).stdout
 
 def OutputOneLine(*cmd, **kwargs):
     raise_on_error = kwargs.pop('raise_on_error', True)

+ 4 - 2
tools/patman/test_util.py

@@ -46,9 +46,10 @@ def RunTestCoverage(prog, filter_fname, exclude_list, build_dir, required=None):
         glob_list = []
     glob_list += exclude_list
     glob_list += ['*libfdt.py', '*site-packages*', '*dist-packages*']
+    test_cmd = 'test' if 'binman.py' in prog else '-t'
     cmd = ('PYTHONPATH=$PYTHONPATH:%s/sandbox_spl/tools %s-coverage run '
-           '--omit "%s" %s -P1 -t' % (build_dir, PYTHON, ','.join(glob_list),
-                                      prog))
+           '--omit "%s" %s %s -P1' % (build_dir, PYTHON, ','.join(glob_list),
+                                      prog, test_cmd))
     os.system(cmd)
     stdout = command.Output('%s-coverage' % PYTHON, 'report')
     lines = stdout.splitlines()
@@ -57,6 +58,7 @@ def RunTestCoverage(prog, filter_fname, exclude_list, build_dir, required=None):
         test_set = set([os.path.splitext(os.path.basename(line.split()[0]))[0]
                         for line in lines if '/etype/' in line])
         missing_list = required
+        missing_list.discard('__init__')
         missing_list.difference_update(test_set)
         if missing_list:
             print('Missing tests for %s' % (', '.join(missing_list)))

+ 136 - 5
tools/patman/tools.py

@@ -3,6 +3,8 @@
 # Copyright (c) 2016 Google, Inc
 #
 
+from __future__ import print_function
+
 import command
 import glob
 import os
@@ -24,6 +26,8 @@ chroot_path = None
 # Search paths to use for Filename(), used to find files
 search_paths = []
 
+tool_search_paths = []
+
 # Tools and the packages that contain them, on debian
 packages = {
     'lz4': 'liblz4-tool',
@@ -154,26 +158,56 @@ def Align(pos, align):
 def NotPowerOfTwo(num):
     return num and (num & (num - 1))
 
-def PathHasFile(fname):
+def SetToolPaths(toolpaths):
+    """Set the path to search for tools
+
+    Args:
+        toolpaths: List of paths to search for tools executed by Run()
+    """
+    global tool_search_paths
+
+    tool_search_paths = toolpaths
+
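+# A minimal usage sketch (hypothetical paths):
+#
+#     SetToolPaths(['build/tools', '/opt/custom/bin'])
+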
+def PathHasFile(path_spec, fname):
     """Check if a given filename is in the PATH
 
     Args:
+        path_spec: Value of PATH variable to check
         fname: Filename to check
 
     Returns:
         True if found, False if not
     """
-    for dir in os.environ['PATH'].split(':'):
+    for dir in path_spec.split(':'):
         if os.path.exists(os.path.join(dir, fname)):
             return True
     return False
 
 def Run(name, *args, **kwargs):
+    """Run a tool with some arguments
+
+    This runs a 'tool', which is a program used by binman to process files and
+    perhaps produce some output. Tools can be located on the PATH or in a
+    search path.
+
+    Args:
+        name: Command name to run
+        args: Arguments to the tool
+        kwargs: Options to pass to command.Run()
+
+    Returns:
+        CommandResult object
+    """
     try:
-        return command.Run(name, *args, cwd=outdir, capture=True, **kwargs)
+        env = None
+        if tool_search_paths:
+            env = dict(os.environ)
+            env['PATH'] = ':'.join(tool_search_paths) + ':' + env['PATH']
+        return command.Run(name, *args, capture=True,
+                           capture_stderr=True, env=env, **kwargs)
     except:
-        if not PathHasFile(name):
-            msg = "Plesae install tool '%s'" % name
+        if env and not PathHasFile(env['PATH'], name):
+            msg = "Please install tool '%s'" % name
             package = packages.get(name)
             if package:
                  msg += " (e.g. from package '%s')" % package
@@ -342,3 +376,100 @@ def ToBytes(string):
     if sys.version_info[0] >= 3:
         return string.encode('utf-8')
     return string
+
+def Compress(indata, algo):
+    """Compress some data using a given algorithm
+
+    Note that for lzma this uses an old version of the algorithm, not that
+    provided by xz.
+
+    This requires 'lz4' and 'lzma_alone' tools. It also requires an output
+    directory to be previously set up, by calling PrepareOutputDir().
+
+    Args:
+        indata: Input data to compress
+        algo: Algorithm to use ('none', 'gzip', 'lz4' or 'lzma')
+
+    Returns:
+        Compressed data
+    """
+    if algo == 'none':
+        return indata
+    fname = GetOutputFilename('%s.comp.tmp' % algo)
+    WriteFile(fname, indata)
+    if algo == 'lz4':
+        data = Run('lz4', '--no-frame-crc', '-c', fname, binary=True)
+    # cbfstool uses a very old version of lzma
+    elif algo == 'lzma':
+        outfname = GetOutputFilename('%s.comp.otmp' % algo)
+        Run('lzma_alone', 'e', fname, outfname, '-lc1', '-lp0', '-pb0', '-d8')
+        data = ReadFile(outfname)
+    elif algo == 'gzip':
+        data = Run('gzip', '-c', fname, binary=True)
+    else:
+        raise ValueError("Unknown algorithm '%s'" % algo)
+    return data
+
+def Decompress(indata, algo):
+    """Decompress some data using a given algorithm
+
+    Note that for lzma this uses an old version of the algorithm, not that
+    provided by xz.
+
+    This requires 'lz4' and 'lzma_alone' tools. It also requires an output
+    directory to be previously set up, by calling PrepareOutputDir().
+
+    Args:
+        indata: Input data to decompress
+        algo: Algorithm to use ('none', 'gzip', 'lz4' or 'lzma')
+
+    Returns:
+        Decompressed data
+    """
+    if algo == 'none':
+        return indata
+    fname = GetOutputFilename('%s.decomp.tmp' % algo)
+    with open(fname, 'wb') as fd:
+        fd.write(indata)
+    if algo == 'lz4':
+        data = Run('lz4', '-dc', fname, binary=True)
+    elif algo == 'lzma':
+        outfname = GetOutputFilename('%s.decomp.otmp' % algo)
+        Run('lzma_alone', 'd', fname, outfname)
+        data = ReadFile(outfname)
+    elif algo == 'gzip':
+        data = Run('gzip', '-cd', fname, binary=True)
+    else:
+        raise ValueError("Unknown algorithm '%s'" % algo)
+    return data
+
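+# A minimal round-trip sketch, assuming PrepareOutputDir() has been called
+# and the 'lz4' tool is on the PATH:
+#
+#     comp = Compress(b'hello', 'lz4')
+#     assert Decompress(comp, 'lz4') == b'hello'
+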
+CMD_CREATE, CMD_DELETE, CMD_ADD, CMD_REPLACE, CMD_EXTRACT = range(5)
+
+IFWITOOL_CMDS = {
+    CMD_CREATE: 'create',
+    CMD_DELETE: 'delete',
+    CMD_ADD: 'add',
+    CMD_REPLACE: 'replace',
+    CMD_EXTRACT: 'extract',
+    }
+
+def RunIfwiTool(ifwi_file, cmd, fname=None, subpart=None, entry_name=None):
+    """Run ifwitool with the given arguments:
+
+    Args:
+        ifwi_file: IFWI file to operate on
+        cmd: Command to execute (CMD_...)
+        fname: Filename of file to add/replace/extract/create (None for
+            CMD_DELETE)
+        subpart: Name of sub-partition to operate on (None for CMD_CREATE)
+        entry_name: Name of directory entry to operate on, or None if none
+    """
+    args = ['ifwitool', ifwi_file]
+    args.append(IFWITOOL_CMDS[cmd])
+    if fname:
+        args += ['-f', fname]
+    if subpart:
+        args += ['-n', subpart]
+    if entry_name:
+        args += ['-d', '-e', entry_name]
+    Run(*args)
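+
+# For example (hypothetical file and entry names):
+#
+#     RunIfwiTool('image.bin', CMD_EXTRACT, fname='ibbl.bin',
+#                 subpart='IBBP', entry_name='IBBL')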

+ 9 - 1
tools/patman/tout.py

@@ -131,13 +131,21 @@ def Info(msg):
     """
     _Output(3, msg)
 
+def Detail(msg):
+    """Display a detailed message
+
+    Args:
+        msg: Message to display.
+    """
+    _Output(4, msg)
+
 def Debug(msg):
     """Display a debug message
 
     Args:
         msg; Message to display.
     """
-    _Output(4, msg)
+    _Output(5, msg)
 
 def UserOutput(msg):
     """Display a message regardless of the current output level.