summaryrefslogtreecommitdiff
authorXindong Xu <xindong.xu@amlogic.com>2020-01-13 07:22:21 (GMT)
committer Robin Lee <rgl@google.com>2020-01-30 22:22:37 (GMT)
commitac6395f2f1085970ae9f0a02e790c80e17e24e3c (patch)
tree76a4b2376901ae9906022f9c74ea788c0635d9f0
parent6c0d674b3ff1ce558974771b8ae3ef18c5f54fdd (diff)
downloadcommon-ac6395f2f1085970ae9f0a02e790c80e17e24e3c.zip
common-ac6395f2f1085970ae9f0a02e790c80e17e24e3c.tar.gz
common-ac6395f2f1085970ae9f0a02e790c80e17e24e3c.tar.bz2
ota_update: fix ota update error from Q to R [1/3]
bug: 147673123 Problem: BUG=147673123 ota update error from Q to R 1. Timed out waiting for device path: /dev/block/mapper/by-uuid/*** 2. need factory reset after ota Solution: 1. remove flashing the super_empty_all.img via flash-all.dat 2. remove writing super_empty_all.img into ota.zip by make otapackage If you want to update from null dynamic to dynamic, use make ota_amlogic to get a special ota zip 3. enable console in recovery mode Verify: newton Change-Id: I7b86b64479a6d5a8e8eca7345e938d7f8e22ab52 Signed-off-by: Xindong Xu <xindong.xu@amlogic.com>
Diffstat
-rw-r--r--[-rwxr-xr-x]factory.mk77
-rwxr-xr-xflash-all-dynamic-P2Q.bat51
-rwxr-xr-xflash-all-dynamic-P2Q.sh90
-rwxr-xr-xflash-all-dynamic.bat1
-rwxr-xr-xflash-all-dynamic.sh1
-rwxr-xr-xota_amlogic.py1308
-rw-r--r--products/mbox/g12a/recovery/init.recovery.amlogic.rc2
-rw-r--r--[-rwxr-xr-x]products/mbox/gxl/recovery/init.recovery.amlogic.rc2
-rw-r--r--products/mbox/sm1/recovery/init.recovery.amlogic.rc2
-rw-r--r--[-rwxr-xr-x]products/tv/tl1/recovery/init.recovery.amlogic.rc2
-rw-r--r--[-rwxr-xr-x]products/tv/tm2/recovery/init.recovery.amlogic.rc2
-rwxr-xr-xreleasetools.py3
12 files changed, 904 insertions, 637 deletions
diff --git a/factory.mk b/factory.mk
index b604e60..e7b60cb 100755..100644
--- a/factory.mk
+++ b/factory.mk
@@ -65,8 +65,12 @@ ifeq ($(TARGET_BUILD_TYPE),debug)
name_aml := $(name_aml)_debug
endif
+INTERNAL_OTA_PACKAGE_TARGET := $(PRODUCT_OUT)/$(name_aml)-ota-$(FILE_NAME_TAG).zip
+
AML_TARGET := $(PRODUCT_OUT)/obj/PACKAGING/target_files_intermediates/$(name_aml)-target_files-$(FILE_NAME)
+AML_TARGET_ZIP := $(PRODUCT_OUT)/super_empty_all.img
+
ifdef KERNEL_DEVICETREE
DTBTOOL := $(BOARD_AML_VENDOR_PATH)/tools/dtbTool
@@ -509,14 +513,6 @@ $(INSTALLED_AML_UPGRADE_PACKAGE_TARGET): \
$(INTERNAL_SUPERIMAGE_DIST_TARGET) \
$(TARGET_USB_BURNING_V2_DEPEND_MODULES)
mkdir -p $(PRODUCT_UPGRADE_OUT)
-ifeq ($(PRODUCT_USE_DYNAMIC_PARTITIONS), true)
- dd if=/dev/zero of=$(PRODUCT_OUT)/empty_1.bin bs=1 count=4096
- cp $(AML_TARGET)/IMAGES/super_empty.img $(PRODUCT_OUT)/super_empty.img
- dd if=$(AML_TARGET)/IMAGES/super_empty.img bs=1 count=4096 skip=0 of=$(PRODUCT_OUT)/empty_2.bin
- dd if=$(AML_TARGET)/IMAGES/super_empty.img bs=1 count=4096 skip=4096 of=$(PRODUCT_OUT)/empty_3.bin
- rm $(AML_TARGET)/IMAGES/super_empty.img
- cat $(PRODUCT_OUT)/empty_1.bin $(PRODUCT_OUT)/empty_2.bin $(PRODUCT_OUT)/empty_1.bin $(PRODUCT_OUT)/empty_3.bin > $(AML_TARGET)/IMAGES/super_empty.img
-endif
$(hide) $(foreach file,$(VB_CHECK_IMAGES), \
cp $(AML_TARGET)/IMAGES/$(file) $(PRODUCT_OUT)/;\
)
@@ -602,7 +598,7 @@ endif
.PHONY:aml_fastboot_zip
aml_fastboot_zip:$(INSTALLED_AML_FASTBOOT_ZIP)
-$(INSTALLED_AML_FASTBOOT_ZIP): $(addprefix $(PRODUCT_OUT)/,$(FASTBOOT_IMAGES)) $(BUILT_ODMIMAGE_TARGET) $(INSTALLED_AML_UPGRADE_PACKAGE_TARGET)
+$(INSTALLED_AML_FASTBOOT_ZIP): $(addprefix $(PRODUCT_OUT)/,$(FASTBOOT_IMAGES)) $(BUILT_ODMIMAGE_TARGET) $(INSTALLED_AML_UPGRADE_PACKAGE_TARGET) $(AML_TARGET_ZIP)
echo "install $@"
rm -rf $(PRODUCT_OUT)/fastboot_auto
mkdir -p $(PRODUCT_OUT)/fastboot_auto
@@ -622,6 +618,8 @@ ifeq ($(AB_OTA_UPDATER),true)
else
cp device/amlogic/common/flash-all-dynamic.sh $(PRODUCT_OUT)/fastboot_auto/flash-all.sh
cp device/amlogic/common/flash-all-dynamic.bat $(PRODUCT_OUT)/fastboot_auto/flash-all.bat
+ cp device/amlogic/common/flash-all-dynamic-P2Q.sh $(PRODUCT_OUT)/fastboot_auto/flash-all-P2Q.sh
+ cp device/amlogic/common/flash-all-dynamic-P2Q.bat $(PRODUCT_OUT)/fastboot_auto/flash-all-P2Q.bat
endif
cp $(PRODUCT_OUT)/super_empty.img $(PRODUCT_OUT)/fastboot_auto/
else
@@ -631,7 +629,7 @@ endif
$(hide) $(foreach file,$(VB_CHECK_IMAGES), \
cp -f $(AML_TARGET)/IMAGES/$(file) $(PRODUCT_OUT)/fastboot_auto/$(file); \
)
- cp -f $(AML_TARGET)/IMAGES/super_empty.img $(PRODUCT_OUT)/fastboot_auto/super_empty_all.img
+ cp -f $(PRODUCT_OUT)/super_empty_all.img $(PRODUCT_OUT)/fastboot_auto/super_empty_all.img
cd $(PRODUCT_OUT)/fastboot_auto; zip -1 -r ../$(TARGET_PRODUCT)-fastboot-flashall-$(FILE_NAME).zip *
PATH=$$(cd ./$(TARGET_HOST_TOOL_PATH); pwd):$$PATH zipnote $@ | sed 's/@ \([a-z]*.img\).encrypt/&\n@=\1\n/' | \
PATH=$$(cd ./$(TARGET_HOST_TOOL_PATH); pwd):$$PATH zipnote -w $@
@@ -656,46 +654,8 @@ EXTRA_SCRIPT := $(TARGET_DEVICE_DIR)/../../../device/amlogic/common/recovery/upd
$(AMLOGIC_OTA_PACKAGE_TARGET): $(AML_TARGET).zip $(BUILT_ODMIMAGE_TARGET)
@echo "Package OTA2: $@"
-ifeq ($(BOARD_USES_ODMIMAGE),true)
- @echo "copy $(INSTALLED_ODMIMAGE_TARGET)"
- mkdir -p $(AML_TARGET)/IMAGES
- cp $(INSTALLED_ODMIMAGE_TARGET) $(AML_TARGET)/IMAGES/
- -cp $(PRODUCT_OUT)/odm.map $(AML_TARGET)/IMAGES/
-
- mkdir -p $(AML_TARGET)/META
- echo "odm_fs_type=$(BOARD_ODMIMAGE_FILE_SYSTEM_TYPE)" >> $(AML_TARGET)/META/misc_info.txt
- echo "odm_size=$(BOARD_ODMIMAGE_PARTITION_SIZE)" >> $(AML_TARGET)/META/misc_info.txt
- echo "odm_journal_size=$(BOARD_ODMIMAGE_JOURNAL_SIZE)" >> $(AML_TARGET)/META/misc_info.txt
- echo "odm_extfs_inode_count=$(BOARD_ODMIMAGE_EXTFS_INODE_COUNT)" >> $(AML_TARGET)/META/misc_info.txt
- mkdir -p $(AML_TARGET)/ODM
- cp -a $(PRODUCT_OUT)/odm/* $(AML_TARGET)/ODM/
-endif
-ifneq ($(INSTALLED_AMLOGIC_BOOTLOADER_TARGET),)
- @echo "copy $(INSTALLED_AMLOGIC_BOOTLOADER_TARGET)"
- mkdir -p $(AML_TARGET)/IMAGES
- cp $(INSTALLED_AMLOGIC_BOOTLOADER_TARGET) $(AML_TARGET)/IMAGES/bootloader.img
-endif
-ifeq ($(PRODUCT_GOOGLEREF_SECURE_BOOT),true)
- cp $(PRODUCT_OUT)/bootloader.img $(AML_TARGET)/IMAGES/bootloader.img
-endif
-ifneq ($(INSTALLED_AML_LOGO),)
- @echo "copy $(INSTALLED_AML_LOGO)"
- mkdir -p $(AML_TARGET)/IMAGES
- cp $(INSTALLED_AML_LOGO) $(AML_TARGET)/IMAGES/
-endif
-ifeq ($(strip $(TARGET_OTA_UPDATE_DTB)),true)
- @echo "copy $(INSTALLED_BOARDDTB_TARGET)"
- mkdir -p $(AML_TARGET)/IMAGES
- cp $(INSTALLED_BOARDDTB_TARGET) $(AML_TARGET)/IMAGES/
-endif
-ifeq ($(PRODUCT_BUILD_SECURE_BOOT_IMAGE_DIRECTLY), true)
- @echo "PRODUCT_BUILD_SECURE_BOOT_IMAGE_DIRECTLY is $(PRODUCT_BUILD_SECURE_BOOT_IMAGE_DIRECTLY)"
- mkdir -p $(AML_TARGET)/IMAGES
- cp $(INSTALLED_BOOTIMAGE_TARGET) $(AML_TARGET)/IMAGES/boot.img
- -cp $(INSTALLED_RECOVERYIMAGE_TARGET) $(AML_TARGET)/IMAGES/recovery.img
-else
- -cp $(PRODUCT_OUT)/recovery.img $(AML_TARGET)/IMAGES/recovery.img
-endif
+ mkdir -p $(AML_TARGET)/IMAGES/
+ cp $(PRODUCT_OUT)/super_empty_all.img $(AML_TARGET)/IMAGES/
$(hide) PATH=$(foreach p,$(INTERNAL_USERIMAGES_BINARY_PATHS),$(p):)$$PATH MKBOOTIMG=$(MKBOOTIMG) \
./device/amlogic/common/ota_amlogic.py -v \
--block \
@@ -729,6 +689,21 @@ $(INSTALLED_AML_EMMC_BIN): $(INSTALLED_AML_UPGRADE_PACKAGE_TARGET) $(PRODUCT_CFG
aml_emmc_bin :$(INSTALLED_AML_EMMC_BIN)
endif # ifeq ($(TARGET_SUPPORT_USB_BURNING_V2),true)
+$(AML_TARGET_ZIP): $(INSTALLED_AML_UPGRADE_PACKAGE_TARGET) $(TARGET_USB_BURNING_V2_DEPEND_MODULES) $(INTERNAL_OTA_PACKAGE_TARGET)
+ifeq ($(PRODUCT_USE_DYNAMIC_PARTITIONS), true)
+ dd if=/dev/zero of=$(PRODUCT_OUT)/empty_1.bin bs=1 count=4096
+ cp $(AML_TARGET)/IMAGES/super_empty.img $(PRODUCT_OUT)/super_empty.img
+ dd if=$(AML_TARGET)/IMAGES/super_empty.img bs=1 count=4096 skip=0 of=$(PRODUCT_OUT)/empty_2.bin
+ dd if=$(AML_TARGET)/IMAGES/super_empty.img bs=1 count=4096 skip=4096 of=$(PRODUCT_OUT)/empty_3.bin
+ rm $(AML_TARGET)/IMAGES/super_empty.img
+ cat $(PRODUCT_OUT)/empty_1.bin $(PRODUCT_OUT)/empty_2.bin $(PRODUCT_OUT)/empty_1.bin $(PRODUCT_OUT)/empty_3.bin > $(AML_TARGET)/IMAGES/super_empty.img
+ rm -rf $(AML_TARGET).zip
+ #cd $(PRODUCT_OUT)/obj/PACKAGING/target_files_intermediates/; mkdir -p IMAGES; \
+ #cp $(name_aml)-target_files-$(FILE_NAME)/IMAGES/super_empty.img IMAGES/; zip -u $(name_aml)-target_files-$(FILE_NAME).zip ./IMAGES/super_empty.img
+ ./out/soong/host/linux-x86/bin/soong_zip -d -o $(AML_TARGET).zip -C $(AML_TARGET) -l $(AML_TARGET).zip.list
+ cp $(AML_TARGET)/IMAGES/super_empty.img $@
+endif
+
droidcore: $(INSTALLED_MANIFEST_XML)
otapackage: otatools-package
@@ -740,4 +715,4 @@ endif
.PHONY: aml_factory_zip
aml_factory_zip: $(INSTALLED_AML_UPGRADE_PACKAGE_TARGET) $(INSTALLED_MANIFEST_XML) $(INSTALLED_AML_FASTBOOT_ZIP)
-ota_amlogic: $(INSTALLED_AML_UPGRADE_PACKAGE_TARGET) $(INSTALLED_MANIFEST_XML) $(INSTALLED_AML_FASTBOOT_ZIP) otapackage
+$(AMLOGIC_OTA_PACKAGE_TARGET): $(INSTALLED_AML_UPGRADE_PACKAGE_TARGET) $(INSTALLED_MANIFEST_XML) $(AML_TARGET_ZIP) $(INSTALLED_AML_FASTBOOT_ZIP) $(INTERNAL_OTA_PACKAGE_TARGET)
diff --git a/flash-all-dynamic-P2Q.bat b/flash-all-dynamic-P2Q.bat
new file mode 100755
index 0000000..d1720bc
--- a/dev/null
+++ b/flash-all-dynamic-P2Q.bat
@@ -0,0 +1,51 @@
+@ECHO OFF
+:: Copyright 2012 The Android Open Source Project
+::
+:: Licensed under the Apache License, Version 2.0 (the "License");
+:: you may not use this file except in compliance with the License.
+:: You may obtain a copy of the License at
+::
+:: http://www.apache.org/licenses/LICENSE-2.0
+::
+:: Unless required by applicable law or agreed to in writing, software
+:: distributed under the License is distributed on an "AS IS" BASIS,
+:: WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+:: See the License for the specific language governing permissions and
+:: limitations under the License.
+
+PATH=%PATH%;"%SYSTEMROOT%\System32"
+adb reboot bootloader
+fastboot flashing unlock_critical
+fastboot flashing unlock
+fastboot flash bootloader bootloader.img
+fastboot flash bootloader-boot0 bootloader.img
+fastboot flash bootloader-boot1 bootloader.img
+fastboot erase env
+fastboot flash dts dt.img
+fastboot flash dtbo dtbo.img
+fastboot reboot-bootloader
+ping -n 5 127.0.0.1 >nul
+fastboot flashing unlock_critical
+fastboot flashing unlock
+fastboot -w
+fastboot erase param
+fastboot erase tee
+fastboot flash vbmeta vbmeta.img
+fastboot flash logo logo.img
+fastboot flash boot boot.img
+fastboot flash recovery recovery.img
+fastboot reboot fastboot
+ping -n 10 127.0.0.1 >nul
+fastboot flash super super_empty_all.img
+fastboot flash odm odm.img
+fastboot flash system system.img
+fastboot flash vendor vendor.img
+fastboot flash product product.img
+fastboot reboot-bootloader
+ping -n 5 127.0.0.1 >nul
+fastboot flashing lock
+fastboot reboot
+
+echo Press any key to exit...
+pause >nul
+exit
diff --git a/flash-all-dynamic-P2Q.sh b/flash-all-dynamic-P2Q.sh
new file mode 100755
index 0000000..9cc739c
--- a/dev/null
+++ b/flash-all-dynamic-P2Q.sh
@@ -0,0 +1,90 @@
+#!/bin/bash
+
+# Copyright 2012 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -e
+cd $(dirname $0)
+
+lflag="unlock"
+if [[ $# -gt 0 ]]; then
+ lflag="$1"
+fi
+
+sern=""
+if [[ $# -gt 1 ]]; then
+ sern="-s $2"
+fi
+
+skipreboot=""
+if [[ $# -gt 2 ]]; then
+ skipreboot="$3"
+fi
+
+if [ "$skipreboot" != "skip" ]
+then
+ # Ignore failure, in case we are already in fastboot.
+ adb $sern reboot bootloader || true
+fi
+
+function flash_with_retry() {
+ local partition=${1};
+ local img=${2};
+ msg=$(fastboot ${sern} flash ${partition} ${img} 2>&1)
+ echo "${msg}"
+ if [[ ${msg} =~ 'FAILED' ]]; then
+ echo "\nFlashing ${img} is not done properly. Do it again."
+ fastboot ${sern} reboot-bootloader
+ fastboot ${sern} flash ${partition} ${img}
+ fi
+}
+
+fastboot $sern flashing unlock_critical
+fastboot $sern flashing unlock
+fastboot $sern flash bootloader bootloader.img
+fastboot $sern flash bootloader-boot0 bootloader.img
+fastboot $sern flash bootloader-boot1 bootloader.img
+fastboot $sern erase env
+fastboot $sern flash dts dt.img
+fastboot $sern flash dtbo dtbo.img
+fastboot $sern reboot-bootloader
+
+sleep 5
+fastboot $sern flashing unlock_critical
+fastboot $sern flashing unlock
+fastboot $sern -w
+fastboot $sern erase param
+fastboot $sern erase tee
+
+flash_with_retry vbmeta vbmeta.img
+flash_with_retry logo logo.img
+flash_with_retry boot boot.img
+flash_with_retry recovery recovery.img
+fastboot $sern reboot fastboot
+sleep 10
+
+flash_with_retry super super_empty_all.img
+flash_with_retry odm odm.img
+flash_with_retry system system.img
+flash_with_retry vendor vendor.img
+flash_with_retry product product.img
+fastboot $sern reboot-bootloader
+sleep 5
+
+if [ "$lflag" = "lock" ]
+then
+ fastboot $sern flashing lock
+fi
+
+fastboot $sern reboot
diff --git a/flash-all-dynamic.bat b/flash-all-dynamic.bat
index d1720bc..f6a9f17 100755
--- a/flash-all-dynamic.bat
+++ b/flash-all-dynamic.bat
@@ -36,7 +36,6 @@ fastboot flash boot boot.img
fastboot flash recovery recovery.img
fastboot reboot fastboot
ping -n 10 127.0.0.1 >nul
-fastboot flash super super_empty_all.img
fastboot flash odm odm.img
fastboot flash system system.img
fastboot flash vendor vendor.img
diff --git a/flash-all-dynamic.sh b/flash-all-dynamic.sh
index 9cc739c..71d071f 100755
--- a/flash-all-dynamic.sh
+++ b/flash-all-dynamic.sh
@@ -74,7 +74,6 @@ flash_with_retry recovery recovery.img
fastboot $sern reboot fastboot
sleep 10
-flash_with_retry super super_empty_all.img
flash_with_retry odm odm.img
flash_with_retry system system.img
flash_with_retry vendor vendor.img
diff --git a/ota_amlogic.py b/ota_amlogic.py
index 94ac5e5..274fefa 100755
--- a/ota_amlogic.py
+++ b/ota_amlogic.py
@@ -15,54 +15,12 @@
# limitations under the License.
"""
-Given a target-files zipfile, produces an OTA package that installs
-that build. An incremental OTA is produced if -i is given, otherwise
-a full OTA is produced.
+Given a target-files zipfile, produces an OTA package that installs that build.
+An incremental OTA is produced if -i is given, otherwise a full OTA is produced.
-Usage: ota_from_target_files [flags] input_target_files output_ota_package
+Usage: ota_from_target_files [options] input_target_files output_ota_package
- -k (--package_key) <key> Key to use to sign the package (default is
- the value of default_system_dev_certificate from the input
- target-files's META/misc_info.txt, or
- "build/target/product/security/testkey" if that value is not
- specified).
-
- For incremental OTAs, the default value is based on the source
- target-file, not the target build.
-
- -i (--incremental_from) <file>
- Generate an incremental OTA using the given target-files zip as
- the starting build.
-
- --full_radio
- When generating an incremental OTA, always include a full copy of
- radio image. This option is only meaningful when -i is specified,
- because a full radio is always included in a full OTA if applicable.
-
- --full_bootloader
- Similar to --full_radio. When generating an incremental OTA, always
- include a full copy of bootloader image.
-
- --verify
- Remount and verify the checksums of the files written to the system and
- vendor (if used) partitions. Non-A/B incremental OTAs only.
-
- -o (--oem_settings) <main_file[,additional_files...]>
- Comma seperated list of files used to specify the expected OEM-specific
- properties on the OEM partition of the intended device. Multiple expected
- values can be used by providing multiple files. Only the first dict will
- be used to compute fingerprint, while the rest will be used to assert
- OEM-specific properties.
-
- --oem_no_mount
- For devices with OEM-specific properties but without an OEM partition,
- do not mount the OEM partition in the updater-script. This should be
- very rarely used, since it's expected to have a dedicated OEM partition
- for OEM-specific properties. Only meaningful when -o is specified.
-
- --wipe_user_data
- Generate an OTA package that will wipe the user data partition
- when installed.
+Common options that apply to both of non-A/B and A/B OTAs
--downgrade
Intentionally generate an incremental OTA that updates from a newer build
@@ -73,6 +31,19 @@ Usage: ota_from_target_files [flags] input_target_files output_ota_package
will be used in the OTA package, unless --binary flag is specified. Please
also check the comment for --override_timestamp below.
+ -i (--incremental_from) <file>
+ Generate an incremental OTA using the given target-files zip as the
+ starting build.
+
+ -k (--package_key) <key>
+ Key to use to sign the package (default is the value of
+ default_system_dev_certificate from the input target-files's
+ META/misc_info.txt, or "build/make/target/product/security/testkey" if
+ that value is not specified).
+
+ For incremental OTAs, the default value is based on the source
+ target-file, not the target build.
+
--override_timestamp
Intentionally generate an incremental OTA that updates from a newer build
to an older one (based on timestamp comparison), by setting the downgrade
@@ -89,13 +60,87 @@ Usage: ota_from_target_files [flags] input_target_files output_ota_package
based on timestamp) with the same "ota-downgrade=yes" flag, with the
difference being whether "ota-wipe=yes" is set.
- -e (--extra_script) <file>
+ --wipe_user_data
+ Generate an OTA package that will wipe the user data partition when
+ installed.
+
+ --retrofit_dynamic_partitions
+ Generates an OTA package that updates a device to support dynamic
+ partitions (default False). This flag is implied when generating
+ an incremental OTA where the base build does not support dynamic
+ partitions but the target build does. For A/B, when this flag is set,
+ --skip_postinstall is implied.
+
+ --skip_compatibility_check
+ Skip checking compatibility of the input target files package.
+
+ --output_metadata_path
+ Write a copy of the metadata to a separate file. Therefore, users can
+ read the post build fingerprint without extracting the OTA package.
+
+Non-A/B OTA specific options
+
+ -b (--binary) <file>
+ Use the given binary as the update-binary in the output package, instead
+ of the binary in the build's target_files. Use for development only.
+
+ --block
+ Generate a block-based OTA for non-A/B device. We have deprecated the
+ support for file-based OTA since O. Block-based OTA will be used by
+ default for all non-A/B devices. Keeping this flag here to not break
+ existing callers.
+
+ -e (--extra_script) <file>
Insert the contents of file at the end of the update script.
+ --full_bootloader
+ Similar to --full_radio. When generating an incremental OTA, always
+ include a full copy of bootloader image.
+
+ --full_radio
+ When generating an incremental OTA, always include a full copy of radio
+ image. This option is only meaningful when -i is specified, because a full
+ radio is always included in a full OTA if applicable.
+
+ --log_diff <file>
+ Generate a log file that shows the differences in the source and target
+ builds for an incremental package. This option is only meaningful when -i
+ is specified.
+
+ -o (--oem_settings) <main_file[,additional_files...]>
+ Comma seperated list of files used to specify the expected OEM-specific
+ properties on the OEM partition of the intended device. Multiple expected
+ values can be used by providing multiple files. Only the first dict will
+ be used to compute fingerprint, while the rest will be used to assert
+ OEM-specific properties.
+
+ --oem_no_mount
+ For devices with OEM-specific properties but without an OEM partition, do
+ not mount the OEM partition in the updater-script. This should be very
+ rarely used, since it's expected to have a dedicated OEM partition for
+ OEM-specific properties. Only meaningful when -o is specified.
+
+ --stash_threshold <float>
+ Specify the threshold that will be used to compute the maximum allowed
+ stash size (defaults to 0.8).
+
+ -t (--worker_threads) <int>
+ Specify the number of worker-threads that will be used when generating
+ patches for incremental updates (defaults to 3).
+
+ --verify
+ Verify the checksums of the updated system and vendor (if any) partitions.
+ Non-A/B incremental OTAs only.
+
-2 (--two_step)
- Generate a 'two-step' OTA package, where recovery is updated
- first, so that any changes made to the system partition are done
- using the new recovery (new kernel, etc.).
+ Generate a 'two-step' OTA package, where recovery is updated first, so
+ that any changes made to the system partition are done using the new
+ recovery (new kernel, etc.).
+
+A/B OTA specific options
+
+ --disable_fec_computation
+ Disable the on device FEC data computation for incremental updates.
--include_secondary
Additionally include the payload for secondary slot images (default:
@@ -115,30 +160,6 @@ Usage: ota_from_target_files [flags] input_target_files output_ota_package
Due to the special install procedure, the secondary payload will be always
generated as a full payload.
- --block
- Generate a block-based OTA for non-A/B device. We have deprecated the
- support for file-based OTA since O. Block-based OTA will be used by
- default for all non-A/B devices. Keeping this flag here to not break
- existing callers.
-
- -b (--binary) <file>
- Use the given binary as the update-binary in the output package,
- instead of the binary in the build's target_files. Use for
- development only.
-
- -t (--worker_threads) <int>
- Specifies the number of worker-threads that will be used when
- generating patches for incremental updates (defaults to 3).
-
- --stash_threshold <float>
- Specifies the threshold that will be used to compute the maximum
- allowed stash size (defaults to 0.8).
-
- --log_diff <file>
- Generate a log file that shows the differences in the source and target
- builds for an incremental package. This option is only meaningful when
- -i is specified.
-
--payload_signer <signer>
Specify the signer when signing the payload and metadata for A/B OTAs.
By default (i.e. without this flag), it calls 'openssl pkeyutl' to sign
@@ -150,6 +171,17 @@ Usage: ota_from_target_files [flags] input_target_files output_ota_package
--payload_signer_args <args>
Specify the arguments needed for payload signer.
+ --payload_signer_maximum_signature_size <signature_size>
+ The maximum signature size (in bytes) that would be generated by the given
+ payload signer. Only meaningful when custom payload signer is specified
+ via '--payload_signer'.
+ If the signer uses a RSA key, this should be the number of bytes to
+ represent the modulus. If it uses an EC key, this is the size of a
+ DER-encoded ECDSA signature.
+
+ --payload_signer_key_size <key_size>
+ Deprecated. Use the '--payload_signer_maximum_signature_size' instead.
+
--skip_postinstall
Skip the postinstall hooks when generating an A/B OTA package (default:
False). Note that this discards ALL the hooks, including non-optional
@@ -160,26 +192,29 @@ Usage: ota_from_target_files [flags] input_target_files output_ota_package
from __future__ import print_function
+import collections
+import logging
import multiprocessing
import os.path
import shlex
import shutil
import struct
-import subprocess
import sys
import tempfile
import zipfile
import sys
-sys.path.append('build/tools/releasetools')
+sys.path.append('build/make/tools/releasetools')
+import check_target_files_vintf
import common
import edify_generator
-import sparse_img
+import verity_utils
if sys.hexversion < 0x02070000:
print("Python 2.7 or newer is required.", file=sys.stderr)
sys.exit(1)
+logger = logging.getLogger(__name__)
OPTIONS = common.OPTIONS
OPTIONS.package_key = None
@@ -207,152 +242,31 @@ OPTIONS.stash_threshold = 0.8
OPTIONS.log_diff = None
OPTIONS.payload_signer = None
OPTIONS.payload_signer_args = []
+OPTIONS.payload_signer_maximum_signature_size = None
OPTIONS.extracted_input = None
OPTIONS.key_passwords = []
OPTIONS.skip_postinstall = False
+OPTIONS.retrofit_dynamic_partitions = False
+OPTIONS.skip_compatibility_check = False
+OPTIONS.output_metadata_path = None
+OPTIONS.disable_fec_computation = False
METADATA_NAME = 'META-INF/com/android/metadata'
POSTINSTALL_CONFIG = 'META/postinstall_config.txt'
-UNZIP_PATTERN = ['IMAGES/*', 'META/*']
-
-
-class BuildInfo(object):
- """A class that holds the information for a given build.
-
- This class wraps up the property querying for a given source or target build.
- It abstracts away the logic of handling OEM-specific properties, and caches
- the commonly used properties such as fingerprint.
-
- There are two types of info dicts: a) build-time info dict, which is generated
- at build time (i.e. included in a target_files zip); b) OEM info dict that is
- specified at package generation time (via command line argument
- '--oem_settings'). If a build doesn't use OEM-specific properties (i.e. not
- having "oem_fingerprint_properties" in build-time info dict), all the queries
- would be answered based on build-time info dict only. Otherwise if using
- OEM-specific properties, some of them will be calculated from two info dicts.
-
- Users can query properties similarly as using a dict() (e.g. info['fstab']),
- or to query build properties via GetBuildProp() or GetVendorBuildProp().
-
- Attributes:
- info_dict: The build-time info dict.
- is_ab: Whether it's a build that uses A/B OTA.
- oem_dicts: A list of OEM dicts.
- oem_props: A list of OEM properties that should be read from OEM dicts; None
- if the build doesn't use any OEM-specific property.
- fingerprint: The fingerprint of the build, which would be calculated based
- on OEM properties if applicable.
- device: The device name, which could come from OEM dicts if applicable.
- """
-
- def __init__(self, info_dict, oem_dicts):
- """Initializes a BuildInfo instance with the given dicts.
-
- Note that it only wraps up the given dicts, without making copies.
-
- Arguments:
- info_dict: The build-time info dict.
- oem_dicts: A list of OEM dicts (which is parsed from --oem_settings). Note
- that it always uses the first dict to calculate the fingerprint or the
- device name. The rest would be used for asserting OEM properties only
- (e.g. one package can be installed on one of these devices).
- """
- self.info_dict = info_dict
- self.oem_dicts = oem_dicts
-
- self._is_ab = info_dict.get("ab_update") == "true"
- self._oem_props = info_dict.get("oem_fingerprint_properties")
-
- if self._oem_props:
- assert oem_dicts, "OEM source required for this build"
-
- # These two should be computed only after setting self._oem_props.
- self._device = self.GetOemProperty("ro.product.device")
- self._fingerprint = self.CalculateFingerprint()
-
- @property
- def is_ab(self):
- return self._is_ab
-
- @property
- def device(self):
- return self._device
+DYNAMIC_PARTITION_INFO = 'META/dynamic_partitions_info.txt'
+AB_PARTITIONS = 'META/ab_partitions.txt'
+UNZIP_PATTERN = ['IMAGES/*', 'META/*', 'OTA/*', 'RADIO/*']
+# Files to be unzipped for target diffing purpose.
+TARGET_DIFFING_UNZIP_PATTERN = ['BOOT', 'RECOVERY', 'SYSTEM/*', 'VENDOR/*',
+ 'PRODUCT/*', 'SYSTEM_EXT/*', 'ODM/*']
+RETROFIT_DAP_UNZIP_PATTERN = ['OTA/super_*.img', AB_PARTITIONS]
- @property
- def fingerprint(self):
- return self._fingerprint
-
- @property
- def oem_props(self):
- return self._oem_props
-
- def __getitem__(self, key):
- return self.info_dict[key]
-
- def __setitem__(self, key, value):
- self.info_dict[key] = value
-
- def get(self, key, default=None):
- return self.info_dict.get(key, default)
-
- def items(self):
- return self.info_dict.items()
-
- def GetBuildProp(self, prop):
- """Returns the inquired build property."""
- try:
- return self.info_dict.get("build.prop", {})[prop]
- except KeyError:
- raise common.ExternalError("couldn't find %s in build.prop" % (prop,))
-
- def GetVendorBuildProp(self, prop):
- """Returns the inquired vendor build property."""
- try:
- return self.info_dict.get("vendor.build.prop", {})[prop]
- except KeyError:
- raise common.ExternalError(
- "couldn't find %s in vendor.build.prop" % (prop,))
-
- def GetOemProperty(self, key):
- if self.oem_props is not None and key in self.oem_props:
- return self.oem_dicts[0][key]
- return self.GetBuildProp(key)
-
- def CalculateFingerprint(self):
- if self.oem_props is None:
- return self.GetBuildProp("ro.build.fingerprint")
- return "%s/%s/%s:%s" % (
- self.GetOemProperty("ro.product.brand"),
- self.GetOemProperty("ro.product.name"),
- self.GetOemProperty("ro.product.device"),
- self.GetBuildProp("ro.build.thumbprint"))
-
- def WriteMountOemScript(self, script):
- assert self.oem_props is not None
- recovery_mount_options = self.info_dict.get("recovery_mount_options")
- script.Mount("/oem", recovery_mount_options)
-
- def WriteDeviceAssertions(self, script, oem_no_mount):
- # Read the property directly if not using OEM properties.
- if not self.oem_props:
- script.AssertDevice(self.device)
- return
-
- # Otherwise assert OEM properties.
- if not self.oem_dicts:
- raise common.ExternalError(
- "No OEM file provided to answer expected assertions")
-
- for prop in self.oem_props.split():
- values = []
- for oem_dict in self.oem_dicts:
- if prop in oem_dict:
- values.append(oem_dict[prop])
- if not values:
- raise common.ExternalError(
- "The OEM file is missing the property %s" % (prop,))
- script.AssertOemProperty(prop, values, oem_no_mount)
+# Images to be excluded from secondary payload. We essentially only keep
+# 'system_other' and bootloader partitions.
+SECONDARY_PAYLOAD_SKIPPED_IMAGES = [
+ 'boot', 'dtbo', 'modem', 'odm', 'product', 'radio', 'recovery',
+ 'system_ext', 'vbmeta', 'vbmeta_system', 'vbmeta_vendor', 'vendor']
class PayloadSigner(object):
@@ -380,28 +294,42 @@ class PayloadSigner(object):
cmd.extend(["-passin", "pass:" + pw] if pw else ["-nocrypt"])
signing_key = common.MakeTempFile(prefix="key-", suffix=".key")
cmd.extend(["-out", signing_key])
-
- get_signing_key = common.Run(cmd, verbose=False, stdout=subprocess.PIPE,
- stderr=subprocess.STDOUT)
- stdoutdata, _ = get_signing_key.communicate()
- assert get_signing_key.returncode == 0, \
- "Failed to get signing key: {}".format(stdoutdata)
+ common.RunAndCheckOutput(cmd, verbose=False)
self.signer = "openssl"
self.signer_args = ["pkeyutl", "-sign", "-inkey", signing_key,
"-pkeyopt", "digest:sha256"]
+ self.maximum_signature_size = self._GetMaximumSignatureSizeInBytes(
+ signing_key)
else:
self.signer = OPTIONS.payload_signer
self.signer_args = OPTIONS.payload_signer_args
+ if OPTIONS.payload_signer_maximum_signature_size:
+ self.maximum_signature_size = int(
+ OPTIONS.payload_signer_maximum_signature_size)
+ else:
+ # The legacy config uses RSA2048 keys.
+ logger.warning("The maximum signature size for payload signer is not"
+ " set, default to 256 bytes.")
+ self.maximum_signature_size = 256
+
+ @staticmethod
+ def _GetMaximumSignatureSizeInBytes(signing_key):
+ out_signature_size_file = common.MakeTempFile("signature_size")
+ cmd = ["delta_generator", "--out_maximum_signature_size_file={}".format(
+ out_signature_size_file), "--private_key={}".format(signing_key)]
+ common.RunAndCheckOutput(cmd)
+ with open(out_signature_size_file) as f:
+ signature_size = f.read().rstrip()
+ logger.info("% outputs the maximum signature size: %", cmd[0],
+ signature_size)
+ return int(signature_size)
def Sign(self, in_file):
"""Signs the given input file. Returns the output filename."""
out_file = common.MakeTempFile(prefix="signed-", suffix=".bin")
cmd = [self.signer] + self.signer_args + ['-in', in_file, '-out', out_file]
- signing = common.Run(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
- stdoutdata, _ = signing.communicate()
- assert signing.returncode == 0, \
- "Failed to sign the input file: {}".format(stdoutdata)
+ common.RunAndCheckOutput(cmd)
return out_file
@@ -419,12 +347,19 @@ class Payload(object):
Args:
secondary: Whether it's generating a secondary payload (default: False).
"""
- # The place where the output from the subprocess should go.
- self._log_file = sys.stdout if OPTIONS.verbose else subprocess.PIPE
self.payload_file = None
self.payload_properties = None
self.secondary = secondary
+ def _Run(self, cmd): # pylint: disable=no-self-use
+ # Don't pipe (buffer) the output if verbose is set. Let
+ # brillo_update_payload write to stdout/stderr directly, so its progress can
+ # be monitored.
+ if OPTIONS.verbose:
+ common.RunAndCheckOutput(cmd, stdout=None, stderr=None)
+ else:
+ common.RunAndCheckOutput(cmd)
+
def Generate(self, target_file, source_file=None, additional_args=None):
"""Generates a payload from the given target-files zip(s).
@@ -444,11 +379,10 @@ class Payload(object):
"--target_image", target_file]
if source_file is not None:
cmd.extend(["--source_image", source_file])
+ if OPTIONS.disable_fec_computation:
+ cmd.extend(["--disable_fec_computation", "true"])
cmd.extend(additional_args)
- p = common.Run(cmd, stdout=self._log_file, stderr=subprocess.STDOUT)
- stdoutdata, _ = p.communicate()
- assert p.returncode == 0, \
- "brillo_update_payload generate failed: {}".format(stdoutdata)
+ self._Run(cmd)
self.payload_file = payload_file
self.payload_properties = None
@@ -469,12 +403,10 @@ class Payload(object):
metadata_sig_file = common.MakeTempFile(prefix="sig-", suffix=".bin")
cmd = ["brillo_update_payload", "hash",
"--unsigned_payload", self.payload_file,
- "--signature_size", "256",
+ "--signature_size", str(payload_signer.maximum_signature_size),
"--metadata_hash_file", metadata_sig_file,
"--payload_hash_file", payload_sig_file]
- p1 = common.Run(cmd, stdout=self._log_file, stderr=subprocess.STDOUT)
- p1.communicate()
- assert p1.returncode == 0, "brillo_update_payload hash failed"
+ self._Run(cmd)
# 2. Sign the hashes.
signed_payload_sig_file = payload_signer.Sign(payload_sig_file)
@@ -486,12 +418,10 @@ class Payload(object):
cmd = ["brillo_update_payload", "sign",
"--unsigned_payload", self.payload_file,
"--payload", signed_payload_file,
- "--signature_size", "256",
+ "--signature_size", str(payload_signer.maximum_signature_size),
"--metadata_signature_file", signed_metadata_sig_file,
"--payload_signature_file", signed_payload_sig_file]
- p1 = common.Run(cmd, stdout=self._log_file, stderr=subprocess.STDOUT)
- p1.communicate()
- assert p1.returncode == 0, "brillo_update_payload sign failed"
+ self._Run(cmd)
# 4. Dump the signed payload properties.
properties_file = common.MakeTempFile(prefix="payload-properties-",
@@ -499,9 +429,7 @@ class Payload(object):
cmd = ["brillo_update_payload", "properties",
"--payload", signed_payload_file,
"--properties_file", properties_file]
- p1 = common.Run(cmd, stdout=self._log_file, stderr=subprocess.STDOUT)
- p1.communicate()
- assert p1.returncode == 0, "brillo_update_payload properties failed"
+ self._Run(cmd)
if self.secondary:
with open(properties_file, "a") as f:
@@ -576,51 +504,53 @@ def _WriteRecoveryImageToBoot(script, output_zip):
recovery_two_step_img_name = "recovery-two-step.img"
recovery_two_step_img_path = os.path.join(
- OPTIONS.input_tmp, "IMAGES", recovery_two_step_img_name)
+ OPTIONS.input_tmp, "OTA", recovery_two_step_img_name)
if os.path.exists(recovery_two_step_img_path):
- recovery_two_step_img = common.GetBootableImage(
- recovery_two_step_img_name, recovery_two_step_img_name,
- OPTIONS.input_tmp, "RECOVERY")
- common.ZipWriteStr(
- output_zip, recovery_two_step_img_name, recovery_two_step_img.data)
- print("two-step package: using %s in stage 1/3" % (
- recovery_two_step_img_name,))
+ common.ZipWrite(
+ output_zip,
+ recovery_two_step_img_path,
+ arcname=recovery_two_step_img_name)
+ logger.info(
+ "two-step package: using %s in stage 1/3", recovery_two_step_img_name)
script.WriteRawImage("/boot", recovery_two_step_img_name)
else:
- print("two-step package: using recovery.img in stage 1/3")
+ logger.info("two-step package: using recovery.img in stage 1/3")
# The "recovery.img" entry has been written into package earlier.
script.WriteRawImage("/boot", "recovery.img")
-def HasRecoveryPatch(target_files_zip):
+def HasRecoveryPatch(target_files_zip, info_dict):
+ board_uses_vendorimage = info_dict.get("board_uses_vendorimage") == "true"
+
+ if board_uses_vendorimage:
+ target_files_dir = "VENDOR"
+ else:
+ target_files_dir = "SYSTEM/vendor"
+
+ patch = "%s/recovery-from-boot.p" % target_files_dir
+  img = "%s/etc/recovery.img" % target_files_dir
+
namelist = [name for name in target_files_zip.namelist()]
- return ("SYSTEM/recovery-from-boot.p" in namelist or
- "SYSTEM/etc/recovery.img" in namelist)
+ return (patch in namelist or img in namelist)
-def HasVendorPartition(target_files_zip):
+def HasPartition(target_files_zip, partition):
try:
- target_files_zip.getinfo("VENDOR/")
+ target_files_zip.getinfo(partition.upper() + "/")
return True
except KeyError:
return False
-def ZipOtherImage(which, tmpdir, output):
- """Returns an image object from IMAGES.
-
- 'which' partition eg "logo", "dtb". A prebuilt image and file
- map must already exist in tmpdir.
- """
- amlogic_img_path = os.path.join(tmpdir, "IMAGES", which + ".img")
- if os.path.exists(amlogic_img_path):
- f = open(amlogic_img_path, "rb")
- data = f.read()
- f.close()
- common.ZipWriteStr(output, which + ".img", data)
+def HasTrebleEnabled(target_files, target_info):
+ def HasVendorPartition(target_files):
+ if os.path.isdir(target_files):
+ return os.path.isdir(os.path.join(target_files, "VENDOR"))
+ if zipfile.is_zipfile(target_files):
+ return HasPartition(zipfile.ZipFile(target_files), "vendor")
+ raise ValueError("Unknown target_files argument")
-def HasTrebleEnabled(target_files_zip, target_info):
- return (HasVendorPartition(target_files_zip) and
+ return (HasVendorPartition(target_files) and
target_info.GetBuildProp("ro.treble.enabled") == "true")
@@ -645,96 +575,124 @@ def WriteFingerprintAssertion(script, target_info, source_info):
source_info.GetBuildProp("ro.build.thumbprint"))
-def AddCompatibilityArchiveIfTrebleEnabled(target_zip, output_zip, target_info,
- source_info=None):
- """Adds compatibility info into the output zip if it's Treble-enabled target.
+def CheckVintfIfTrebleEnabled(target_files, target_info):
+ """Checks compatibility info of the input target files.
- Metadata used for on-device compatibility verification is retrieved from
- target_zip then added to compatibility.zip which is added to the output_zip
- archive.
+ Metadata used for compatibility verification is retrieved from target_zip.
- Compatibility archive should only be included for devices that have enabled
+ Compatibility should only be checked for devices that have enabled
Treble support.
Args:
- target_zip: Zip file containing the source files to be included for OTA.
- output_zip: Zip file that will be sent for OTA.
+ target_files: Path to zip file containing the source files to be included
+ for OTA. Can also be the path to extracted directory.
target_info: The BuildInfo instance that holds the target build info.
- source_info: The BuildInfo instance that holds the source build info, if
- generating an incremental OTA; None otherwise.
"""
- def AddCompatibilityArchive(system_updated, vendor_updated):
- """Adds compatibility info based on system/vendor update status.
-
- Args:
- system_updated: If True, the system image will be updated and therefore
- its metadata should be included.
- vendor_updated: If True, the vendor image will be updated and therefore
- its metadata should be included.
- """
- # Determine what metadata we need. Files are names relative to META/.
- compatibility_files = []
- vendor_metadata = ("vendor_manifest.xml", "vendor_matrix.xml")
- system_metadata = ("system_manifest.xml", "system_matrix.xml")
- if vendor_updated:
- compatibility_files += vendor_metadata
- if system_updated:
- compatibility_files += system_metadata
-
- # Create new archive.
- compatibility_archive = tempfile.NamedTemporaryFile()
- compatibility_archive_zip = zipfile.ZipFile(
- compatibility_archive, "w", compression=zipfile.ZIP_DEFLATED)
-
- # Add metadata.
- for file_name in compatibility_files:
- target_file_name = "META/" + file_name
-
- if target_file_name in target_zip.namelist():
- data = target_zip.read(target_file_name)
- common.ZipWriteStr(compatibility_archive_zip, file_name, data)
-
- # Ensure files are written before we copy into output_zip.
- compatibility_archive_zip.close()
-
- # Only add the archive if we have any compatibility info.
- if compatibility_archive_zip.namelist():
- common.ZipWrite(output_zip, compatibility_archive.name,
- arcname="compatibility.zip",
- compress_type=zipfile.ZIP_STORED)
-
# Will only proceed if the target has enabled the Treble support (as well as
# having a /vendor partition).
- if not HasTrebleEnabled(target_zip, target_info):
+ if not HasTrebleEnabled(target_files, target_info):
return
- # We don't support OEM thumbprint in Treble world (which calculates
- # fingerprints in a different way as shown in CalculateFingerprint()).
- assert not target_info.oem_props
-
- # Full OTA carries the info for system/vendor both.
- if source_info is None:
- AddCompatibilityArchive(True, True)
+ # Skip adding the compatibility package as a workaround for b/114240221. The
+ # compatibility will always fail on devices without qualified kernels.
+ if OPTIONS.skip_compatibility_check:
return
- assert not source_info.oem_props
-
- source_fp = source_info.fingerprint
- target_fp = target_info.fingerprint
- system_updated = source_fp != target_fp
+ if not check_target_files_vintf.CheckVintf(target_files, target_info):
+ raise RuntimeError("VINTF compatibility check failed")
+
+
+def GetBlockDifferences(target_zip, source_zip, target_info, source_info,
+ device_specific):
+ """Returns a ordered dict of block differences with partition name as key."""
+
+ def GetIncrementalBlockDifferenceForPartition(name):
+ if not HasPartition(source_zip, name):
+ raise RuntimeError("can't generate incremental that adds {}".format(name))
+
+ partition_src = common.GetUserImage(name, OPTIONS.source_tmp, source_zip,
+ info_dict=source_info,
+ allow_shared_blocks=allow_shared_blocks)
+
+ hashtree_info_generator = verity_utils.CreateHashtreeInfoGenerator(
+ name, 4096, target_info)
+ partition_tgt = common.GetUserImage(name, OPTIONS.target_tmp, target_zip,
+ info_dict=target_info,
+ allow_shared_blocks=allow_shared_blocks,
+ hashtree_info_generator=
+ hashtree_info_generator)
+
+ # Check the first block of the source system partition for remount R/W only
+ # if the filesystem is ext4.
+ partition_source_info = source_info["fstab"]["/" + name]
+ check_first_block = partition_source_info.fs_type == "ext4"
+ # Disable using imgdiff for squashfs. 'imgdiff -z' expects input files to be
+ # in zip formats. However with squashfs, a) all files are compressed in LZ4;
+ # b) the blocks listed in block map may not contain all the bytes for a
+ # given file (because they're rounded to be 4K-aligned).
+ partition_target_info = target_info["fstab"]["/" + name]
+ disable_imgdiff = (partition_source_info.fs_type == "squashfs" or
+ partition_target_info.fs_type == "squashfs")
+ return common.BlockDifference(name, partition_src, partition_tgt,
+ check_first_block,
+ version=blockimgdiff_version,
+ disable_imgdiff=disable_imgdiff)
+
+ if source_zip:
+ # See notes in common.GetUserImage()
+ allow_shared_blocks = (source_info.get('ext4_share_dup_blocks') == "true" or
+ target_info.get('ext4_share_dup_blocks') == "true")
+ blockimgdiff_version = max(
+ int(i) for i in target_info.get(
+ "blockimgdiff_versions", "1").split(","))
+ assert blockimgdiff_version >= 3
+
+ block_diff_dict = collections.OrderedDict()
+ partition_names = ["system", "vendor", "product", "odm", "system_ext"]
+ for partition in partition_names:
+ if not HasPartition(target_zip, partition):
+ continue
+ # Full OTA update.
+ if not source_zip:
+ tgt = common.GetUserImage(partition, OPTIONS.input_tmp, target_zip,
+ info_dict=target_info,
+ reset_file_map=True)
+ block_diff_dict[partition] = common.BlockDifference(partition, tgt,
+ src=None)
+ # Incremental OTA update.
+ else:
+ block_diff_dict[partition] = GetIncrementalBlockDifferenceForPartition(
+ partition)
+ assert "system" in block_diff_dict
+
+ # Get the block diffs from the device specific script. If there is a
+ # duplicate block diff for a partition, ignore the diff in the generic script
+ # and use the one in the device specific script instead.
+ if source_zip:
+ device_specific_diffs = device_specific.IncrementalOTA_GetBlockDifferences()
+ function_name = "IncrementalOTA_GetBlockDifferences"
+ else:
+ device_specific_diffs = device_specific.FullOTA_GetBlockDifferences()
+ function_name = "FullOTA_GetBlockDifferences"
- source_fp_vendor = source_info.GetVendorBuildProp(
- "ro.vendor.build.fingerprint")
- target_fp_vendor = target_info.GetVendorBuildProp(
- "ro.vendor.build.fingerprint")
- vendor_updated = source_fp_vendor != target_fp_vendor
+ if device_specific_diffs:
+ assert all(isinstance(diff, common.BlockDifference)
+ for diff in device_specific_diffs), \
+ "{} is not returning a list of BlockDifference objects".format(
+ function_name)
+ for diff in device_specific_diffs:
+ if diff.partition in block_diff_dict:
+ logger.warning("Duplicate block difference found. Device specific block"
+ " diff for partition '%s' overrides the one in generic"
+ " script.", diff.partition)
+ block_diff_dict[diff.partition] = diff
- AddCompatibilityArchive(system_updated, vendor_updated)
+ return block_diff_dict
def WriteFullOTAPackage(input_zip, output_file):
- target_info = BuildInfo(OPTIONS.info_dict, OPTIONS.oem_dicts)
+ target_info = common.BuildInfo(OPTIONS.info_dict, OPTIONS.oem_dicts)
# We don't know what version it will be installed on top of. We expect the API
# just won't change very often. Similarly for fstab, it might have changed in
@@ -764,7 +722,7 @@ def WriteFullOTAPackage(input_zip, output_file):
metadata=metadata,
info_dict=OPTIONS.info_dict)
- #assert HasRecoveryPatch(input_zip)
+ assert HasRecoveryPatch(input_zip, info_dict=OPTIONS.info_dict)
# Assertions (e.g. downgrade check, device properties check).
ts = target_info.GetBuildProp("ro.build.date.utc")
@@ -774,6 +732,11 @@ def WriteFullOTAPackage(input_zip, output_file):
target_info.WriteDeviceAssertions(script, OPTIONS.oem_no_mount)
device_specific.FullOTA_Assertions()
+ block_diff_dict = GetBlockDifferences(target_zip=input_zip, source_zip=None,
+ target_info=target_info,
+ source_info=None,
+ device_specific=device_specific)
+
# Two-step package strategy (in chronological order, which is *not*
# the order in which the generated script has things):
#
@@ -825,49 +788,39 @@ else if get_stage("%(bcb_dev)s") == "3/3" then
device_specific.FullOTA_InstallBegin()
- system_progress = 0.75
-
+ # All other partitions as well as the data wipe use 10% of the progress, and
+ # the update of the system partition takes the remaining progress.
+ system_progress = 0.9 - (len(block_diff_dict) - 1) * 0.1
if OPTIONS.wipe_user_data:
system_progress -= 0.1
- if HasVendorPartition(input_zip):
- system_progress -= 0.1
-
- script.ShowProgress(system_progress, 0)
-
- # See the notes in WriteBlockIncrementalOTAPackage().
- allow_shared_blocks = target_info.get('ext4_share_dup_blocks') == "true"
+ progress_dict = {partition: 0.1 for partition in block_diff_dict}
+ progress_dict["system"] = system_progress
+
+ if target_info.get('use_dynamic_partitions') == "true":
+ # Use empty source_info_dict to indicate that all partitions / groups must
+ # be re-added.
+ dynamic_partitions_diff = common.DynamicPartitionsDifference(
+ info_dict=OPTIONS.info_dict,
+ block_diffs=block_diff_dict.values(),
+ progress_dict=progress_dict)
+ dynamic_partitions_diff.WriteScript(script, output_zip,
+ write_verify_script=OPTIONS.verify)
+ else:
+ for block_diff in block_diff_dict.values():
+ block_diff.WriteScript(script, output_zip,
+ progress=progress_dict.get(block_diff.partition),
+ write_verify_script=OPTIONS.verify)
- # Full OTA is done as an "incremental" against an empty source image. This
- # has the effect of writing new data from the package to the entire
- # partition, but lets us reuse the updater code that writes incrementals to
- # do it.
- system_tgt = common.GetSparseImage("system", OPTIONS.input_tmp, input_zip,
- allow_shared_blocks)
- system_tgt.ResetFileMap()
- system_diff = common.BlockDifference("system", system_tgt, src=None)
- system_diff.WriteScript(script, output_zip)
+ CheckVintfIfTrebleEnabled(OPTIONS.input_tmp, target_info)
boot_img = common.GetBootableImage(
"boot.img", "boot.img", OPTIONS.input_tmp, "BOOT")
-
- if HasVendorPartition(input_zip):
- script.ShowProgress(0.1, 0)
-
- vendor_tgt = common.GetSparseImage("vendor", OPTIONS.input_tmp, input_zip,
- allow_shared_blocks)
- vendor_tgt.ResetFileMap()
- vendor_diff = common.BlockDifference("vendor", vendor_tgt)
- vendor_diff.WriteScript(script, output_zip)
-
- #AddCompatibilityArchiveIfTrebleEnabled(input_zip, output_zip, target_info)
-
common.CheckSize(boot_img.data, "boot.img", target_info)
common.ZipWriteStr(output_zip, "boot.img", boot_img.data)
- script.ShowProgress(0.05, 5)
script.WriteRawImage("/boot", "boot.img")
- script.ShowProgress(0.2, 10)
+ script.ShowProgress(0.1, 10)
device_specific.FullOTA_InstallEnd()
if OPTIONS.extra_script is not None:
@@ -879,10 +832,6 @@ else if get_stage("%(bcb_dev)s") == "3/3" then
script.ShowProgress(0.1, 10)
script.FormatPartition("/data")
- script.FormatPartition("/data")
- script.FormatPartition("/metadata")
- script.AppendExtra('wipe_cache();')
-
if OPTIONS.two_step:
script.AppendExtra("""
set_stage("%(bcb_dev)s", "");
@@ -914,10 +863,22 @@ endif;
FinalizeMetadata(metadata, staging_file, output_file, needed_property_files)
-def WriteMetadata(metadata, output_zip):
- value = "".join(["%s=%s\n" % kv for kv in sorted(metadata.iteritems())])
- common.ZipWriteStr(output_zip, METADATA_NAME, value,
- compress_type=zipfile.ZIP_STORED)
+def WriteMetadata(metadata, output):
+ """Writes the metadata to the zip archive or a file.
+
+ Args:
+ metadata: The metadata dict for the package.
+ output: A ZipFile object or a string of the output file path.
+ """
+
+ value = "".join(["%s=%s\n" % kv for kv in sorted(metadata.items())])
+ if isinstance(output, zipfile.ZipFile):
+ common.ZipWriteStr(output, METADATA_NAME, value,
+ compress_type=zipfile.ZIP_STORED)
+ return
+
+ with open(output, 'w') as f:
+ f.write(value)
def HandleDowngradeMetadata(metadata, target_info, source_info):
@@ -926,7 +887,7 @@ def HandleDowngradeMetadata(metadata, target_info, source_info):
post_timestamp = target_info.GetBuildProp("ro.build.date.utc")
pre_timestamp = source_info.GetBuildProp("ro.build.date.utc")
- is_downgrade = long(post_timestamp) < long(pre_timestamp)
+ is_downgrade = int(post_timestamp) < int(pre_timestamp)
if OPTIONS.downgrade:
if not is_downgrade:
@@ -957,8 +918,8 @@ def GetPackageMetadata(target_info, source_info=None):
Returns:
A dict to be written into package metadata entry.
"""
- assert isinstance(target_info, BuildInfo)
- assert source_info is None or isinstance(source_info, BuildInfo)
+ assert isinstance(target_info, common.BuildInfo)
+ assert source_info is None or isinstance(source_info, common.BuildInfo)
metadata = {
'post-build' : target_info.fingerprint,
@@ -979,6 +940,9 @@ def GetPackageMetadata(target_info, source_info=None):
if OPTIONS.wipe_user_data:
metadata['ota-wipe'] = 'yes'
+ if OPTIONS.retrofit_dynamic_partitions:
+ metadata['ota-retrofit-dynamic-partitions'] = 'yes'
+
is_incremental = source_info is not None
if is_incremental:
metadata['pre-build'] = source_info.fingerprint
@@ -1048,7 +1012,7 @@ class PropertyFiles(object):
A string with placeholders for the metadata offset/size info, e.g.
"payload.bin:679:343,payload_properties.txt:378:45,metadata: ".
"""
- return self._GetPropertyFilesString(input_zip, reserve_space=True)
+ return self.GetPropertyFilesString(input_zip, reserve_space=True)
class InsufficientSpaceException(Exception):
pass
@@ -1077,7 +1041,7 @@ class PropertyFiles(object):
InsufficientSpaceException: If the reserved length is insufficient to hold
the final string.
"""
- result = self._GetPropertyFilesString(input_zip, reserve_space=False)
+ result = self.GetPropertyFilesString(input_zip, reserve_space=False)
if len(result) > reserved_length:
raise self.InsufficientSpaceException(
'Insufficient reserved space: reserved={}, actual={}'.format(
@@ -1096,17 +1060,29 @@ class PropertyFiles(object):
Raises:
AssertionError: On finding a mismatch.
"""
- actual = self._GetPropertyFilesString(input_zip)
+ actual = self.GetPropertyFilesString(input_zip)
assert actual == expected, \
"Mismatching streaming metadata: {} vs {}.".format(actual, expected)
- def _GetPropertyFilesString(self, zip_file, reserve_space=False):
- """Constructs the property-files string per request."""
+ def GetPropertyFilesString(self, zip_file, reserve_space=False):
+ """
+ Constructs the property-files string per request.
+
+ Args:
+ zip_file: The input ZIP file.
+ reserved_length: The reserved length of the property-files string.
+
+ Returns:
+ A property-files string including the metadata offset/size info, e.g.
+ "payload.bin:679:343,payload_properties.txt:378:45,metadata: ".
+ """
def ComputeEntryOffsetSize(name):
"""Computes the zip entry offset and size."""
info = zip_file.getinfo(name)
- offset = info.header_offset + len(info.FileHeader())
+ offset = info.header_offset
+ offset += zipfile.sizeFileHeader
+ offset += len(info.extra) + len(info.filename)
size = info.file_size
return '%s:%d:%d' % (os.path.basename(name), offset, size)
@@ -1161,7 +1137,8 @@ class StreamingPropertyFiles(PropertyFiles):
'payload_properties.txt',
)
self.optional = (
- # care_map.txt is available only if dm-verity is enabled.
+ # care_map is available only if dm-verity is enabled.
+ 'care_map.pb',
'care_map.txt',
# compatibility.zip is available only if target supports Treble.
'compatibility.zip',
@@ -1230,10 +1207,12 @@ class AbOtaPropertyFiles(StreamingPropertyFiles):
payload, till the end of 'medatada_signature_message'.
"""
payload_info = input_zip.getinfo('payload.bin')
- payload_offset = payload_info.header_offset + len(payload_info.FileHeader())
+ payload_offset = payload_info.header_offset
+ payload_offset += zipfile.sizeFileHeader
+ payload_offset += len(payload_info.extra) + len(payload_info.filename)
payload_size = payload_info.file_size
- with input_zip.open('payload.bin', 'r') as payload_fp:
+ with input_zip.open('payload.bin') as payload_fp:
header_bin = payload_fp.read(24)
# network byte order (big-endian)
@@ -1346,16 +1325,21 @@ def FinalizeMetadata(metadata, input_file, output_file, needed_property_files):
for property_files in needed_property_files:
property_files.Verify(output_zip, metadata[property_files.name].strip())
+ # If requested, dump the metadata to a separate file.
+ output_metadata_path = OPTIONS.output_metadata_path
+ if output_metadata_path:
+ WriteMetadata(metadata, output_metadata_path)
+
def WriteBlockIncrementalOTAPackage(target_zip, source_zip, output_file):
- target_info = BuildInfo(OPTIONS.target_info_dict, OPTIONS.oem_dicts)
- source_info = BuildInfo(OPTIONS.source_info_dict, OPTIONS.oem_dicts)
+ target_info = common.BuildInfo(OPTIONS.target_info_dict, OPTIONS.oem_dicts)
+ source_info = common.BuildInfo(OPTIONS.source_info_dict, OPTIONS.oem_dicts)
target_api_version = target_info["recovery_api_version"]
source_api_version = source_info["recovery_api_version"]
if source_api_version == 0:
- print("WARNING: generating edify script for a source that "
- "can't install it.")
+ logger.warning(
+ "Generating edify script for a source that can't install it.")
script = edify_generator.EdifyGenerator(
source_api_version, target_info, fstab=source_info["fstab"])
@@ -1377,8 +1361,10 @@ def WriteBlockIncrementalOTAPackage(target_zip, source_zip, output_file):
device_specific = common.DeviceSpecificParams(
source_zip=source_zip,
source_version=source_api_version,
+ source_tmp=OPTIONS.source_tmp,
target_zip=target_zip,
target_version=target_api_version,
+ target_tmp=OPTIONS.target_tmp,
output_zip=output_zip,
script=script,
metadata=metadata,
@@ -1394,59 +1380,13 @@ def WriteBlockIncrementalOTAPackage(target_zip, source_zip, output_file):
target_recovery = common.GetBootableImage(
"/tmp/recovery.img", "recovery.img", OPTIONS.target_tmp, "RECOVERY")
- # When target uses 'BOARD_EXT4_SHARE_DUP_BLOCKS := true', images may contain
- # shared blocks (i.e. some blocks will show up in multiple files' block
- # list). We can only allocate such shared blocks to the first "owner", and
- # disable imgdiff for all later occurrences.
- allow_shared_blocks = (source_info.get('ext4_share_dup_blocks') == "true" or
- target_info.get('ext4_share_dup_blocks') == "true")
- system_src = common.GetSparseImage("system", OPTIONS.source_tmp, source_zip,
- allow_shared_blocks)
- system_tgt = common.GetSparseImage("system", OPTIONS.target_tmp, target_zip,
- allow_shared_blocks)
-
- blockimgdiff_version = max(
- int(i) for i in target_info.get("blockimgdiff_versions", "1").split(","))
- assert blockimgdiff_version >= 3
-
- # Check the first block of the source system partition for remount R/W only
- # if the filesystem is ext4.
- system_src_partition = source_info["fstab"]["/system"]
- check_first_block = system_src_partition.fs_type == "ext4"
- # Disable using imgdiff for squashfs. 'imgdiff -z' expects input files to be
- # in zip formats. However with squashfs, a) all files are compressed in LZ4;
- # b) the blocks listed in block map may not contain all the bytes for a given
- # file (because they're rounded to be 4K-aligned).
- system_tgt_partition = target_info["fstab"]["/system"]
- disable_imgdiff = (system_src_partition.fs_type == "squashfs" or
- system_tgt_partition.fs_type == "squashfs")
- system_diff = common.BlockDifference("system", system_tgt, system_src,
- check_first_block,
- version=blockimgdiff_version,
- disable_imgdiff=disable_imgdiff)
-
- if HasVendorPartition(target_zip):
- if not HasVendorPartition(source_zip):
- raise RuntimeError("can't generate incremental that adds /vendor")
- vendor_src = common.GetSparseImage("vendor", OPTIONS.source_tmp, source_zip,
- allow_shared_blocks)
- vendor_tgt = common.GetSparseImage("vendor", OPTIONS.target_tmp, target_zip,
- allow_shared_blocks)
-
- # Check first block of vendor partition for remount R/W only if
- # disk type is ext4
- vendor_partition = source_info["fstab"]["/vendor"]
- check_first_block = vendor_partition.fs_type == "ext4"
- disable_imgdiff = vendor_partition.fs_type == "squashfs"
- vendor_diff = common.BlockDifference("vendor", vendor_tgt, vendor_src,
- check_first_block,
- version=blockimgdiff_version,
- disable_imgdiff=disable_imgdiff)
- else:
- vendor_diff = None
+ block_diff_dict = GetBlockDifferences(target_zip=target_zip,
+ source_zip=source_zip,
+ target_info=target_info,
+ source_info=source_info,
+ device_specific=device_specific)
- #AddCompatibilityArchiveIfTrebleEnabled(
- #target_zip, output_zip, target_info, source_info)
+ CheckVintfIfTrebleEnabled(OPTIONS.target_tmp, target_info)
# Assertions (e.g. device properties check).
target_info.WriteDeviceAssertions(script, OPTIONS.oem_no_mount)
@@ -1510,12 +1450,8 @@ else if get_stage("%(bcb_dev)s") != "3/3" then
WriteFingerprintAssertion(script, target_info, source_info)
# Check the required cache size (i.e. stashed blocks).
- size = []
- if system_diff:
- size.append(system_diff.required_cache)
- if vendor_diff:
- size.append(vendor_diff.required_cache)
-
+ required_cache_sizes = [diff.required_cache for diff in
+ block_diff_dict.values()]
if updating_boot:
boot_type, boot_device = common.GetTypeAndDevice("/boot", source_info)
d = common.Difference(target_boot, source_boot)
@@ -1526,19 +1462,26 @@ else if get_stage("%(bcb_dev)s") != "3/3" then
else:
include_full_boot = False
- print("boot target: %d source: %d diff: %d" % (
- target_boot.size, source_boot.size, len(d)))
+ logger.info(
+ "boot target: %d source: %d diff: %d", target_boot.size,
+ source_boot.size, len(d))
+
+ common.ZipWriteStr(output_zip, "boot.img.p", d)
- common.ZipWriteStr(output_zip, "patch/boot.img.p", d)
+ script.PatchPartitionCheck(
+ "{}:{}:{}:{}".format(
+ boot_type, boot_device, target_boot.size, target_boot.sha1),
+ "{}:{}:{}:{}".format(
+ boot_type, boot_device, source_boot.size, source_boot.sha1))
- script.PatchCheck("%s:%s:%d:%s:%d:%s" %
- (boot_type, boot_device,
- source_boot.size, source_boot.sha1,
- target_boot.size, target_boot.sha1))
- size.append(target_boot.size)
+ required_cache_sizes.append(target_boot.size)
- if size:
- script.CacheFreeSpaceCheck(max(size))
+ if required_cache_sizes:
+ script.CacheFreeSpaceCheck(max(required_cache_sizes))
+
+ # Verify the existing partitions.
+ for diff in block_diff_dict.values():
+ diff.WriteVerifyScript(script, touched_blocks_only=True)
device_specific.IncrementalOTA_VerifyEnd()
@@ -1555,48 +1498,56 @@ else
# Stage 3/3: Make changes.
script.Comment("Stage 3/3")
- # Verify the existing partitions.
- system_diff.WriteVerifyScript(script, touched_blocks_only=True)
- if vendor_diff:
- vendor_diff.WriteVerifyScript(script, touched_blocks_only=True)
-
script.Comment("---- start making changes here ----")
device_specific.IncrementalOTA_InstallBegin()
- system_diff.WriteScript(script, output_zip,
- progress=0.8 if vendor_diff else 0.9)
+ progress_dict = {partition: 0.1 for partition in block_diff_dict}
+ progress_dict["system"] = 1 - len(block_diff_dict) * 0.1
- if vendor_diff:
- vendor_diff.WriteScript(script, output_zip, progress=0.1)
+ if OPTIONS.source_info_dict.get("use_dynamic_partitions") == "true":
+ if OPTIONS.target_info_dict.get("use_dynamic_partitions") != "true":
+ raise RuntimeError(
+ "can't generate incremental that disables dynamic partitions")
+ dynamic_partitions_diff = common.DynamicPartitionsDifference(
+ info_dict=OPTIONS.target_info_dict,
+ source_info_dict=OPTIONS.source_info_dict,
+ block_diffs=block_diff_dict.values(),
+ progress_dict=progress_dict)
+ dynamic_partitions_diff.WriteScript(
+ script, output_zip, write_verify_script=OPTIONS.verify)
+ else:
+ for block_diff in block_diff_dict.values():
+ block_diff.WriteScript(script, output_zip,
+ progress=progress_dict.get(block_diff.partition),
+ write_verify_script=OPTIONS.verify)
if OPTIONS.two_step:
common.ZipWriteStr(output_zip, "boot.img", target_boot.data)
script.WriteRawImage("/boot", "boot.img")
- print("writing full boot image (forced by two-step mode)")
+ logger.info("writing full boot image (forced by two-step mode)")
if not OPTIONS.two_step:
if updating_boot:
if include_full_boot:
- print("boot image changed; including full.")
+ logger.info("boot image changed; including full.")
script.Print("Installing boot image...")
script.WriteRawImage("/boot", "boot.img")
else:
# Produce the boot image by applying a patch to the current
# contents of the boot partition, and write it back to the
# partition.
- print("boot image changed; including patch.")
+ logger.info("boot image changed; including patch.")
script.Print("Patching boot image...")
script.ShowProgress(0.1, 10)
- script.ApplyPatch("%s:%s:%d:%s:%d:%s"
- % (boot_type, boot_device,
- source_boot.size, source_boot.sha1,
- target_boot.size, target_boot.sha1),
- "-",
- target_boot.size, target_boot.sha1,
- source_boot.sha1, "patch/boot.img.p")
+ script.PatchPartition(
+ '{}:{}:{}:{}'.format(
+ boot_type, boot_device, target_boot.size, target_boot.sha1),
+ '{}:{}:{}:{}'.format(
+ boot_type, boot_device, source_boot.size, source_boot.sha1),
+ 'boot.img.p')
else:
- print("boot image unchanged; skipping.")
+ logger.info("boot image unchanged; skipping.")
# Do device-specific installation (eg, write radio image).
device_specific.IncrementalOTA_InstallEnd()
@@ -1654,13 +1605,54 @@ def GetTargetFilesZipForSecondaryImages(input_file, skip_postinstall=False):
Returns:
The filename of the target-files.zip for generating secondary payload.
"""
+
+ def GetInfoForSecondaryImages(info_file):
+ """Updates info file for secondary payload generation.
+
+ Scan each line in the info file, and remove the unwanted partitions from
+ the dynamic partition list in the related properties. e.g.
+ "super_google_dynamic_partitions_partition_list=system vendor product"
+ will become "super_google_dynamic_partitions_partition_list=system".
+
+ Args:
+ info_file: The input info file. e.g. misc_info.txt.
+
+ Returns:
+ A string of the updated info content.
+ """
+
+ output_list = []
+ with open(info_file) as f:
+ lines = f.read().splitlines()
+
+ # The suffix in partition_list variables that follows the name of the
+ # partition group.
+ LIST_SUFFIX = 'partition_list'
+ for line in lines:
+ if line.startswith('#') or '=' not in line:
+ output_list.append(line)
+ continue
+ key, value = line.strip().split('=', 1)
+ if key == 'dynamic_partition_list' or key.endswith(LIST_SUFFIX):
+ partitions = value.split()
+ partitions = [partition for partition in partitions if partition
+ not in SECONDARY_PAYLOAD_SKIPPED_IMAGES]
+ output_list.append('{}={}'.format(key, ' '.join(partitions)))
+ elif key == 'virtual_ab' or key == "virtual_ab_retrofit":
+ # Remove virtual_ab flag from secondary payload so that OTA client
+ # don't use snapshots for secondary update
+ pass
+ else:
+ output_list.append(line)
+ return '\n'.join(output_list)
+
target_file = common.MakeTempFile(prefix="targetfiles-", suffix=".zip")
target_zip = zipfile.ZipFile(target_file, 'w', allowZip64=True)
- input_tmp = common.UnzipTemp(input_file, UNZIP_PATTERN)
with zipfile.ZipFile(input_file, 'r') as input_zip:
infolist = input_zip.infolist()
+ input_tmp = common.UnzipTemp(input_file, UNZIP_PATTERN)
for info in infolist:
unzipped_file = os.path.join(input_tmp, *info.filename.split('/'))
if info.filename == 'IMAGES/system_other.img':
@@ -1671,12 +1663,33 @@ def GetTargetFilesZipForSecondaryImages(input_file, skip_postinstall=False):
'IMAGES/system.map'):
pass
+ # Copy images that are not in SECONDARY_PAYLOAD_SKIPPED_IMAGES.
+ elif info.filename.startswith(('IMAGES/', 'RADIO/')):
+ image_name = os.path.basename(info.filename)
+ if image_name not in ['{}.img'.format(partition) for partition in
+ SECONDARY_PAYLOAD_SKIPPED_IMAGES]:
+ common.ZipWrite(target_zip, unzipped_file, arcname=info.filename)
+
# Skip copying the postinstall config if requested.
elif skip_postinstall and info.filename == POSTINSTALL_CONFIG:
pass
- elif info.filename.startswith(('META/', 'IMAGES/')):
- common.ZipWrite(target_zip, unzipped_file, arcname=info.filename)
+ elif info.filename.startswith('META/'):
+ # Remove the unnecessary partitions for secondary images from the
+ # ab_partitions file.
+ if info.filename == AB_PARTITIONS:
+ with open(unzipped_file) as f:
+ partition_list = f.read().splitlines()
+ partition_list = [partition for partition in partition_list if partition
+ and partition not in SECONDARY_PAYLOAD_SKIPPED_IMAGES]
+ common.ZipWriteStr(target_zip, info.filename, '\n'.join(partition_list))
+ # Remove the unnecessary partitions from the dynamic partitions list.
+ elif (info.filename == 'META/misc_info.txt' or
+ info.filename == DYNAMIC_PARTITION_INFO):
+ modified_info = GetInfoForSecondaryImages(unzipped_file)
+ common.ZipWriteStr(target_zip, info.filename, modified_info)
+ else:
+ common.ZipWrite(target_zip, unzipped_file, arcname=info.filename)
common.ZipClose(target_zip)
@@ -1708,8 +1721,91 @@ def GetTargetFilesZipWithoutPostinstallConfig(input_file):
return target_file
-def WriteABOTAPackageWithBrilloScript(target_file, output_file,
- source_file=None):
+def GetTargetFilesZipForRetrofitDynamicPartitions(input_file,
+ super_block_devices,
+ dynamic_partition_list):
+ """Returns a target-files.zip for retrofitting dynamic partitions.
+
+ This allows brillo_update_payload to generate an OTA based on the exact
+ bits on the block devices. Postinstall is disabled.
+
+ Args:
+ input_file: The input target-files.zip filename.
+ super_block_devices: The list of super block devices
+ dynamic_partition_list: The list of dynamic partitions
+
+ Returns:
+ The filename of target-files.zip with *.img replaced with super_*.img for
+ each block device in super_block_devices.
+ """
+ assert super_block_devices, "No super_block_devices are specified."
+
+ replace = {'OTA/super_{}.img'.format(dev): 'IMAGES/{}.img'.format(dev)
+ for dev in super_block_devices}
+
+ target_file = common.MakeTempFile(prefix="targetfiles-", suffix=".zip")
+ shutil.copyfile(input_file, target_file)
+
+ with zipfile.ZipFile(input_file) as input_zip:
+ namelist = input_zip.namelist()
+
+ input_tmp = common.UnzipTemp(input_file, RETROFIT_DAP_UNZIP_PATTERN)
+
+ # Remove partitions from META/ab_partitions.txt that is in
+ # dynamic_partition_list but not in super_block_devices so that
+ # brillo_update_payload won't generate update for those logical partitions.
+ ab_partitions_file = os.path.join(input_tmp, *AB_PARTITIONS.split('/'))
+ with open(ab_partitions_file) as f:
+ ab_partitions_lines = f.readlines()
+ ab_partitions = [line.strip() for line in ab_partitions_lines]
+ # Assert that all super_block_devices are in ab_partitions
+ super_device_not_updated = [partition for partition in super_block_devices
+ if partition not in ab_partitions]
+ assert not super_device_not_updated, \
+ "{} is in super_block_devices but not in {}".format(
+ super_device_not_updated, AB_PARTITIONS)
+ # ab_partitions -= (dynamic_partition_list - super_block_devices)
+ new_ab_partitions = common.MakeTempFile(prefix="ab_partitions", suffix=".txt")
+ with open(new_ab_partitions, 'w') as f:
+ for partition in ab_partitions:
+ if (partition in dynamic_partition_list and
+ partition not in super_block_devices):
+ logger.info("Dropping %s from ab_partitions.txt", partition)
+ continue
+ f.write(partition + "\n")
+ to_delete = [AB_PARTITIONS]
+
+ # Always skip postinstall for a retrofit update.
+ to_delete += [POSTINSTALL_CONFIG]
+
+ # Delete dynamic_partitions_info.txt so that brillo_update_payload thinks this
+ # is a regular update on devices without dynamic partitions support.
+ to_delete += [DYNAMIC_PARTITION_INFO]
+
+ # Remove the existing partition images as well as the map files.
+ to_delete += list(replace.values())
+ to_delete += ['IMAGES/{}.map'.format(dev) for dev in super_block_devices]
+
+ common.ZipDelete(target_file, to_delete)
+
+ target_zip = zipfile.ZipFile(target_file, 'a', allowZip64=True)
+
+ # Write super_{foo}.img as {foo}.img.
+ for src, dst in replace.items():
+ assert src in namelist, \
+ 'Missing {} in {}; {} cannot be written'.format(src, input_file, dst)
+ unzipped_file = os.path.join(input_tmp, *src.split('/'))
+ common.ZipWrite(target_zip, unzipped_file, arcname=dst)
+
+ # Write new ab_partitions.txt file
+ common.ZipWrite(target_zip, new_ab_partitions, arcname=AB_PARTITIONS)
+
+ common.ZipClose(target_zip)
+
+ return target_file
+
+
+def GenerateAbOtaPackage(target_file, output_file, source_file=None):
"""Generates an Android OTA package that has A/B update payload."""
# Stage the output zip package for package signing.
if not OPTIONS.no_signing:
@@ -1720,16 +1816,20 @@ def WriteABOTAPackageWithBrilloScript(target_file, output_file,
compression=zipfile.ZIP_DEFLATED)
if source_file is not None:
- target_info = BuildInfo(OPTIONS.target_info_dict, OPTIONS.oem_dicts)
- source_info = BuildInfo(OPTIONS.source_info_dict, OPTIONS.oem_dicts)
+ target_info = common.BuildInfo(OPTIONS.target_info_dict, OPTIONS.oem_dicts)
+ source_info = common.BuildInfo(OPTIONS.source_info_dict, OPTIONS.oem_dicts)
else:
- target_info = BuildInfo(OPTIONS.info_dict, OPTIONS.oem_dicts)
+ target_info = common.BuildInfo(OPTIONS.info_dict, OPTIONS.oem_dicts)
source_info = None
# Metadata to comply with Android OTA package format.
metadata = GetPackageMetadata(target_info, source_info)
- if OPTIONS.skip_postinstall:
+ if OPTIONS.retrofit_dynamic_partitions:
+ target_file = GetTargetFilesZipForRetrofitDynamicPartitions(
+ target_file, target_info.get("super_block_devices").strip().split(),
+ target_info.get("dynamic_partition_list").strip().split())
+ elif OPTIONS.skip_postinstall:
target_file = GetTargetFilesZipWithoutPostinstallConfig(target_file)
# Generate payload.
@@ -1769,22 +1869,24 @@ def WriteABOTAPackageWithBrilloScript(target_file, output_file,
target_zip = zipfile.ZipFile(target_file, "r")
if (target_info.get("verity") == "true" or
target_info.get("avb_enable") == "true"):
- care_map_path = "META/care_map.txt"
- namelist = target_zip.namelist()
- if care_map_path in namelist:
- care_map_data = target_zip.read(care_map_path)
- # In order to support streaming, care_map.txt needs to be packed as
+ care_map_list = [x for x in ["care_map.pb", "care_map.txt"] if
+ "META/" + x in target_zip.namelist()]
+
+ # Adds care_map if either the protobuf format or the plain text one exists.
+ if care_map_list:
+ care_map_name = care_map_list[0]
+ care_map_data = target_zip.read("META/" + care_map_name)
+ # In order to support streaming, care_map needs to be packed as
# ZIP_STORED.
- common.ZipWriteStr(output_zip, "care_map.txt", care_map_data,
+ common.ZipWriteStr(output_zip, care_map_name, care_map_data,
compress_type=zipfile.ZIP_STORED)
else:
- print("Warning: cannot find care map file in target_file package")
-
- AddCompatibilityArchiveIfTrebleEnabled(
- target_zip, output_zip, target_info, source_info)
+ logger.warning("Cannot find care map file in target_file package")
common.ZipClose(target_zip)
+ CheckVintfIfTrebleEnabled(target_file, target_info)
+
# We haven't written the metadata entry yet, which will be handled in
# FinalizeMetadata().
common.ZipClose(output_zip)
@@ -1800,6 +1902,66 @@ def WriteABOTAPackageWithBrilloScript(target_file, output_file,
FinalizeMetadata(metadata, staging_file, output_file, needed_property_files)
+def GenerateNonAbOtaPackage(target_file, output_file, source_file=None):
+ """Generates a non-A/B OTA package."""
+ # Sanity check the loaded info dicts first.
+ if OPTIONS.info_dict.get("no_recovery") == "true":
+ raise common.ExternalError(
+ "--- target build has specified no recovery ---")
+
+ # Non-A/B OTAs rely on /cache partition to store temporary files.
+ cache_size = OPTIONS.info_dict.get("cache_size")
+ if cache_size is None:
+ logger.warning("--- can't determine the cache partition size ---")
+ OPTIONS.cache_size = cache_size
+
+ if OPTIONS.extra_script is not None:
+ with open(OPTIONS.extra_script) as fp:
+ OPTIONS.extra_script = fp.read()
+
+ if OPTIONS.extracted_input is not None:
+ OPTIONS.input_tmp = OPTIONS.extracted_input
+ else:
+ logger.info("unzipping target target-files...")
+ OPTIONS.input_tmp = common.UnzipTemp(target_file, UNZIP_PATTERN)
+ OPTIONS.target_tmp = OPTIONS.input_tmp
+
+ # If the caller explicitly specified the device-specific extensions path via
+ # -s / --device_specific, use that. Otherwise, use META/releasetools.py if it
+ # is present in the target target_files. Otherwise, take the path of the file
+ # from 'tool_extensions' in the info dict and look for that in the local
+ # filesystem, relative to the current directory.
+ if OPTIONS.device_specific is None:
+ from_input = os.path.join(OPTIONS.input_tmp, "META", "releasetools.py")
+ if os.path.exists(from_input):
+ logger.info("(using device-specific extensions from target_files)")
+ OPTIONS.device_specific = from_input
+ else:
+ OPTIONS.device_specific = OPTIONS.info_dict.get("tool_extensions")
+
+ if OPTIONS.device_specific is not None:
+ OPTIONS.device_specific = os.path.abspath(OPTIONS.device_specific)
+
+ # Generate a full OTA.
+ if source_file is None:
+ with zipfile.ZipFile(target_file) as input_zip:
+ WriteFullOTAPackage(
+ input_zip,
+ output_file)
+
+ # Generate an incremental OTA.
+ else:
+ logger.info("unzipping source target-files...")
+ OPTIONS.source_tmp = common.UnzipTemp(
+ OPTIONS.incremental_source, UNZIP_PATTERN)
+ with zipfile.ZipFile(target_file) as input_zip, \
+ zipfile.ZipFile(source_file) as source_zip:
+ WriteBlockIncrementalOTAPackage(
+ input_zip,
+ source_zip,
+ output_file)
+
+
def main(argv):
def option_handler(o, a):
@@ -1854,10 +2016,25 @@ def main(argv):
OPTIONS.payload_signer = a
elif o == "--payload_signer_args":
OPTIONS.payload_signer_args = shlex.split(a)
+ elif o == "--payload_signer_maximum_signature_size":
+ OPTIONS.payload_signer_maximum_signature_size = a
+ elif o == "--payload_signer_key_size":
+ # TODO(Xunchang) remove this option after cleaning up the callers.
+ logger.warning("The option '--payload_signer_key_size' is deprecated."
+ " Use '--payload_signer_maximum_signature_size' instead.")
+ OPTIONS.payload_signer_maximum_signature_size = a
elif o == "--extracted_input_target_files":
OPTIONS.extracted_input = a
elif o == "--skip_postinstall":
OPTIONS.skip_postinstall = True
+ elif o == "--retrofit_dynamic_partitions":
+ OPTIONS.retrofit_dynamic_partitions = True
+ elif o == "--skip_compatibility_check":
+ OPTIONS.skip_compatibility_check = True
+ elif o == "--output_metadata_path":
+ OPTIONS.output_metadata_path = a
+ elif o == "--disable_fec_computation":
+ OPTIONS.disable_fec_computation = True
else:
return False
return True
@@ -1886,14 +2063,22 @@ def main(argv):
"log_diff=",
"payload_signer=",
"payload_signer_args=",
+ "payload_signer_maximum_signature_size=",
+ "payload_signer_key_size=",
"extracted_input_target_files=",
"skip_postinstall",
+ "retrofit_dynamic_partitions",
+ "skip_compatibility_check",
+ "output_metadata_path=",
+ "disable_fec_computation",
], extra_option_handler=option_handler)
if len(args) != 2:
common.Usage(__doc__)
sys.exit(1)
+ common.InitLogging()
+
if OPTIONS.downgrade:
# We should only allow downgrading incrementals (as opposed to full).
# Otherwise the device may go back from arbitrary build with this full
@@ -1914,9 +2099,8 @@ def main(argv):
with zipfile.ZipFile(args[0], 'r') as input_zip:
OPTIONS.info_dict = common.LoadInfoDict(input_zip)
- if OPTIONS.verbose:
- print("--- target info ---")
- common.DumpInfoDict(OPTIONS.info_dict)
+ logger.info("--- target info ---")
+ common.DumpInfoDict(OPTIONS.info_dict)
# Load the source build dict if applicable.
if OPTIONS.incremental_source is not None:
@@ -1924,13 +2108,29 @@ def main(argv):
with zipfile.ZipFile(OPTIONS.incremental_source, 'r') as source_zip:
OPTIONS.source_info_dict = common.LoadInfoDict(source_zip)
- if OPTIONS.verbose:
- print("--- source info ---")
- common.DumpInfoDict(OPTIONS.source_info_dict)
+ logger.info("--- source info ---")
+ common.DumpInfoDict(OPTIONS.source_info_dict)
# Load OEM dicts if provided.
OPTIONS.oem_dicts = _LoadOemDicts(OPTIONS.oem_source)
+ # Assume retrofitting dynamic partitions when base build does not set
+ # use_dynamic_partitions but target build does.
+ if (OPTIONS.source_info_dict and
+ OPTIONS.source_info_dict.get("use_dynamic_partitions") != "true" and
+ OPTIONS.target_info_dict.get("use_dynamic_partitions") == "true"):
+ if OPTIONS.target_info_dict.get("dynamic_partition_retrofit") != "true":
+ raise common.ExternalError(
+ "Expect to generate incremental OTA for retrofitting dynamic "
+ "partitions, but dynamic_partition_retrofit is not set in target "
+ "build.")
+ logger.info("Implicitly generating retrofit incremental OTA.")
+ OPTIONS.retrofit_dynamic_partitions = True
+
+ # Skip postinstall for retrofitting dynamic partitions.
+ if OPTIONS.retrofit_dynamic_partitions:
+ OPTIONS.skip_postinstall = True
+
ab_update = OPTIONS.info_dict.get("ab_update") == "true"
# Use the default key to sign the package if not specified with package_key.
@@ -1940,90 +2140,44 @@ def main(argv):
if OPTIONS.package_key is None:
OPTIONS.package_key = OPTIONS.info_dict.get(
"default_system_dev_certificate",
- "build/target/product/security/testkey")
+ "build/make/target/product/security/testkey")
# Get signing keys
OPTIONS.key_passwords = common.GetKeyPasswords([OPTIONS.package_key])
if ab_update:
- WriteABOTAPackageWithBrilloScript(
+ GenerateAbOtaPackage(
target_file=args[0],
output_file=args[1],
source_file=OPTIONS.incremental_source)
- print("done.")
- return
-
- # Sanity check the loaded info dicts first.
- if OPTIONS.info_dict.get("no_recovery") == "true":
- raise common.ExternalError(
- "--- target build has specified no recovery ---")
-
- # Non-A/B OTAs rely on /cache partition to store temporary files.
- cache_size = OPTIONS.info_dict.get("cache_size")
- if cache_size is None:
- print("--- can't determine the cache partition size ---")
- OPTIONS.cache_size = cache_size
-
- if OPTIONS.extra_script is not None:
- OPTIONS.extra_script = open(OPTIONS.extra_script).read()
-
- if OPTIONS.extracted_input is not None:
- OPTIONS.input_tmp = OPTIONS.extracted_input
else:
- print("unzipping target target-files...")
- OPTIONS.input_tmp = common.UnzipTemp(args[0], UNZIP_PATTERN)
- OPTIONS.target_tmp = OPTIONS.input_tmp
-
- # If the caller explicitly specified the device-specific extensions path via
- # -s / --device_specific, use that. Otherwise, use META/releasetools.py if it
- # is present in the target target_files. Otherwise, take the path of the file
- # from 'tool_extensions' in the info dict and look for that in the local
- # filesystem, relative to the current directory.
- if OPTIONS.device_specific is None:
- from_input = os.path.join(OPTIONS.input_tmp, "META", "releasetools.py")
- if os.path.exists(from_input):
- print("(using device-specific extensions from target_files)")
- OPTIONS.device_specific = from_input
- else:
- OPTIONS.device_specific = OPTIONS.info_dict.get("tool_extensions")
-
- if OPTIONS.device_specific is not None:
- OPTIONS.device_specific = os.path.abspath(OPTIONS.device_specific)
-
- # Generate a full OTA.
- if OPTIONS.incremental_source is None:
- with zipfile.ZipFile(args[0], 'r') as input_zip:
- WriteFullOTAPackage(
- input_zip,
- output_file=args[1])
+ GenerateNonAbOtaPackage(
+ target_file=args[0],
+ output_file=args[1],
+ source_file=OPTIONS.incremental_source)
- # Generate an incremental OTA.
- else:
- print("unzipping source target-files...")
- OPTIONS.source_tmp = common.UnzipTemp(
- OPTIONS.incremental_source, UNZIP_PATTERN)
- with zipfile.ZipFile(args[0], 'r') as input_zip, \
- zipfile.ZipFile(OPTIONS.incremental_source, 'r') as source_zip:
- WriteBlockIncrementalOTAPackage(
- input_zip,
- source_zip,
- output_file=args[1])
+ # Post OTA generation works.
+ if OPTIONS.incremental_source is not None and OPTIONS.log_diff:
+ logger.info("Generating diff logs...")
+ logger.info("Unzipping target-files for diffing...")
+ target_dir = common.UnzipTemp(args[0], TARGET_DIFFING_UNZIP_PATTERN)
+ source_dir = common.UnzipTemp(
+ OPTIONS.incremental_source, TARGET_DIFFING_UNZIP_PATTERN)
- if OPTIONS.log_diff:
- with open(OPTIONS.log_diff, 'w') as out_file:
- import target_files_diff
- target_files_diff.recursiveDiff(
- '', OPTIONS.source_tmp, OPTIONS.input_tmp, out_file)
+ with open(OPTIONS.log_diff, 'w') as out_file:
+ import target_files_diff
+ target_files_diff.recursiveDiff(
+ '', source_dir, target_dir, out_file)
- print("done.")
+ logger.info("done.")
if __name__ == '__main__':
try:
common.CloseInheritedPipes()
main(sys.argv[1:])
- except common.ExternalError as e:
- print("\n ERROR: %s\n" % (e,))
+ except common.ExternalError:
+ logger.exception("\n ERROR:\n")
sys.exit(1)
finally:
common.Cleanup()
diff --git a/products/mbox/g12a/recovery/init.recovery.amlogic.rc b/products/mbox/g12a/recovery/init.recovery.amlogic.rc
index e84b4c2..6d4b3dc 100644
--- a/products/mbox/g12a/recovery/init.recovery.amlogic.rc
+++ b/products/mbox/g12a/recovery/init.recovery.amlogic.rc
@@ -37,7 +37,7 @@ on boot
setprop sys.usb.configfs 1
setprop sys.usb.controller ${sys.usb.controller}
-service console /sbin/sh
+service console /system/bin/sh
seclabel u:r:shell:s0
console
diff --git a/products/mbox/gxl/recovery/init.recovery.amlogic.rc b/products/mbox/gxl/recovery/init.recovery.amlogic.rc
index e84b4c2..6d4b3dc 100755..100644
--- a/products/mbox/gxl/recovery/init.recovery.amlogic.rc
+++ b/products/mbox/gxl/recovery/init.recovery.amlogic.rc
@@ -37,7 +37,7 @@ on boot
setprop sys.usb.configfs 1
setprop sys.usb.controller ${sys.usb.controller}
-service console /sbin/sh
+service console /system/bin/sh
seclabel u:r:shell:s0
console
diff --git a/products/mbox/sm1/recovery/init.recovery.amlogic.rc b/products/mbox/sm1/recovery/init.recovery.amlogic.rc
index e84b4c2..6d4b3dc 100644
--- a/products/mbox/sm1/recovery/init.recovery.amlogic.rc
+++ b/products/mbox/sm1/recovery/init.recovery.amlogic.rc
@@ -37,7 +37,7 @@ on boot
setprop sys.usb.configfs 1
setprop sys.usb.controller ${sys.usb.controller}
-service console /sbin/sh
+service console /system/bin/sh
seclabel u:r:shell:s0
console
diff --git a/products/tv/tl1/recovery/init.recovery.amlogic.rc b/products/tv/tl1/recovery/init.recovery.amlogic.rc
index fe3c083..5b3918c 100755..100644
--- a/products/tv/tl1/recovery/init.recovery.amlogic.rc
+++ b/products/tv/tl1/recovery/init.recovery.amlogic.rc
@@ -36,7 +36,7 @@ on boot
setprop sys.usb.configfs 1
setprop sys.usb.controller ${sys.usb.controller}
-service console /sbin/sh
+service console /system/bin/sh
seclabel u:r:shell:s0
console
diff --git a/products/tv/tm2/recovery/init.recovery.amlogic.rc b/products/tv/tm2/recovery/init.recovery.amlogic.rc
index e84b4c2..6d4b3dc 100755..100644
--- a/products/tv/tm2/recovery/init.recovery.amlogic.rc
+++ b/products/tv/tm2/recovery/init.recovery.amlogic.rc
@@ -37,7 +37,7 @@ on boot
setprop sys.usb.configfs 1
setprop sys.usb.controller ${sys.usb.controller}
-service console /sbin/sh
+service console /system/bin/sh
seclabel u:r:shell:s0
console
diff --git a/releasetools.py b/releasetools.py
index 3921e3a..6d0da2f 100755
--- a/releasetools.py
+++ b/releasetools.py
@@ -183,8 +183,7 @@ def FullOTA_InstallBegin(info):
else:
SetBootloaderEnv(info.script, "upgrade_step", "3")
info.script.FormatPartition("/metadata")
- ZipOtherImage("super_empty", OPTIONS.input_tmp, info.output_zip)
- info.script.AppendExtra('package_extract_file("super_empty.img", "/dev/block/super");')
+ info.script.AppendExtra('delete_file("/cache/recovery/dynamic_partition_metadata.UPDATED");')
def FullOTA_InstallEnd(info):
print "amlogic extensions:FullOTA_InstallEnd"