A/B System (Part 1)

2018/07/31 a/b system


1. Pre-development preparation

1.1 Things to understand before development

update_engine

  • Code location: system/update_engine
  • Purpose: the entire update flow of the Android system lives here
  • Where is it invoked? It is started by init as a service (a sketch of driving it by hand follows the service definition below):
service update_engine /system/bin/update_engine --logtostderr --foreground
    class late_start
    user root
    group root system wakelock inet cache
    writepid /dev/cpuset/system-background/tasks
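
Besides being launched by init, update_engine can also be driven by hand when debugging. A minimal sketch, assuming a userdebug build where payload.bin and payload_properties.txt (both taken out of an OTA zip) have already been pushed to /data/ota_package/; the paths below are placeholders, not from the original build:

# on the device, in a root shell
update_engine_client --update --follow \
    --payload=file:///data/ota_package/payload.bin \
    --headers="$(cat /data/ota_package/payload_properties.txt)"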

boot_control

  • Code location (for Qualcomm): the interface is defined in hardware/libhardware/include/hardware/boot_control.h; the Qualcomm implementation lives in hardware/qcom/bootctrl
  • Purpose: comparable in spirit to legacy OTA, but it is used to tell the system which slot to boot from. Each slot carries three state flags: active, successful, and bootable (see the sketch below)
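
If the build ships the bootctl command-line utility (system/extras/bootctl), these slot flags can be inspected and flipped from a root shell, which is a quick way to see the boot_control HAL in action; the slot numbers below are just examples:

bootctl get-number-slots               # usually 2: slot 0 (_a) and slot 1 (_b)
bootctl get-current-slot               # slot the system booted from
bootctl is-slot-bootable 1             # query the "bootable" flag of slot 1
bootctl is-slot-marked-successful 0    # query the "successful" flag of slot 0
bootctl set-active-boot-slot 1         # make slot 1 the "active" slot for the next boot
bootctl mark-boot-successful           # mark the currently booted slot "successful"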

update_verifier

  • Code location: bootable/recovery/update_verifier
  • Purpose: runs on the first boot after an A/B update to verify the updated blocks (per the care map) before the newly booted slot is marked successful

payload.bin related

  • Call chain: WriteABOTAPackageWithBrilloScript -> Payload -> payload.Generate -> scripts/brillo_update_payload -> delta_generator -> payload_generator/generate_delta_main.cc
  • The Makefile invokes ota_from_target_files; this part is similar to legacy OTA:
$(INTERNAL_OTA_PACKAGE_TARGET): $(BUILT_TARGET_FILES_PACKAGE) $(OTATOOLS) \
        build/tools/releasetools/ota_from_target_files
    @echo "Package OTA: $@"
    $(hide) PATH=$(foreach p,$(INTERNAL_USERIMAGES_BINARY_PATHS),$(p):)$$PATH MKBOOTIMG=$(MKBOOTIMG) \
       ./build/tools/releasetools/ota_from_target_files -v \
       --block \
       --extracted_input_target_files $(patsubst %.zip,%,$(BUILT_TARGET_FILES_PACKAGE)) \
       -p $(HOST_OUT) \
       -k $(KEY_CERT_PAIR) \
       $(if $(OEM_OTA_CONFIG), -o $(OEM_OTA_CONFIG)) \
       $(BUILT_TARGET_FILES_PACKAGE) $@

.PHONY: otapackage
otapackage: $(INTERNAL_OTA_PACKAGE_TARGET)

  • Argument parsing is done with opts, args = getopt.getopt(...). Note getopt's return values: opts holds all of the options that were declared, while args holds the remaining arguments that carry no '-' or '--' prefix, so args[0] is the target_files package.
  • WriteABOTAPackageWithBrilloScript then runs the command: brillo_update_payload generate --payload /tmp/payload-XcxV8_.bin --target_image out/target/product/sdm845/obj/PACKAGING/target_files_intermediates/sdm845-target_files-1531877690.zip --max_timestamp 1531877948
  • brillo_update_payload itself:
#!/bin/bash



# Exit codes:
EX_UNSUPPORTED_DELTA=100

warn() {
  echo "brillo_update_payload: warning: $*" >&2
}

die() {
  echo "brillo_update_payload: error: $*" >&2
  exit 1
}

# The shflags library (source at external/shflags/) is used to parse command-line flags.
# Loads shflags. We first look at the default install location; then look for
# crosutils (chroot); finally check our own directory (au-generator zipfile).
load_shflags() {
  local my_dir="$(dirname "$(readlink -f "$0")")"
  echo "my dir = $my_dir"
  local path
  for path in /usr/share/misc {/usr/lib/crosutils,"${my_dir}"}/lib/shflags; do
    if [[ -r "${path}/shflags" ]]; then
      . "${path}/shflags" || die "Could not load ${path}/shflags."
      return
    fi
  done
  die "Could not find shflags."
}
# Locate and load shflags.
load_shflags

HELP_GENERATE="generate: Generate an unsigned update payload."
HELP_HASH="hash: Generate the hashes of the unsigned payload and metadata used \
for signing."
HELP_SIGN="sign: Insert the signatures into the unsigned payload."
HELP_PROPERTIES="properties: Extract payload properties to a file."

usage() {
  echo "Supported commands:"
  echo
  echo "${HELP_GENERATE}"
  echo "${HELP_HASH}"
  echo "${HELP_SIGN}"
  echo "${HELP_PROPERTIES}"
  echo
  echo "Use: \"$0 <command> --help\" for more options."
}
# $# is the number of positional arguments; -lt means less-than.
# Check that a command is specified.
if [[ $# -lt 1 ]]; then
  echo "Please specify a command [generate|hash|sign|properties]"
  exit 1
fi
# Take the first command-line argument; ${1:-} expands to $1 if it is set, otherwise to the empty string.
# Parse command.
COMMAND="${1:-}"
# Shift the positional parameters so that the remaining flags can be parsed next.
shift

case "${COMMAND}" in
  generate)
    FLAGS_HELP="${HELP_GENERATE}"
    ;;

  hash)
    FLAGS_HELP="${HELP_HASH}"
    ;;

  sign)
    FLAGS_HELP="${HELP_SIGN}"
    ;;

  properties)
    FLAGS_HELP="${HELP_PROPERTIES}"
    ;;
  *)
    echo "Unrecognized command: \"${COMMAND}\"" >&2
    usage >&2
    exit 1
    ;;
esac

# Flags
FLAGS_HELP="Usage: $0 ${COMMAND} [flags]
${FLAGS_HELP}"

if [[ "${COMMAND}" == "generate" ]]; then
# DEFINE_string comes from shflags. Format: DEFINE_string <flag name> <default value> <help text> [<short flag name>]
  DEFINE_string payload "" \
    "Path to output the generated unsigned payload file."
  DEFINE_string target_image "" \
    "Path to the target image that should be sent to clients."
  DEFINE_string source_image "" \
    "Optional: Path to a source image. If specified, this makes a delta update."
  DEFINE_string metadata_size_file "" \
    "Optional: Path to output metadata size."
  DEFINE_string max_timestamp "" \
    "Optional: The maximum unix timestamp of the OS allowed to apply this \
payload, should be set to a number higher than the build timestamp of the \
system running on the device, 0 if not specified."
fi
if [[ "${COMMAND}" == "hash" || "${COMMAND}" == "sign" ]]; then
  DEFINE_string unsigned_payload "" "Path to the input unsigned payload."
  DEFINE_string signature_size "" \
    "Signature sizes in bytes in the following format: size1:size2[:...]"
fi
if [[ "${COMMAND}" == "hash" ]]; then
  DEFINE_string metadata_hash_file "" \
    "Optional: Path to output metadata hash file."
  DEFINE_string payload_hash_file "" \
    "Optional: Path to output payload hash file."
fi
if [[ "${COMMAND}" == "sign" ]]; then
  DEFINE_string payload "" \
    "Path to output the generated unsigned payload file."
  DEFINE_string metadata_signature_file "" \
    "The metatada signatures in the following format: \
metadata_signature1:metadata_signature2[:...]"
  DEFINE_string payload_signature_file "" \
    "The payload signatures in the following format: \
payload_signature1:payload_signature2[:...]"
  DEFINE_string metadata_size_file "" \
    "Optional: Path to output metadata size."
fi
if [[ "${COMMAND}" == "properties" ]]; then
  DEFINE_string payload "" \
    "Path to the input signed or unsigned payload file."
  DEFINE_string properties_file "-" \
    "Path to output the extracted property files. If '-' is passed stdout will \
be used."
fi

DEFINE_string work_dir "${TMPDIR:-/tmp}" "Where to dump temporary files."

# "$@" expands to all remaining arguments, each one individually quoted; FLAGS then parses them.
# Parse command line flag arguments
FLAGS "$@" || exit 1
eval set -- "${FLAGS_ARGV}"
set -e

# Override the TMPDIR with the passed work_dir flags, which anyway defaults to
# ${TMPDIR}.
# Export TMPDIR into the environment. Note: every flag defined with DEFINE_string is accessed later as a shell variable prefixed with FLAGS_.
TMPDIR="${FLAGS_work_dir}"
export TMPDIR

# declare -A declares an associative array.
# Associative arrays from partition name to file in the source and target
# images. The size of the updated area must be the size of the file.
declare -A SRC_PARTITIONS
declare -A DST_PARTITIONS

# Associative arrays for the .map files associated with each src/dst partition
# file in SRC_PARTITIONS and DST_PARTITIONS.
declare -A SRC_PARTITIONS_MAP
declare -A DST_PARTITIONS_MAP

# List of partition names in order.
declare -a PARTITIONS_ORDER

# A list of temporary files to remove during cleanup.
CLEANUP_FILES=()

# Global options to force the version of the payload.
FORCE_MAJOR_VERSION=""
FORCE_MINOR_VERSION=""

# Path to the postinstall config file in target image if exists.
POSTINSTALL_CONFIG_FILE=""

# The fingerprint of zlib in the source image.
ZLIB_FINGERPRINT=""

# read_option_uint <file.txt> <option_key> [default_value]
#
# Reads the unsigned integer value associated with |option_key| in a key=value
# file |file.txt|. Prints the read value if found and valid, otherwise prints
# the |default_value|.
read_option_uint() {
  local file_txt="$1"
  local option_key="$2"
  local default_value="${3:-}"
  local value
  if value=$(look "${option_key}=" "${file_txt}" | tail -n 1); then
    if value=$(echo "${value}" | cut -f 2- -d "=" | grep -E "^[0-9]+$"); then
      echo "${value}"
      return
    fi
  fi
  echo "${default_value}"
}
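# Example (illustrative file name and values, not from the original post): if
# ue_config.txt contains the line "PAYLOAD_MINOR_VERSION=3", then
#   read_option_uint ue_config.txt PAYLOAD_MINOR_VERSION 2   # prints 3
#   read_option_uint ue_config.txt PAYLOAD_MAJOR_VERSION 2   # key missing, prints the default 2
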
# Truncate a file to the given size:
# truncate_file <file_path> <file_size>
#
# Truncate the given |file_path| to |file_size| using perl.
# The truncate binary might not be available.
truncate_file() {
  local file_path="$1"
  local file_size="$2"
  perl -e "open(FILE, \"+<\", \$ARGV[0]); \
           truncate(FILE, ${file_size}); \
           close(FILE);" "${file_path}"
}

# Create a temporary file in the work_dir with an optional pattern name.
# Prints the name of the newly created file.
create_tempfile() {
  local pattern="${1:-tempfile.XXXXXX}"
  mktemp --tmpdir="${FLAGS_work_dir}" "${pattern}"
}

cleanup() {
  local err=""
  rm -f "${CLEANUP_FILES[@]}" || err=1

  # If we are cleaning up after an error, or if we got an error during
  # cleanup (even if we eventually succeeded) return a non-zero exit
  # code. This triggers additional logging in most environments that call
  # this script.
  if [[ -n "${err}" ]]; then
    die "Cleanup encountered an error."
  fi
}

cleanup_on_error() {
  trap - INT TERM ERR EXIT
  cleanup
  die "Cleanup success after an error."
}

cleanup_on_exit() {
  trap - INT TERM ERR EXIT
  cleanup
}
# trap specifies the action to take when a signal is received; a common use is to
# run cleanup work when the script is interrupted.
# If INT, TERM or ERR is received, run the cleanup_on_error function.
trap cleanup_on_error INT TERM ERR
trap cleanup_on_exit EXIT


# extract_image <image> <partitions_array> [partitions_order]
#
# Detect the format of the |image| file and extract its updatable partitions
# into new temporary files. Add the list of partition names and its files to the
# associative array passed in |partitions_array|. If |partitions_order| is
# passed, set it to list of partition names in order.
extract_image() {
  local image="$1"

  # Brillo images are zip files. We detect the 4-byte magic header of the zip
  # file.
  # Check the format of the target package; 50 4b 03 04 are the bytes "PK\x03\x04" that start every zip file.
  local magic=$(head --bytes=4 "${image}" | hexdump -e '1/1 "%.2x"')
  if [[ "${magic}" == "504b0304" ]]; then
    echo "Detected .zip file, extracting Brillo image."
    # OTA packages for Android devices take this path.
    extract_image_brillo "$@"
    return
  fi

  # Chrome OS images are GPT partitioned disks. We should have the cgpt binary
  # bundled here and we will use it to extract the partitions, so the GPT
  # headers must be valid.
  if cgpt show -q -n "${image}" >/dev/null; then
    echo "Detected GPT image, extracting Chrome OS image."
    extract_image_cros "$@"
    return
  fi

  die "Couldn't detect the image format of ${image}"
}

# extract_image_cros <image.bin> <partitions_array> [partitions_order]
#
# Extract Chromium OS recovery images into new temporary files.
extract_image_cros() {
  local image="$1"
  local partitions_array="$2"
  local partitions_order="${3:-}"

  local kernel root
  kernel=$(create_tempfile "kernel.bin.XXXXXX")
  CLEANUP_FILES+=("${kernel}")
  root=$(create_tempfile "root.bin.XXXXXX")
  CLEANUP_FILES+=("${root}")

  cros_generate_update_payload --extract \
    --image "${image}" \
    --kern_path "${kernel}" --root_path "${root}" \
    --work_dir "${FLAGS_work_dir}" --outside_chroot

  # Chrome OS uses major_version 1 payloads for all versions, even if the
  # updater supports a newer major version.
  FORCE_MAJOR_VERSION="1"

  if [[ "${partitions_array}" == "SRC_PARTITIONS" ]]; then
    # Copy from zlib_fingerprint in source image to stdout.
    ZLIB_FINGERPRINT=$(e2cp "${root}":/etc/zlib_fingerprint -)
  fi

  # When generating legacy Chrome OS images, we need to use "boot" and "system"
  # for the partition names to be compatible with updating Brillo devices with
  # Chrome OS images.
  eval ${partitions_array}[boot]=\""${kernel}"\"
  eval ${partitions_array}[system]=\""${root}"\"

  if [[ -n "${partitions_order}" ]]; then
    eval "${partitions_order}=( \"system\" \"boot\" )"
  fi

  local part varname
  for part in boot system; do
    varname="${partitions_array}[${part}]"
    printf "md5sum of %s: " "${varname}"
    md5sum "${!varname}"
  done
}

# extract_image_brillo <target_files.zip> <partitions_array> [partitions_order]
#
# Extract the A/B updated partitions from a Brillo target_files zip file into
# new temporary files.
extract_image_brillo() {
  local image="$1"
  local partitions_array="$2"
  local partitions_order="${3:-}"

  local partitions=( "boot" "system" )
  local ab_partitions_list
  ab_partitions_list=$(create_tempfile "ab_partitions_list.XXXXXX")
  CLEANUP_FILES+=("${ab_partitions_list}")

  if unzip -p "${image}" "META/ab_partitions.txt" >"${ab_partitions_list}"; then
    if grep -v -E '^[a-zA-Z0-9_-]*$' "${ab_partitions_list}" >&2; then
      die "Invalid partition names found in the partition list."
    fi
    partitions=($(cat "${ab_partitions_list}"))
    if [[ ${#partitions[@]} -eq 0 ]]; then
      die "The list of partitions is empty. Can't generate a payload."
    fi
  else
    warn "No ab_partitions.txt found. Using default."
  fi
  # META/ab_partitions.txt is generated from AB_OTA_PARTITIONS in BoardConfig.mk; if it is missing, the default list is used.
  echo "List of A/B partitions: ${partitions[@]}"

  if [[ -n "${partitions_order}" ]]; then
    eval "${partitions_order}=(${partitions[@]})"
  fi

  # All Brillo updaters support major version 2.
  FORCE_MAJOR_VERSION="2"

  if [[ "${partitions_array}" == "SRC_PARTITIONS" ]]; then
    # Source image: incremental (delta) packages take this path.
    # META/update_engine_config.txt stores PAYLOAD_MAJOR_VERSION and PAYLOAD_MINOR_VERSION.
    local ue_config=$(create_tempfile "ue_config.XXXXXX")
    CLEANUP_FILES+=("${ue_config}")
    if ! unzip -p "${image}" "META/update_engine_config.txt" \
        >"${ue_config}"; then
      warn "No update_engine_config.txt found. Assuming pre-release image, \
using payload minor version 2"
    fi
    # For delta payloads, we use the major and minor version supported by the
    # old updater.
    FORCE_MINOR_VERSION=$(read_option_uint "${ue_config}" \
      "PAYLOAD_MINOR_VERSION" 2)
    FORCE_MAJOR_VERSION=$(read_option_uint "${ue_config}" \
      "PAYLOAD_MAJOR_VERSION" 2)

    # Brillo support for deltas started with minor version 3.
    if [[ "${FORCE_MINOR_VERSION}" -le 2 ]]; then
      warn "No delta support from minor version ${FORCE_MINOR_VERSION}. \
Disabling deltas for this source version."
      exit ${EX_UNSUPPORTED_DELTA}
    fi

    if [[ "${FORCE_MINOR_VERSION}" -ge 4 ]]; then
      ZLIB_FINGERPRINT=$(unzip -p "${image}" "META/zlib_fingerprint.txt")
    fi
  else
    # Target image: full packages take this path.
    local postinstall_config=$(create_tempfile "postinstall_config.XXXXXX")
    CLEANUP_FILES+=("${postinstall_config}")
    if unzip -p "${image}" "META/postinstall_config.txt" \
        >"${postinstall_config}"; then
      POSTINSTALL_CONFIG_FILE="${postinstall_config}"
    fi
  fi

  local part part_file temp_raw filesize
  # Iterate over the partitions; every partition that is A/B-updated is extracted from the target package.
  for part in "${partitions[@]}"; do
    part_file=$(create_tempfile "${part}.img.XXXXXX")
    CLEANUP_FILES+=("${part_file}")
    unzip -p "${image}" "IMAGES/${part}.img" >"${part_file}"


    # If the partition is stored as an Android sparse image file, we need to
    # convert them to a raw image for the update.
    # Call simg2img to convert the sparse image back to a raw image (3a ff 26 ed is the little-endian byte sequence of the sparse-image magic 0xED26FF3A).
    local magic=$(head --bytes=4 "${part_file}" | hexdump -e '1/1 "%.2x"')
    if [[ "${magic}" == "3aff26ed" ]]; then
      temp_raw=$(create_tempfile "${part}.raw.XXXXXX")
      CLEANUP_FILES+=("${temp_raw}")
      echo "Converting Android sparse image ${part}.img to RAW."
      simg2img "${part_file}" "${temp_raw}"
      # At this point, we can drop the contents of the old part_file file, but
      # we can't delete the file because it will be deleted in cleanup.
      true >"${part_file}"
      part_file="${temp_raw}"
    fi
    # Extract the .map file:
    # Extract the .map file (if one is available).
    part_map_file=$(create_tempfile "${part}.map.XXXXXX")
    CLEANUP_FILES+=("${part_map_file}")
    unzip -p "${image}" "IMAGES/${part}.map" >"${part_map_file}" || \
      part_map_file=""
	
    # delta_generator needs images whose size is a multiple of 4 KiB: new (target)
    # images are padded with zeros, old (source) images are truncated down.
    # delta_generator only supports images multiple of 4 KiB. For target images
    # we pad the data with zeros if needed, but for source images we truncate
    # down the data since the last block of the old image could be padded on
    # disk with unknown data.
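    # Worked example (illustrative numbers): filesize=10000 is not a multiple of
    # 4096. Rounding DOWN: 10000 & -4096 clears the low 12 bits, giving 8192.
    # Rounding UP: (10000 + 4095) & -4096 = 14095 & -4096 = 12288.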
    filesize=$(stat -c%s "${part_file}")
    if [[ $(( filesize % 4096 )) -ne 0 ]]; then
      if [[ "${partitions_array}" == "SRC_PARTITIONS" ]]; then
        echo "Rounding DOWN partition ${part}.img to a multiple of 4 KiB."
        : $(( filesize = filesize & -4096 ))
        if [[ ${filesize} == 0 ]]; then
          echo "Source partition ${part}.img is empty after rounding down," \
            "skipping."
          continue
        fi
      else
        echo "Rounding UP partition ${part}.img to a multiple of 4 KiB."
        : $(( filesize = (filesize + 4095) & -4096 ))
      fi
      truncate_file "${part_file}" "${filesize}"
    fi

    eval "${partitions_array}[\"${part}\"]=\"${part_file}\""
    eval "${partitions_array}_MAP[\"${part}\"]=\"${part_map_file}\""
    echo "Extracted ${partitions_array}[${part}]: ${filesize} bytes"
  done
}
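# After extract_image_brillo runs on a typical target_files zip whose A/B
# partitions are boot and system, the globals end up roughly like this
# (temporary file names are illustrative):
#   PARTITIONS_ORDER=( boot system )
#   DST_PARTITIONS[boot]=/tmp/boot.img.XXXXXX
#   DST_PARTITIONS[system]=/tmp/system.raw.XXXXXX
#   DST_PARTITIONS_MAP[system]=/tmp/system.map.XXXXXX
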
# Validate the flags of the generate command; FLAGS_payload etc. were already
# populated by the FLAGS "$@" call above.
# --payload FILENAME and --target_image FILENAME are mandatory.
validate_generate() {
  [[ -n "${FLAGS_payload}" ]] ||
    die "You must specify an output filename with --payload FILENAME"

  [[ -n "${FLAGS_target_image}" ]] ||
    die "You must specify a target image with --target_image FILENAME"
}

cmd_generate() {
# local declares a function-local variable.
  local payload_type="delta"
# Check whether source_image is empty: empty means a full package, non-empty means a delta package.
  if [[ -z "${FLAGS_source_image}" ]]; then
    payload_type="full"
  fi

  echo "Extracting images for ${payload_type} update."

  extract_image "${FLAGS_target_image}" DST_PARTITIONS PARTITIONS_ORDER
  if [[ "${payload_type}" == "delta" ]]; then
    extract_image "${FLAGS_source_image}" SRC_PARTITIONS
  fi
# payload.bin generation starts from here.
  echo "Generating ${payload_type} update."
  # Common payload args:
  # payload.bin
  GENERATOR_ARGS=( -out_file="${FLAGS_payload}" )

  local part old_partitions="" new_partitions="" partition_names=""
  local old_mapfiles="" new_mapfiles=""
  for part in "${PARTITIONS_ORDER[@]}"; do
    if [[ -n "${partition_names}" ]]; then
      partition_names+=":"
      new_partitions+=":"
      old_partitions+=":"
      new_mapfiles+=":"
      old_mapfiles+=":"
    fi
    partition_names+="${part}"
    new_partitions+="${DST_PARTITIONS[${part}]}"
    old_partitions+="${SRC_PARTITIONS[${part}]:-}"
    new_mapfiles+="${DST_PARTITIONS_MAP[${part}]:-}"
    old_mapfiles+="${SRC_PARTITIONS_MAP[${part}]:-}"
  done
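  # For a full payload over boot and system the loop above produces colon-joined
  # lists roughly like the following (paths illustrative):
  #   partition_names="boot:system"
  #   new_partitions="/tmp/boot.img.XXXXXX:/tmp/system.raw.XXXXXX"
  #   new_mapfiles=":/tmp/system.map.XXXXXX"   # boot.img usually has no .map
  #   old_partitions=":"                       # empty, there is no source image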

  # Target image args:
  GENERATOR_ARGS+=(
    -partition_names="${partition_names}"
    -new_partitions="${new_partitions}"
    -new_mapfiles="${new_mapfiles}"
  )
# Assemble the remaining generator arguments; what each of them is for is left to the later flow analysis.
  if [[ "${payload_type}" == "delta" ]]; then
    # Source image args:
    GENERATOR_ARGS+=(
      -old_partitions="${old_partitions}"
      -old_mapfiles="${old_mapfiles}"
    )
    if [[ -n "${FORCE_MINOR_VERSION}" ]]; then
      GENERATOR_ARGS+=( --minor_version="${FORCE_MINOR_VERSION}" )
    fi
    if [[ -n "${ZLIB_FINGERPRINT}" ]]; then
      GENERATOR_ARGS+=( --zlib_fingerprint="${ZLIB_FINGERPRINT}" )
    fi
  fi

  if [[ -n "${FORCE_MAJOR_VERSION}" ]]; then
    GENERATOR_ARGS+=( --major_version="${FORCE_MAJOR_VERSION}" )
  fi

  if [[ -n "${FLAGS_metadata_size_file}" ]]; then
    GENERATOR_ARGS+=( --out_metadata_size_file="${FLAGS_metadata_size_file}" )
  fi

  if [[ -n "${FLAGS_max_timestamp}" ]]; then
    GENERATOR_ARGS+=( --max_timestamp="${FLAGS_max_timestamp}" )
  fi

  if [[ -n "${POSTINSTALL_CONFIG_FILE}" ]]; then
    GENERATOR_ARGS+=(
      --new_postinstall_config_file="${POSTINSTALL_CONFIG_FILE}"
    )
  fi
# delta_generator is invoked here to do the actual payload.bin generation.
  echo "Running delta_generator with args: ${GENERATOR_ARGS[@]}"
  "${GENERATOR}" "${GENERATOR_ARGS[@]}"

  echo "Done generating ${payload_type} update."
}

validate_hash() {
  [[ -n "${FLAGS_signature_size}" ]] ||
    die "You must specify signature size with --signature_size SIZES"

  [[ -n "${FLAGS_unsigned_payload}" ]] ||
    die "You must specify the input unsigned payload with \
--unsigned_payload FILENAME"

  [[ -n "${FLAGS_payload_hash_file}" ]] ||
    die "You must specify --payload_hash_file FILENAME"

  [[ -n "${FLAGS_metadata_hash_file}" ]] ||
    die "You must specify --metadata_hash_file FILENAME"
}

cmd_hash() {
  "${GENERATOR}" \
      -in_file="${FLAGS_unsigned_payload}" \
      -signature_size="${FLAGS_signature_size}" \
      -out_hash_file="${FLAGS_payload_hash_file}" \
      -out_metadata_hash_file="${FLAGS_metadata_hash_file}"

  echo "Done generating hash."
}

validate_sign() {
  [[ -n "${FLAGS_signature_size}" ]] ||
    die "You must specify signature size with --signature_size SIZES"

  [[ -n "${FLAGS_unsigned_payload}" ]] ||
    die "You must specify the input unsigned payload with \
--unsigned_payload FILENAME"

  [[ -n "${FLAGS_payload}" ]] ||
    die "You must specify the output signed payload with --payload FILENAME"

  [[ -n "${FLAGS_payload_signature_file}" ]] ||
    die "You must specify the payload signature file with \
--payload_signature_file SIGNATURES"

  [[ -n "${FLAGS_metadata_signature_file}" ]] ||
    die "You must specify the metadata signature file with \
--metadata_signature_file SIGNATURES"
}

cmd_sign() {
  GENERATOR_ARGS=(
    -in_file="${FLAGS_unsigned_payload}"
    -signature_size="${FLAGS_signature_size}"
    -signature_file="${FLAGS_payload_signature_file}"
    -metadata_signature_file="${FLAGS_metadata_signature_file}"
    -out_file="${FLAGS_payload}"
  )

  if [[ -n "${FLAGS_metadata_size_file}" ]]; then
    GENERATOR_ARGS+=( --out_metadata_size_file="${FLAGS_metadata_size_file}" )
  fi

  "${GENERATOR}" "${GENERATOR_ARGS[@]}"
  echo "Done signing payload."
}
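
# Typical off-line signing flow, using the flags defined above (file names are
# illustrative; the RSA signing of the two hash files happens outside this
# script, e.g. on a signing server; 256 corresponds to a 2048-bit key):
#   brillo_update_payload hash --unsigned_payload payload.bin --signature_size 256 \
#       --payload_hash_file payload.hash --metadata_hash_file metadata.hash
#   ... sign payload.hash and metadata.hash with the OTA key ...
#   brillo_update_payload sign --unsigned_payload payload.bin --signature_size 256 \
#       --payload payload_signed.bin --payload_signature_file payload.sig \
#       --metadata_signature_file metadata.sig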

validate_properties() {
  [[ -n "${FLAGS_payload}" ]] ||
    die "You must specify the payload file with --payload FILENAME"

  [[ -n "${FLAGS_properties_file}" ]] ||
    die "You must specify a non empty --properties_file FILENAME"
}

cmd_properties() {
  "${GENERATOR}" \
      -in_file="${FLAGS_payload}" \
      -properties_file="${FLAGS_properties_file}"
}
# Locate the delta_generator binary; its source lives in system/update_engine/payload_generator/generate_delta_main.cc.
# Sanity check that the real generator exists:
GENERATOR="$(which delta_generator || true)"
[[ -x "${GENERATOR}" ]] || die "can't find delta_generator"

case "$COMMAND" in
  generate) validate_generate
            cmd_generate
            ;;
  hash) validate_hash
        cmd_hash
        ;;
  sign) validate_sign
        cmd_sign
        ;;
  properties) validate_properties
              cmd_properties
              ;;
esac

Summary: the script mainly performs the first pass over the arguments handed in by the Python script and unpacks the image files from the target package; the real work happens in the C++ program invoked afterwards. Overall it does the following:

  • shflags parses the command-line flags: FLAGS_payload holds the temporary payload-xxx.bin path, and FLAGS_target_image holds the target package path used for the later extraction
  • cmd_generate unpacks the target package. It reads META/ab_partitions.txt, which determines exactly which images get extracted, and then pulls the corresponding images out of IMAGES/ in the target package. DST_PARTITIONS is an associative array mapping each partition name to its extracted image file, PARTITIONS_ORDER records the order of the partitions for the update, and the *_MAP arrays hold the matching .map files

Full-package payload.bin generation analysis

After the preprocessing above, delta_generator is invoked. The command that generates the full package is:

-out_file=/tmp/payload-XcxV8_.bin -partition_names=boot:system -new_partitions=/tmp/boot.img.OtvKW3:/tmp/system.raw.f4Zj4E -new_mapfiles=:/tmp/system.map.rx8TfX --major_version=2 --max_timestamp=1531877948

  • system/update_engine/payload_generator/generate_delta_main.cc

Key class: OperationsGenerator (the flow of upgrading the old image to the new image is controlled in it)

  • payload.bin itself is produced in the GenerateUpdatePayloadFile function. To be continued…
