armbian-next: artifacts: firmware/rootfs and actual usage of artifacts during image build

- artifacts: introduce `ARTIFACT_IGNORE_CACHE=yes`
- artifacts: introduce `DONT_BUILD_ARTIFACTS`, a list of artifacts that fail the build if not found in cache
- kernel_package_source() is no more
- a long dissertation about kernels, families, and the universe
- artifacts: actually use rootfs artifact for image build
- artifacts: detangle via artifact_base_dir
- artifacts: rootfs: use folders in artifact_name; include cache_type
- artifacts: some cleanups / validations
- rootfs artifact; drop old rootfs cli
- artifacts: new CLI shortcuts; remove old firmware CLI
- artifacts: full firmware & usage
- use firmware artifacts in image build and install its debs
- kernel artifact sans legacy; use tmpdir for .deb target for all packages
- legacy artifact versions are no more; pack/unpack now in common obtain
- artifacts: uboot: cleanup legacy renaming, use artifact version directly
- artifacts: add firmware (small) artifact
- deploy uboot to loop from artifact; allow tty to artifact; todos for cleaning
- fixes, kernel dtb/headers conditional; remove `.git` from Makefile url; use mapfile for finding files to hash
  - completely remove KERNEL_HAS_WORKING_HEADERS_FULL_SOURCE and `kernel_package_callback_linux_headers_full_source()`
  - don't use realpath for artifact_file_relative
  - curb some warnings
  - fix: only install headers & dtbs if such exist
- kernel .config hook modification hash versioning
- OCI_TARGET_BASE vs per-artifact defaults; only deploy to remote from CLI with OTB
- artifact revolver & installing into image
  - add artifact_map_packages and artifact_map_debs dicts
  - revolver accumulates all info
  - REPOSITORY_INSTALL is no more (for uboot/kernel, later others)
  - rename `VER` to `IMAGE_INSTALLED_KERNEL_VERSION`
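
A rough sketch of how the new knobs combine on an invocation. The variable names (`ARTIFACT_IGNORE_CACHE`, `DONT_BUILD_ARTIFACTS`, `OCI_TARGET_BASE`, `WHAT`) come from this commit; the `artifact`/`build` command spellings and the example board/branch/release values are illustrative assumptions, not taken from the diff:

	# force a rebuild of the kernel artifact, bypassing local/remote cache checks
	ARTIFACT_IGNORE_CACHE=yes ./compile.sh artifact WHAT=kernel BOARD=uefi-x86 BRANCH=edge

	# fail the build (instead of building from sources) if the rootfs artifact is not cached;
	# meant for workers that should only ever consume pre-built artifacts
	DONT_BUILD_ARTIFACTS="rootfs" ./compile.sh build BOARD=uefi-x86 BRANCH=edge RELEASE=jammy

	# setting OCI_TARGET_BASE implies ignore-local + skip-unpack + deploy-to-remote (see cli_artifact_run below)
	OCI_TARGET_BASE="ghcr.io/someuser/armbian-cache/" ./compile.sh artifact WHAT=uboot BOARD=odroidhc4 BRANCH=current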
Commit 9bffa5e749 by Ricardo Pardini, 2023-02-03 15:36:28 +01:00 (parent 8900eea0af)
41 changed files with 1267 additions and 1079 deletions


@@ -15,7 +15,7 @@ function extension_prepare_config__prepare_flash_kernel() {
 	export BOOTSIZE=0                                  # No separate /boot, flash-kernel will "flash" the kernel+initrd to the firmware part.
 	export UEFI_MOUNT_POINT="/boot/firmware"           # mount uefi partition at /boot/firmware
 	export CLOUD_INIT_CONFIG_LOCATION="/boot/firmware" # use /boot/firmware for cloud-init as well
-	export VER="${FK__PUBLISHED_KERNEL_VERSION}" # For the VERSION
+	export IMAGE_INSTALLED_KERNEL_VERSION="${FK__PUBLISHED_KERNEL_VERSION}" # For the VERSION
 	export EXTRA_BSP_NAME="${EXTRA_BSP_NAME}-fk${FK__PUBLISHED_KERNEL_VERSION}" # Unique bsp name.
 }


@@ -58,7 +58,7 @@ function extension_prepare_config__prepare_grub_standard() {
 	fi
 	if [[ "${DISTRO_GENERIC_KERNEL}" == "yes" ]]; then
-		export VER="${DISTRO_KERNEL_VER}"
+		export IMAGE_INSTALLED_KERNEL_VERSION="${DISTRO_KERNEL_VER}"
 		unset KERNELSOURCE                 # This should make Armbian skip most stuff. At least, I hacked it to.
 		export INSTALL_ARMBIAN_FIRMWARE=no # Should skip build and install of Armbian-firmware.
 	else
@@ -96,8 +96,8 @@ pre_umount_final_image__install_grub() {
if [[ "${UEFI_GRUB}" == "skip" ]]; then if [[ "${UEFI_GRUB}" == "skip" ]]; then
display_alert "Skipping GRUB install" "due to UEFI_GRUB:${UEFI_GRUB}" "debug" display_alert "Skipping GRUB install" "due to UEFI_GRUB:${UEFI_GRUB}" "debug"
if [[ "${DISTRO_GENERIC_KERNEL}" == "yes" ]]; then if [[ "${DISTRO_GENERIC_KERNEL}" == "yes" ]]; then
display_alert "Skipping GRUB install" "due to UEFI_GRUB:${UEFI_GRUB} - calling update_initramfs directly with VER=${DISTRO_KERNEL_VER}" "debug" display_alert "Skipping GRUB install" "due to UEFI_GRUB:${UEFI_GRUB} - calling update_initramfs directly with IMAGE_INSTALLED_KERNEL_VERSION=${DISTRO_KERNEL_VER}" "debug"
VER="${DISTRO_KERNEL_VER}" update_initramfs "${MOUNT}" IMAGE_INSTALLED_KERNEL_VERSION="${DISTRO_KERNEL_VER}" update_initramfs "${MOUNT}"
fi fi
return 0 return 0
fi fi
@@ -119,8 +119,8 @@ pre_umount_final_image__install_grub() {
cp "${SRC}"/packages/blobs/splash/grub.png "${MOUNT}"/usr/share/images/grub/wallpaper.png cp "${SRC}"/packages/blobs/splash/grub.png "${MOUNT}"/usr/share/images/grub/wallpaper.png
if [[ "${DISTRO_GENERIC_KERNEL}" == "yes" ]]; then if [[ "${DISTRO_GENERIC_KERNEL}" == "yes" ]]; then
display_alert "Using Distro Generic Kernel" "${EXTENSION}: update_initramfs with VER: ${DISTRO_KERNEL_VER}" "debug" display_alert "Using Distro Generic Kernel" "${EXTENSION}: update_initramfs with IMAGE_INSTALLED_KERNEL_VERSION: ${DISTRO_KERNEL_VER}" "debug"
VER="${DISTRO_KERNEL_VER}" update_initramfs "${MOUNT}" IMAGE_INSTALLED_KERNEL_VERSION="${DISTRO_KERNEL_VER}" update_initramfs "${MOUNT}"
fi fi
# Mount the chroot... # Mount the chroot...


@@ -12,10 +12,13 @@ function extension_prepare_config__prepare_localmodconfig() {
 }
 # This needs much more love than this. can be used to make "light" versions of kernels, that compile 3x-5x faster or more
-function custom_kernel_config_post_defconfig__apply_localmodconfig() {
+function custom_kernel_config__apply_localmodconfig() {
 	if [[ -f "${lsmod_file}" ]]; then
-		display_alert "${EXTENSION}: running localmodconfig on Kernel tree" "${LSMOD}" "warn"
-		run_kernel_make "LSMOD=${lsmod_file}" localmodconfig "> /dev/null" # quoted redirect to hide output even from logfile, it's way too long. stderr still shows
+		kernel_config_modifying_hashes+=("$(cat "${lsmod_file}")")
+		if [[ -f .config ]]; then
+			display_alert "${EXTENSION}: running localmodconfig on Kernel tree" "${LSMOD}" "warn"
+			run_kernel_make "LSMOD=${lsmod_file}" localmodconfig "> /dev/null" # quoted redirect to hide output even from logfile, it's way too long. stderr still shows
+		fi
 	else
 		display_alert "${EXTENSION}: lsmod file disappeared?" "${lsmod_file}" "err"
 		return 1 # exit with an error; this is not what the user expected


@@ -3,6 +3,8 @@ function extension_prepare_config__prepare_localmodconfig() {
 }
 # This produces non-working kernels. It's meant for testing kernel image build and packaging.
-function custom_kernel_config_post_defconfig__apply_mod2noconfig() {
-	run_kernel_make mod2noconfig
+function custom_kernel_config__apply_mod2noconfig() {
+	kernel_config_modifying_hashes+=("mod2noconfig")
+	[[ -f .config ]] && run_kernel_make mod2noconfig
+	return 0 # short-circuit above
 }
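
The two hook renames above (custom_kernel_config_post_defconfig__* -> custom_kernel_config__*) illustrate the new contract: a hook always appends to the kernel_config_modifying_hashes array (so the hook hash can be computed without a kernel tree), and only touches the .config when one actually exists. A minimal hypothetical hook following that contract (not part of this commit; `scripts/config` is the kernel tree's own config tool):

	function custom_kernel_config__enable_example_option() {
		kernel_config_modifying_hashes+=("CONFIG_EXAMPLE=y") # always: feeds the hook-modification hash in the artifact version
		if [[ -f .config ]]; then                            # only during the real build, when a .config exists
			./scripts/config --enable CONFIG_EXAMPLE
		fi
		return 0
	}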


@@ -0,0 +1,87 @@
function artifact_firmware_prepare_version() {
	artifact_version="undetermined"        # outer scope
	artifact_version_reason="undetermined" # outer scope

	local ARMBIAN_FIRMWARE_SOURCE="${ARMBIAN_FIRMWARE_GIT_SOURCE:-"https://github.com/armbian/firmware"}"
	local ARMBIAN_FIRMWARE_BRANCH="branch:${ARMBIAN_FIRMWARE_GIT_BRANCH:-"master"}"

	debug_var ARMBIAN_FIRMWARE_SOURCE
	debug_var ARMBIAN_FIRMWARE_BRANCH

	declare short_hash_size=4

	declare -A GIT_INFO=([GIT_SOURCE]="${ARMBIAN_FIRMWARE_SOURCE}" [GIT_REF]="${ARMBIAN_FIRMWARE_BRANCH}")
	run_memoized GIT_INFO "git2info" memoized_git_ref_to_info
	debug_dict GIT_INFO

	declare fake_unchanging_base_version="1"
	declare short_sha1="${GIT_INFO[SHA1]:0:${short_hash_size}}"

	# get the hashes of the lib/ bash sources involved...
	declare hash_files="undetermined"
	calculate_hash_for_files "${SRC}"/lib/functions/compilation/packages/firmware-deb.sh "${SRC}"/lib/functions/artifacts/artifact-firmware.sh
	declare bash_hash="${hash_files}"
	declare bash_hash_short="${bash_hash:0:${short_hash_size}}"

	# outer scope
	artifact_version="${fake_unchanging_base_version}-SA${short_sha1}-B${bash_hash_short}"

	declare -a reasons=(
		"Armbian firmware git revision \"${GIT_INFO[SHA1]}\""
		"framework bash hash \"${bash_hash}\""
	)
	artifact_version_reason="${reasons[*]}" # outer scope

	artifact_map_packages=(
		["armbian-firmware"]="armbian-firmware"
	)
	artifact_map_debs=(
		["armbian-firmware"]="armbian-firmware_${artifact_version}_all.deb"
	)

	artifact_name="armbian-firmware"
	artifact_type="deb"
	artifact_base_dir="${DEB_STORAGE}"
	artifact_final_file="${DEB_STORAGE}/armbian-firmware_${artifact_version}_all.deb"

	return 0
}

function artifact_firmware_build_from_sources() {
	FULL="" REPLACE="-full" LOG_SECTION="compile_firmware" do_with_logging compile_firmware
}

function artifact_firmware_cli_adapter_pre_run() {
	declare -g ARMBIAN_COMMAND_REQUIRE_BASIC_DEPS="yes" # Require prepare_host_basic to run before the command.
	# "gimme root on a Linux machine"
	cli_standard_relaunch_docker_or_sudo
}

function artifact_firmware_cli_adapter_config_prep() {
	declare KERNEL_ONLY="yes"                            # @TODO: this is a hack, for the board/family code's benefit...
	use_board="no" prep_conf_main_minimal_ni < /dev/null # no stdin for this, so it bombs if tries to be interactive.
}

function artifact_firmware_get_default_oci_target() {
	artifact_oci_target_base="ghcr.io/rpardini/armbian-release/"
}

function artifact_firmware_is_available_in_local_cache() {
	is_artifact_available_in_local_cache
}

function artifact_firmware_is_available_in_remote_cache() {
	is_artifact_available_in_remote_cache
}

function artifact_firmware_obtain_from_remote_cache() {
	obtain_artifact_from_remote_cache
}

function artifact_firmware_deploy_to_remote_cache() {
	upload_artifact_to_oci
}
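
Reading artifact_firmware_prepare_version() above, a resulting version string (hash values illustrative) decomposes as:

	1-SAf3a1-B9c2e
	|    |      `-- B<hash>: short hash of the involved lib/ bash sources
	|    `-- SA<hash>: short SHA1 of the armbian/firmware git head
	`-- fake_unchanging_base_version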


@@ -0,0 +1,94 @@
function artifact_full_firmware_prepare_version() {
	artifact_version="undetermined"        # outer scope
	artifact_version_reason="undetermined" # outer scope

	local ARMBIAN_FIRMWARE_SOURCE="${ARMBIAN_FIRMWARE_GIT_SOURCE:-"https://github.com/armbian/firmware"}"
	local ARMBIAN_FIRMWARE_BRANCH="branch:${ARMBIAN_FIRMWARE_GIT_BRANCH:-"master"}"

	debug_var ARMBIAN_FIRMWARE_SOURCE
	debug_var ARMBIAN_FIRMWARE_BRANCH
	debug_var MAINLINE_FIRMWARE_SOURCE

	declare short_hash_size=4

	declare -A GIT_INFO=([GIT_SOURCE]="${ARMBIAN_FIRMWARE_SOURCE}" [GIT_REF]="${ARMBIAN_FIRMWARE_BRANCH}")
	run_memoized GIT_INFO "git2info" memoized_git_ref_to_info
	debug_dict GIT_INFO

	declare -A GIT_INFO_MAINLINE=([GIT_SOURCE]="${MAINLINE_FIRMWARE_SOURCE}" [GIT_REF]="branch:main")
	run_memoized GIT_INFO_MAINLINE "git2info" memoized_git_ref_to_info
	debug_dict GIT_INFO_MAINLINE

	declare fake_unchanging_base_version="1"
	declare short_sha1="${GIT_INFO[SHA1]:0:${short_hash_size}}"
	declare short_sha1_mainline="${GIT_INFO_MAINLINE[SHA1]:0:${short_hash_size}}"

	# get the hashes of the lib/ bash sources involved...
	declare hash_files="undetermined"
	calculate_hash_for_files "${SRC}"/lib/functions/compilation/packages/firmware-deb.sh "${SRC}"/lib/functions/artifacts/artifact-firmware.sh
	declare bash_hash="${hash_files}"
	declare bash_hash_short="${bash_hash:0:${short_hash_size}}"

	# outer scope
	artifact_version="${fake_unchanging_base_version}-SA${short_sha1}-SM${short_sha1_mainline}-B${bash_hash_short}"

	declare -a reasons=(
		"Armbian firmware git revision \"${GIT_INFO[SHA1]}\""
		"Mainline firmware git revision \"${GIT_INFO_MAINLINE[SHA1]}\""
		"framework bash hash \"${bash_hash}\""
	)
	artifact_version_reason="${reasons[*]}" # outer scope

	artifact_map_packages=(
		["armbian-firmware-full"]="armbian-firmware-full"
	)
	artifact_map_debs=(
		["armbian-firmware-full"]="armbian-firmware-full_${artifact_version}_all.deb"
	)

	artifact_name="armbian-firmware-full"
	artifact_type="deb"
	artifact_base_dir="${DEB_STORAGE}"
	artifact_final_file="${DEB_STORAGE}/armbian-firmware-full_${artifact_version}_all.deb"

	return 0
}

function artifact_full_firmware_build_from_sources() {
	FULL="-full" REPLACE="" LOG_SECTION="compile_firmware_full" do_with_logging compile_firmware
}

function artifact_full_firmware_cli_adapter_pre_run() {
	declare -g ARMBIAN_COMMAND_REQUIRE_BASIC_DEPS="yes" # Require prepare_host_basic to run before the command.
	# "gimme root on a Linux machine"
	cli_standard_relaunch_docker_or_sudo
}

function artifact_full_firmware_cli_adapter_config_prep() {
	declare KERNEL_ONLY="yes"                            # @TODO: this is a hack, for the board/family code's benefit...
	use_board="no" prep_conf_main_minimal_ni < /dev/null # no stdin for this, so it bombs if tries to be interactive.
}

function artifact_full_firmware_get_default_oci_target() {
	artifact_oci_target_base="ghcr.io/rpardini/armbian-release/"
}

function artifact_full_firmware_is_available_in_local_cache() {
	is_artifact_available_in_local_cache
}

function artifact_full_firmware_is_available_in_remote_cache() {
	is_artifact_available_in_remote_cache
}

function artifact_full_firmware_obtain_from_remote_cache() {
	obtain_artifact_from_remote_cache
}

function artifact_full_firmware_deploy_to_remote_cache() {
	upload_artifact_to_oci
}


@@ -0,0 +1,171 @@
# This is run in a logging section.
# Prepare the version, "sans-repos": just the armbian/build repo contents are available.
# It is OK to reach out to the internet for a curl or ls-remote, but not for a git clone, but
# you *must* _cache_ results on disk @TODO with a TTL determined by live code, not preset in cached entries.
function artifact_kernel_prepare_version() {
	artifact_version="undetermined"        # outer scope
	artifact_version_reason="undetermined" # outer scope

	# - Given KERNELSOURCE and KERNELBRANCH, get:
	#   - SHA1 of the commit (this is generic... and used for other pkgs)
	#   - The first 10 lines of the root Makefile at that commit (cached lookup, same SHA1=same Makefile, http GET, not cloned)
	#     - This gives us the full version plus codename, plus catches "version shenanigans" possibly done by patches...
	#     - @TODO: Make sure this is sane, ref KERNEL_MAJOR_MINOR; it's transitional, but we need to be sure it's sane.
	# - Get the drivers patch hash (given LINUXFAMILY and the vX.Z.Y version) - the harness can do this by hashing patches and bash code
	# - Get the kernel patches hash. (@TODO currently hashing files directly, use Python patching proper)
	# - Get the kernel .config hash, composed of
	#   - KERNELCONFIG .config hash (contents)
	#   - extensions mechanism, each hook has an array of hashes that is then hashed together; see the hooks docs.
	# - Hash of the relevant lib/ bash sources involved, say compilation/kernel*.sh etc
	# All those produce a version string like:
	# 6.2-rc7-S4ec5-D1c5d-P0000-Ca00bHc1f3-B6d7b
	# - This code first calculates the globally uniquely-identifying version string for, and then builds, exactly one (01, um,
	#   uno, ein) kernel.
	# - This produces exactly one "linux-image" .deb package, and _might_ also produce "linux-dtb" and "linux-headers"
	#   packages.
	# - All the .debs have the same version string, which is included in the "Version:" field of the .deb control file.
	#   - "Version: " has special significance in Debian repo mgmt: it governs how "apt upgrade" decides what to upgrade to.
	# - Note!! how BOARD is not an input here. It is required though by the configuration step;
	#   - BOARDs can have hooks that completely change the kernel, including creating new LINUXFAMILY's 🫠
	#   - It is assumed the process to obtain "all kernels to build" involves
	#     - a loop over all boards, and then a loop over all the BOARD's KERNEL_TARGET's,
	#     - map: obtain all the *effective configurations* after all hooks are run
	#     - reduce: to "${LINUXFAMILY}-${BRANCH}", but keep an "example" BOARD= for each group, so that it can be input to
	#       this building process 🤯
	# - Also note: BOARDFAMILY is not an input here; it is merely a mechanism for BOARDs to share some common defs.
	#   - That was later (but pre-armbian-next) made more complicated by sourcing "families/includes/<xxx>_common.inc"
	# - 👉 tl;dr: Armbian kernels can't have per-board patches or configs; "family code" is a lie; repo management is hell.
	debug_var BOARD              # Heh.
	debug_var BOARDFAMILY        # Heh.
	debug_var KERNEL_MAJOR_MINOR # Double heh. transitional stuff, from when armbian-next began. 🤣
	debug_var BRANCH
	debug_var REVISION
	debug_var KERNELSOURCE
	debug_var KERNELBRANCH
	debug_var LINUXFAMILY
	debug_var KERNELPATCHDIR

	declare short_hash_size=4

	declare -A GIT_INFO=([GIT_SOURCE]="${KERNELSOURCE}" [GIT_REF]="${KERNELBRANCH}")
	run_memoized GIT_INFO "git2info" memoized_git_ref_to_info "include_makefile_body"
	debug_dict GIT_INFO
	declare short_sha1="${GIT_INFO[SHA1]:0:${short_hash_size}}"

	# get the drivers hash...
	declare kernel_drivers_patch_hash
	do_with_hooks kernel_drivers_create_patches_hash_only
	declare kernel_drivers_hash_short="${kernel_drivers_patch_hash:0:${short_hash_size}}"

	# get the kernel patches hash...
	# @TODO: why not just delegate this to the python patching, with some "dry-run" / hash-only option?
	declare patches_hash="undetermined"
	declare hash_files="undetermined"
	calculate_hash_for_all_files_in_dirs "${SRC}/patch/kernel/${KERNELPATCHDIR}" "${USERPATCHES_PATH}/kernel/${KERNELPATCHDIR}"
	patches_hash="${hash_files}"
	declare kernel_patches_hash_short="${patches_hash:0:${short_hash_size}}"

	# get the .config hash... also userpatches...
	declare kernel_config_source_filename="" # which actual .config was used?
	prepare_kernel_config_core_or_userpatches
	declare hash_files="undetermined"
	calculate_hash_for_files "${kernel_config_source_filename}"
	config_hash="${hash_files}"
	declare config_hash_short="${config_hash:0:${short_hash_size}}"

	# run the extensions. they _must_ behave, and not try to modify the .config, instead just fill kernel_config_modifying_hashes
	declare kernel_config_modifying_hashes_hash="undetermined"
	declare -a kernel_config_modifying_hashes=()
	call_extensions_kernel_config
	kernel_config_modification_hash="$(echo "${kernel_config_modifying_hashes[@]}" | sha256sum | cut -d' ' -f1)"
	kernel_config_modification_hash="${kernel_config_modification_hash:0:16}" # "long hash"
	declare kernel_config_modification_hash_short="${kernel_config_modification_hash:0:${short_hash_size}}"

	# @TODO: include the compiler version? host release?

	# get the hashes of the lib/ bash sources involved...
	declare hash_files="undetermined"
	calculate_hash_for_files "${SRC}"/lib/functions/compilation/kernel*.sh # maybe also this file, "${SRC}"/lib/functions/artifacts/kernel.sh
	declare bash_hash="${hash_files}"
	declare bash_hash_short="${bash_hash:0:${short_hash_size}}"

	# outer scope
	artifact_version="${GIT_INFO[MAKEFILE_VERSION]}-S${short_sha1}-D${kernel_drivers_hash_short}-P${kernel_patches_hash_short}-C${config_hash_short}H${kernel_config_modification_hash_short}-B${bash_hash_short}"

	declare -a reasons=(
		"version \"${GIT_INFO[MAKEFILE_FULL_VERSION]}\""
		"git revision \"${GIT_INFO[SHA1]}\""
		"codename \"${GIT_INFO[MAKEFILE_CODENAME]}\""
		"drivers hash \"${kernel_drivers_patch_hash}\""
		"patches hash \"${patches_hash}\""
		".config hash \"${config_hash}\""
		".config hook hash \"${kernel_config_modification_hash}\""
		"framework bash hash \"${bash_hash}\""
	)
	artifact_version_reason="${reasons[*]}" # outer scope

	# map what "compile_kernel()" will produce - legacy deb names and versions
	# linux-image is always produced...
	artifact_map_packages=(["linux-image"]="linux-image-${BRANCH}-${LINUXFAMILY}")
	artifact_map_debs=(["linux-image"]="linux-image-${BRANCH}-${LINUXFAMILY}_${artifact_version}_${ARCH}.deb")

	# some/most kernels also have working headers...
	if [[ "${KERNEL_HAS_WORKING_HEADERS:-"no"}" == "yes" ]]; then
		artifact_map_packages+=(["linux-headers"]="linux-headers-${BRANCH}-${LINUXFAMILY}")
		artifact_map_debs+=(["linux-headers"]="linux-headers-${BRANCH}-${LINUXFAMILY}_${artifact_version}_${ARCH}.deb")
	fi

	# x86, especially, does not have working dtbs...
	if [[ "${KERNEL_BUILD_DTBS:-"yes"}" == "yes" ]]; then
		artifact_map_packages+=(["linux-dtb"]="linux-dtb-${BRANCH}-${LINUXFAMILY}")
		artifact_map_debs+=(["linux-dtb"]="linux-dtb-${BRANCH}-${LINUXFAMILY}_${artifact_version}_${ARCH}.deb")
	fi

	artifact_name="kernel-${LINUXFAMILY}-${BRANCH}"
	artifact_type="deb-tar" # this triggers processing of .deb files in the maps to produce a tarball
	artifact_base_dir="${DEB_STORAGE}"
	artifact_final_file="${DEB_STORAGE}/kernel-${LINUXFAMILY}-${BRANCH}_${artifact_version}.tar"

	return 0
}

function artifact_kernel_build_from_sources() {
	compile_kernel
	display_alert "Kernel build finished" "${artifact_version_reason}" "info"
}

function artifact_kernel_cli_adapter_pre_run() {
	declare -g ARMBIAN_COMMAND_REQUIRE_BASIC_DEPS="yes" # Require prepare_host_basic to run before the command.
	# "gimme root on a Linux machine"
	cli_standard_relaunch_docker_or_sudo
}

function artifact_kernel_cli_adapter_config_prep() {
	declare KERNEL_ONLY="yes"                             # @TODO: this is a hack, for the board/family code's benefit...
	use_board="yes" prep_conf_main_minimal_ni < /dev/null # no stdin for this, so it bombs if tries to be interactive.
}

function artifact_kernel_get_default_oci_target() {
	artifact_oci_target_base="ghcr.io/rpardini/armbian-release/"
}

function artifact_kernel_is_available_in_local_cache() {
	is_artifact_available_in_local_cache
}

function artifact_kernel_is_available_in_remote_cache() {
	is_artifact_available_in_remote_cache
}

function artifact_kernel_obtain_from_remote_cache() {
	obtain_artifact_from_remote_cache
}

function artifact_kernel_deploy_to_remote_cache() {
	upload_artifact_to_oci
}
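
Putting the pieces of artifact_kernel_prepare_version() together, the example version from the comment block, 6.2-rc7-S4ec5-D1c5d-P0000-Ca00bHc1f3-B6d7b, decomposes as (hash values illustrative):

	6.2-rc7  -S4ec5  -D1c5d  -P0000  -Ca00b  Hc1f3  -B6d7b
	   |        |       |       |       |      |       `-- framework bash sources hash
	   |        |       |       |       |      `-- .config hook-modification hash (extensions)
	   |        |       |       |       `-- .config contents hash
	   |        |       |       `-- kernel patches hash
	   |        |       `-- kernel drivers patch hash
	   |        `-- git SHA1 of the kernel commit
	   `-- Makefile version at that commit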


@@ -0,0 +1,136 @@
function artifact_rootfs_prepare_version() {
	artifact_version="undetermined"        # outer scope
	artifact_version_reason="undetermined" # outer scope

	assert_requires_aggregation # Bombs if aggregation has not run

	declare -g rootfs_cache_id="none_yet"
	calculate_rootfs_cache_id # sets rootfs_cache_id

	display_alert "Going to build rootfs" "packages_hash: '${packages_hash:-}' cache_type: '${cache_type:-}' rootfs_cache_id: '${rootfs_cache_id}'" "info"

	declare -a reasons=(
		"arch \"${ARCH}\""
		"release \"${RELEASE}\""
		"type \"${cache_type}\""
		"cache_id \"${rootfs_cache_id}\""
	)

	# @TODO: "rootfs_cache_id" contains "cache_type", split so we don't repeat ourselves
	# @TODO: gotta include the extensions rootfs-modifying id to cache_type...

	# outer scope
	artifact_version="${rootfs_cache_id}"
	artifact_version_reason="${reasons[*]}"
	artifact_name="rootfs/rootfs-${ARCH}/rootfs-${ARCH}-${RELEASE}-${cache_type}"
	artifact_type="tar.zst"
	artifact_base_dir="${SRC}/cache/rootfs"
	artifact_final_file="${SRC}/cache/rootfs/${ARCH}-${RELEASE}-${rootfs_cache_id}.tar.zst"

	return 0
}

function artifact_rootfs_build_from_sources() {
	debug_var artifact_final_file
	debug_var artifact_final_file_basename

	# Creates a cleanup handler 'trap_handler_cleanup_rootfs_and_image'
	LOG_SECTION="prepare_rootfs_build_params_and_trap" do_with_logging prepare_rootfs_build_params_and_trap

	debug_var artifact_final_file
	debug_var artifact_final_file_basename

	# validate that tmpfs_estimated_size is set and higher than zero, or exit_with_error
	[[ -z ${tmpfs_estimated_size} ]] && exit_with_error "tmpfs_estimated_size is not set"
	[[ ${tmpfs_estimated_size} -le 0 ]] && exit_with_error "tmpfs_estimated_size is not higher than zero"

	# The "rootfs" CLI skips over a lot and goes straight to creating the rootfs. It doesn't check caches etc.
	LOG_SECTION="create_new_rootfs_cache" do_with_logging create_new_rootfs_cache

	debug_var artifact_final_file
	debug_var artifact_final_file_basename
	debug_var cache_name
	debug_var cache_fname

	if [[ ! -f "${artifact_final_file}" ]]; then
		exit_with_error "Rootfs cache file '${artifact_final_file}' does not exist after create_new_rootfs_cache()."
	else
		display_alert "Rootfs cache file '${artifact_final_file}' exists after create_new_rootfs_cache()." "YESSS" "warn"
	fi

	# obtain the size, in MiB, of "${SDCARD}" at this point.
	declare -i rootfs_size_mib
	rootfs_size_mib=$(du -sm "${SDCARD}" | awk '{print $1}')
	display_alert "Actual rootfs size" "${rootfs_size_mib}MiB after basic/cache" ""

	# warn if rootfs_size_mib is higher than the tmpfs_estimated_size
	if [[ ${rootfs_size_mib} -gt ${tmpfs_estimated_size} ]]; then
		display_alert "Rootfs actual size is larger than estimated tmpfs size after basic/cache" "${rootfs_size_mib}MiB > ${tmpfs_estimated_size}MiB" "wrn"
	fi

	# Run the cleanup handler.
	execute_and_remove_cleanup_handler trap_handler_cleanup_rootfs_and_image

	return 0
}

function artifact_rootfs_cli_adapter_pre_run() {
	declare -g ARMBIAN_COMMAND_REQUIRE_BASIC_DEPS="yes" # Require prepare_host_basic to run before the command.
	# "gimme root on a Linux machine"
	cli_standard_relaunch_docker_or_sudo
}

function artifact_rootfs_cli_adapter_config_prep() {
	declare -g ROOTFS_COMPRESSION_RATIO="${ROOTFS_COMPRESSION_RATIO:-"15"}" # default: compress stronger when making rootfs cache

	# If BOARD is set, use it to convert to an ARCH.
	if [[ -n ${BOARD} ]]; then
		display_alert "BOARD is set, converting to ARCH for rootfs building" "'BOARD=${BOARD}'" "warn"
		# Convert BOARD to ARCH; source the BOARD and FAMILY stuff
		LOG_SECTION="config_source_board_file" do_with_conditional_logging config_source_board_file
		LOG_SECTION="source_family_config_and_arch" do_with_conditional_logging source_family_config_and_arch
		display_alert "Done sourcing board file" "'${BOARD}' - arch: '${ARCH}'" "warn"
	fi

	declare -a vars_need_to_be_set=("RELEASE" "ARCH")
	# loop through all vars and check if they are not set and bomb out if so
	for var in "${vars_need_to_be_set[@]}"; do
		if [[ -z ${!var} ]]; then
			exit_with_error "Param '${var}' is not set but needs to be set for rootfs CLI."
		fi
	done

	declare -r __wanted_rootfs_arch="${ARCH}"
	declare -g -r RELEASE="${RELEASE}" # make readonly for finding who tries to change it
	declare -g -r NEEDS_BINFMT="yes"   # make sure binfmts are installed during prepare_host_interactive

	# prep_conf_main_only_rootfs_ni is prep_conf_main_only_rootfs_ni() + mark_aggregation_required_in_default_build_start()
	prep_conf_main_only_rootfs_ni < /dev/null # no stdin for this, so it bombs if tries to be interactive.

	declare -g -r ARCH="${ARCH}" # make readonly for finding who tries to change it
	if [[ "${ARCH}" != "${__wanted_rootfs_arch}" ]]; then
		exit_with_error "Param 'ARCH' is set to '${ARCH}' after config, but different from wanted '${__wanted_rootfs_arch}'"
	fi
}

function artifact_rootfs_get_default_oci_target() {
	artifact_oci_target_base="ghcr.io/rpardini/armbian-release/"
}

function artifact_rootfs_is_available_in_local_cache() {
	is_artifact_available_in_local_cache
}

function artifact_rootfs_is_available_in_remote_cache() {
	is_artifact_available_in_remote_cache
}

function artifact_rootfs_obtain_from_remote_cache() {
	obtain_artifact_from_remote_cache
}

function artifact_rootfs_deploy_to_remote_cache() {
	upload_artifact_to_oci
}
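
With folders now part of artifact_name, a rootfs artifact for, say, arm64 Jammy with a "cli" cache_type (values illustrative) ends up addressed as:

	artifact_name:       rootfs/rootfs-arm64/rootfs-arm64-jammy-cli
	artifact_final_file: ${SRC}/cache/rootfs/arm64-jammy-<rootfs_cache_id>.tar.zst
	OCI target:          <oci_target_base>rootfs/rootfs-arm64/rootfs-arm64-jammy-cli:<rootfs_cache_id>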


@@ -1,17 +1,4 @@
-function artifact_uboot_cli_adapter_pre_run() {
-	declare -g ARMBIAN_COMMAND_REQUIRE_BASIC_DEPS="yes" # Require prepare_host_basic to run before the command.
-	# "gimme root on a Linux machine"
-	cli_standard_relaunch_docker_or_sudo
-}
-
-function artifact_uboot_cli_adapter_config_prep() {
-	declare KERNEL_ONLY="yes"                             # @TODO: this is a hack, for the board/family code's benefit...
-	use_board="yes" prep_conf_main_minimal_ni < /dev/null # no stdin for this, so it bombs if tries to be interactive.
-}
-
 function artifact_uboot_prepare_version() {
-	display_alert "artifact_uboot_XXXXXX" "artifact_uboot_XXXXXX" "warn"
 	artifact_version="undetermined"        # outer scope
 	artifact_version_reason="undetermined" # outer scope
@@ -31,6 +18,7 @@ function artifact_uboot_prepare_version() {
 	debug_var BOOTBRANCH
 	debug_var BOOTPATCHDIR
 	debug_var BOARD
+	debug_var BRANCH

 	declare short_hash_size=4
@@ -57,7 +45,6 @@ function artifact_uboot_prepare_version() {
 	# outer scope
 	artifact_version="${GIT_INFO[MAKEFILE_VERSION]}-S${short_sha1}-P${uboot_patches_hash_short}-B${bash_hash_short}"
-	# @TODO: validate it begins with a digit, and is at max X chars long.

 	declare -a reasons=(
 		"version \"${GIT_INFO[MAKEFILE_FULL_VERSION]}\""
@@ -66,51 +53,26 @@ function artifact_uboot_prepare_version() {
"framework bash hash \"${bash_hash}\"" "framework bash hash \"${bash_hash}\""
) )
artifact_version_reason="${reasons[*]}" # outer scope # @TODO better artifact_version_reason="${reasons[*]}" # outer scope
# now, one for each file in the artifact... artifact_map_packages=(
artifact_map_versions=( ["uboot"]="linux-u-boot-${BOARD}-${BRANCH}"
["u-boot"]="${artifact_version}"
) )
# map what "compile_uboot()" will produce - legacy deb names and versions artifact_map_debs=(
artifact_map_versions_legacy=( ["uboot"]="linux-u-boot-${BOARD}-${BRANCH}_${artifact_version}_${ARCH}.deb"
["linux-u-boot-${BRANCH}-${BOARD}"]="${REVISION}"
) )
# now, one for each file in the artifact... single package, so just one entry artifact_name="uboot-${BOARD}-${BRANCH}"
artifact_map_versions=( artifact_type="deb"
["linux-u-boot-${BRANCH}-${BOARD}"]="${artifact_version}" artifact_base_dir="${DEB_STORAGE}"
) artifact_final_file="${DEB_STORAGE}/linux-u-boot-${BOARD}-${BRANCH}_${artifact_version}_${ARCH}.deb"
return 0 return 0
} }
function artifact_uboot_is_available_in_local_cache() {
display_alert "artifact_uboot_XXXXXX" "artifact_uboot_XXXXXX" "warn"
# Check if the exact DEB exists on disk (output/debs), nothing else.
# This is more about composing the .deb filename than checking if it exists.
}
function artifact_uboot_is_available_in_remote_cache() {
display_alert "artifact_uboot_XXXXXX" "artifact_uboot_XXXXXX" "warn"
# Check if the DEB can be obtained remotely, eg:
# - in ghcr.io (via ORAS)
# - in an apt repo (via apt-get), eg, Armbian's repo.
# this is only about availability, not download. use HEAD requests / metadata-only pulls
# what about multiple possible OCI endpoints / URLs? try them all?
}
function artifact_uboot_obtain_from_remote_cache() {
display_alert "artifact_uboot_XXXXXX" "artifact_uboot_XXXXXX" "warn"
# Having confirmed it is available remotely, go download it into the local cache.
# is_available_in_local_cache() must return =yes after this.
# could be a good idea to transfer some SHA256 id from "is_available" to "obtain" to avoid overhead? or just do it together?
}
function artifact_uboot_build_from_sources() { function artifact_uboot_build_from_sources() {
display_alert "artifact_uboot_XXXXXX" "artifact_uboot_XXXXXX" "warn" LOG_SECTION="fetch_and_build_host_tools" do_with_logging fetch_and_build_host_tools
# having failed all the cache obtaining, build it from sources.
if [[ -n "${ATFSOURCE}" && "${ATFSOURCE}" != "none" ]]; then if [[ -n "${ATFSOURCE}" && "${ATFSOURCE}" != "none" ]]; then
LOG_SECTION="compile_atf" do_with_logging compile_atf LOG_SECTION="compile_atf" do_with_logging compile_atf
@@ -119,12 +81,36 @@ function artifact_uboot_build_from_sources() {
 	declare uboot_git_revision="not_determined_yet"
 	LOG_SECTION="uboot_prepare_git" do_with_logging_unless_user_terminal uboot_prepare_git
 	LOG_SECTION="compile_uboot" do_with_logging compile_uboot
-
-	capture_rename_legacy_debs_into_artifacts # has its own logging section
 }
+
+function artifact_uboot_cli_adapter_pre_run() {
+	declare -g ARMBIAN_COMMAND_REQUIRE_BASIC_DEPS="yes" # Require prepare_host_basic to run before the command.
+	# "gimme root on a Linux machine"
+	cli_standard_relaunch_docker_or_sudo
+}
+
+function artifact_uboot_cli_adapter_config_prep() {
+	declare KERNEL_ONLY="yes"                             # @TODO: this is a hack, for the board/family code's benefit...
+	use_board="yes" prep_conf_main_minimal_ni < /dev/null # no stdin for this, so it bombs if tries to be interactive.
+}
+
+function artifact_uboot_get_default_oci_target() {
+	artifact_oci_target_base="ghcr.io/rpardini/armbian-release/"
+}
+
+function artifact_uboot_is_available_in_local_cache() {
+	is_artifact_available_in_local_cache
+}
+
+function artifact_uboot_is_available_in_remote_cache() {
+	is_artifact_available_in_remote_cache
+}
+
+function artifact_uboot_obtain_from_remote_cache() {
+	obtain_artifact_from_remote_cache
+}

 function artifact_uboot_deploy_to_remote_cache() {
-	display_alert "artifact_uboot_XXXXXX" "artifact_uboot_XXXXXX" "warn"
-	# having built a new artifact, deploy it to the remote cache.
-	# consider multiple targets, retries, etc.
+	upload_artifact_to_oci
 }


@@ -0,0 +1,300 @@
function create_artifact_functions() {
	declare -a funcs=(
		"cli_adapter_pre_run" "cli_adapter_config_prep"
		"prepare_version"
		"get_default_oci_target"
		"is_available_in_local_cache" "is_available_in_remote_cache" "obtain_from_remote_cache"
		"deploy_to_remote_cache"
		"build_from_sources"
	)
	for func in "${funcs[@]}"; do
		declare impl_func="artifact_${chosen_artifact_impl}_${func}"
		if [[ $(type -t "${impl_func}") == function ]]; then
			declare cmd
			cmd="$(
				cat <<- ARTIFACT_DEFINITION
					function artifact_${func}() {
						display_alert "Calling artifact function" "${impl_func}() \$*" "debug"
						${impl_func} "\$@"
					}
				ARTIFACT_DEFINITION
			)"
			eval "${cmd}"
		else
			exit_with_error "Missing artifact implementation function '${impl_func}'"
		fi
	done

	# If ${chosen_artifact} is in ${DONT_BUILD_ARTIFACTS}, override the build function with an error.
	if [[ "${DONT_BUILD_ARTIFACTS}" = *"${chosen_artifact}"* ]]; then
		display_alert "Artifact '${chosen_artifact}' is in DONT_BUILD_ARTIFACTS, overriding build function with error" "DONT_BUILD_ARTIFACTS=${chosen_artifact}" "debug"
		declare cmd
		cmd="$(
			cat <<- ARTIFACT_DEFINITION
				function artifact_build_from_sources() {
					exit_with_error "Artifact '${chosen_artifact}' is in DONT_BUILD_ARTIFACTS."
				}
			ARTIFACT_DEFINITION
		)"
		eval "${cmd}"
	else
		display_alert "Artifact '${chosen_artifact}' is not in DONT_BUILD_ARTIFACTS, using default build function" "DONT_BUILD_ARTIFACTS!=${chosen_artifact}" "debug"
	fi
}

function initialize_artifact() {
	declare -g chosen_artifact="${1}"
	# can't be empty, or contain spaces or commas
	[[ "x${chosen_artifact}x" == "xx" ]] && exit_with_error "Artifact name is empty"
	[[ "${chosen_artifact}" == *" "* ]] && exit_with_error "Artifact name cannot contain spaces"
	[[ "${chosen_artifact}" == *","* ]] && exit_with_error "Artifact name cannot contain commas"
	armbian_register_artifacts
	declare -g chosen_artifact_impl="${ARMBIAN_ARTIFACTS_TO_HANDLERS_DICT["${chosen_artifact}"]}"
	[[ "x${chosen_artifact_impl}x" == "xx" ]] && exit_with_error "Unknown artifact '${chosen_artifact}'"
	display_alert "artifact" "${chosen_artifact} :: ${chosen_artifact_impl}()" "info"
	create_artifact_functions
}
function obtain_complete_artifact() {
	declare -g artifact_name="undetermined"
	declare -g artifact_type="undetermined"
	declare -g artifact_version="undetermined"
	declare -g artifact_version_reason="undetermined"
	declare -g artifact_base_dir="undetermined"
	declare -g artifact_final_file="undetermined"
	declare -g artifact_final_file_basename="undetermined"
	declare -g artifact_full_oci_target="undetermined"
	declare -A -g artifact_map_packages=()
	declare -A -g artifact_map_debs=()

	# Check if REVISION is set, otherwise exit_with_error
	[[ "x${REVISION}x" == "xx" ]] && exit_with_error "REVISION is not set"

	# Contentious; it might be that prepare_version is complex enough to warrant more than 1 logging section.
	LOG_SECTION="artifact_prepare_version" do_with_logging artifact_prepare_version

	debug_var artifact_name
	debug_var artifact_type
	debug_var artifact_version
	debug_var artifact_version_reason
	debug_var artifact_base_dir
	debug_var artifact_final_file
	debug_dict artifact_map_packages
	debug_dict artifact_map_debs

	# sanity checks. artifact_version/artifact_version_reason/artifact_final_file *must* be set
	[[ "x${artifact_name}x" == "xx" || "${artifact_name}" == "undetermined" ]] && exit_with_error "artifact_name is not set after artifact_prepare_version"
	[[ "x${artifact_type}x" == "xx" || "${artifact_type}" == "undetermined" ]] && exit_with_error "artifact_type is not set after artifact_prepare_version"
	[[ "x${artifact_version}x" == "xx" || "${artifact_version}" == "undetermined" ]] && exit_with_error "artifact_version is not set after artifact_prepare_version"
	[[ "x${artifact_version_reason}x" == "xx" || "${artifact_version_reason}" == "undetermined" ]] && exit_with_error "artifact_version_reason is not set after artifact_prepare_version"
	[[ "x${artifact_base_dir}x" == "xx" || "${artifact_base_dir}" == "undetermined" ]] && exit_with_error "artifact_base_dir is not set after artifact_prepare_version"
	[[ "x${artifact_final_file}x" == "xx" || "${artifact_final_file}" == "undetermined" ]] && exit_with_error "artifact_final_file is not set after artifact_prepare_version"

	# validate artifact_type... it must be one of the supported types
	case "${artifact_type}" in
		deb | deb-tar)
			# validate artifact_version begins with a digit
			[[ "${artifact_version}" =~ ^[0-9] ]] || exit_with_error "${artifact_type}: artifact_version '${artifact_version}' does not begin with a digit"
			;;
		tar.zst)
			: # valid, no restrictions on tar.zst versioning
			;;
		*)
			exit_with_error "artifact_type '${artifact_type}' is not supported"
			;;
	esac

	# set those as outputs for GHA
	github_actions_add_output artifact_name "${artifact_name}"
	github_actions_add_output artifact_type "${artifact_type}"
	github_actions_add_output artifact_version "${artifact_version}"
	github_actions_add_output artifact_version_reason "${artifact_version_reason}"
	github_actions_add_output artifact_final_file "${artifact_final_file}"

	# ensure artifact_base_dir exists
	mkdir -p "${artifact_base_dir}"

	# compute artifact_final_file relative to ${SRC} but don't use realpath
	declare -g artifact_file_relative="${artifact_final_file#${SRC}/}"
	github_actions_add_output artifact_file_relative "${artifact_file_relative}"

	# just the file name, sans any path
	declare -g artifact_final_file_basename="undetermined"
	artifact_final_file_basename="$(basename "${artifact_final_file}")"
	github_actions_add_output artifact_final_file_basename "${artifact_final_file_basename}"

	debug_var artifact_final_file_basename
	debug_var artifact_file_relative

	# @TODO: possibly stop here if only for up-to-date-checking

	# Determine OCI coordinates. OCI_TARGET_BASE overrides the default proposed by the artifact.
	declare artifact_oci_target_base="undetermined"
	if [[ -n "${OCI_TARGET_BASE}" ]]; then
		artifact_oci_target_base="${OCI_TARGET_BASE}"
	else
		artifact_get_default_oci_target
	fi
	[[ -z "${artifact_oci_target_base}" ]] && exit_with_error "No artifact_oci_target_base defined."
	declare -g artifact_full_oci_target="${artifact_oci_target_base}${artifact_name}:${artifact_version}"

	declare -g artifact_exists_in_local_cache="undetermined"
	declare -g artifact_exists_in_remote_cache="undetermined"

	if [[ "${ARTIFACT_IGNORE_CACHE}" != "yes" ]]; then
		LOG_SECTION="artifact_is_available_in_local_cache" do_with_logging artifact_is_available_in_local_cache
		debug_var artifact_exists_in_local_cache

		# If available in local cache, we're done (except for deb-tar which needs unpacking...)
		if [[ "${artifact_exists_in_local_cache}" == "yes" ]]; then
			display_alert "artifact" "exists in local cache: ${artifact_name} ${artifact_version}" "cachehit"
			if [[ "${skip_unpack_if_found_in_caches:-"no"}" == "yes" ]]; then
				display_alert "artifact" "skipping unpacking as requested" "info"
			else
				LOG_SECTION="unpack_artifact_from_local_cache" do_with_logging unpack_artifact_from_local_cache
			fi
			if [[ "${ignore_local_cache:-"no"}" == "yes" ]]; then
				display_alert "artifact" "ignoring local cache as requested" "info"
			else
				display_alert "artifact" "present in local cache: ${artifact_name} ${artifact_version}" "cachehit"
				return 0
			fi
		fi

		LOG_SECTION="artifact_is_available_in_remote_cache" do_with_logging artifact_is_available_in_remote_cache
		debug_var artifact_exists_in_remote_cache

		if [[ "${artifact_exists_in_remote_cache}" == "yes" ]]; then
			display_alert "artifact" "exists in remote cache: ${artifact_name} ${artifact_version}" "cachehit"
			if [[ "${skip_unpack_if_found_in_caches:-"no"}" == "yes" ]]; then
				display_alert "artifact" "skipping obtain from remote & unpacking as requested" "info"
				return 0
			fi
			LOG_SECTION="artifact_obtain_from_remote_cache" do_with_logging artifact_obtain_from_remote_cache
			LOG_SECTION="unpack_artifact_from_local_cache" do_with_logging unpack_artifact_from_local_cache
			display_alert "artifact" "obtained from remote cache: ${artifact_name} ${artifact_version}" "cachehit"
			return 0
		fi
	fi

	if [[ "${artifact_exists_in_local_cache}" != "yes" && "${artifact_exists_in_remote_cache}" != "yes" ]]; then
		# Not found in any cache, so we need to build it.
		# @TODO: if deploying to remote cache, force high compression, DEB_COMPRESS="xz"
		artifact_build_from_sources # definitely will end up having its own logging sections

		# pack the artifact to local cache (eg: for deb-tar)
		LOG_SECTION="pack_artifact_to_local_cache" do_with_logging pack_artifact_to_local_cache

		# Sanity check: the artifact_final_file should exist now.
		if [[ ! -f "${artifact_final_file}" ]]; then
			exit_with_error "Artifact file ${artifact_final_file} did not exist, after artifact_build_from_sources()."
		else
			display_alert "Artifact file exists" "${artifact_final_file} YESSS" "warn"
		fi
	fi

	if [[ "${deploy_to_remote:-"no"}" == "yes" ]]; then
		LOG_SECTION="artifact_deploy_to_remote_cache" do_with_logging artifact_deploy_to_remote_cache
	fi
}

# This is meant to be run after config, inside default build.
function build_artifact_for_image() {
	initialize_artifact "${WHAT}"
	obtain_complete_artifact
}
function pack_artifact_to_local_cache() {
	if [[ "${artifact_type}" == "deb-tar" ]]; then
		declare -a files_to_tar=()
		run_host_command_logged tar -C "${artifact_base_dir}" -cvf "${artifact_final_file}" "${artifact_map_debs[@]}"
		display_alert "Created deb-tar artifact" "deb-tar: ${artifact_final_file}" "info"
	fi
}

function unpack_artifact_from_local_cache() {
	if [[ "${artifact_type}" == "deb-tar" ]]; then
		declare any_missing="no"
		declare deb_name
		for deb_name in "${artifact_map_debs[@]}"; do
			declare new_name_full="${artifact_base_dir}/${deb_name}"
			if [[ ! -f "${new_name_full}" ]]; then
				display_alert "Unpacking artifact" "deb-tar: ${artifact_final_file_basename} missing: ${new_name_full}" "warn"
				any_missing="yes"
			fi
		done
		if [[ "${any_missing}" == "yes" ]]; then
			display_alert "Unpacking artifact" "deb-tar: ${artifact_final_file_basename}" "info"
			run_host_command_logged tar -C "${artifact_base_dir}" -xvf "${artifact_final_file}"
		fi

		# sanity check? did unpacking produce the expected files?
		declare any_missing="no"
		declare deb_name
		for deb_name in "${artifact_map_debs[@]}"; do
			declare new_name_full="${artifact_base_dir}/${deb_name}"
			if [[ ! -f "${new_name_full}" ]]; then
				display_alert "Unpacking artifact" "AFTER UNPACK! deb-tar: ${artifact_final_file_basename} missing: ${new_name_full}" "err"
				any_missing="yes"
			fi
		done
		if [[ "${any_missing}" == "yes" ]]; then
			display_alert "Files missing from deb-tar" "this is a bug, please report it. artifact_name: '${artifact_name}' artifact_version: '${artifact_version}'" "err"
		fi
	fi
	return 0
}

function upload_artifact_to_oci() {
	# check artifact_full_oci_target is set
	if [[ -z "${artifact_full_oci_target}" ]]; then
		exit_with_error "artifact_full_oci_target is not set"
	fi
	display_alert "Pushing to OCI" "'${artifact_final_file}' -> '${artifact_full_oci_target}'" "info"
	oras_push_artifact_file "${artifact_full_oci_target}" "${artifact_final_file}" "${artifact_name} - ${artifact_version} - ${artifact_version_reason} - type: ${artifact_type}"
}

function is_artifact_available_in_local_cache() {
	artifact_exists_in_local_cache="no" # outer scope
	if [[ -f "${artifact_final_file}" ]]; then
		artifact_exists_in_local_cache="yes" # outer scope
	fi
	return 0
}

function is_artifact_available_in_remote_cache() {
	# check artifact_full_oci_target is set
	if [[ -z "${artifact_full_oci_target}" ]]; then
		exit_with_error "artifact_full_oci_target is not set"
	fi

	declare oras_has_manifest="undetermined"
	declare oras_manifest_json="undetermined"
	declare oras_manifest_description="undetermined"
	oras_get_artifact_manifest "${artifact_full_oci_target}"

	display_alert "oras_has_manifest" "${oras_has_manifest}" "debug"
	display_alert "oras_manifest_description" "${oras_manifest_description}" "debug"
	display_alert "oras_manifest_json" "${oras_manifest_json}" "debug"

	if [[ "${oras_has_manifest}" == "yes" ]]; then
		display_alert "Artifact is available in remote cache" "${artifact_full_oci_target} - '${oras_manifest_description}'" "info"
		artifact_exists_in_remote_cache="yes"
	else
		display_alert "Artifact is not available in remote cache" "${artifact_full_oci_target}" "info"
		artifact_exists_in_remote_cache="no"
	fi
	return 0
}

function obtain_artifact_from_remote_cache() {
	display_alert "Obtaining artifact from remote cache" "${artifact_full_oci_target} into ${artifact_final_file_basename}" "info"
	oras_pull_artifact_file "${artifact_full_oci_target}" "${artifact_base_dir}" "${artifact_final_file_basename}"
	return 0
}
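
For clarity: after initialize_artifact "kernel" runs, create_artifact_functions() has eval'd one thin dispatcher per entry in funcs, equivalent to (reconstructed from the heredoc above):

	function artifact_prepare_version() {
		display_alert "Calling artifact function" "artifact_kernel_prepare_version() $*" "debug"
		artifact_kernel_prepare_version "$@"
	}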


@@ -1,10 +1,17 @@
 function armbian_register_artifacts() {
 	declare -g -A ARMBIAN_ARTIFACTS_TO_HANDLERS_DICT=(
+		#["firmware"]="firmware" # deb-tar
 		["kernel"]="kernel"
+		# deb
 		["u-boot"]="uboot"
 		["uboot"]="uboot"
+		["firmware"]="firmware"
+		["full_firmware"]="full_firmware"
+		# tar.zst
+		["rootfs"]="rootfs"
 	)
 }


@@ -1,150 +0,0 @@
function artifact_kernel_cli_adapter_pre_run() {
	declare -g ARMBIAN_COMMAND_REQUIRE_BASIC_DEPS="yes" # Require prepare_host_basic to run before the command.
	# "gimme root on a Linux machine"
	cli_standard_relaunch_docker_or_sudo
}

function artifact_kernel_cli_adapter_config_prep() {
	declare KERNEL_ONLY="yes"                             # @TODO: this is a hack, for the board/family code's benefit...
	use_board="yes" prep_conf_main_minimal_ni < /dev/null # no stdin for this, so it bombs if tries to be interactive.
}

function artifact_kernel_prepare_version() {
	display_alert "artifact_kernel_XXXXXX" "artifact_kernel_XXXXXX" "warn"
	artifact_version="undetermined"        # outer scope
	artifact_version_reason="undetermined" # outer scope

	# Prepare the version, "sans-repos": just the armbian/build repo contents are available.
	# It is OK to reach out to the internet for a curl or ls-remote, but not for a git clone.
	# - Given KERNELSOURCE and KERNELBRANCH, get:
	#   - SHA1 of the commit (this is generic... and used for other pkgs)
	#   - The first 10 lines of the root Makefile at that commit (cached lookup, same SHA1=same Makefile)
	#     - This gives us the full version plus codename.
	#     - Make sure this is sane, ref KERNEL_MAJOR_MINOR.
	# - Get the drivers patch hash (given LINUXFAMILY and the vX.Z.Y version)
	# - Get the kernel patches hash. (could just hash the KERNELPATCHDIR non-disabled contents, or use Python patching proper?)
	# - Get the kernel .config hash, composed of
	#   - KERNELCONFIG? .config hash
	#   - extensions mechanism, have an array of hashes that is then hashed together.
	# - Hash of the relevant lib/ bash sources involved, say compilation-kernel*.sh etc
	# All those produce a version string like:
	# 6.1.8-<4-digit-SHA1>_<4_digit_drivers>-<4_digit_patches>-<4_digit_config>-<4_digit_libs>
	# 6.2-rc5-a0b1-c2d3-e4f5-g6h7-i8j9
	debug_var BRANCH
	debug_var REVISION
	debug_var KERNELSOURCE
	debug_var KERNELBRANCH
	debug_var LINUXFAMILY
	debug_var BOARDFAMILY
	debug_var KERNEL_MAJOR_MINOR
	debug_var KERNELPATCHDIR

	declare short_hash_size=4

	declare -A GIT_INFO=([GIT_SOURCE]="${KERNELSOURCE}" [GIT_REF]="${KERNELBRANCH}")
	run_memoized GIT_INFO "git2info" memoized_git_ref_to_info "include_makefile_body"
	debug_dict GIT_INFO
	declare short_sha1="${GIT_INFO[SHA1]:0:${short_hash_size}}"

	# get the drivers hash...
	declare kernel_drivers_patch_hash
	LOG_SECTION="kernel_drivers_create_patches_hash_only" do_with_logging do_with_hooks kernel_drivers_create_patches_hash_only
	declare kernel_drivers_hash_short="${kernel_drivers_patch_hash:0:${short_hash_size}}"

	# get the kernel patches hash...
	# @TODO: why not just delegate this to the python patching, with some "dry-run" / hash-only option?
	declare patches_hash="undetermined"
	declare hash_files="undetermined"
	calculate_hash_for_all_files_in_dirs "${SRC}/patch/kernel/${KERNELPATCHDIR}" "${USERPATCHES_PATH}/kernel/${KERNELPATCHDIR}"
	patches_hash="${hash_files}"
	declare kernel_patches_hash_short="${patches_hash:0:${short_hash_size}}"

	# get the .config hash... also userpatches...
	declare kernel_config_source_filename="" # which actual .config was used?
	prepare_kernel_config_core_or_userpatches
	declare hash_files="undetermined"
	calculate_hash_for_files "${kernel_config_source_filename}"
	config_hash="${hash_files}"
	declare config_hash_short="${config_hash:0:${short_hash_size}}"

	# @TODO: get the extensions' .config modifying hashes...
	# @TODO: include the compiler version? host release?

	# get the hashes of the lib/ bash sources involved...
	declare hash_files="undetermined"
	calculate_hash_for_files "${SRC}"/lib/functions/compilation/kernel*.sh # maybe also this file, "${SRC}"/lib/functions/artifacts/kernel.sh
	declare bash_hash="${hash_files}"
	declare bash_hash_short="${bash_hash:0:${short_hash_size}}"

	# outer scope
	artifact_version="${GIT_INFO[MAKEFILE_VERSION]}-S${short_sha1}-D${kernel_drivers_hash_short}-P${kernel_patches_hash_short}-C${config_hash_short}-B${bash_hash_short}"
	# @TODO: validate it begins with a digit, and is at max X chars long.

	declare -a reasons=(
		"version \"${GIT_INFO[MAKEFILE_FULL_VERSION]}\""
		"git revision \"${GIT_INFO[SHA1]}\""
		"codename \"${GIT_INFO[MAKEFILE_CODENAME]}\""
		"drivers hash \"${kernel_drivers_patch_hash}\""
		"patches hash \"${patches_hash}\""
		".config hash \"${config_hash}\""
		"framework bash hash \"${bash_hash}\""
	)
	artifact_version_reason="${reasons[*]}" # outer scope # @TODO better

	# map what "compile_kernel()" will produce - legacy deb names and versions
	artifact_map_versions_legacy=(
		["linux-image-${BRANCH}-${LINUXFAMILY}"]="${REVISION}"
		["linux-dtb-${BRANCH}-${LINUXFAMILY}"]="${REVISION}"
		["linux-headers-${BRANCH}-${LINUXFAMILY}"]="${REVISION}"
	)

	# now, one for each file in the artifact... we've 3 packages produced, all the same version
	artifact_map_versions=(
		["linux-image-${BRANCH}-${LINUXFAMILY}"]="${artifact_version}"
		["linux-dtb-${BRANCH}-${LINUXFAMILY}"]="${artifact_version}"
		["linux-headers-${BRANCH}-${LINUXFAMILY}"]="${artifact_version}"
	)

	return 0
}

function artifact_kernel_is_available_in_local_cache() {
	display_alert "artifact_kernel_XXXXXX" "artifact_kernel_XXXXXX" "warn"
	# Check if the exact DEB exists on disk (output/debs), nothing else.
	# This is more about composing the .deb filename than checking if it exists.
}

function artifact_kernel_is_available_in_remote_cache() {
	display_alert "artifact_kernel_XXXXXX" "artifact_kernel_XXXXXX" "warn"
	# Check if the DEB can be obtained remotely, eg:
	# - in ghcr.io (via ORAS)
	# - in an apt repo (via apt-get), eg, Armbian's repo.
	# this is only about availability, not download. use HEAD requests / metadata-only pulls
	# what about multiple possible OCI endpoints / URLs? try them all?
}

function artifact_kernel_obtain_from_remote_cache() {
	display_alert "artifact_kernel_XXXXXX" "artifact_kernel_XXXXXX" "warn"
	# Having confirmed it is available remotely, go download it into the local cache.
	# is_available_in_local_cache() must return =yes after this.
	# could be a good idea to transfer some SHA256 id from "is_available" to "obtain" to avoid overhead? or just do it together?
}

function artifact_kernel_build_from_sources() {
	display_alert "artifact_kernel_XXXXXX" "artifact_kernel_XXXXXX" "warn"
	# having failed all the cache obtaining, build it from sources.
	compile_kernel
	capture_rename_legacy_debs_into_artifacts
}

function artifact_kernel_deploy_to_remote_cache() {
	display_alert "artifact_kernel_XXXXXX" "artifact_kernel_XXXXXX" "warn"
	# having built a new artifact, deploy it to the remote cache.
	# consider multiple targets, retries, etc.
}


@@ -1,109 +1,49 @@
function cli_artifact_pre_run() { function cli_artifact_pre_run() {
initialize_artifact "${WHAT:-"kernel"}" initialize_artifact "${WHAT}"
# Run the pre run adapter # Run the pre run adapter
artifact_cli_adapter_pre_run artifact_cli_adapter_pre_run
} }
function cli_artifact_run() { function cli_artifact_run() {
display_alert "artifact" "${chosen_artifact}" "warn" : "${chosen_artifact:?chosen_artifact is not set}"
display_alert "artifact" "${chosen_artifact} :: ${chosen_artifact_impl}()" "warn" : "${chosen_artifact_impl:?chosen_artifact_impl is not set}"
display_alert "artifact" "${chosen_artifact}" "debug"
display_alert "artifact" "${chosen_artifact} :: ${chosen_artifact_impl}()" "debug"
 	artifact_cli_adapter_config_prep # only if in cli.
-	# only if in cli, if not just run it bare, since we'd be already inside do_with_default_build
-	do_with_default_build obtain_complete_artifact < /dev/null
+	# When run in GHA, assume we're checking/updating the remote cache only.
+	# Local cache is ignored, and if found, it's not unpacked, either from local or remote.
+	# If remote cache is found, does nothing.
+	declare default_update_remote_only="no"
+	if [[ "${CI}" == "true" ]] && [[ "${GITHUB_ACTIONS}" == "true" ]]; then
+		display_alert "Running in GitHub Actions, assuming we're updating remote cache only" "GHA remote-only" "info"
+		default_update_remote_only="yes"
+	fi
+	declare skip_unpack_if_found_in_caches="${skip_unpack_if_found_in_caches:-"${default_update_remote_only}"}"
+	declare ignore_local_cache="${ignore_local_cache:-"${default_update_remote_only}"}"
+	declare deploy_to_remote="${deploy_to_remote:-"${default_update_remote_only}"}"
+
+	# If OCI_TARGET_BASE is explicitly set, ignore local, skip if found in remote, and deploy to remote after build.
+	if [[ -n "${OCI_TARGET_BASE}" ]]; then
+		skip_unpack_if_found_in_caches="yes"
+		ignore_local_cache="yes"
+		deploy_to_remote="yes"
+
+		# Pass ARTIFACT_USE_CACHE=yes to actually use the cache versions, but don't deploy to remote.
+		# @TODO this is confusing. each op should be individually controlled...
+		# what we want is:
+		# 1: - check remote, if not found, check local, if not found, build, then deploy to remote
+		#    - if remote found, do nothing.
+		#    - if local found, deploy it to remote (for switching targets)
+		# 2: - get from remote -> get local -> build, then DON'T deploy to remote
+		if [[ "${ARTIFACT_USE_CACHE}" == "yes" ]]; then
+			skip_unpack_if_found_in_caches="no"
+			ignore_local_cache="no"
+			deploy_to_remote="no"
+		fi
+	fi
+
+	do_with_default_build obtain_complete_artifact # @TODO: < /dev/null -- but what about kernel configure?
 }

 function create_artifact_functions() {
 	declare -a funcs=(
 		"cli_adapter_pre_run" "cli_adapter_config_prep"
 		"prepare_version"
 		"is_available_in_local_cache" "is_available_in_remote_cache" "obtain_from_remote_cache"
 		"deploy_to_remote_cache"
 		"build_from_sources"
 	)
 	for func in "${funcs[@]}"; do
 		declare impl_func="artifact_${chosen_artifact_impl}_${func}"
 		if [[ $(type -t "${impl_func}") == function ]]; then
 			declare cmd
 			cmd="$(
 				cat <<- ARTIFACT_DEFINITION
 					function artifact_${func}() {
 						display_alert "Calling artifact function" "${impl_func}() \$*" "warn"
 						${impl_func} "\$@"
 					}
 				ARTIFACT_DEFINITION
 			)"
 			eval "${cmd}"
 		else
 			exit_with_error "Missing artifact implementation function '${impl_func}'"
 		fi
 	done
 }

+function initialize_artifact() {
+	declare -g chosen_artifact="${1}"
+	armbian_register_artifacts
+	declare -g chosen_artifact_impl="${ARMBIAN_ARTIFACTS_TO_HANDLERS_DICT["${chosen_artifact}"]}"
+	[[ "x${chosen_artifact_impl}x" == "xx" ]] && exit_with_error "Unknown artifact '${chosen_artifact}'"
+	display_alert "artifact" "${chosen_artifact} :: ${chosen_artifact_impl}()" "info"
+	create_artifact_functions
+}
+
+function obtain_complete_artifact() {
+	declare -g artifact_version="undetermined"
+	declare -g artifact_version_reason="undetermined"
+	declare -A -g artifact_map_versions=()
+	declare -A -g artifact_map_versions_legacy=()
+
+	# Check if REVISION is set, otherwise exit_with_error
+	[[ "x${REVISION}x" == "xx" ]] && exit_with_error "REVISION is not set"
+
+	artifact_prepare_version
+	debug_var artifact_version
+	debug_var artifact_version_reason
+	debug_dict artifact_map_versions_legacy
+	debug_dict artifact_map_versions
+
+	# @TODO the whole artifact upload/download dance
+	artifact_is_available_in_local_cache
+	artifact_is_available_in_remote_cache
+	artifact_obtain_from_remote_cache
+	artifact_build_from_sources
+	artifact_deploy_to_remote_cache
+}
+
+# This is meant to be run after config, inside default build.
+function build_artifact() {
+	initialize_artifact "${WHAT:-"kernel"}"
+	obtain_complete_artifact
+}
+
+function capture_rename_legacy_debs_into_artifacts() {
+	LOG_SECTION="capture_rename_legacy_debs_into_artifacts" do_with_logging capture_rename_legacy_debs_into_artifacts_logged
+}
+
+function capture_rename_legacy_debs_into_artifacts_logged() {
+	# So the deb-building code will consider the artifact_version in its "Version: " field in the .debs.
+	# But it will produce .deb's with the legacy name. We gotta find and rename them.
+	# Loop over the artifact_map_versions, and rename the .debs.
+	debug_dict artifact_map_versions_legacy
+	debug_dict artifact_map_versions
+	declare deb_name_base deb_name_full new_name_full legacy_version legacy_base_version
+	for deb_name_base in "${!artifact_map_versions[@]}"; do
+		legacy_base_version="${artifact_map_versions_legacy[${deb_name_base}]}"
+		if [[ -z "${legacy_base_version}" ]]; then
+			exit_with_error "Legacy base version not found for artifact '${deb_name_base}'"
+		fi
+		display_alert "Legacy base version" "${legacy_base_version}" "info"
+		legacy_version="${legacy_base_version}_${ARCH}" # Arch-specific package; has ARCH at the end.
+		deb_name_full="${DEST}/debs/${deb_name_base}_${legacy_version}.deb"
+		new_name_full="${DEST}/debs/${deb_name_base}_${artifact_map_versions[${deb_name_base}]}_${ARCH}.deb"
+		display_alert "Full legacy deb name" "${deb_name_full}" "info"
+		display_alert "New artifact deb name" "${new_name_full}" "info"
+		run_host_command_logged mv -v "${deb_name_full}" "${new_name_full}"
+	done
+}
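For reference, a rough sketch of how these cache knobs combine when driven from the CLI; the board/branch/registry values below are hypothetical examples, not from this changeset:

    # default: build locally, use local cache, never touch the remote
    ./compile.sh kernel BOARD=uefi-x86 BRANCH=edge

    # GHA-style: ignore local cache, skip if found in remote, deploy after build (triggered by OCI_TARGET_BASE)
    OCI_TARGET_BASE=ghcr.io/someorg/armbian-artifacts/ ./compile.sh kernel BOARD=uefi-x86 BRANCH=edge

    # actually consume caches (remote, then local), but don't deploy to remote
    OCI_TARGET_BASE=ghcr.io/someorg/armbian-artifacts/ ARTIFACT_USE_CACHE=yes ./compile.sh kernel BOARD=uefi-x86 BRANCH=edge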


@@ -1,27 +0,0 @@
function cli_firmware_pre_run() {
declare -g ARMBIAN_COMMAND_REQUIRE_BASIC_DEPS="yes" # Require prepare_host_basic to run before the command.
# "gimme root on a Linux machine"
cli_standard_relaunch_docker_or_sudo
}
function cli_firmware_run() {
# minimal, non-interactive configuration - it initializes the extension manager; handles its own logging sections.
prep_conf_main_minimal_ni < /dev/null # no stdin for this, so it bombs if tries to be interactive.
declare -g -r BOARD_FIRMWARE_INSTALL="-full" # Build full firmware "too"; overrides the config
# default build, but only invoke specific rootfs functions needed. It has its own logging sections.
do_with_default_build cli_firmware_only_in_default_build < /dev/null # no stdin for this, so it bombs if tries to be interactive.
#reset_uid_owner "${BUILT_ROOTFS_CACHE_FILE}"
display_alert "Firmware build complete" "fake" "info"
}
# This is run inside do_with_default_build(), above.
function cli_firmware_only_in_default_build() {
github_actions_add_output firmware_version "fake"
compile_firmware_light_and_possibly_full
}


@@ -19,7 +19,7 @@ function cli_oras_run() {
 			exit_with_error "File to upload not found '${UPLOAD_FILE}'"
 		fi
 		# This will download & install ORAS and run it.
-		oras_push_artifact_file "${OCI_TARGET}" "${UPLOAD_FILE}"
+		oras_push_artifact_file "${OCI_TARGET}" "${UPLOAD_FILE}" "uploaded from command line - this is NOT a Docker image"
 		;;
 	*)


@@ -1,86 +0,0 @@
function cli_rootfs_pre_run() {
declare -g ARMBIAN_COMMAND_REQUIRE_BASIC_DEPS="yes" # Require prepare_host_basic to run before the command.
# "gimme root on a Linux machine"
cli_standard_relaunch_docker_or_sudo
}
function cli_rootfs_run() {
declare -g ROOTFS_COMPRESSION_RATIO="${ROOTFS_COMPRESSION_RATIO:-"15"}" # default to Compress stronger when we make rootfs cache
declare -a vars_cant_be_set=("LINUXFAMILY" "BOARDFAMILY")
# loop through all vars and check if they are set and bomb out
for var in "${vars_cant_be_set[@]}"; do
if [[ -n ${!var} ]]; then
exit_with_error "Param '${var}' is set ('${!var}') but can't be set for rootfs CLI; rootfs's are shared across boards and families."
fi
done
# If BOARD is set, use it to convert to an ARCH.
if [[ -n ${BOARD} ]]; then
display_alert "BOARD is set, converting to ARCH for rootfs building" "'BOARD=${BOARD}'" "warn"
# Convert BOARD to ARCH; source the BOARD and FAMILY stuff
LOG_SECTION="config_source_board_file" do_with_conditional_logging config_source_board_file
LOG_SECTION="source_family_config_and_arch" do_with_conditional_logging source_family_config_and_arch
display_alert "Done sourcing board file" "'${BOARD}' - arch: '${ARCH}'" "warn"
fi
declare -a vars_need_to_be_set=("RELEASE" "ARCH")
# loop through all vars and check if they are not set and bomb out if so
for var in "${vars_need_to_be_set[@]}"; do
if [[ -z ${!var} ]]; then
exit_with_error "Param '${var}' is not set but needs to be set for rootfs CLI."
fi
done
declare -r __wanted_rootfs_arch="${ARCH}"
declare -g -r RELEASE="${RELEASE}" # make readonly for finding who tries to change it
declare -g -r NEEDS_BINFMT="yes" # make sure binfmts are installed during prepare_host_interactive
# prep_conf_main_only_rootfs_ni is prep_conf_main_only_rootfs_ni() + mark_aggregation_required_in_default_build_start()
prep_conf_main_only_rootfs_ni < /dev/null # no stdin for this, so it bombs if tries to be interactive.
declare -g -r ARCH="${ARCH}" # make readonly for finding who tries to change it
if [[ "${ARCH}" != "${__wanted_rootfs_arch}" ]]; then
exit_with_error "Param 'ARCH' is set to '${ARCH}' after config, but different from wanted '${__wanted_rootfs_arch}'"
fi
declare -g ROOT_FS_CREATE_VERSION
if [[ -z ${ROOT_FS_CREATE_VERSION} ]]; then
ROOT_FS_CREATE_VERSION="$(date --utc +"%Y%m%d")"
display_alert "ROOT_FS_CREATE_VERSION is not set, defaulting to current date" "ROOT_FS_CREATE_VERSION=${ROOT_FS_CREATE_VERSION}" "info"
else
display_alert "ROOT_FS_CREATE_VERSION is set" "ROOT_FS_CREATE_VERSION=${ROOT_FS_CREATE_VERSION}" "info"
fi
# default build, but only invoke specific rootfs functions needed. It has its own logging sections.
do_with_default_build cli_rootfs_only_in_default_build < /dev/null # no stdin for this, so it bombs if tries to be interactive.
reset_uid_owner "${BUILT_ROOTFS_CACHE_FILE}"
display_alert "Rootfs build complete" "${BUILT_ROOTFS_CACHE_NAME}" "info"
display_alert "Rootfs build complete, file: " "${BUILT_ROOTFS_CACHE_FILE}" "info"
}
# This is run inside do_with_default_build(), above.
function cli_rootfs_only_in_default_build() {
assert_requires_aggregation # Bombs if aggregation has not run
declare -g rootfs_cache_id="none_yet"
LOG_SECTION="prepare_rootfs_build_params_and_trap" do_with_logging prepare_rootfs_build_params_and_trap
LOG_SECTION="calculate_rootfs_cache_id" do_with_logging calculate_rootfs_cache_id # sets rootfs_cache_id
# Set a GHA output variable for the cache ID, so it can be used in other steps.
github_actions_add_output rootfs_cache_id_version "${rootfs_cache_id}-${ROOT_FS_CREATE_VERSION}" # for real filename
github_actions_add_output rootfs_cache_id "${rootfs_cache_id}" # for actual caching, sans date/version
# In GHA, prefer to reference this output variable, as it is more stable; I wanna move it to output/rootfs dir later.
github_actions_add_output rootfs_out_filename_relative "cache/rootfs/${ARCH}-${RELEASE}-${rootfs_cache_id}-${ROOT_FS_CREATE_VERSION}.tar.zst"
display_alert "Going to build rootfs" "packages_hash: '${packages_hash:-}' cache_type: '${cache_type:-}' rootfs_cache_id: '${rootfs_cache_id}'" "info"
# "rootfs" CLI skips over a lot, and goes straight to creating the rootfs. It doesn't check cache etc.
LOG_SECTION="create_new_rootfs_cache" do_with_logging create_new_rootfs_cache
}


@@ -20,24 +20,27 @@ function armbian_register_commands() {
 		["build"]="standard_build" # implemented in cli_standard_build_pre_run and cli_standard_build_run
 		["distccd"]="distccd" # implemented in cli_distccd_pre_run and cli_distccd_run
-		["rootfs"]="rootfs" # implemented in cli_rootfs_pre_run and cli_rootfs_run
-		["firmware"]="firmware" # yeah this is getting old. implemented in cli_firmware_pre_run and cli_firmware_run
-		# shortcuts, see vars set below. they use legacy single build, and try to control it via variables
-		["kernel"]="standard_build"
-		["kernel-config"]="standard_build"
-		["u-boot"]="standard_build"
-		["uboot"]="standard_build"
 		# external tooling, made easy.
 		["oras-upload"]="oras" # implemented in cli_oras_pre_run and cli_oras_run; up/down/info are the same, see vars below
 		# all-around artifact wrapper
 		["artifact"]="artifact" # implemented in cli_artifact_pre_run and cli_artifact_run
+		# shortcuts, see vars set below. they use the artifact CLI, and control it via variables
+		["rootfs"]="artifact"
+		["firmware"]="artifact"
+		["firmware-full"]="artifact"
+		["kernel"]="artifact"
+		["kernel-config"]="artifact"
+		["u-boot"]="artifact"
+		["uboot"]="artifact"
 		["undecided"]="undecided" # implemented in cli_undecided_pre_run and cli_undecided_run - relaunches either build or docker
 	)
+	# common for all CLI-based artifact shortcuts
+	declare common_cli_artifact_vars=""
 	# Vars to be set for each command. Optional.
 	declare -g -A ARMBIAN_COMMANDS_TO_VARS_DICT=(
 		["docker-purge"]="DOCKER_SUBCMD='purge'"
@@ -50,10 +53,14 @@ function armbian_register_commands() {
 		["config-dump"]="CONFIG_DEFS_ONLY='yes'"
 		["configdump"]="CONFIG_DEFS_ONLY='yes'"
-		["kernel-config"]="KERNEL_ONLY='yes' JUST_KERNEL='yes' KERNEL_IGNORE_DEB='yes' KERNEL_CONFIGURE='yes'"
-		["kernel"]="KERNEL_ONLY='yes' JUST_KERNEL='yes' KERNEL_IGNORE_DEB='yes' KERNEL_CONFIGURE='no'"
-		["u-boot"]="KERNEL_ONLY='yes' JUST_UBOOT='yes' UBOOT_IGNORE_DEB='yes' KERNEL_CONFIGURE='no'"
-		["uboot"]="KERNEL_ONLY='yes' JUST_UBOOT='yes' UBOOT_IGNORE_DEB='yes' KERNEL_CONFIGURE='no'"
+		# artifact shortcuts
+		["kernel-config"]="WHAT='kernel' KERNEL_CONFIGURE='yes' ARTIFACT_BUILD_INTERACTIVE='yes' ${common_cli_artifact_vars}"
+		["kernel"]="WHAT='kernel' ${common_cli_artifact_vars}"
+		["uboot"]="WHAT='uboot' ${common_cli_artifact_vars}"
+		["u-boot"]="WHAT='uboot' ${common_cli_artifact_vars}"
+		["firmware"]="WHAT='firmware' ${common_cli_artifact_vars}"
+		["firmware-full"]="WHAT='full_firmware' ${common_cli_artifact_vars}"
+		["rootfs"]="WHAT='rootfs' ${common_cli_artifact_vars}"
 		["oras-upload"]="ORAS_OPERATION='upload'"
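Sketching the net effect of the table above (board value hypothetical): each shortcut now routes to the artifact CLI with WHAT preset, so the two invocations below should be equivalent:

    ./compile.sh kernel BOARD=odroidn2 BRANCH=current
    ./compile.sh artifact WHAT=kernel BOARD=odroidn2 BRANCH=current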


@@ -2,26 +2,29 @@
 # This is an internal/core extension.
 function armbian_kernel_config__disable_module_compression() {
-	display_alert "Disabling module compression and signing / debug / auto version" "armbian-kernel" "debug"
-	# DONE: Disable: signing, and compression of modules, for speed.
-	kernel_config_set_n CONFIG_MODULE_COMPRESS_XZ # No use double-compressing modules
-	kernel_config_set_n CONFIG_MODULE_COMPRESS_ZSTD
-	kernel_config_set_n CONFIG_MODULE_COMPRESS_GZIP
-	kernel_config_set_y CONFIG_MODULE_COMPRESS_NONE
-	kernel_config_set_n CONFIG_SECURITY_LOCKDOWN_LSM
-	kernel_config_set_n CONFIG_MODULE_SIG # No use signing modules
-	# DONE: Disable: version shenanigans
-	kernel_config_set_n CONFIG_LOCALVERSION_AUTO # This causes a mismatch between what Armbian wants and what make produces.
-	# DONE: Disable: debug option
-	kernel_config_set_n DEBUG_INFO # Armbian doesn't know how to package a debug kernel.
-	# @TODO: Enable the options for the extrawifi/drivers; so we don't need to worry about them when updating configs
+	kernel_config_modifying_hashes+=("CONFIG_MODULE_COMPRESS_NONE=y" "CONFIG_MODULE_SIG=n" "CONFIG_LOCALVERSION_AUTO=n" "DEBUG_INFO=n")
+	if [[ -f .config ]]; then
+		display_alert "Disabling module compression and signing / debug / auto version" "armbian-kernel" "debug"
+		# DONE: Disable: signing, and compression of modules, for speed.
+		kernel_config_set_n CONFIG_MODULE_COMPRESS_XZ # No use double-compressing modules
+		kernel_config_set_n CONFIG_MODULE_COMPRESS_ZSTD
+		kernel_config_set_n CONFIG_MODULE_COMPRESS_GZIP
+		kernel_config_set_y CONFIG_MODULE_COMPRESS_NONE
+		kernel_config_set_n CONFIG_SECURITY_LOCKDOWN_LSM
+		kernel_config_set_n CONFIG_MODULE_SIG # No use signing modules
+		# DONE: Disable: version shenanigans
+		kernel_config_set_n CONFIG_LOCALVERSION_AUTO # This causes a mismatch between what Armbian wants and what make produces.
+		# DONE: Disable: debug option
+		kernel_config_set_n DEBUG_INFO # Armbian doesn't know how to package a debug kernel.
+		# @TODO: Enable the options for the extrawifi/drivers; so we don't need to worry about them when updating configs
+	fi
 }

-# Helpers for manipulating kernel config. @TODO: hash of changes made
+# Helpers for manipulating kernel config.
 function kernel_config_set_m() {
 	declare module="$1"
 	display_alert "Enabling kernel module" "${module}=m" "debug"


@@ -64,13 +64,20 @@ function kernel_config_initialize() {
 	cd "${kernel_work_dir}" || exit_with_error "kernel_work_dir does not exist: ${kernel_work_dir}"
 	run_kernel_make olddefconfig
+	# Call the extensions. This is _also_ done during the kernel artifact's prepare_version, for consistent caching.
+	call_extensions_kernel_config
+	display_alert "Kernel configuration" "${LINUXCONFIG}" "info"
+}
+
+function call_extensions_kernel_config() {
 	# Run the core-armbian config modifications here, built-in extensions:
+	# 1) Enable the options for the extrawifi/drivers; so we don't need to worry about them when updating configs
+	# 2) Disable: debug, version shenanigans, signing, and compression of modules, to ensure sanity
 	call_extension_method "armbian_kernel_config" <<- 'ARMBIAN_KERNEL_CONFIG'
 		*Armbian-core default hook point for pre-olddefconfig Kernel config modifications*
 		NOT for user consumption. Do NOT use this hook, this is internal to Armbian.
 		Instead, use `custom_kernel_config` which runs later and can undo anything done by this step.
+		Important: this hook might be run multiple times, and one of them might not have a .config in place.
+		Either way, the hook _must_ add representative changes to the `kernel_config_modifying_hashes` array, for kernel config hashing.
 	ARMBIAN_KERNEL_CONFIG
 	# Custom hooks receive a clean / updated config; depending on their modifications, they may need to run olddefconfig again.
@@ -79,20 +86,12 @@ function kernel_config_initialize() {
 		Called after ${LINUXCONFIG}.config is put in place (.config).
 		A good place to customize the .config directly.
 		Armbian default Kconfig modifications have already been applied and can be overriden.
+		Important: this hook might be run multiple times, and one of them might not have a .config in place.
+		Either way, the hook _must_ add representative changes to the `kernel_config_modifying_hashes` array, for kernel config hashing.
 	CUSTOM_KERNEL_CONFIG
-	display_alert "Kernel configuration" "${LINUXCONFIG}" "info"
 }

 function kernel_config_finalize() {
-	call_extension_method "custom_kernel_config_post_defconfig" <<- 'CUSTOM_KERNEL_CONFIG_POST_DEFCONFIG'
-		*Kernel .config is in place, already processed by Armbian*
-		Called after ${LINUXCONFIG}.config is put in place (.config).
-		After all olddefconfig any Kconfig make is called.
-		A good place to customize the .config last-minute.
-	CUSTOM_KERNEL_CONFIG_POST_DEFCONFIG
 	# Now, compare the .config with the previous one, and if they are the same, restore the original date.
 	# This way we avoid unnecessary recompilation of the kernel; even if the .config contents
 	# have not changed, the date will be different, and Kbuild will at least re-link everything.
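Given the contract the hooks now document, a minimal sketch of a conforming user-side hook follows; the CONFIG option chosen here is an arbitrary example, but the shape mirrors armbian_kernel_config__disable_module_compression() above: always feed the hash array, and only touch the file when a .config is actually present:

    function custom_kernel_config__enable_zram() {
    	kernel_config_modifying_hashes+=("CONFIG_ZRAM=y") # always runs; feeds the kernel config hash, even with no .config around
    	if [[ -f .config ]]; then
    		kernel_config_set_y CONFIG_ZRAM # only modify the config when it exists
    	fi
    }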


@@ -33,6 +33,9 @@ if_enabled_echo() {
 }

 function prepare_kernel_packaging_debs() {
+	: "${artifact_version:?artifact_version is not set}"
+	: "${kernel_debs_temp_dir:?kernel_debs_temp_dir is not set}"
 	declare kernel_work_dir="${1}"
 	declare kernel_dest_install_dir="${2}"
 	declare kernel_version="${3}"
@@ -43,16 +46,7 @@ function prepare_kernel_packaging_debs() {
 	declare kernel_version_family="${kernel_version}-${LINUXFAMILY}"
 	# Package version. Affects users upgrading from repo!
-	declare package_version="${REVISION}" # default, "classic" Armbian non-version.
-	# If we're building an artifact, use the pre-determined artifact version.
-	if [[ "${artifact_version:-""}" != "" ]]; then
-		if [[ "${artifact_version}" == "undetermined" ]]; then
-			exit_with_error "Undetermined artifact version during kernel deb packaging. This is a bug, report it."
-		fi
-		display_alert "Using artifact version for kernel package version" "${artifact_version}" "info"
-		package_version="${artifact_version}"
-	fi
-	display_alert "Kernel .deb package version" "${package_version}" "info"
+	display_alert "Kernel .deb package version" "${artifact_version}" "info"

 	# show incoming tree
 	#display_alert "Kernel install dir" "incoming from KBUILD make" "debug"
@@ -89,15 +83,13 @@ function prepare_kernel_packaging_debs() {
 	if [[ "${KERNEL_HAS_WORKING_HEADERS}" == "yes" ]]; then
 		display_alert "Packaging linux-headers" "${LINUXFAMILY} ${LINUXCONFIG}" "info"
 		create_kernel_deb "linux-headers-${BRANCH}-${LINUXFAMILY}" "${debs_target_dir}" kernel_package_callback_linux_headers
-	elif [[ "${KERNEL_HAS_WORKING_HEADERS_FULL_SOURCE}" == "yes" ]]; then
-		display_alert "Packaging linux-headers (full source, experimental)" "${LINUXFAMILY} ${LINUXCONFIG}" "warn"
-		create_kernel_deb "linux-headers-${BRANCH}-${LINUXFAMILY}" "${debs_target_dir}" kernel_package_callback_linux_headers_full_source
 	else
 		display_alert "Skipping linux-headers package" "for ${KERNEL_MAJOR_MINOR} kernel version" "warn"
 	fi
 }

 function create_kernel_deb() {
+	: "${kernel_debs_temp_dir:?kernel_debs_temp_dir is not set}"
 	declare package_name="${1}"
 	declare deb_output_dir="${2}"
 	declare callback_function="${3}"
@@ -151,12 +143,7 @@ function create_kernel_deb() {
 	#display_alert "Package dir" "for package ${package_name}" "debug"
 	#run_host_command_logged tree -C -h -d --du "${package_directory}"

-	# Run shellcheck on the produced DEBIAN/xxx scripts
-	dpkg_deb_run_shellcheck_on_scripts "${package_directory}"
-	# @TODO: hmm, why doesn't this use fakeroot_dpkg_deb_build() ?
-	declare final_deb_filename="${deb_output_dir}/${package_name}_${REVISION}_${ARCH}.deb" # for compatibility with non-artifacts
-	run_host_command_logged dpkg-deb ${DEB_COMPRESS:+-Z$DEB_COMPRESS} --build "${package_directory}" "${final_deb_filename}" # not KDEB compress, we're not under a Makefile
+	fakeroot_dpkg_deb_build "${package_directory}" "${kernel_debs_temp_dir}/"

 	done_with_temp_dir "${cleanup_id}" # changes cwd to "${SRC}" and fires the cleanup function early
 }
@@ -220,7 +207,7 @@ function kernel_package_callback_linux_image() {
 	# Generate a control file
 	cat <<- CONTROL_FILE > "${package_DEBIAN_dir}/control"
 		Package: ${package_name}
-		Version: ${package_version}
+		Version: ${artifact_version}
 		Source: linux-${kernel_version}
 		Architecture: ${ARCH}
 		Maintainer: ${MAINTAINER} <${MAINTAINERMAIL}>
@@ -271,7 +258,7 @@ function kernel_package_callback_linux_dtb() {
 	# Generate a control file
 	cat <<- CONTROL_FILE > "${package_DEBIAN_dir}/control"
-		Version: ${package_version}
+		Version: ${artifact_version}
 		Maintainer: ${MAINTAINER} <${MAINTAINERMAIL}>
 		Section: kernel
 		Package: ${package_name}
@@ -386,7 +373,7 @@ function kernel_package_callback_linux_headers() {
 	# Generate a control file
 	# TODO: libssl-dev is only required if we're signing modules, which is a kernel .config option.
 	cat <<- CONTROL_FILE > "${package_DEBIAN_dir}/control"
-		Version: ${package_version}
+		Version: ${artifact_version}
 		Maintainer: ${MAINTAINER} <${MAINTAINERMAIL}>
 		Section: devel
 		Package: ${package_name}
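Since the control files now carry ${artifact_version} instead of ${REVISION}, repo upgrades depend purely on Debian version ordering; orderings can be sanity-checked with dpkg (the version strings below are made up for illustration):

    dpkg --compare-versions "25.2.0-S4cc3" gt "23.02.2" && echo "newer; apt would upgrade"
    dpkg --compare-versions "25.2.0-S4cc3" lt "25.2.1" && echo "older; apt would not"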
@@ -433,78 +420,3 @@ function kernel_package_callback_linux_headers() {
 			EOT_POSTINST
 	)
 }
function kernel_package_callback_linux_headers_full_source() {
display_alert "linux-headers packaging full source" "${package_directory}" "debug"
# targets.
local headers_target_dir="${package_directory}/usr/src/linux-headers-${kernel_version_family}" # headers/tools etc
local modules_target_dir="${package_directory}/lib/modules/${kernel_version_family}" # symlink to above later
mkdir -p "${headers_target_dir}" "${modules_target_dir}" # create both dirs
run_host_command_logged ln -v -s "/usr/src/linux-headers-${kernel_version_family}" "${modules_target_dir}/build" # Symlink in modules so builds find the headers
# gather stuff from the linux source tree: ${kernel_work_dir} (NOT the make install destination)
# those can be source files or object (binary/compiled) stuff
# how to get SRCARCH? only from the makefile itself. ARCH=amd64 then SRCARCH=x86. How to we know? @TODO
local SRC_ARCH="${ARCH}"
[[ "${SRC_ARCH}" == "amd64" ]] && SRC_ARCH="x86"
[[ "${SRC_ARCH}" == "armhf" ]] && SRC_ARCH="arm"
# Export git tree to the target directory.
# @TODO: this is waay too heavy. add a zst tar ball, and extract during postinst.
git -C "${kernel_work_dir}" archive --format=tar HEAD | tar -x -C "${headers_target_dir}"
# @TODO: add Module.symvers if exists
run_host_command_logged cp -vp "${kernel_work_dir}"/.config "${headers_target_dir}"/.config # copy .config manually to be where it's expected to be
# Generate a control file
cat <<- CONTROL_FILE > "${package_DEBIAN_dir}/control"
Version: ${package_version}
Maintainer: ${MAINTAINER} <${MAINTAINERMAIL}>
Section: devel
Package: ${package_name}
Architecture: ${ARCH}
Provides: linux-headers, linux-headers-armbian, armbian-$BRANCH
Depends: make, gcc, libc6-dev, bison, flex, libssl-dev, libelf-dev
Description: Armbian Linux $BRANCH full-source headers ${artifact_version_reason:-"${kernel_version_family}"}
This package provides kernel header files for ${kernel_version_family}
.
This is useful for DKMS and building of external modules.
CONTROL_FILE
# Make sure the target dir is clean/not-existing before installing.
kernel_package_hook_helper "preinst" <(
cat <<- EOT_PREINST
if [[ -d "/usr/src/linux-headers-${kernel_version_family}" ]]; then
echo "Cleaning pre-existing directory /usr/src/linux-headers-${kernel_version_family} ..."
rm -rf "/usr/src/linux-headers-${kernel_version_family}"
fi
EOT_PREINST
)
# Make sure the target dir is removed before removing the package; that way we don't leave eventual compilation artifacts over there.
kernel_package_hook_helper "prerm" <(
cat <<- EOT_PRERM
if [[ -d "/usr/src/linux-headers-${kernel_version_family}" ]]; then
echo "Cleaning directory /usr/src/linux-headers-${kernel_version_family} ..."
rm -rf "/usr/src/linux-headers-${kernel_version_family}"
fi
EOT_PRERM
)
kernel_package_hook_helper "postinst" <(
cat <<- EOT_POSTINST
cd "/usr/src/linux-headers-${kernel_version_family}"
NCPU=\$(grep -c 'processor' /proc/cpuinfo)
echo "Compiling kernel-headers tools (${kernel_version_family}) using \$NCPU CPUs - please wait ..."
yes "" | make ARCH="${SRC_ARCH}" oldconfig
make ARCH="${SRC_ARCH}" -j\$NCPU scripts
make ARCH="${SRC_ARCH}" -j\$NCPU M=scripts/mod/
make ARCH="${SRC_ARCH}" -j\$NCPU tools/objtool || echo "objtool failed, but thats okay"
# @TODO: modules_prepare -- should work with 4.19+
echo "Done compiling kernel-headers tools (${kernel_version_family})."
EOT_POSTINST
)
}


@@ -61,16 +61,11 @@ function compile_kernel() {
 	kernel_config # has its own logging sections inside

-	# package the kernel-source .deb
-	LOG_SECTION="kernel_package_source" do_with_logging do_with_hooks kernel_package_source

 	# build via make and package .debs; they're separate sub-steps
 	kernel_prepare_build_and_package # has its own logging sections inside

 	display_alert "Done with" "kernel compile" "debug"
-	LOG_SECTION="kernel_deploy_pkg" do_with_logging do_with_hooks kernel_deploy_pkg
 	return 0
 }
@@ -86,59 +81,6 @@ function kernel_maybe_clean() {
 	fi
 }
function kernel_package_source() {
[[ "${BUILD_KSRC}" != "yes" ]] && return 0
display_alert "Creating kernel source package" "${LINUXCONFIG}" "info"
local ts=${SECONDS}
local sources_pkg_dir tarball_size package_size
declare cleanup_id="" tmp_src_dir=""
prepare_temp_dir_in_workdir_and_schedule_cleanup "ksrc" cleanup_id tmp_src_dir # namerefs
sources_pkg_dir="${tmp_src_dir}/${CHOSEN_KSRC}_${REVISION}_all"
mkdir -p "${sources_pkg_dir}"/usr/src/ \
"${sources_pkg_dir}/usr/share/doc/linux-source-${version}-${LINUXFAMILY}" \
"${sources_pkg_dir}"/DEBIAN
run_host_command_logged cp -v "${SRC}/config/kernel/${LINUXCONFIG}.config" "${sources_pkg_dir}/usr/src/${LINUXCONFIG}_${version}_${REVISION}_config"
run_host_command_logged cp -v COPYING "${sources_pkg_dir}/usr/share/doc/linux-source-${version}-${LINUXFAMILY}/LICENSE"
display_alert "Compressing sources for the linux-source package" "exporting from git" "info"
cd "${kernel_work_dir}" || exit_with_error "Can't cd to kernel_work_dir: ${kernel_work_dir}"
local tar_prefix="${version}/"
local output_tarball="${sources_pkg_dir}/usr/src/linux-source-${version}-${LINUXFAMILY}.tar.zst"
# export tar with `git archive`; we point it at HEAD, but could be anything else too
run_host_command_logged git archive "--prefix=${tar_prefix}" --format=tar HEAD "| zstdmt > '${output_tarball}'"
tarball_size="$(du -h -s "${output_tarball}" | awk '{print $1}')"
cat <<- EOF > "${sources_pkg_dir}"/DEBIAN/control
Package: linux-source-${BRANCH}-${LINUXFAMILY}
Version: ${version}-${BRANCH}-${LINUXFAMILY}+${REVISION}
Architecture: all
Maintainer: ${MAINTAINER} <${MAINTAINERMAIL}>
Section: kernel
Priority: optional
Depends: binutils, coreutils
Provides: linux-source, linux-source-${version}-${LINUXFAMILY}
Recommends: gcc, make
Description: This package provides the source code for the Linux kernel $version
EOF
fakeroot_dpkg_deb_build -Znone -z0 "${sources_pkg_dir}" "${sources_pkg_dir}.deb" # do not compress .deb, it already contains a zstd compressed tarball! ignores ${KDEB_COMPRESS} on purpose
package_size="$(du -h -s "${sources_pkg_dir}.deb" | awk '{print $1}')"
run_host_command_logged rsync --remove-source-files -r "${sources_pkg_dir}.deb" "${DEB_STORAGE}/"
display_alert "$(basename "${sources_pkg_dir}.deb" ".deb") packaged" "$((SECONDS - ts)) seconds, ${tarball_size} tarball, ${package_size} .deb" "info"
done_with_temp_dir "${cleanup_id}" # changes cwd to "${SRC}" and fires the cleanup function early
}
 function kernel_prepare_build_and_package() {
 	declare -a build_targets
 	declare kernel_dest_install_dir
@@ -175,9 +117,17 @@
 	# Fire off the build & package
 	LOG_SECTION="kernel_build" do_with_logging do_with_hooks kernel_build

+	# prepare a target dir for the shared, produced kernel .debs, across image/dtb/headers
+	declare cleanup_id_debs="" kernel_debs_temp_dir=""
+	prepare_temp_dir_in_workdir_and_schedule_cleanup "kd" cleanup_id_debs kernel_debs_temp_dir # namerefs

 	LOG_SECTION="kernel_package" do_with_logging do_with_hooks kernel_package

-	done_with_temp_dir "${cleanup_id}" # changes cwd to "${SRC}" and fires the cleanup function early
+	# This deploys to DEB_STORAGE...
+	LOG_SECTION="kernel_deploy_pkg" do_with_logging do_with_hooks kernel_deploy_pkg
+
+	done_with_temp_dir "${cleanup_id_debs}" # changes cwd to "${SRC}" and fires the cleanup function early
+	done_with_temp_dir "${cleanup_id}" # changes cwd to "${SRC}" and fires the cleanup function early
 }

 function kernel_build() {
@@ -194,6 +144,7 @@
 function kernel_package() {
 	local ts=${SECONDS}
+	cd "${kernel_debs_temp_dir}" || exit_with_error "Can't cd to kernel_debs_temp_dir: ${kernel_debs_temp_dir}"
 	cd "${kernel_work_dir}" || exit_with_error "Can't cd to kernel_work_dir: ${kernel_work_dir}"
 	display_alert "Packaging kernel" "${LINUXFAMILY} ${LINUXCONFIG}" "info"
 	prepare_kernel_packaging_debs "${kernel_work_dir}" "${kernel_dest_install_dir}" "${version}" kernel_install_dirs
@@ -201,11 +152,6 @@
 }

 function kernel_deploy_pkg() {
-	cd "${kernel_work_dir}/.." || exit_with_error "Can't cd to kernel_work_dir: ${kernel_work_dir}"
-	# @TODO: rpardini: this is kept for historical reasons... wth?
-	# remove firmware image packages here - easier than patching ~40 packaging scripts at once
-	run_host_command_logged rm -fv "linux-firmware-image-*.deb"
-	run_host_command_logged rsync -v --remove-source-files -r ./*.deb "${DEB_STORAGE}/"
+	: "${kernel_debs_temp_dir:?kernel_debs_temp_dir is not set}"
+	run_host_command_logged rsync -v --remove-source-files -r "${kernel_debs_temp_dir}"/*.deb "${DEB_STORAGE}/"
 }


@@ -1,22 +1,6 @@
function compile_firmware_light_and_possibly_full() {
if [[ "${INSTALL_ARMBIAN_FIRMWARE:-yes}" == "yes" ]]; then # Build firmware by default.
# Build the "light" version of firmware packages, with no conditions.
FULL="" REPLACE="-full" LOG_SECTION="compile_firmware" do_with_logging compile_firmware
# Now, we'll build the "full" version of firmware packages, if:
# 1) We've CI==true, or stdout is not a terminal, or
# 2) We've been asked to install it for the board being built, BOARD_FIRMWARE_INSTALL="-full"
if [[ "${CI}" == "true" || ! -t 1 || "${BOARD_FIRMWARE_INSTALL}" == "-full" ]]; then
# Build the full version of firmware package
FULL="-full" REPLACE="" LOG_SECTION="compile_firmware_full" do_with_logging compile_firmware
else
display_alert "Skipping full firmware package build" "" "info"
fi
fi
return 0
}
 function compile_firmware() {
+	: "${artifact_version:?artifact_version is not set}"
 	display_alert "Merging and packaging linux firmware" "@host --> firmware${FULL}" "info"

 	declare cleanup_id="" fw_temp_dir=""
@@ -66,7 +50,7 @@ function compile_firmware() {
 	# @TODO: rpardini: this needs Conflicts: with the standard Ubuntu/Debian linux-firmware packages and other firmware pkgs in Debian
 	cat <<- END > DEBIAN/control
 		Package: armbian-firmware${FULL}
-		Version: $REVISION
+		Version: ${artifact_version}
 		Architecture: all
 		Maintainer: $MAINTAINER <$MAINTAINERMAIL>
 		Installed-Size: 1
@@ -79,18 +63,9 @@ function compile_firmware() {
 	cd "${fw_temp_dir}" || exit_with_error "can't change directory"

-	# package
-	run_host_command_logged mv -v "armbian-firmware${FULL}" "armbian-firmware${FULL}_${REVISION}_all"
-	display_alert "Building firmware package" "armbian-firmware${FULL}_${REVISION}_all" "info"
-	if [[ -n $FULL ]]; then
-		display_alert "Full firmware, very big, avoiding tmpfs" "armbian-firmware${FULL}_${REVISION}_all" "info"
-		fakeroot_dpkg_deb_build "armbian-firmware${FULL}_${REVISION}_all" "${DEB_STORAGE}"
-	else
-		fakeroot_dpkg_deb_build "armbian-firmware${FULL}_${REVISION}_all"
-		run_host_command_logged mv -v "armbian-firmware${FULL}_${REVISION}_all" "armbian-firmware${FULL}"
-		run_host_command_logged rsync -rq "armbian-firmware${FULL}_${REVISION}_all.deb" "${DEB_STORAGE}/"
-	fi
+	# package, directly to DEB_STORAGE; full version might be very big for tmpfs.
+	display_alert "Building firmware package" "armbian-firmware${FULL}" "info"
+	fakeroot_dpkg_deb_build "armbian-firmware${FULL}" "${DEB_STORAGE}"

 	done_with_temp_dir "${cleanup_id}" # changes cwd to "${SRC}" and fires the cleanup function early
 }


@@ -24,7 +24,7 @@ function kernel_drivers_create_patches() {
 	kernel_drivers_patch_hash="${hash_files}" # outer scope
 	if [[ "${hash_only:-"no"}" == "yes" ]]; then
-		display_alert "Hash-only kernel driver requested" "$kernel_drivers_patch_hash - returning" "warn"
+		display_alert "Hash-only kernel driver requested" "$kernel_drivers_patch_hash - returning" "debug"
 		return 0
 	fi


@@ -230,6 +230,8 @@ function deploy_built_uboot_bins_for_one_target_to_packaging_area() {
 }

 function compile_uboot() {
+	: "${artifact_version:?artifact_version is not set}"
 	display_alert "Compiling u-boot" "BOOTSOURCE: ${BOOTSOURCE}" "debug"
 	if [[ -n $BOOTSOURCE ]] && [[ "${BOOTSOURCE}" != "none" ]]; then
 		display_alert "Extensions: fetch custom uboot" "fetch_custom_uboot" "debug"
@@ -279,7 +281,7 @@ function compile_uboot() {
 	display_alert "Compiler version" "${UBOOT_COMPILER}gcc '${gcc_version_main}'" "info"
 	[[ -n $toolchain2 ]] && display_alert "Additional compiler version" "${toolchain2_type}gcc $(eval env PATH="${toolchain}:${toolchain2}:${PATH}" "${toolchain2_type}gcc" -dumpfullversion -dumpversion)" "info"

-	local uboot_name="${CHOSEN_UBOOT}_${REVISION}_${ARCH}"
+	local uboot_name="${CHOSEN_UBOOT}_${REVISION}_${ARCH}" # @TODO: get rid of CHOSEN_UBOOT

 	# create directory structure for the .deb package
 	declare cleanup_id="" uboottempdir=""
@@ -338,22 +340,12 @@ function compile_uboot() {
 		$(declare -f setup_write_uboot_platform || true)
 	EOF

-	# Package version. Affects users upgrading from repo!
-	declare package_version="${REVISION}" # default, "classic" Armbian non-version.
-	# If we're building an artifact, use the pre-determined artifact version.
-	if [[ "${artifact_version:-""}" != "" ]]; then
-		if [[ "${artifact_version}" == "undetermined" ]]; then
-			exit_with_error "Undetermined artifact version during u-boot deb packaging. This is a bug, report it."
-		fi
-		display_alert "Using artifact version for u-boot package version" "${artifact_version}" "info"
-		package_version="${artifact_version}"
-	fi
-	display_alert "Das U-Boot .deb package version" "${package_version}" "info"
+	display_alert "Das U-Boot .deb package version" "${artifact_version}" "info"

 	# set up control file
 	cat <<- EOF > "$uboottempdir/${uboot_name}/DEBIAN/control"
 		Package: linux-u-boot-${BOARD}-${BRANCH}
-		Version: ${package_version}
+		Version: ${artifact_version}
 		Architecture: $ARCH
 		Maintainer: $MAINTAINER <$MAINTAINERMAIL>
 		Installed-Size: 1
@@ -371,16 +363,10 @@ function compile_uboot() {
 	[[ -f Licenses/README ]] && run_host_command_logged cp Licenses/README "$uboottempdir/${uboot_name}/usr/lib/u-boot/LICENSE"
 	[[ -n $atftempdir && -f $atftempdir/license.md ]] && run_host_command_logged cp "${atftempdir}/license.md" "$uboottempdir/${uboot_name}/usr/lib/u-boot/LICENSE.atf"

-	# Important: this forces the deb to have a specific name, and not be version-dependent...
-	# This is set to `uboot_name="${CHOSEN_UBOOT}_${REVISION}_${ARCH}"` in outer scope...
-	display_alert "Building u-boot deb" "(version: ${package_version}) ${uboot_name}.deb"
-	fakeroot_dpkg_deb_build "$uboottempdir/${uboot_name}" "$uboottempdir/${uboot_name}.deb"
-	rm -rf "${uboottempdir:?}/${uboot_name:?}"
-	[[ -n $atftempdir ]] && rm -rf "${atftempdir:?}"
-	[[ ! -f $uboottempdir/${uboot_name}.deb ]] && exit_with_error "Building u-boot package failed"
-	run_host_command_logged rsync --remove-source-files -r "$uboottempdir/${uboot_name}.deb" "${DEB_STORAGE}/"
+	display_alert "Building u-boot deb" "(version: ${artifact_version})"
+	fakeroot_dpkg_deb_build "$uboottempdir/${uboot_name}" "${DEB_STORAGE}"
+	[[ -n $atftempdir ]] && rm -rf "${atftempdir:?}" # @TODO: intricate cleanup; u-boot's pkg uses ATF's tempdir...

 	done_with_temp_dir "${cleanup_id}" # changes cwd to "${SRC}" and fires the cleanup function early


@@ -15,8 +15,12 @@
 # "oldcache" = remove old cached rootfs except for the newest 8 files
 function general_cleaning() {
+	display_alert "Cleaning" "general_cleaning '$1' - NOT" "warn"
+	return 0
+
 	case $1 in
 		debs) # delete ${DEB_STORAGE} for current branch and family
+			# @TODO: this is completely obsolete with artifacts?
 			if [[ -d "${DEB_STORAGE}" ]]; then
 				display_alert "Cleaning ${DEB_STORAGE} for" "$BOARD $BRANCH" "info"
 				# easier than dealing with variable expansion and escaping dashes in file names


@@ -1,16 +1,5 @@
-# This has... everything: https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?h=linux-6.1.y
-# This has... everything: https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?h=v6.2-rc5
-# get the sha1 of the commit on tag or branch
-# git ls-remote --exit-code --symref git://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git v6.2-rc5
-# git ls-remote --exit-code --symref git://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git v6.2-rc5
-# 93f875a8526a291005e7f38478079526c843cbec refs/heads/linux-6.1.y
-# 4cc398054ac8efe0ff832c82c7caacbdd992312a refs/tags/v6.2-rc5
-# https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/tree/Makefile?h=linux-6.1.y
-# plaintext: https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/plain/Makefile?h=4cc398054ac8efe0ff832c82c7caacbdd992312a
+# This works under memoize-cached.sh::run_memoized() -- which is full of tricks.
+# Nested functions are used because the source of the memoized function is used as part of the cache hash.
 function memoized_git_ref_to_info() {
 	declare -n MEMO_DICT="${1}" # nameref
 	declare ref_type ref_name
@@ -68,6 +57,7 @@ function memoized_git_ref_to_info() {
 	# parse org/repo from https://github.com/org/repo
 	declare org_and_repo=""
 	org_and_repo="$(echo "${git_source}" | cut -d/ -f4-5)"
+	org_and_repo="${org_and_repo%.git}" # remove .git if present
 	url="https://raw.githubusercontent.com/${org_and_repo}/${sha1}/Makefile"
 	;;
@@ -89,7 +79,7 @@ function memoized_git_ref_to_info() {
 	;;
 esac

-display_alert "Fetching Makefile via HTTP" "${url}" "warn"
+display_alert "Fetching Makefile via HTTP" "${url}" "debug"
 makefile_url="${url}"
 makefile_body="$(curl -sL --fail "${url}")" || exit_with_error "Failed to fetch Makefile from '${url}'"
@@ -131,7 +121,7 @@ function memoized_git_ref_to_info() {
 	return 0
 }

-display_alert "Fetching Makefile body" "${ref_name}" "warn"
+display_alert "Fetching Makefile body" "${ref_name}" "debug"
 declare makefile_body makefile_url
 declare makefile_version makefile_codename makefile_full_version
 obtain_makefile_body_from_git "${MEMO_DICT[GIT_SOURCE]}" "${sha1}"
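The deleted comments above remain the best documentation of the underlying trick: resolve the ref to a sha1 without cloning, then fetch only the Makefile at that sha1 and read the version fields out of it. Roughly, using the same kernel.org stable-tree examples (a sketch, not the exact code path):

    sha1="$(git ls-remote --exit-code git://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git v6.2-rc5 | head -n1 | cut -f1)"
    curl -sL --fail "https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/plain/Makefile?h=${sha1}" \
    	| grep -E '^(VERSION|PATCHLEVEL|SUBLEVEL|EXTRAVERSION|NAME) ='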


@@ -14,5 +14,5 @@ function github_actions_add_output() {
 	local output_value="$*"
 	echo "${output_name}=${output_value}" >> "${GITHUB_OUTPUT}"
-	display_alert "Added GHA output" "'${output_name}'='${output_value}'" "info"
+	display_alert "Added GHA output" "'${output_name}'='${output_value}'" "debug"
 }
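A tiny sketch of this helper's contract, assuming its surrounding guards (not shown in this hunk) allow writing; GITHUB_OUTPUT is normally provided by the Actions runner, here pointed at a scratch file:

    export GITHUB_OUTPUT=/tmp/gha.out
    github_actions_add_output rootfs_cache_id "abc123"
    cat "${GITHUB_OUTPUT}" # prints: rootfs_cache_id=abc123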


@@ -4,23 +4,17 @@ function calculate_hash_for_all_files_in_dirs() {
 	for dir in "${dirs_to_hash[@]}"; do
 		# skip if dir doesn't exist...
 		if [[ ! -d "${dir}" ]]; then
-			display_alert "calculate_hash_for_all_files_in_dirs" "skipping non-existent dir \"${dir}\"" "warn"
+			display_alert "calculate_hash_for_all_files_in_dirs" "skipping non-existent dir \"${dir}\"" "debug"
 			continue
 		fi
-		declare found_files="no"
-		# shellcheck disable=SC2044 # lets expand... # -L: follow symlinks
-		for file in $(find -L "${dir}" -type f); do
-			files_to_hash+=("${file}")
-			found_files="yes"
-		done
-		if [[ "${found_files}" == "no" ]]; then
-			display_alert "calculate_hash_for_all_files_in_dirs" "empty dir \"${dir}\"" "debug"
-			continue
-		fi
+		mapfile -t files_in_dir < <(find -L "${dir}" -type f)
+		if [[ ${#files_in_dir[@]} -eq 0 ]]; then
+			display_alert "calculate_hash_for_all_files_in_dirs" "empty dir \"${dir}\"" "debug"
+		fi
+		files_to_hash+=("${files_in_dir[@]}")
 	done
-	#display_alert "calculate_hash_for_all_files_in_dirs" "files_to_hash_sorted: ${#files_to_hash_sorted[@]}" "warn"
-	#display_alert "calculate_hash_for_all_files_in_dirs" "files_to_hash_sorted: ${files_to_hash_sorted[*]}" "warn"
 	calculate_hash_for_files "${files_to_hash[@]}"
 }
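Beyond hushing the alert to debug, the mapfile rewrite fixes word-splitting: the old unquoted $(find ...) loop broke any path containing whitespace. A standalone illustration (the directory name is made up):

    mkdir -p "/tmp/hash demo" && touch "/tmp/hash demo/a file.bin"
    for f in $(find -L "/tmp/hash demo" -type f); do echo "old: ${f}"; done # prints two mangled halves
    mapfile -t files < <(find -L "/tmp/hash demo" -type f)
    printf 'new: %s\n' "${files[@]}" # prints the one correct path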


@@ -1,6 +1,7 @@
 function run_tool_oras() {
 	# Default version
 	ORAS_VERSION=${ORAS_VERSION:-0.16.0} # https://github.com/oras-project/oras/releases
+	#ORAS_VERSION=${ORAS_VERSION:-"1.0.0-rc.1"} # https://github.com/oras-project/oras/releases

 	declare non_cache_dir="/armbian-tools/oras" # To deploy/reuse cached ORAS in a Docker image.
@@ -44,9 +45,9 @@ function run_tool_oras() {
 	esac

 	# Check if we have a cached version in a Docker image, and copy it over before possibly updating it.
-	if [[ "${deploy_to_non_cache_dir:-"no"}" != "yes" && -d "${non_cache_dir}" ]]; then
+	if [[ "${deploy_to_non_cache_dir:-"no"}" != "yes" && -d "${non_cache_dir}" && ! -f "${ORAS_BIN}" ]]; then
 		display_alert "Using cached ORAS from Docker image" "ORAS" "debug"
-		run_host_command_logged cp -v "${non_cache_dir}/"* "${DIR_ORAS}/"
+		run_host_command_logged cp "${non_cache_dir}/"* "${DIR_ORAS}/"
 	fi

 	declare ORAS_FN="oras_${ORAS_VERSION}_${ORAS_OS}_${ORAS_ARCH}"
@@ -90,9 +91,14 @@ function try_download_oras_tooling() {
 function oras_push_artifact_file() {
 	declare image_full_oci="${1}" # Something like "ghcr.io/rpardini/armbian-git-shallow/kernel-git:latest"
 	declare upload_file="${2}"    # Absolute path to the file to upload including the path and name
+	declare description="${3:-"missing description"}"
 	declare upload_file_base_path upload_file_name
 	display_alert "Pushing ${upload_file}" "ORAS to ${image_full_oci}" "info"

+	declare extra_params=("--verbose")
+	oras_add_param_plain_http
+	extra_params+=("--annotation" "org.opencontainers.image.description=${description}")
+
 	# make sure file exists
 	if [[ ! -f "${upload_file}" ]]; then
 		display_alert "File not found: ${upload_file}" "ORAS upload" "err"
@@ -105,9 +111,30 @@ function oras_push_artifact_file() {
 	display_alert "upload_file_base_path: ${upload_file_base_path}" "ORAS upload" "debug"
 	display_alert "upload_file_name: ${upload_file_name}" "ORAS upload" "debug"

-	pushd "${upload_file_base_path}" || exit_with_error "Failed to pushd to ${upload_file_base_path} - ORAS upload"
-	run_tool_oras push --verbose "${image_full_oci}" "${upload_file_name}:application/vnd.unknown.layer.v1+tar"
-	popd || exit_with_error "Failed to popd" "ORAS upload"
+	pushd "${upload_file_base_path}" &> /dev/null || exit_with_error "Failed to pushd to ${upload_file_base_path} - ORAS upload"
+	run_tool_oras push "${extra_params[@]}" "${image_full_oci}" "${upload_file_name}:application/vnd.unknown.layer.v1+tar"
+	popd &> /dev/null || exit_with_error "Failed to popd" "ORAS upload"
+	return 0
+}
+
+# Outer scope: oras_has_manifest (yes/no) and oras_manifest_json (json)
+function oras_get_artifact_manifest() {
+	declare image_full_oci="${1}" # Something like "ghcr.io/rpardini/armbian-git-shallow/kernel-git:latest"
+	display_alert "Getting ORAS manifest" "ORAS manifest from ${image_full_oci}" "info"
+
+	declare extra_params=("--verbose")
+	oras_add_param_plain_http
+
+	# Gotta capture the output & if it failed...
+	oras_manifest_json="$(run_tool_oras manifest fetch "${extra_params[@]}" "${image_full_oci}" 2>&1)" && oras_has_manifest="yes" || oras_has_manifest="no"
+	display_alert "oras_manifest_json: ${oras_manifest_json}" "ORAS manifest" "debug"
+
+	# if it worked, parse some basic info using jq
+	if [[ "${oras_has_manifest}" == "yes" ]]; then
+		oras_manifest_description="$(echo "${oras_manifest_json}" | jq -r '.annotations."org.opencontainers.image.description"')"
+		display_alert "oras_manifest_description: ${oras_manifest_description}" "ORAS oras_manifest_description" "debug"
+	fi
+
 	return 0
 }
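A sketch of how a caller might consume the outer-scope results to implement the "if remote found, do nothing" behavior described in the artifact CLI; the OCI target name here is hypothetical:

    declare oras_has_manifest oras_manifest_json oras_manifest_description
    oras_get_artifact_manifest "ghcr.io/someorg/armbian-artifacts/kernel-rockchip64-edge:6.1.9-S4cc3"
    if [[ "${oras_has_manifest}" == "yes" ]]; then
    	display_alert "Artifact already in remote cache, skipping build" "${oras_manifest_description}" "info"
    	return 0
    fi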
@@ -117,13 +144,16 @@
 	declare target_dir="${2}" # temporary directory we'll use for the download to workaround oras being maniac
 	declare target_fn="${3}"

+	declare extra_params=("--verbose")
+	oras_add_param_plain_http
+
 	declare full_temp_dir="${target_dir}/${target_fn}.oras.pull.tmp"
 	declare full_tmp_file_path="${full_temp_dir}/${target_fn}"
 	run_host_command_logged mkdir -p "${full_temp_dir}"

 	# @TODO: this needs retries...
 	pushd "${full_temp_dir}" &> /dev/null || exit_with_error "Failed to pushd to ${full_temp_dir} - ORAS download"
-	run_tool_oras pull --verbose "${image_full_oci}"
+	run_tool_oras pull "${extra_params[@]}" "${image_full_oci}"
 	popd &> /dev/null || exit_with_error "Failed to popd - ORAS download"

 	# sanity check; did we get the file we expected?
@@ -138,3 +168,11 @@
 	# remove the temp directory
 	run_host_command_logged rm -rf "${full_temp_dir}"
 }
+
+function oras_add_param_plain_http() {
+	# if image_full_oci contains ":5000/", add --plain-http; to make it easy to run a self-hosted registry
+	if [[ "${image_full_oci}" == *":5000/"* ]]; then
+		display_alert "Adding --plain-http to ORAS" "ORAS to insecure registry" "warn"
+		extra_params+=("--plain-http")
+	fi
+}
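This makes a throwaway local registry usable for end-to-end push testing; a sketch, assuming the stock registry image and a hypothetical board/branch:

    docker run -d -p 5000:5000 --name registry registry:2
    # the target contains ":5000/", so --plain-http is added automatically:
    OCI_TARGET_BASE=localhost:5000/armbian-artifacts/ ./compile.sh kernel BOARD=uefi-x86 BRANCH=edge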


@@ -437,6 +437,21 @@ function docker_cli_prepare_launch() {
 		display_alert "Passing down to Docker" "GITHUB_STEP_SUMMARY: '${GITHUB_STEP_SUMMARY}'" "info"
 		DOCKER_ARGS+=("--mount" "type=bind,source=${GITHUB_STEP_SUMMARY},target=${GITHUB_STEP_SUMMARY}")
 		DOCKER_ARGS+=("--env" "GITHUB_STEP_SUMMARY=${GITHUB_STEP_SUMMARY}")
+
+		# For pushing/pulling from OCI/ghcr.io; if OCI_TARGET_BASE is set:
+		# - bind-mount the Docker config file (if it exists)
+		if [[ -n "${OCI_TARGET_BASE}" ]]; then
+			display_alert "Detected" "OCI_TARGET_BASE: '${OCI_TARGET_BASE}'" "warn"
+			# DOCKER_ARGS+=("--env" "OCI_TARGET_BASE=${OCI_TARGET_BASE}")
+			# Mount the Docker config file (if it exists)
+			local docker_config_file_host="${HOME}/.docker/config.json"
+			local docker_config_file_docker="/root/.docker/config.json" # inside Docker
+			if [[ -f "${docker_config_file_host}" ]]; then
+				display_alert "Passing down to Docker" "Docker config file: '${docker_config_file_host}' -> '${docker_config_file_docker}'" "warn"
+				DOCKER_ARGS+=("--mount" "type=bind,source=${docker_config_file_host},target=${docker_config_file_docker}")
+			fi
+		fi
 	fi

 	# This will receive the mountpoint as $1 and the mountpoint vars in the environment.
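The intent of that bind-mount, sketched: log in once on the host, and the relaunched container (and thus ORAS, which can read Docker's credential config) reuses those credentials. The user and registry path below are placeholders:

    docker login ghcr.io -u some-user # writes ${HOME}/.docker/config.json on the host
    OCI_TARGET_BASE=ghcr.io/some-user/armbian-artifacts/ ./compile.sh kernel BOARD=uefi-x86 BRANCH=edge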


@@ -14,7 +14,7 @@
 # see: https://github.com/armbian/build/issues/1584
 update_initramfs() {
 	local chroot_target=$1 target_dir
-	target_dir="$(find "${chroot_target}/lib/modules"/ -maxdepth 1 -type d -name "*${VER}*")" # @TODO: rpardini: this will break when we add multi-kernel images
+	target_dir="$(find "${chroot_target}/lib/modules"/ -maxdepth 1 -type d -name "*${IMAGE_INSTALLED_KERNEL_VERSION}*")" # @TODO: rpardini: this will break when we add multi-kernel images
 	local initrd_kern_ver initrd_file initrd_cache_key initrd_cache_file_path initrd_hash
 	local initrd_cache_current_manifest_filepath initrd_cache_last_manifest_filepath
 	local initrd_debug=""
@@ -28,9 +28,9 @@ update_initramfs() {
 		initrd_file="${chroot_target}/boot/initrd.img-${initrd_kern_ver}"
 		update_initramfs_cmd="TMPDIR=/tmp update-initramfs -u${initrd_debug} -k ${initrd_kern_ver}" # @TODO: why? TMPDIR=/tmp
 	else
-		display_alert "Can't find kernel for version, here's what is in /lib/modules" "VER: ${VER}" "wrn"
+		display_alert "Can't find kernel for version, here's what is in /lib/modules" "IMAGE_INSTALLED_KERNEL_VERSION: ${IMAGE_INSTALLED_KERNEL_VERSION}" "wrn"
 		SHOW_LOG=yes run_host_command_logged find "${chroot_target}/lib/modules"/ -maxdepth 1
-		exit_with_error "No kernel installed for the version" "${VER}"
+		exit_with_error "No kernel installed for the version" "${IMAGE_INSTALLED_KERNEL_VERSION}"
 	fi

 	# Caching.


@@ -51,28 +51,28 @@ function check_loop_device_internal() {
return 0 return 0
} }
# write_uboot_to_loop_image <loopdev> # write_uboot_to_loop_image <loopdev> <full_path_to_uboot_deb>
function write_uboot_to_loop_image() { function write_uboot_to_loop_image() {
declare loop=$1 declare loop=$1
display_alert "Preparing u-boot bootloader" "LOOP=${loop} - ${CHOSEN_UBOOT}" "info" declare uboot_deb=$2
display_alert "Preparing u-boot bootloader" "LOOP=${loop} - ${uboot_deb}" "info"
declare full_path_uboot_deb="${uboot_deb}"
if [[ ! -f "${full_path_uboot_deb}" ]]; then
exit_with_error "Missing ${full_path_uboot_deb}"
fi
declare revision="${REVISION}"
declare cleanup_id="" TEMP_DIR="" declare cleanup_id="" TEMP_DIR=""
prepare_temp_dir_in_workdir_and_schedule_cleanup "uboot-write" cleanup_id TEMP_DIR # namerefs prepare_temp_dir_in_workdir_and_schedule_cleanup "uboot-write" cleanup_id TEMP_DIR # namerefs
if [[ -n $UBOOT_REPO_VERSION ]]; then run_host_command_logged dpkg -x "${full_path_uboot_deb}" "${TEMP_DIR}"/
revision=${UBOOT_REPO_VERSION}
run_host_command_logged dpkg -x "${DEB_STORAGE}/linux-u-boot-${BOARD}-${BRANCH}_${revision}_${ARCH}.deb" "${TEMP_DIR}"/
else
run_host_command_logged dpkg -x "${DEB_STORAGE}/${CHOSEN_UBOOT}_${revision}_${ARCH}.deb" "${TEMP_DIR}"/
fi
if [[ ! -f "${TEMP_DIR}/usr/lib/u-boot/platform_install.sh" ]]; then if [[ ! -f "${TEMP_DIR}/usr/lib/u-boot/platform_install.sh" ]]; then
exit_with_error "Missing ${TEMP_DIR}/usr/lib/u-boot/platform_install.sh" exit_with_error "Missing ${TEMP_DIR}/usr/lib/u-boot/platform_install.sh"
fi fi
display_alert "Sourcing u-boot install functions" "${CHOSEN_UBOOT}" "info" display_alert "Sourcing u-boot install functions" "${uboot_deb}" "info"
source "${TEMP_DIR}"/usr/lib/u-boot/platform_install.sh source "${TEMP_DIR}"/usr/lib/u-boot/platform_install.sh
set -e # make sure, we just included something that might disable it set -e # make sure, we just included something that might disable it
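write_uboot_to_loop_image() now takes the .deb path as an explicit second argument instead of reconstructing it from the CHOSEN_UBOOT / UBOOT_REPO_VERSION globals; the caller resolves it from the artifact map, as in create_image_from_sdcard_rootfs below:

    write_uboot_to_loop_image "${LOOP}" "${DEB_STORAGE}/${image_artifacts_debs["uboot"]}"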

View File

@@ -11,8 +11,8 @@ function create_image_from_sdcard_rootfs() {
add_cleanup_handler trap_handler_cleanup_destimg add_cleanup_handler trap_handler_cleanup_destimg
# stage: create file name # stage: create file name
# @TODO: rpardini: determine the image file name produced. a bit late in the game, since it uses VER which is from the kernel package. # @TODO: rpardini: determine the image file name produced. a bit late in the game, since it uses IMAGE_INSTALLED_KERNEL_VERSION which is from the kernel package.
local version="${VENDOR}_${REVISION}_${BOARD^}_${RELEASE}_${BRANCH}_${VER/-$LINUXFAMILY/}${DESKTOP_ENVIRONMENT:+_$DESKTOP_ENVIRONMENT}" local version="${VENDOR}_${REVISION}_${BOARD^}_${RELEASE}_${BRANCH}_${IMAGE_INSTALLED_KERNEL_VERSION/-$LINUXFAMILY/}${DESKTOP_ENVIRONMENT:+_$DESKTOP_ENVIRONMENT}"
[[ $BUILD_DESKTOP == yes ]] && version=${version}_desktop [[ $BUILD_DESKTOP == yes ]] && version=${version}_desktop
[[ $BUILD_MINIMAL == yes ]] && version=${version}_minimal [[ $BUILD_MINIMAL == yes ]] && version=${version}_minimal
[[ $ROOTFS_TYPE == nfs ]] && version=${version}_nfsboot [[ $ROOTFS_TYPE == nfs ]] && version=${version}_nfsboot
@@ -60,10 +60,10 @@ function create_image_from_sdcard_rootfs() {
display_alert "Free SD cache" "$(echo -e "$freespace" | awk -v mp="${SDCARD}" '$6==mp {print $5}')" "info" display_alert "Free SD cache" "$(echo -e "$freespace" | awk -v mp="${SDCARD}" '$6==mp {print $5}')" "info"
display_alert "Mount point" "$(echo -e "$freespace" | awk -v mp="${MOUNT}" '$6==mp {print $5}')" "info" display_alert "Mount point" "$(echo -e "$freespace" | awk -v mp="${MOUNT}" '$6==mp {print $5}')" "info"
# stage: write u-boot, unless the deb is not there, which would happen if BOOTCONFIG=none # stage: write u-boot, unless BOOTCONFIG=none
# exception: if we use the one from repository, install version which was downloaded from repo declare -g -A image_artifacts_debs
if [[ -f "${DEB_STORAGE}"/${CHOSEN_UBOOT}_${REVISION}_${ARCH}.deb ]] || [[ -n $UBOOT_REPO_VERSION ]]; then if [[ "${BOOTCONFIG}" != "none" ]]; then
write_uboot_to_loop_image "${LOOP}" write_uboot_to_loop_image "${LOOP}" "${DEB_STORAGE}/${image_artifacts_debs["uboot"]}"
fi fi
# fix wrong / permissions # fix wrong / permissions

View File

@@ -4,31 +4,8 @@ function main_default_build_packages() {
LOG_SECTION="cleaning_early_sources" do_with_logging general_cleaning "sources" LOG_SECTION="cleaning_early_sources" do_with_logging general_cleaning "sources"
fi fi
# Too many things being done. Allow doing only one thing. For core development, mostly.
# Also because "KERNEL_ONLY=yes" should really be spelled "PACKAGES_ONLY=yes"
local do_build_uboot="yes" do_build_kernel="yes" exit_after_kernel_build="no" exit_after_uboot_build="no" do_host_tools="yes"
if [[ "${JUST_UBOOT}" == "yes" && "${JUST_KERNEL}" == "yes" ]]; then
exit_with_error "User of build system" "can't make up his mind about JUST_KERNEL or JUST_UBOOT"
elif [[ "${JUST_UBOOT}" == "yes" ]]; then
display_alert "JUST_KERNEL set to yes" "Building only kernel and exiting after that" "debug"
do_build_uboot="yes"
do_host_tools="${INSTALL_HOST_TOOLS:-yes}" # rkbin, fips, etc.
exit_after_uboot_build="yes"
elif [[ "${JUST_KERNEL}" == "yes" ]]; then
display_alert "JUST_KERNEL set to yes" "Building only kernel and exiting after that" "debug"
do_build_uboot="no"
exit_after_kernel_build="yes"
do_host_tools="no"
fi
# ignoring updates helps when building all images - for internal purposes # ignoring updates helps when building all images - for internal purposes
if [[ "${IGNORE_UPDATES}" != "yes" ]]; then if [[ "${IGNORE_UPDATES}" != "yes" ]]; then
# Fetch and build the host tools (via extensions)
if [[ "${do_host_tools}" == "yes" ]]; then
LOG_SECTION="fetch_and_build_host_tools" do_with_logging fetch_and_build_host_tools
fi
LOG_SECTION="clean_deprecated_mountpoints" do_with_logging clean_deprecated_mountpoints LOG_SECTION="clean_deprecated_mountpoints" do_with_logging clean_deprecated_mountpoints
for cleaning_fragment in $(tr ',' ' ' <<< "${CLEAN_LEVEL}"); do for cleaning_fragment in $(tr ',' ' ' <<< "${CLEAN_LEVEL}"); do
@@ -38,41 +15,51 @@ function main_default_build_packages() {
done done
fi fi
if [[ "${do_build_uboot}" == "yes" ]]; then ### NEW / Artifact system
# Don't build u-boot at all if the BOOTCONFIG is 'none'.
if [[ "${BOOTCONFIG}" != "none" ]]; then # Determine which artifacts to build.
# @TODO: refactor this. we use it very often declare -a artifacts_to_build=()
# Compile u-boot if packed .deb does not exist or use the one from repository if [[ "${BOOTCONFIG}" != "none" ]]; then
if [[ ! -f "${DEB_STORAGE}"/${CHOSEN_UBOOT}_${REVISION}_${ARCH}.deb || "${UBOOT_IGNORE_DEB}" == "yes" ]]; then artifacts_to_build+=("uboot")
if [[ -n "${ATFSOURCE}" && "${ATFSOURCE}" != "none" && "${REPOSITORY_INSTALL}" != *u-boot* ]]; then fi
LOG_SECTION="compile_atf" do_with_logging compile_atf if [[ -n $KERNELSOURCE ]]; then
fi artifacts_to_build+=("kernel")
# @TODO: refactor this construct. we use it too many times. fi
if [[ "${REPOSITORY_INSTALL}" != *u-boot* || "${UBOOT_IGNORE_DEB}" == "yes" ]]; then
declare uboot_git_revision="not_determined_yet" if [[ "${INSTALL_ARMBIAN_FIRMWARE:-yes}" == "yes" ]]; then
LOG_SECTION="uboot_prepare_git" do_with_logging_unless_user_terminal uboot_prepare_git if [[ ${BOARD_FIRMWARE_INSTALL:-""} == "-full" ]]; then
LOG_SECTION="compile_uboot" do_with_logging compile_uboot artifacts_to_build+=("full_firmware")
fi else
fi artifacts_to_build+=("firmware")
fi
if [[ "${exit_after_uboot_build}" == "yes" ]]; then
display_alert "Exiting after u-boot build" "JUST_UBOOT=yes" "info"
exit 0
fi fi
fi fi
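For a board with BOOTCONFIG set, KERNELSOURCE set, and the default (small) firmware selection, the checks above yield (illustrative):

    artifacts_to_build=(uboot kernel firmware)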
# Compile kernel if packed .deb does not exist or use the one from repository display_alert "Artifacts to build:" "${artifacts_to_build[*]}" "warn"
if [[ "${do_build_kernel}" == "yes" ]]; then
if [[ ! -f ${DEB_STORAGE}/${CHOSEN_KERNEL}_${REVISION}_${ARCH}.deb || "${KERNEL_IGNORE_DEB}" == "yes" ]]; then # For each artifact, try to obtain it from the local cache or remote cache, or build it from sources.
if [[ -n $KERNELSOURCE ]] && [[ "${REPOSITORY_INSTALL}" != *kernel* ]]; then # Store info about all artifacts in the process, for later use (e.g. during package installation in the distro-agnostic stage).
compile_kernel # This handles its own logging sections. declare -g -a image_artifacts_all=()
fi declare -g -A image_artifacts_packages=()
fi declare -g -A image_artifacts_debs=()
if [[ "${exit_after_kernel_build}" == "yes" ]]; then declare one_artifact one_artifact_package
display_alert "Only building kernel and exiting" "JUST_KERNEL=yes" "debug" for one_artifact in "${artifacts_to_build[@]}"; do
exit 0 declare -A artifact_map_packages=()
fi declare -A artifact_map_debs=()
fi
WHAT="${one_artifact}" build_artifact_for_image
# store info about this artifact's debs and packages
for one_artifact_package in "${!artifact_map_packages[@]}"; do
image_artifacts_all+=("${one_artifact_package}")
image_artifacts_packages["${one_artifact_package}"]="${artifact_map_packages[${one_artifact_package}]}"
image_artifacts_debs["${one_artifact_package}"]="${artifact_map_debs[${one_artifact_package}]}"
done
done
debug_dict image_artifacts_packages
debug_dict image_artifacts_debs
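After the loop, both dicts are keyed by package name; hypothetical contents for an arm64 build (board/family/version strings are illustrative only):

    image_artifacts_packages["uboot"]="linux-u-boot-odroidn2-edge"
    image_artifacts_debs["uboot"]="linux-u-boot-odroidn2-edge_23.02.0-trunk_arm64.deb"
    image_artifacts_debs["linux-image"]="linux-image-edge-meson64_23.02.0-trunk_arm64.deb"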
### OLD / Legacy / Needs conversion to new artifact system @TODO
# Compile armbian-config if packed .deb does not exist or use the one from repository # Compile armbian-config if packed .deb does not exist or use the one from repository
if [[ ! -f ${DEB_STORAGE}/armbian-config_${REVISION}_all.deb ]]; then if [[ ! -f ${DEB_STORAGE}/armbian-config_${REVISION}_all.deb ]]; then
@@ -95,13 +82,6 @@ function main_default_build_packages() {
fi fi
fi fi
# Compile armbian-firmware if packed .deb does not exist or use the one from repository
if ! ls "${DEB_STORAGE}/armbian-firmware_${REVISION}_all.deb" 1> /dev/null 2>&1 || ! ls "${DEB_STORAGE}/armbian-firmware-full_${REVISION}_all.deb" 1> /dev/null 2>&1; then
if [[ "${REPOSITORY_INSTALL}" != *armbian-firmware* ]]; then
compile_firmware_light_and_possibly_full # this has its own logging sections
fi
fi
overlayfs_wrapper "cleanup" overlayfs_wrapper "cleanup"
reset_uid_owner "${DEB_STORAGE}" reset_uid_owner "${DEB_STORAGE}"

View File

@@ -197,7 +197,6 @@ function config_post_main() {
declare -g CHOSEN_KERNEL=linux-image-${BRANCH}-${LINUXFAMILY} declare -g CHOSEN_KERNEL=linux-image-${BRANCH}-${LINUXFAMILY}
declare -g CHOSEN_ROOTFS=${BSP_CLI_PACKAGE_NAME} declare -g CHOSEN_ROOTFS=${BSP_CLI_PACKAGE_NAME}
declare -g CHOSEN_DESKTOP=armbian-${RELEASE}-desktop-${DESKTOP_ENVIRONMENT} declare -g CHOSEN_DESKTOP=armbian-${RELEASE}-desktop-${DESKTOP_ENVIRONMENT}
declare -g CHOSEN_KSRC=linux-source-${BRANCH}-${LINUXFAMILY}
declare -g CHOSEN_KERNEL_WITH_ARCH=${CHOSEN_KERNEL}-${ARCH} # Only for reporting purposes. declare -g CHOSEN_KERNEL_WITH_ARCH=${CHOSEN_KERNEL}-${ARCH} # Only for reporting purposes.
# So for kernel full cached rebuilds. # So for kernel full cached rebuilds.
@@ -216,7 +215,6 @@ function config_post_main() {
fi fi
# assume the worst, and all surprises will be happy ones # assume the worst, and all surprises will be happy ones
declare -g KERNEL_HAS_WORKING_HEADERS="no" declare -g KERNEL_HAS_WORKING_HEADERS="no"
declare -g KERNEL_HAS_WORKING_HEADERS_FULL_SOURCE="no"
# Parse/validate the major, bail if no match # Parse/validate the major, bail if no match
declare -i KERNEL_MAJOR_MINOR_MAJOR=${KERNEL_MAJOR_MINOR%%.*} declare -i KERNEL_MAJOR_MINOR_MAJOR=${KERNEL_MAJOR_MINOR%%.*}
@@ -226,8 +224,7 @@ function config_post_main() {
declare -g KERNEL_HAS_WORKING_HEADERS="yes" declare -g KERNEL_HAS_WORKING_HEADERS="yes"
declare -g KERNEL_MAJOR="${KERNEL_MAJOR_MINOR_MAJOR}" declare -g KERNEL_MAJOR="${KERNEL_MAJOR_MINOR_MAJOR}"
elif [[ "${KERNEL_MAJOR_MINOR_MAJOR}" -eq 4 && "${KERNEL_MAJOR_MINOR_MINOR}" -ge 19 ]]; then elif [[ "${KERNEL_MAJOR_MINOR_MAJOR}" -eq 4 && "${KERNEL_MAJOR_MINOR_MINOR}" -ge 19 ]]; then
declare -g KERNEL_MAJOR=4 # We support 4.19+ (less than 5.0) is supported, and headers via full source declare -g KERNEL_MAJOR=4 # 4.19+ (but less than 5.0) is supported
declare -g KERNEL_HAS_WORKING_HEADERS_FULL_SOURCE="no" # full-source based headers. experimental. set to yes here to enable
elif [[ "${KERNEL_MAJOR_MINOR_MAJOR}" -eq 4 && "${KERNEL_MAJOR_MINOR_MINOR}" -ge 4 ]]; then elif [[ "${KERNEL_MAJOR_MINOR_MAJOR}" -eq 4 && "${KERNEL_MAJOR_MINOR_MINOR}" -ge 4 ]]; then
declare -g KERNEL_MAJOR=4 # We support 4.x from 4.4 declare -g KERNEL_MAJOR=4 # We support 4.x from 4.4
else else

View File

@@ -3,10 +3,8 @@
function build_rootfs_and_image() { function build_rootfs_and_image() {
display_alert "Checking for rootfs cache" "$(echo "${BRANCH} ${BOARD} ${RELEASE} ${DESKTOP_APPGROUPS_SELECTED} ${DESKTOP_ENVIRONMENT} ${BUILD_MINIMAL}" | tr -s " ")" "info" display_alert "Checking for rootfs cache" "$(echo "${BRANCH} ${BOARD} ${RELEASE} ${DESKTOP_APPGROUPS_SELECTED} ${DESKTOP_ENVIRONMENT} ${BUILD_MINIMAL}" | tr -s " ")" "info"
LOG_SECTION="prepare_rootfs_build_params_and_trap" do_with_logging prepare_rootfs_build_params_and_trap
# get a basic rootfs, either from cache or from scratch # get a basic rootfs, either from cache or from scratch
LOG_SECTION="build_rootfs_only" do_with_logging build_rootfs_only # only occurrence of this get_or_create_rootfs_cache_chroot_sdcard # only occurrence of this; has its own logging sections
# stage: with a basic rootfs available, we mount the chroot and work on it # stage: with a basic rootfs available, we mount the chroot and work on it
mount_chroot "${SDCARD}" mount_chroot "${SDCARD}"

View File

@@ -1,6 +1,6 @@
#!/usr/bin/env bash #!/usr/bin/env bash
apt_purge_unneeded_packages() { function apt_purge_unneeded_packages() {
# remove packages that are no longer needed. rootfs cache + uninstall might have leftovers. # remove packages that are no longer needed. rootfs cache + uninstall might have leftovers.
display_alert "No longer needed packages" "purge" "info" display_alert "No longer needed packages" "purge" "info"
chroot_sdcard_apt_get autoremove chroot_sdcard_apt_get autoremove
@@ -8,9 +8,7 @@ apt_purge_unneeded_packages() {
# this is called: # this is called:
# 1) install_deb_chroot "${DEB_STORAGE}/somethingsomething.deb" (yes, it's always ${DEB_STORAGE}) # 1) install_deb_chroot "${DEB_STORAGE}/somethingsomething.deb" (yes, it's always ${DEB_STORAGE})
# 2) install_deb_chroot "linux-u-boot-${BOARD}-${BRANCH}" "remote" (normal invocation, install from repo) function install_deb_chroot() {
# 3) install_deb_chroot "linux-u-boot-${BOARD}-${BRANCH}" "remote" "yes" (install from repo, then also copy the WHOLE CACHE back to DEB_STORAGE)
install_deb_chroot() {
local package="$1" local package="$1"
local variant="$2" local variant="$2"
local transfer="$3" local transfer="$3"

View File

@@ -1,25 +1,6 @@
#!/usr/bin/env bash #!/usr/bin/env bash
# This is already run under logging, don't use do_with_logging under here. # called by artifact-rootfs::artifact_rootfs_prepare_version()
function build_rootfs_only() {
# validate that tmpfs_estimated_size is set and higher than zero, or exit_with_error
[[ -z ${tmpfs_estimated_size} ]] && exit_with_error "tmpfs_estimated_size is not set"
[[ ${tmpfs_estimated_size} -le 0 ]] && exit_with_error "tmpfs_estimated_size is not higher than zero"
# stage: prepare basic rootfs: unpack cache or create from scratch
get_or_create_rootfs_cache_chroot_sdcard # only occurrence of this
# obtain the size, in MiB, of "${SDCARD}" at this point.
declare -i rootfs_size_mib
rootfs_size_mib=$(du -sm "${SDCARD}" | awk '{print $1}')
display_alert "Actual rootfs size" "${rootfs_size_mib}MiB after basic/cache" ""
# warn if rootfs_size_mib is higher than the tmpfs_estimated_size
if [[ ${rootfs_size_mib} -gt ${tmpfs_estimated_size} ]]; then
display_alert "Rootfs actual size is larger than estimated tmpfs size after basic/cache" "${rootfs_size_mib}MiB > ${tmpfs_estimated_size}MiB" "wrn"
fi
}
function calculate_rootfs_cache_id() { function calculate_rootfs_cache_id() {
# Validate that AGGREGATED_ROOTFS_HASH is set # Validate that AGGREGATED_ROOTFS_HASH is set
[[ -z "${AGGREGATED_ROOTFS_HASH}" ]] && exit_with_error "AGGREGATED_ROOTFS_HASH is not set at calculate_rootfs_cache_id()" [[ -z "${AGGREGATED_ROOTFS_HASH}" ]] && exit_with_error "AGGREGATED_ROOTFS_HASH is not set at calculate_rootfs_cache_id()"
@@ -52,170 +33,82 @@ function calculate_rootfs_cache_id() {
display_alert "calculate_rootfs_cache_id: done." "rootfs_cache_id: '${rootfs_cache_id}'" "debug" display_alert "calculate_rootfs_cache_id: done." "rootfs_cache_id: '${rootfs_cache_id}'" "debug"
} }
# this gets from cache or produces a basic new rootfs, ready, but not mounted, at "$SDCARD" # called by artifact-rootfs::artifact_rootfs_build_from_sources()
function get_or_create_rootfs_cache_chroot_sdcard() {
if [[ "${ROOT_FS_CREATE_ONLY}" == yes ]]; then
exit_with_error "Using deprecated ROOT_FS_CREATE_ONLY=yes, that is not longer supported. use 'rootfs' CLI command."
fi
# validate "${SDCARD}" is set. it does not exist, yet...
if [[ -z "${SDCARD}" ]]; then
exit_with_error "SDCARD is not set at get_or_create_rootfs_cache_chroot_sdcard()"
fi
[[ ! -d "${SDCARD:?}" ]] && exit_with_error "create_new_rootfs_cache: ${SDCARD} is not a directory"
# this was moved from configuration to this stage, that way configuration can be offline
# if ROOTFSCACHE_VERSION is not provided, check which version is current in the GitHub cache storage.
# - ROOTFSCACHE_VERSION is provided by external "build rootfs GHA script" in armbian/scripts
if [[ -z "${ROOTFSCACHE_VERSION}" ]]; then
if [[ "${SKIP_ARMBIAN_REPO}" != "yes" ]]; then
display_alert "ROOTFSCACHE_VERSION not set, getting remotely" "Github API and armbian/mirror " "debug"
# rpardini: why 2 calls?
# this uses `jq` hostdep
ROOTFSCACHE_VERSION=$(curl https://api.github.com/repos/armbian/cache/releases/latest -s --fail | jq .tag_name -r || true)
# anonymous API access is very limited which is why we need a fallback
# rpardini: yeah but this is 404'ing
#ROOTFSCACHE_VERSION=${ROOTFSCACHE_VERSION:-$(curl -L --silent https://cache.armbian.com/rootfs/latest --fail)}
display_alert "Remotely-obtained ROOTFSCACHE_VERSION" "${ROOTFSCACHE_VERSION}" "debug"
else
ROOTFSCACHE_VERSION=668 # The neighbour of the beast.
display_alert "Armbian mirror skipped, using fictional rootfs cache version" "${ROOTFSCACHE_VERSION}" "debug"
fi
else
display_alert "ROOTFSCACHE_VERSION is set externally" "${ROOTFSCACHE_VERSION}" "warn"
fi
# Make ROOTFSCACHE_VERSION global at this point, in case it was not.
declare -g ROOTFSCACHE_VERSION="${ROOTFSCACHE_VERSION}"
display_alert "ROOTFSCACHE_VERSION found online or preset" "${ROOTFSCACHE_VERSION}" "debug"
calculate_rootfs_cache_id # this sets packages_hash and cache_type
# seek last cache, proceed to previous otherwise build it
local -a cache_list=()
get_rootfs_cache_list_into_array_variable # sets cache_list
# Show the number of items in the cache_list array
display_alert "Found possible rootfs caches: " "${#cache_list[@]}" "debug"
display_alert "ROOTFSCACHE_VERSION after getting cache list" "${ROOTFSCACHE_VERSION}" "debug"
declare possible_cached_version
for possible_cached_version in "${cache_list[@]}"; do
ROOTFSCACHE_VERSION="${possible_cached_version}" # global var
local cache_name="${ARCH}-${RELEASE}-${cache_type}-${packages_hash}-${ROOTFSCACHE_VERSION}.tar.zst"
local cache_fname="${SRC}/cache/rootfs/${cache_name}"
display_alert "Checking cache" "$cache_name" "info"
# if an aria2 control file exists, the download didn't succeed
if [[ ! -f $cache_fname || -f ${cache_fname}.aria2 ]]; then
if [[ "${SKIP_ARMBIAN_REPO}" != "yes" ]]; then
display_alert "Downloading from servers" # download_rootfs_cache() requires ROOTFSCACHE_VERSION
download_and_verify "rootfs" "$cache_name" || continue
fi
fi
if [[ -f $cache_fname && ! -f ${cache_fname}.aria2 ]]; then
display_alert "Cache found!" "$cache_name" "info"
break
fi
done
display_alert "ROOTFSCACHE_VERSION after looping" "${ROOTFSCACHE_VERSION}" "debug"
# if cache found, extract it
# if an aria2 control file exists, the download didn't succeed, so skip it
# we can ignore existing cache with IGNORE_EXISTING_ROOTFS_CACHE=yes
if [[ "${IGNORE_EXISTING_ROOTFS_CACHE}" != "yes" && -f "${cache_fname}" && ! -f "${cache_fname}.aria2" ]]; then
# validate sanity
[[ "x${SDCARD}x" == "xx" ]] && exit_with_error "get_or_create_rootfs_cache_chroot_sdcard: extract: SDCARD: ${SDCARD} is not set"
local date_diff=$((($(date +%s) - $(stat -c %Y "${cache_fname}")) / 86400))
display_alert "Extracting $cache_name" "$date_diff days old" "info"
pv -p -b -r -c -N "$(logging_echo_prefix_for_pv "extract_rootfs") $cache_name" "$cache_fname" | zstdmt -dc | tar xp --xattrs -C "${SDCARD}"/
declare -a pv_tar_zstdmt_pipe_status=("${PIPESTATUS[@]}") # capture the pipe_status array from PIPESTATUS
declare one_pipe_status
for one_pipe_status in "${pv_tar_zstdmt_pipe_status[@]}"; do
if [[ "$one_pipe_status" != "0" ]]; then
exit_with_error "get_or_create_rootfs_cache_chroot_sdcard: extract: ${cache_fname} failed (${pv_tar_zstdmt_pipe_status[*]}) - corrupt cache?"
fi
done
wait_for_disk_sync "after restoring rootfs cache"
run_host_command_logged rm -v "${SDCARD}"/etc/resolv.conf
run_host_command_logged echo "nameserver ${NAMESERVER}" ">" "${SDCARD}"/etc/resolv.conf
create_sources_list "${RELEASE}" "${SDCARD}/"
else
display_alert "Creating rootfs" "cache miss" "info"
create_new_rootfs_cache
fi
return 0
}
function create_new_rootfs_cache() { function create_new_rootfs_cache() {
: "${artifact_final_file:?artifact_final_file is not set}"
: "${artifact_final_file_basename:?artifact_final_file_basename is not set}"
[[ ! -d "${SDCARD:?}" ]] && exit_with_error "create_new_rootfs_cache: ${SDCARD} is not a directory" [[ ! -d "${SDCARD:?}" ]] && exit_with_error "create_new_rootfs_cache: ${SDCARD} is not a directory"
# validate cache_type is set # validate cache_type is set
[[ -n "${cache_type}" ]] || exit_with_error "create_new_rootfs_cache: cache_type is not set" [[ -n "${cache_type}" ]] || exit_with_error "create_new_rootfs_cache: cache_type is not set"
# validate packages_hash is set # validate packages_hash is set
[[ -n "${packages_hash}" ]] || exit_with_error "create_new_rootfs_cache: packages_hash is not set" [[ -n "${packages_hash}" ]] || exit_with_error "create_new_rootfs_cache: packages_hash is not set"
# This var ROOT_FS_CREATE_VERSION is only used here, afterwards it's all cache_name and cache_fname # compatibility with legacy code...
declare ROOT_FS_CREATE_VERSION="${ROOT_FS_CREATE_VERSION:-"$(date --utc +"%Y%m%d")"}" declare -g cache_name="${artifact_final_file_basename}"
declare cache_name=${ARCH}-${RELEASE}-${cache_type}-${packages_hash}-${ROOT_FS_CREATE_VERSION}.tar.zst declare -g cache_fname=${artifact_final_file}
declare cache_fname=${SRC}/cache/rootfs/${cache_name}
display_alert "Creating new rootfs cache for" "'${RELEASE}' '${ARCH}' '${ROOT_FS_CREATE_VERSION}'" "info" display_alert "Creating new rootfs cache" "'${cache_name}'" "info"
create_new_rootfs_cache_via_debootstrap # in rootfs-create.sh create_new_rootfs_cache_via_debootstrap # in rootfs-create.sh
create_new_rootfs_cache_tarball # in rootfs-create.sh create_new_rootfs_cache_tarball # in rootfs-create.sh
# needed for backend to keep current only @TODO: still needed?
echo "$cache_fname" > "${cache_fname}.current"
# define a readonly global with the name of the cache
declare -g -r BUILT_ROOTFS_CACHE_NAME="${cache_name}"
declare -g -r BUILT_ROOTFS_CACHE_FILE="${cache_fname}"
return 0 # protect against possible future short-circuiting above this return 0 # protect against possible future short-circuiting above this
} }
# return a list of versions of all available caches, remote and local, into outer-scope "cache_list" variable # this builds/gets cached rootfs artifact, extracts it to "${SDCARD}"
function get_rootfs_cache_list_into_array_variable() { function get_or_create_rootfs_cache_chroot_sdcard() {
# If global vars are empty, exit_with_error if [[ "${ROOT_FS_CREATE_ONLY}" == yes ]]; then
[[ "x${ARCH}x" == "xx" ]] && exit_with_error "ARCH is not set" exit_with_error "Using deprecated ROOT_FS_CREATE_ONLY=yes, that is not longer supported. use 'rootfs' CLI command."
[[ "x${RELEASE}x" == "xx" ]] && exit_with_error "RELEASE is not set" fi
[[ "x${packages_hash}x" == "xx" ]] && exit_with_error "packages_hash is not set"
[[ "x${cache_type}x" == "xx" ]] && exit_with_error "cache_type is not set"
# this uses `jq` hostdep # build the rootfs artifact; capture the filename...
declare -g artifact_final_file artifact_version artifact_final_file artifact_file_relative
WHAT="rootfs" build_artifact_for_image # has its own logging sections, for now
declare -g cache_fname="${artifact_final_file}"
declare -a local_cache_list=() # outer scope variable # Setup the cleanup handler, possibly "again", since the artifact already set it up and consumed it, if cache missed.
readarray -t local_cache_list <<< "$({ LOG_SECTION="prepare_rootfs_build_params_and_trap" do_with_logging prepare_rootfs_build_params_and_trap
# Don't even try remote if we're told to skip.
if [[ "${SKIP_ARMBIAN_REPO}" != "yes" ]]; then LOG_SECTION="extract_rootfs_artifact" do_with_logging extract_rootfs_artifact
curl --silent --fail -L "https://api.github.com/repos/armbian/cache/releases?per_page=3" | jq -r '.[].tag_name' || return 0
curl --silent --fail -L https://cache.armbian.com/rootfs/list }
function extract_rootfs_artifact() {
: "${artifact_file_relative:?artifact_file_relative is not set}"
: "${artifact_final_file:?artifact_final_file is not set}"
# compatibility with legacy code...
declare cache_name="${artifact_file_relative}"
declare cache_fname=${artifact_final_file}
if [[ ! -f "${cache_fname}" ]]; then
exit_with_error "get_or_create_rootfs_cache_chroot_sdcard: extract: ${cache_fname} is not a file"
fi
# validate sanity
[[ "x${SDCARD}x" == "xx" ]] && exit_with_error "get_or_create_rootfs_cache_chroot_sdcard: extract: SDCARD: ${SDCARD} is not set"
[[ ! -d "${SDCARD}" ]] && exit_with_error "get_or_create_rootfs_cache_chroot_sdcard: ${SDCARD} is not a directory"
# @TODO: validate SDCARD is empty; if not, the artifact build "leaked" a cleanup
local date_diff=$((($(date +%s) - $(stat -c %Y "${cache_fname}")) / 86400))
display_alert "Extracting ${artifact_version}" "${date_diff} days old" "info"
pv -p -b -r -c -N "$(logging_echo_prefix_for_pv "extract_rootfs") ${artifact_version}" "${cache_fname}" | zstdmt -dc | tar xp --xattrs -C "${SDCARD}"/
declare -a pv_tar_zstdmt_pipe_status=("${PIPESTATUS[@]}") # capture the pipe_status array from PIPESTATUS
declare one_pipe_status
for one_pipe_status in "${pv_tar_zstdmt_pipe_status[@]}"; do
if [[ "$one_pipe_status" != "0" ]]; then
exit_with_error "get_or_create_rootfs_cache_chroot_sdcard: extract: ${cache_fname} failed (${pv_tar_zstdmt_pipe_status[*]}) - corrupt cache?"
fi fi
find "${SRC}"/cache/rootfs/ -mtime -7 -name "${ARCH}-${RELEASE}-${cache_type}-${packages_hash}-*.tar.zst" |
sed -e 's#^.*/##' |
sed -e 's#\..*$##' |
awk -F'-' '{print $5}'
} | sort | uniq | sort -r)"
# Show the contents
display_alert "Available cache versions number" "${#local_cache_list[*]}" "debug"
# Loop each and show
for cache_version in "${local_cache_list[@]}"; do
display_alert "One available cache version" "${cache_version}" "debug"
done done
# return the list to outer scope wait_for_disk_sync "after restoring rootfs cache"
cache_list=("${local_cache_list[@]}")
run_host_command_logged rm -v "${SDCARD}"/etc/resolv.conf
run_host_command_logged echo "nameserver ${NAMESERVER}" ">" "${SDCARD}"/etc/resolv.conf
create_sources_list "${RELEASE}" "${SDCARD}/"
return 0
} }
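A pipeline's exit status reflects only its last stage, so extract_rootfs_artifact snapshots PIPESTATUS into its own array before any other command can reset it. The pattern, reduced to a standalone sketch (the tarball path is illustrative):

    pv -p -b -r "rootfs.tar.zst" | zstdmt -dc | tar xp --xattrs -C "${SDCARD}"/
    declare -a pipe_status=("${PIPESTATUS[@]}") # copy immediately; the very next command overwrites PIPESTATUS
    declare one_status
    for one_status in "${pipe_status[@]}"; do
        [[ "${one_status}" == "0" ]] || exit_with_error "pipeline stage failed (${pipe_status[*]}) - corrupt cache?"
    done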

View File

@@ -46,7 +46,7 @@ function install_distribution_agnostic() {
# /usr/share/initramfs-tools/hooks/dropbear will automatically add 'id_ecdsa.pub' to authorized_keys file # /usr/share/initramfs-tools/hooks/dropbear will automatically add 'id_ecdsa.pub' to authorized_keys file
# during mkinitramfs of update-initramfs # during mkinitramfs of update-initramfs
#cat "${SDCARD}"/etc/dropbear-initramfs/id_ecdsa.pub > "${SDCARD}"/etc/dropbear-initramfs/authorized_keys #cat "${SDCARD}"/etc/dropbear-initramfs/id_ecdsa.pub > "${SDCARD}"/etc/dropbear-initramfs/authorized_keys
CRYPTROOT_SSH_UNLOCK_KEY_NAME="${VENDOR}_${REVISION}_${BOARD^}_${RELEASE}_${BRANCH}_${VER/-$LINUXFAMILY/}_${DESKTOP_ENVIRONMENT}".key CRYPTROOT_SSH_UNLOCK_KEY_NAME="${VENDOR}_${REVISION}_${BOARD^}_${RELEASE}_${BRANCH}_${DESKTOP_ENVIRONMENT}".key
# copy dropbear ssh key to image output dir for convenience # copy dropbear ssh key to image output dir for convenience
cp "${SDCARD}"/etc/dropbear-initramfs/id_ecdsa "${DEST}/images/${CRYPTROOT_SSH_UNLOCK_KEY_NAME}" cp "${SDCARD}"/etc/dropbear-initramfs/id_ecdsa "${DEST}/images/${CRYPTROOT_SSH_UNLOCK_KEY_NAME}"
display_alert "SSH private key for dropbear (initramfs) has been copied to:" \ display_alert "SSH private key for dropbear (initramfs) has been copied to:" \
@@ -299,57 +299,55 @@ function install_distribution_agnostic() {
# install u-boot # install u-boot
# @TODO: add install_bootloader() extension method, refactor into u-boot extension # @TODO: add install_bootloader() extension method, refactor into u-boot extension
[[ "${BOOTCONFIG}" != "none" ]] && { declare -g image_artifacts_packages image_artifacts_debs
if [[ "${REPOSITORY_INSTALL}" != *u-boot* ]]; then debug_dict image_artifacts_packages
UBOOT_VER=$(dpkg --info "${DEB_STORAGE}/${CHOSEN_UBOOT}_${REVISION}_${ARCH}.deb" | grep Descr | awk '{print $(NF)}') debug_dict image_artifacts_debs
install_deb_chroot "${DEB_STORAGE}/${CHOSEN_UBOOT}_${REVISION}_${ARCH}.deb" if [[ "${BOOTCONFIG}" != "none" ]]; then
else install_deb_chroot "${DEB_STORAGE}/${image_artifacts_debs["uboot"]}"
install_deb_chroot "linux-u-boot-${BOARD}-${BRANCH}" "remote" "yes" # @TODO: rpardini: this is completely different! "remote" "yes" fi
UBOOT_REPO_VERSION=$(dpkg-deb -f "${SDCARD}/var/cache/apt/archives/linux-u-boot-${BOARD}-${BRANCH}*_${ARCH}.deb" Version)
fi
}
call_extension_method "pre_install_kernel_debs" <<- 'PRE_INSTALL_KERNEL_DEBS' call_extension_method "pre_install_kernel_debs" <<- 'PRE_INSTALL_KERNEL_DEBS'
*called before installing the Armbian-built kernel deb packages* *called before installing the Armbian-built kernel deb packages*
It is not too late to `unset KERNELSOURCE` here and avoid kernel install. It is not too late to `unset KERNELSOURCE` here and avoid kernel install.
PRE_INSTALL_KERNEL_DEBS PRE_INSTALL_KERNEL_DEBS
# default VER, will be parsed from the kernel version in the installed deb package. # default IMAGE_INSTALLED_KERNEL_VERSION, will be parsed from the kernel version in the installed deb package.
VER="linux" IMAGE_INSTALLED_KERNEL_VERSION="linux"
# install kernel # install kernel: image/dtb/headers
[[ -n $KERNELSOURCE ]] && { if [[ -n $KERNELSOURCE ]]; then
if [[ "${REPOSITORY_INSTALL}" != *kernel* ]]; then install_deb_chroot "${DEB_STORAGE}/${image_artifacts_debs["linux-image"]}"
VER=$(dpkg --info "${DEB_STORAGE}/${CHOSEN_KERNEL}_${REVISION}_${ARCH}.deb" | grep "^ Source:" | sed -e 's/ Source: linux-//')
display_alert "Parsed kernel version from local package" "${VER}" "debug"
install_deb_chroot "${DEB_STORAGE}/${CHOSEN_KERNEL}_${REVISION}_${ARCH}.deb" if [[ "${KERNEL_BUILD_DTBS:-"yes"}" == "yes" ]]; then
if [[ -f ${DEB_STORAGE}/${CHOSEN_KERNEL/image/dtb}_${REVISION}_${ARCH}.deb ]]; then install_deb_chroot "${DEB_STORAGE}/${image_artifacts_debs["linux-dtb"]}"
install_deb_chroot "${DEB_STORAGE}/${CHOSEN_KERNEL/image/dtb}_${REVISION}_${ARCH}.deb"
fi
if [[ $INSTALL_HEADERS == yes ]]; then
install_deb_chroot "${DEB_STORAGE}/${CHOSEN_KERNEL/image/headers}_${REVISION}_${ARCH}.deb"
fi
else
install_deb_chroot "linux-image-${BRANCH}-${LINUXFAMILY}" "remote" # @TODO: rpardini: again a different one, without "yes" this time
VER=$(dpkg-deb -f "${SDCARD}"/var/cache/apt/archives/linux-image-${BRANCH}-${LINUXFAMILY}*_${ARCH}.deb Source)
VER="${VER/-$LINUXFAMILY/}"
VER="${VER/linux-/}"
display_alert "Parsed kernel version from remote package" "${VER}" "debug"
if [[ "${ARCH}" != "amd64" && "${LINUXFAMILY}" != "media" ]]; then # amd64 does not have dtb package, see packages/armbian/builddeb:355
install_deb_chroot "linux-dtb-${BRANCH}-${LINUXFAMILY}" "remote" # @TODO: rpardini: again a different one, without "yes" this time
fi
[[ $INSTALL_HEADERS == yes ]] && install_deb_chroot "linux-headers-${BRANCH}-${LINUXFAMILY}" "remote" # @TODO: rpardini: again a different one, without "yes" this time
fi fi
# Eh, short circuit above. Beware.
} if [[ "${KERNEL_HAS_WORKING_HEADERS:-"no"}" == "yes" ]]; then
if [[ $INSTALL_HEADERS == yes ]]; then # @TODO remove? might be a good idea to always install headers.
install_deb_chroot "${DEB_STORAGE}/${image_artifacts_debs["linux-headers"]}"
fi
fi
# Determine "IMAGE_INSTALLED_KERNEL_VERSION" for compatiblity with legacy update-initramfs code. @TODO get rid of this one day
IMAGE_INSTALLED_KERNEL_VERSION=$(dpkg --info "${DEB_STORAGE}/${image_artifacts_debs["linux-image"]}" | grep "^ Source:" | sed -e 's/ Source: linux-//')
display_alert "Parsed kernel version from local package" "${IMAGE_INSTALLED_KERNEL_VERSION}" "debug"
fi
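The parse leans on the deb's Source control field; a hedged round-trip (package and version strings hypothetical):

    $ dpkg --info linux-image-edge-meson64_23.02.0-trunk_arm64.deb | grep '^ Source:'
     Source: linux-6.1.63-meson64
    # sed strips the leading 'linux-' -> IMAGE_INSTALLED_KERNEL_VERSION='6.1.63-meson64'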
call_extension_method "post_install_kernel_debs" <<- 'POST_INSTALL_KERNEL_DEBS' call_extension_method "post_install_kernel_debs" <<- 'POST_INSTALL_KERNEL_DEBS'
*allow config to do more with the installed kernel/headers* *allow config to do more with the installed kernel/headers*
Called after packages, u-boot, kernel and headers installed in the chroot, but before the BSP is installed. Called after packages, u-boot, kernel and headers installed in the chroot, but before the BSP is installed.
If `KERNELSOURCE` is (still?) unset after this, Armbian-built firmware will not be installed.
POST_INSTALL_KERNEL_DEBS POST_INSTALL_KERNEL_DEBS
# install armbian-firmware by default. Set BOARD_FIRMWARE_INSTALL="-full" to install full firmware variant
if [[ "${INSTALL_ARMBIAN_FIRMWARE:-yes}" == "yes" ]]; then
if [[ ${BOARD_FIRMWARE_INSTALL:-""} == "-full" ]]; then
install_deb_chroot "${DEB_STORAGE}/${image_artifacts_debs["armbian-firmware-full"]}"
else
install_deb_chroot "${DEB_STORAGE}/${image_artifacts_debs["armbian-firmware"]}"
fi
fi
# install board support packages # install board support packages
if [[ "${REPOSITORY_INSTALL}" != *bsp* ]]; then if [[ "${REPOSITORY_INSTALL}" != *bsp* ]]; then
install_deb_chroot "${DEB_STORAGE}/${BSP_CLI_PACKAGE_FULLNAME}.deb" install_deb_chroot "${DEB_STORAGE}/${BSP_CLI_PACKAGE_FULLNAME}.deb"
@@ -373,17 +371,6 @@ function install_distribution_agnostic() {
fi fi
fi fi
# install armbian-firmware by default. Set BOARD_FIRMWARE_INSTALL="-full" to install full firmware variant
[[ "${INSTALL_ARMBIAN_FIRMWARE:-yes}" == "yes" ]] && {
if [[ "${REPOSITORY_INSTALL}" != *armbian-firmware* ]]; then
if [[ -f ${DEB_STORAGE}/armbian-firmware_${REVISION}_all.deb ]]; then
install_deb_chroot "${DEB_STORAGE}/armbian-firmware${BOARD_FIRMWARE_INSTALL:-""}_${REVISION}_all.deb"
fi
else
install_deb_chroot "armbian-firmware${BOARD_FIRMWARE_INSTALL:-""}" "remote"
fi
}
# install armbian-config # install armbian-config
if [[ "${PACKAGE_LIST_RM}" != *armbian-config* ]]; then if [[ "${PACKAGE_LIST_RM}" != *armbian-config* ]]; then
if [[ "${REPOSITORY_INSTALL}" != *armbian-config* ]]; then if [[ "${REPOSITORY_INSTALL}" != *armbian-config* ]]; then
@@ -419,11 +406,6 @@ function install_distribution_agnostic() {
fi fi
fi fi
# install kernel sources
if [[ -f ${DEB_STORAGE}/${CHOSEN_KSRC}_${REVISION}_all.deb && $INSTALL_KSRC == yes ]]; then
install_deb_chroot "${DEB_STORAGE}/${CHOSEN_KSRC}_${REVISION}_all.deb"
fi
# install wireguard tools # install wireguard tools
if [[ $WIREGUARD == yes ]]; then if [[ $WIREGUARD == yes ]]; then
install_deb_chroot "wireguard-tools" "remote" install_deb_chroot "wireguard-tools" "remote"

View File

@@ -1,4 +1,5 @@
## Prepare/cleanup pair @TODO needs to be split between SDCARD and MOUNT, no sense doing both in rootfs trap anymore ## Prepare/cleanup pair @TODO needs to be split between SDCARD and MOUNT, no sense doing both in rootfs trap anymore
# called by artifact-rootfs::artifact_rootfs_prepare_version()
function prepare_rootfs_build_params_and_trap() { function prepare_rootfs_build_params_and_trap() {
# add handler to cleanup when done or if something fails or is interrupted. # add handler to cleanup when done or if something fails or is interrupted.
add_cleanup_handler trap_handler_cleanup_rootfs_and_image add_cleanup_handler trap_handler_cleanup_rootfs_and_image
@@ -7,13 +8,6 @@ function prepare_rootfs_build_params_and_trap() {
run_host_command_logged rm -rfv "${SDCARD}" "${MOUNT}" run_host_command_logged rm -rfv "${SDCARD}" "${MOUNT}"
run_host_command_logged mkdir -pv "${SDCARD}" "${MOUNT}" "${SRC}/cache/rootfs" "${DEST}/images" # @TODO images needs its own trap run_host_command_logged mkdir -pv "${SDCARD}" "${MOUNT}" "${SRC}/cache/rootfs" "${DEST}/images" # @TODO images needs its own trap
# bind mount rootfs if defined # @TODO: is this used? Igor's NAS?
if [[ -d "${ARMBIAN_CACHE_ROOTFS_PATH}" ]]; then
display_alert "Warning, using untested code path" "ARMBIAN_CACHE_ROOTFS_PATH" "warn"
mountpoint -q "${SRC}"/cache/rootfs && umount "${SRC}"/cache/rootfs
mount --bind "${ARMBIAN_CACHE_ROOTFS_PATH}" "${SRC}/cache/rootfs"
fi
# stage: verify tmpfs configuration and mount # stage: verify tmpfs configuration and mount
# CLI needs ~2GiB, desktop ~5GiB # CLI needs ~2GiB, desktop ~5GiB
# vs 60% of "available" RAM (free + buffers + magic) # vs 60% of "available" RAM (free + buffers + magic)
@@ -35,13 +29,12 @@ function prepare_rootfs_build_params_and_trap() {
display_alert "Not using tmpfs for rootfs" "RAM available: ${available_physical_memory_mib}MiB < ${tmpfs_estimated_size}MiB estimated" "info" display_alert "Not using tmpfs for rootfs" "RAM available: ${available_physical_memory_mib}MiB < ${tmpfs_estimated_size}MiB estimated" "info"
fi fi
# make global and readonly, for sanity declare -g -i tmpfs_estimated_size="${tmpfs_estimated_size}"
declare -g -r -i tmpfs_estimated_size="${tmpfs_estimated_size}" declare -g -i available_physical_memory_mib="${available_physical_memory_mib}"
declare -g -r -i available_physical_memory_mib="${available_physical_memory_mib}"
if [[ $use_tmpfs == yes ]]; then if [[ $use_tmpfs == yes ]]; then
declare -g -r ROOTFS_IS_UNDER_TMPFS=yes
mount -t tmpfs -o "size=99%" tmpfs "${SDCARD}" # size=50% is the Linux default, but we need more. mount -t tmpfs -o "size=99%" tmpfs "${SDCARD}" # size=50% is the Linux default, but we need more.
# this is cleaned up by trap_handler_cleanup_rootfs_and_image, configured above
fi fi
} }
@@ -58,10 +51,7 @@ function trap_handler_cleanup_rootfs_and_image() {
# unmount tmpfs mounted on SDCARD if it exists. #@TODO: move to new tmpfs-utils scheme # unmount tmpfs mounted on SDCARD if it exists. #@TODO: move to new tmpfs-utils scheme
mountpoint -q "${SDCARD}" && umount "${SDCARD}" mountpoint -q "${SDCARD}" && umount "${SDCARD}"
# @TODO: rpardini: igor: why lazy umounts? [[ $CRYPTROOT_ENABLE == yes ]] && cryptsetup luksClose "${ROOT_MAPPER}"
mountpoint -q "${SRC}"/cache/toolchain && umount -l "${SRC}"/cache/toolchain >&2
mountpoint -q "${SRC}"/cache/rootfs && umount -l "${SRC}"/cache/rootfs >&2
[[ $CRYPTROOT_ENABLE == yes ]] && cryptsetup luksClose "${ROOT_MAPPER}" >&2
if [[ "${PRESERVE_SDCARD_MOUNT}" == "yes" ]]; then if [[ "${PRESERVE_SDCARD_MOUNT}" == "yes" ]]; then
display_alert "Preserving SD card mount" "trap_handler_cleanup_rootfs_and_image" "warn" display_alert "Preserving SD card mount" "trap_handler_cleanup_rootfs_and_image" "warn"

View File

@@ -1,6 +1,60 @@
#!/usr/bin/env bash #!/usr/bin/env bash
# This file is/was autogenerated by lib/tools/gen-library.sh; don't modify manually # This file is/was autogenerated by lib/tools/gen-library.sh; don't modify manually
# no errors tolerated. invoked before each sourced file to make sure.
#set -o pipefail # trace ERR through pipes - will be enabled "soon"
#set -o nounset ## set -u : exit the script if you try to use an uninitialised variable - one day will be enabled
set -o errtrace # trace ERR through - enabled
set -o errexit ## set -e : exit the script if any statement returns a non-true return value - enabled
### lib/functions/artifacts/artifact-firmware.sh
# shellcheck source=lib/functions/artifacts/artifact-firmware.sh
source "${SRC}"/lib/functions/artifacts/artifact-firmware.sh
# no errors tolerated. invoked before each sourced file to make sure.
#set -o pipefail # trace ERR through pipes - will be enabled "soon"
#set -o nounset ## set -u : exit the script if you try to use an uninitialised variable - one day will be enabled
set -o errtrace # trace ERR through - enabled
set -o errexit ## set -e : exit the script if any statement returns a non-true return value - enabled
### lib/functions/artifacts/artifact-full_firmware.sh
# shellcheck source=lib/functions/artifacts/artifact-full_firmware.sh
source "${SRC}"/lib/functions/artifacts/artifact-full_firmware.sh
# no errors tolerated. invoked before each sourced file to make sure.
#set -o pipefail # trace ERR through pipes - will be enabled "soon"
#set -o nounset ## set -u : exit the script if you try to use an uninitialised variable - one day will be enabled
set -o errtrace # trace ERR through - enabled
set -o errexit ## set -e : exit the script if any statement returns a non-true return value - enabled
### lib/functions/artifacts/artifact-kernel.sh
# shellcheck source=lib/functions/artifacts/artifact-kernel.sh
source "${SRC}"/lib/functions/artifacts/artifact-kernel.sh
# no errors tolerated. invoked before each sourced file to make sure.
#set -o pipefail # trace ERR through pipes - will be enabled "soon"
#set -o nounset ## set -u : exit the script if you try to use an uninitialised variable - one day will be enabled
set -o errtrace # trace ERR through - enabled
set -o errexit ## set -e : exit the script if any statement returns a non-true return value - enabled
### lib/functions/artifacts/artifact-rootfs.sh
# shellcheck source=lib/functions/artifacts/artifact-rootfs.sh
source "${SRC}"/lib/functions/artifacts/artifact-rootfs.sh
# no errors tolerated. invoked before each sourced file to make sure.
#set -o pipefail # trace ERR through pipes - will be enabled "soon"
#set -o nounset ## set -u : exit the script if you try to use an uninitialised variable - one day will be enabled
set -o errtrace # trace ERR through - enabled
set -o errexit ## set -e : exit the script if any statement returns a non-true return value - enabled
### lib/functions/artifacts/artifact-uboot.sh
# shellcheck source=lib/functions/artifacts/artifact-uboot.sh
source "${SRC}"/lib/functions/artifacts/artifact-uboot.sh
# no errors tolerated. invoked before each sourced file to make sure.
#set -o pipefail # trace ERR through pipes - will be enabled "soon"
#set -o nounset ## set -u : exit the script if you try to use an uninitialised variable - one day will be enabled
set -o errtrace # trace ERR through - enabled
set -o errexit ## set -e : exit the script if any statement returns a non-true return value - enabled
### lib/functions/artifacts/artifacts-obtain.sh
# shellcheck source=lib/functions/artifacts/artifacts-obtain.sh
source "${SRC}"/lib/functions/artifacts/artifacts-obtain.sh
# no errors tolerated. invoked before each sourced file to make sure. # no errors tolerated. invoked before each sourced file to make sure.
#set -o pipefail # trace ERR through pipes - will be enabled "soon" #set -o pipefail # trace ERR through pipes - will be enabled "soon"
#set -o nounset ## set -u : exit the script if you try to use an uninitialised variable - one day will be enabled #set -o nounset ## set -u : exit the script if you try to use an uninitialised variable - one day will be enabled
@@ -10,24 +64,6 @@ set -o errexit ## set -e : exit the script if any statement returns a non-true
# shellcheck source=lib/functions/artifacts/artifacts-registry.sh # shellcheck source=lib/functions/artifacts/artifacts-registry.sh
source "${SRC}"/lib/functions/artifacts/artifacts-registry.sh source "${SRC}"/lib/functions/artifacts/artifacts-registry.sh
# no errors tolerated. invoked before each sourced file to make sure.
#set -o pipefail # trace ERR through pipes - will be enabled "soon"
#set -o nounset ## set -u : exit the script if you try to use an uninitialised variable - one day will be enabled
set -o errtrace # trace ERR through - enabled
set -o errexit ## set -e : exit the script if any statement returns a non-true return value - enabled
### lib/functions/artifacts/kernel.sh
# shellcheck source=lib/functions/artifacts/kernel.sh
source "${SRC}"/lib/functions/artifacts/kernel.sh
# no errors tolerated. invoked before each sourced file to make sure.
#set -o pipefail # trace ERR through pipes - will be enabled "soon"
#set -o nounset ## set -u : exit the script if you try to use an uninitialised variable - one day will be enabled
set -o errtrace # trace ERR through - enabled
set -o errexit ## set -e : exit the script if any statement returns a non-true return value - enabled
### lib/functions/artifacts/u-boot.sh
# shellcheck source=lib/functions/artifacts/u-boot.sh
source "${SRC}"/lib/functions/artifacts/u-boot.sh
# no errors tolerated. invoked before each sourced file to make sure. # no errors tolerated. invoked before each sourced file to make sure.
#set -o pipefail # trace ERR through pipes - will be enabled "soon" #set -o pipefail # trace ERR through pipes - will be enabled "soon"
#set -o nounset ## set -u : exit the script if you try to use an uninitialised variable - one day will be enabled #set -o nounset ## set -u : exit the script if you try to use an uninitialised variable - one day will be enabled
@@ -100,15 +136,6 @@ set -o errexit ## set -e : exit the script if any statement returns a non-true
# shellcheck source=lib/functions/cli/cli-docker.sh # shellcheck source=lib/functions/cli/cli-docker.sh
source "${SRC}"/lib/functions/cli/cli-docker.sh source "${SRC}"/lib/functions/cli/cli-docker.sh
# no errors tolerated. invoked before each sourced file to make sure.
#set -o pipefail # trace ERR through pipes - will be enabled "soon"
#set -o nounset ## set -u : exit the script if you try to use an uninitialised variable - one day will be enabled
set -o errtrace # trace ERR through - enabled
set -o errexit ## set -e : exit the script if any statement returns a non-true return value - enabled
### lib/functions/cli/cli-firmware.sh
# shellcheck source=lib/functions/cli/cli-firmware.sh
source "${SRC}"/lib/functions/cli/cli-firmware.sh
# no errors tolerated. invoked before each sourced file to make sure. # no errors tolerated. invoked before each sourced file to make sure.
#set -o pipefail # trace ERR through pipes - will be enabled "soon" #set -o pipefail # trace ERR through pipes - will be enabled "soon"
#set -o nounset ## set -u : exit the script if you try to use an uninitialised variable - one day will be enabled #set -o nounset ## set -u : exit the script if you try to use an uninitialised variable - one day will be enabled
@@ -145,15 +172,6 @@ set -o errexit ## set -e : exit the script if any statement returns a non-true
# shellcheck source=lib/functions/cli/cli-requirements.sh # shellcheck source=lib/functions/cli/cli-requirements.sh
source "${SRC}"/lib/functions/cli/cli-requirements.sh source "${SRC}"/lib/functions/cli/cli-requirements.sh
# no errors tolerated. invoked before each sourced file to make sure.
#set -o pipefail # trace ERR through pipes - will be enabled "soon"
#set -o nounset ## set -u : exit the script if you try to use an uninitialised variable - one day will be enabled
set -o errtrace # trace ERR through - enabled
set -o errexit ## set -e : exit the script if any statement returns a non-true return value - enabled
### lib/functions/cli/cli-rootfs.sh
# shellcheck source=lib/functions/cli/cli-rootfs.sh
source "${SRC}"/lib/functions/cli/cli-rootfs.sh
# no errors tolerated. invoked before each sourced file to make sure. # no errors tolerated. invoked before each sourced file to make sure.
#set -o pipefail # trace ERR through pipes - will be enabled "soon" #set -o pipefail # trace ERR through pipes - will be enabled "soon"
#set -o nounset ## set -u : exit the script if you try to use an uninitialised variable - one day will be enabled #set -o nounset ## set -u : exit the script if you try to use an uninitialised variable - one day will be enabled