🔥 JSON info pipeline: v18

- pipeline: add `pipeline` context object to targets; use it to filter artifacts and images to build; warn about oci-name with multiple oci-tags
- pipeline: better error messages when info gathering fails; show some (out-of-order) progress messages during parallel info gathering
- pipeline: targets-compositor: add `not-eos` inventory
- TARGETS_FILENAME, log all OCI lookups
- SKIP_IMAGES
- IMAGES_ONLY_OUTDATED_ARTIFACTS
- no dash in chunk id in JSON
- pipeline: very initial chunking, using the same outputs
- pipeline: template targets, `items-from-inventory:` inventory expansion, CHECK_OCI=yes, CLEAN_MATRIX=yes, CLEAN_INFO=yes, many fixes
- cli: `inventory` / `targets` / `matrix` / `workflow`
- pipeline: workflow beginnings
- pipeline: general log cleanup + OCI stats / better miss handling
- pipeline: fixes/reorg
- pipeline: catch & log JSON parsing errors
- pipeline: gha matrix: use IMAGE_FILE_ID as job description
- pipeline (delusion): gha workflow output, based on old matrix code
- pipeline: better parsing and reporting of stderr log lines (under `ANSI_COLOR=none`)
- pipeline: mapper-oci-uptodate: use separate positive/negative cache dirs (GHA will only cache positives); cache negs for 5 minutes locally
- pipeline: output-gha-matrix artifacts + images
  - pipeline: output-gha-matrix artifacts + images: "really" and fake 1-item matrix if empty
- pipeline: move files into subdir; update copyright & cleanup
- pipeline: refactor bash jsoninfo driver a bit
- pipeline: outdated-artifact-image-reducer
- pipeline: introduce `target_id` at the compositor, aggregate it at the reducer, carry it over in the artifact info mapper
- pipeline: mapper-oci-uptodate
- pipeline: info-gatherer-artifact, with PRE_PREPARED_HOST
- pipeline: refactor/rename info-gatherer-image.py
- pipeline: beginnings
Ricardo Pardini
2022-12-30 12:20:53 +01:00
committed by igorpecovnik
parent 9c6829bccb
commit f8ddf7f9e2
17 changed files with 1636 additions and 214 deletions


@@ -0,0 +1,18 @@
targets:
  cli-ubuntu:
    vars:
      BUILD_MINIMAL: "no" # quoting "no", since we want the string 'no', not the False boolean
      BUILD_DESKTOP: "no"
      RELEASE: jammy
    items-from-inventory:
      all: yes # includes all available BOARD and BRANCH combinations
      #conf: yes # includes all supported boards
      #wip: yes # includes all work-in-progress boards
      #not-eos: yes # not-eos boards, all branches
    # comment items-from-inventory: above, and uncomment items: below if you want to build only a subset of the inventory
    #items:
    #  - { BOARD: odroidn2, BRANCH: edge }
    #  - { BOARD: odroidhc4, BRANCH: edge }
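A minimal sketch of the expansion such a template implies (illustrative Python, not the actual targets-compositor.py, which is not shown here; the inventory items and structure are assumptions): every selected inventory item is merged with the target's vars to produce one concrete build target.

inventory = [
    {"BOARD": "odroidn2", "BRANCH": "edge"},
    {"BOARD": "odroidhc4", "BRANCH": "current"},
]
template_target = {
    "vars": {"BUILD_MINIMAL": "no", "BUILD_DESKTOP": "no", "RELEASE": "jammy"},
    "items-from-inventory": {"all": True},
}
# merge the template's vars with each inventory item; one build target per BOARD/BRANCH combination
expanded = [{"vars": {**template_target["vars"], **item}, "configs": []} for item in inventory]
print(expanded)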


@@ -17,26 +17,155 @@ function cli_json_info_run() {
prep_conf_main_minimal_ni
function json_info_logged() { # logging wrapper
LOG_SECTION="json_info" do_with_logging json_info_only
}
function json_info_only() {
prepare_python_and_pip # requires HOSTRELEASE
# The info extractor itself...
run_host_command_logged "${PYTHON3_VARS[@]}" "${PYTHON3_INFO[BIN]}" "${SRC}"/lib/tools/info.py ">" "${SRC}/output/info.json"
declare INFO_TOOLS_DIR="${SRC}"/lib/tools/info
display_alert "Here we go" "generating JSON info :: ${ARMBIAN_COMMAND} " "info"
# Targets inventory. Will do all-by-all if no targets file is provided.
declare TARGETS_FILE="${TARGETS_FILE-"${USERPATCHES_PATH}/${TARGETS_FILENAME:-"targets.yaml"}"}" # @TODO: return to targets.yaml one day
declare BASE_INFO_OUTPUT_DIR="${SRC}/output/info" # Output dir for info
if [[ "${CLEAN_INFO}" == "yes" ]]; then
display_alert "Cleaning info output dir" "${BASE_INFO_OUTPUT_DIR}" "info"
rm -rf "${BASE_INFO_OUTPUT_DIR}"
fi
mkdir -p "${BASE_INFO_OUTPUT_DIR}"
declare ALL_BOARDS_ALL_BRANCHES_INVENTORY_FILE="${BASE_INFO_OUTPUT_DIR}/all_boards_all_branches.json"
declare TARGETS_OUTPUT_FILE="${BASE_INFO_OUTPUT_DIR}/all-targets.json"
declare IMAGE_INFO_FILE="${BASE_INFO_OUTPUT_DIR}/image-info.json"
declare IMAGE_INFO_CSV_FILE="${BASE_INFO_OUTPUT_DIR}/image-info.csv"
declare REDUCED_ARTIFACTS_FILE="${BASE_INFO_OUTPUT_DIR}/artifacts-reduced.json"
declare ARTIFACTS_INFO_FILE="${BASE_INFO_OUTPUT_DIR}/artifacts-info.json"
declare ARTIFACTS_INFO_UPTODATE_FILE="${BASE_INFO_OUTPUT_DIR}/artifacts-info-uptodate.json"
declare OUTDATED_ARTIFACTS_IMAGES_FILE="${BASE_INFO_OUTPUT_DIR}/outdated-artifacts-images.json"
# Board/branch inventory.
if [[ ! -f "${ALL_BOARDS_ALL_BRANCHES_INVENTORY_FILE}" ]]; then
display_alert "Generating board/branch inventory" "all_boards_all_branches.json" "info"
run_host_command_logged "${PYTHON3_VARS[@]}" "${PYTHON3_INFO[BIN]}" "${INFO_TOOLS_DIR}"/board-inventory.py ">" "${ALL_BOARDS_ALL_BRANCHES_INVENTORY_FILE}"
fi
# @TODO: Release/rootfs inventory?
# A simplistic all-boards-all-branches target file, for the all-boards-all-branches-targets.json.
# Then just use the same info-gatherer-image to get the image info.
# This will be used as database for the targets-compositor, for example to get "all boards+branches that have kernel < 5.0" or "all boards+branches of meson64 family" etc.
# @TODO: this is a bit heavy; only do it if out-of-date (compared to config/, lib/, extensions/, userpatches/ file mtimes...)
# if TARGETS_FILE does not exist, one will be provided for you, from a template.
if [[ ! -f "${TARGETS_FILE}" ]]; then
declare TARGETS_TEMPLATE="${TARGETS_TEMPLATE:-"targets-all-cli.yaml"}"
display_alert "No targets file found" "using default targets template ${TARGETS_TEMPLATE}" "info"
TARGETS_FILE="${SRC}/config/templates/${TARGETS_TEMPLATE}"
else
display_alert "Using targets file" "${TARGETS_FILE}" "info"
fi
if [[ ! -f "${TARGETS_OUTPUT_FILE}" ]]; then
display_alert "Generating targets inventory" "targets-compositor" "info"
run_host_command_logged "${PYTHON3_VARS[@]}" "${PYTHON3_INFO[BIN]}" "${INFO_TOOLS_DIR}"/targets-compositor.py "${ALL_BOARDS_ALL_BRANCHES_INVENTORY_FILE}" "not_yet_releases.json" "${TARGETS_FILE}" ">" "${TARGETS_OUTPUT_FILE}"
fi
### Images.
# The image info extractor.
if [[ ! -f "${IMAGE_INFO_FILE}" ]]; then
display_alert "Generating image info" "info-gatherer-image" "info"
run_host_command_logged "${PYTHON3_VARS[@]}" "${PYTHON3_INFO[BIN]}" "${INFO_TOOLS_DIR}"/info-gatherer-image.py "${TARGETS_OUTPUT_FILE}" ">" "${IMAGE_INFO_FILE}"
# if stdin is a terminal...
if [ -t 0 ]; then
display_alert "To load the OpenSearch dashboards:" "
pip3 install opensearch-py # install needed lib to talk to OS
docker-compose --file tools/dashboards/docker-compose-opensearch.yaml up -d # start up OS in docker-compose
python3 lib/tools/index-opensearch.py < output/info/image-info.json # index the JSON into OS
# go check out http://localhost:5601
docker-compose --file tools/dashboards/docker-compose-opensearch.yaml down # shut down OS when you're done
" "info"
fi
fi
# convert image info output to CSV for easy import into Google Sheets etc
if [[ ! -f "${IMAGE_INFO_CSV_FILE}" ]]; then
display_alert "Generating CSV info" "info.csv" "info"
run_host_command_logged "${PYTHON3_VARS[@]}" "${PYTHON3_INFO[BIN]}" "${INFO_TOOLS_DIR}"/json2csv.py "<" "${IMAGE_INFO_FILE}" ">" "${IMAGE_INFO_CSV_FILE}"
fi
### Artifacts.
# Reducer: artifacts.
if [[ ! -f "${REDUCED_ARTIFACTS_FILE}" ]]; then
display_alert "Reducing info into artifacts" "artifact-reducer" "info"
run_host_command_logged "${PYTHON3_VARS[@]}" "${PYTHON3_INFO[BIN]}" "${INFO_TOOLS_DIR}"/artifact-reducer.py "${IMAGE_INFO_FILE}" ">" "${REDUCED_ARTIFACTS_FILE}"
fi
# The artifact info extractor.
if [[ ! -f "${ARTIFACTS_INFO_FILE}" ]]; then
display_alert "Generating artifact info" "info-gatherer-artifact" "info"
run_host_command_logged "${PYTHON3_VARS[@]}" "${PYTHON3_INFO[BIN]}" "${INFO_TOOLS_DIR}"/info-gatherer-artifact.py "${REDUCED_ARTIFACTS_FILE}" ">" "${ARTIFACTS_INFO_FILE}"
fi
# Now a mapper: check each OCI coordinate to see whether it is up-to-date. _Cache_ the positives eternally; cache the negatives only briefly (see mapper-oci-uptodate.py).
# This should ideally use the authentication info and other stuff that ORAS.land would.
# Real lookups are controlled by "CHECK_OCI=yes"; most people are not interested in registry state when generating a build plan, and the lookups are slow.
if [[ ! -f "${ARTIFACTS_INFO_UPTODATE_FILE}" ]]; then
display_alert "Gathering OCI info" "mapper-oci-uptodate :: real lookups (CHECK_OCI): ${CHECK_OCI:-"no"}" "info"
run_host_command_logged "${PYTHON3_VARS[@]}" "${PYTHON3_INFO[BIN]}" "${INFO_TOOLS_DIR}"/mapper-oci-uptodate.py "${ARTIFACTS_INFO_FILE}" "${CHECK_OCI:-"no"}" ">" "${ARTIFACTS_INFO_UPTODATE_FILE}"
fi
# A combinator/reducer: image + artifact; outdated artifacts plus the images that depend on them.
if [[ ! -f "${OUTDATED_ARTIFACTS_IMAGES_FILE}" ]]; then
display_alert "Combining image and artifact info" "outdated-artifact-image-reducer" "info"
run_host_command_logged "${PYTHON3_VARS[@]}" "${PYTHON3_INFO[BIN]}" "${INFO_TOOLS_DIR}"/outdated-artifact-image-reducer.py "${ARTIFACTS_INFO_UPTODATE_FILE}" "${IMAGE_INFO_FILE}" ">" "${OUTDATED_ARTIFACTS_IMAGES_FILE}"
fi
### CI/CD Outputs.
# Output stage: GHA simplest possible two-matrix workflow.
# A prepare job running this prepares two matrices:
# One for artifacts. One for images.
# If the image or artifact is up-to-date, it is still included in the matrix, but the job is skipped.
# If any of the matrices is bigger than 255 items, an error is generated.
if [[ "${ARMBIAN_COMMAND}" == "matrix" ]]; then
if [[ "${CLEAN_MATRIX}" == "yes" ]]; then
display_alert "Cleaning GHA matrix output" "clean-matrix" "info"
run_host_command_logged rm -fv "${BASE_INFO_OUTPUT_DIR}"/gha-*-matrix.json
fi
display_alert "Generating GHA matrix for artifacts" "output-gha-matrix :: artifacts" "info"
declare GHA_ALL_ARTIFACTS_JSON_MATRIX_FILE="${BASE_INFO_OUTPUT_DIR}/gha-all-artifacts-matrix.json"
if [[ ! -f "${GHA_ALL_ARTIFACTS_JSON_MATRIX_FILE}" ]]; then
run_host_command_logged "${PYTHON3_VARS[@]}" "${PYTHON3_INFO[BIN]}" "${INFO_TOOLS_DIR}"/output-gha-matrix.py artifacts "${OUTDATED_ARTIFACTS_IMAGES_FILE}" "${MATRIX_ARTIFACT_CHUNKS}" ">" "${GHA_ALL_ARTIFACTS_JSON_MATRIX_FILE}"
fi
github_actions_add_output "artifact-matrix" "$(cat "${GHA_ALL_ARTIFACTS_JSON_MATRIX_FILE}")"
display_alert "Generating GHA matrix for images" "output-gha-matrix :: images" "info"
declare GHA_ALL_IMAGES_JSON_MATRIX_FILE="${BASE_INFO_OUTPUT_DIR}/gha-all-images-matrix.json"
if [[ ! -f "${GHA_ALL_IMAGES_JSON_MATRIX_FILE}" ]]; then
run_host_command_logged "${PYTHON3_VARS[@]}" "${PYTHON3_INFO[BIN]}" "${INFO_TOOLS_DIR}"/output-gha-matrix.py images "${OUTDATED_ARTIFACTS_IMAGES_FILE}" "${MATRIX_IMAGE_CHUNKS}" ">" "${GHA_ALL_IMAGES_JSON_MATRIX_FILE}"
fi
github_actions_add_output "image-matrix" "$(cat "${GHA_ALL_IMAGES_JSON_MATRIX_FILE}")"
fi
### A secondary stage, which only makes sense to run inside GHA, and as such should be split into a different CLI command or put behind a flag.
if [[ "${ARMBIAN_COMMAND}" == "workflow" ]]; then
# GHA Workflow output. A delusion. Maybe.
display_alert "Generating GHA workflow" "output-gha-workflow :: complete" "info"
declare GHA_WORKFLOW_FILE="${BASE_INFO_OUTPUT_DIR}/gha-workflow.yaml"
run_host_command_logged "${PYTHON3_VARS[@]}" "${PYTHON3_INFO[BIN]}" "${INFO_TOOLS_DIR}"/output-gha-workflow.py "${OUTDATED_ARTIFACTS_IMAGES_FILE}" "${GHA_WORKFLOW_FILE}"
fi
# Also convert output to CSV for easy import into Google Sheets etc
run_host_command_logged "${PYTHON3_VARS[@]}" "${PYTHON3_INFO[BIN]}" "${SRC}"/lib/tools/json2csv.py "<" "${SRC}/output/info.json" ">" "${SRC}/output/info.csv"
}
do_with_default_build do_with_logging json_info_only
display_alert "JSON info generated" "in output/info.json" "info"
display_alert "CSV info generated" "in output/info.csv" "info"
display_alert "To load the OpenSearch dashboards:" "
pip3 install opensearch-py # install needed lib to talk to OS
docker-compose --file tools/dashboards/docker-compose-opensearch.yaml up -d # start up OS in docker-compose
python3 lib/tools/index-opensearch.py < output/info.json # index the info.json into OS
# go check out http://localhost:5601
docker-compose --file tools/dashboards/docker-compose-opensearch.yaml down # shut down OS when you're done
" "info"
do_with_default_build json_info_logged
}
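For clarity, a small sketch of the pattern the function above applies to every stage (illustrative Python, not part of this commit; the function and parameter names are placeholders): each stage writes JSON to a file and is skipped when that file already exists, which is why CLEAN_INFO=yes is what forces a full re-run.

import os
import subprocess

def run_stage(tool_script, args, output_file):
    # skip the stage if its output is already there (the bash above uses [[ ! -f ... ]])
    if os.path.exists(output_file):
        return
    with open(output_file, "w") as out:
        subprocess.run(["python3", tool_script] + list(args), stdout=out, check=True)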


@@ -23,8 +23,10 @@ function armbian_register_commands() {
["configdump"]="config_dump_json" # implemented in cli_config_dump_json_pre_run and cli_config_dump_json_run
["config-dump-json"]="config_dump_json" # implemented in cli_config_dump_json_pre_run and cli_config_dump_json_run
["json-info-boards"]="json_info" # implemented in cli_json_info_pre_run and cli_json_info_run
["write-all-boards-branches-json"]="json_info" # implemented in cli_json_info_pre_run and cli_json_info_run
["inventory"]="json_info" # implemented in cli_json_info_pre_run and cli_json_info_run
["targets"]="json_info" # implemented in cli_json_info_pre_run and cli_json_info_run
["matrix"]="json_info" # implemented in cli_json_info_pre_run and cli_json_info_run
["workflow"]="json_info" # implemented in cli_json_info_pre_run and cli_json_info_run
["kernel-patches-to-git"]="patch_kernel" # implemented in cli_patch_kernel_pre_run and cli_patch_kernel_run


@@ -18,6 +18,8 @@ function early_prepare_pip3_dependencies_for_python_tools() {
"GitPython==3.1.30" # for manipulating git repos
"unidecode==1.3.6" # for converting strings to ascii
"coloredlogs==15.0.1" # for colored logging
"PyYAML==6.0" # for parsing/writing YAML
"oras==0.1.17" # for OCI stuff in mapper-oci-update
)
return 0
}


@@ -7,16 +7,27 @@
# This file is a part of the Armbian Build Framework
# https://github.com/armbian/build/
#
import concurrent
import concurrent.futures
import glob
import json
import logging
import multiprocessing
import os
import re
import subprocess
from pathlib import Path
import sys
ARMBIAN_CONFIG_REGEX_KERNEL_TARGET = r"^([export |declare -g]+)?KERNEL_TARGET=\"(.*)\""
log: logging.Logger = logging.getLogger("armbian_utils")
def parse_env_for_tokens(env_name):
result = []
# Read the environment; if None, return an empty list.
val = os.environ.get(env_name, None)
if val is None:
return result
@@ -55,11 +66,15 @@ def show_incoming_environment():
log.debug(f"--ENV-- {key}={os.environ[key]}")
def is_debug():
return get_from_env("LOG_DEBUG") == "yes"
def setup_logging():
try:
import coloredlogs
level = "INFO"
if get_from_env("LOG_DEBUG") == "yes":
if is_debug():
level = "DEBUG"
format = "%(message)s"
styles = {
@@ -73,6 +88,243 @@ def setup_logging():
coloredlogs.install(level=level, stream=sys.stderr, isatty=True, fmt=format, level_styles=styles)
except ImportError:
level = logging.INFO
if get_from_env("LOG_DEBUG") == "yes":
if is_debug():
level = logging.DEBUG
logging.basicConfig(level=level, stream=sys.stderr)
def parse_json(json_contents_str):
import json
return json.loads(json_contents_str)
def to_yaml(gha_workflow):
import yaml
return yaml.safe_dump(gha_workflow, explicit_start=True, default_flow_style=False, sort_keys=False, allow_unicode=True, indent=2, width=1000)
# Read the first line of the board file: that's the hardware description, in a pound comment.
# Also parse the 'KERNEL_TARGET="legacy,current,edge"' line.
def armbian_parse_board_file_for_static_info(board_file, board_id):
file_handle = open(board_file, 'r')
file_lines = file_handle.readlines()
file_handle.close()
file_lines.reverse()
hw_desc_line = file_lines.pop()
hw_desc_clean = None
if hw_desc_line.startswith("# "):
hw_desc_clean = hw_desc_line.strip("# ").strip("\n")
# Parse KERNEL_TARGET line.
kernel_targets = None
kernel_target_matches = re.findall(ARMBIAN_CONFIG_REGEX_KERNEL_TARGET, "\n".join(file_lines), re.MULTILINE)
if len(kernel_target_matches) == 1:
kernel_targets = kernel_target_matches[0][1].split(",")
ret = {"BOARD": board_id, "BOARD_SUPPORT_LEVEL": (Path(board_file).suffix)[1:]}
if hw_desc_clean is not None:
ret["BOARD_FILE_HARDWARE_DESC"] = hw_desc_clean
if kernel_targets is not None:
ret["BOARD_POSSIBLE_BRANCHES"] = kernel_targets
return ret
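# Example (illustrative): a board file named "odroidn2.conf" whose first line is '# Odroid N2'
# and which contains KERNEL_TARGET="current,edge" yields, for board_id "odroidn2":
#   {"BOARD": "odroidn2", "BOARD_SUPPORT_LEVEL": "conf",
#    "BOARD_FILE_HARDWARE_DESC": "Odroid N2", "BOARD_POSSIBLE_BRANCHES": ["current", "edge"]}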
def armbian_get_all_boards_list(boards_path):
ret = {}
for file in glob.glob(boards_path + "/*.*"):
stem = Path(file).stem
if stem != "README":
ret[stem] = file
return ret
def find_armbian_src_path():
# Find the location of compile.sh, relative to this Python script.
this_script_full_path = os.path.realpath(__file__)
log.debug(f"Real path to this script: '{this_script_full_path}'")
armbian_src_path = os.path.realpath(os.path.join(os.path.dirname(this_script_full_path), "..", "..", ".."))
log.debug(f"Real path to Armbian SRC '{armbian_src_path}'")
compile_sh_full_path = os.path.realpath(os.path.join(armbian_src_path, "compile.sh"))
log.debug(f"Real path to compile.sh '{compile_sh_full_path}'")
# Make sure it exists
if not os.path.exists(compile_sh_full_path):
raise Exception("Can't find compile.sh")
core_boards_path = os.path.realpath(os.path.join(armbian_src_path, "config", "boards"))
log.debug(f"Real path to core boards '{core_boards_path}'")
# Make sure it exists
if not os.path.exists(core_boards_path):
raise Exception("Can't find config/boards")
userpatches_boards_path = os.path.realpath(os.path.join(armbian_src_path, "userpatches", "config", "boards"))
log.debug(f"Real path to userpatches boards '{userpatches_boards_path}'")
has_userpatches_path = os.path.exists(userpatches_boards_path)
return {"armbian_src_path": armbian_src_path, "compile_sh_full_path": compile_sh_full_path, "core_boards_path": core_boards_path,
"userpatches_boards_path": userpatches_boards_path, "has_userpatches_path": has_userpatches_path}
def armbian_get_all_boards_inventory():
armbian_paths = find_armbian_src_path()
core_boards = armbian_get_all_boards_list(armbian_paths["core_boards_path"])
# first, gather the board_info for every core board. if any fail, stop.
info_for_board = {}
for board in core_boards.keys():
board_info = armbian_parse_board_file_for_static_info(core_boards[board], board)
board_info["BOARD_CORE_OR_USERPATCHED"] = "core"
# Core boards must have the KERNEL_TARGET defined.
if "BOARD_POSSIBLE_BRANCHES" not in board_info:
raise Exception(f"Core board '{board}' must have KERNEL_TARGET defined")
info_for_board[board] = board_info
# Now go for the userpatched boards. Those can be all-new, or they can be patches to existing boards.
if armbian_paths["has_userpatches_path"]:
userpatched_boards = armbian_get_all_boards_list(armbian_paths["userpatches_boards_path"])
for uboard_name in userpatched_boards.keys():
uboard = armbian_parse_board_file_for_static_info(userpatched_boards[uboard_name], uboard_name)
uboard["BOARD_CORE_OR_USERPATCHED"] = "userpatched"
is_new_board = not (uboard_name in info_for_board)
if is_new_board:
log.debug(f"Userpatched Board {uboard_name} is new")
# New userpatched boards must have the KERNEL_TARGET defined.
if "BOARD_POSSIBLE_BRANCHES" not in uboard:
raise Exception(f"NEW userpatched board '{uboard_name}' must have KERNEL_TARGET defined")
info_for_board[uboard_name] = uboard
else:
log.debug(f"Userpatched Board {uboard_name} is already in core boards")
info_for_board[uboard_name] = {**info_for_board[uboard_name], **uboard}
return info_for_board
def map_to_armbian_params(map_params):
ret = []
for param in map_params:
ret.append(param + "=" + map_params[param])
return ret
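# Example (illustrative): map_to_armbian_params({"BOARD": "odroidn2", "BRANCH": "edge"})
# returns ["BOARD=odroidn2", "BRANCH=edge"], ready to be appended to a compile.sh command line.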
def armbian_run_command_and_parse_json_from_stdout(exec_cmd: list[str], params: dict):
result = None
logs = []
try:
result = subprocess.run(
exec_cmd,
stdout=subprocess.PIPE,
check=True,
universal_newlines=False, # universal_newlines messes up bash encoding, don't use, instead decode utf8 manually;
bufsize=-1, # full buffering
# Early (pre-param-parsing) optimizations for those in Armbian bash code, so use an ENV (not PARAM)
env={
"CONFIG_DEFS_ONLY": "yes", # Dont do anything. Just output vars.
"ANSI_COLOR": "none", # Do not use ANSI colors in logging output, don't write to log files
"WRITE_EXTENSIONS_METADATA": "no", # Not interested in ext meta here
"ALLOW_ROOT": "yes", # We're gonna be calling it as root, so allow it @TODO not the best option
"PRE_PREPARED_HOST": "yes" # We're gonna be calling it as root, so allow it @TODO not the best option
},
stderr=subprocess.PIPE
)
except subprocess.CalledProcessError as e:
# decode utf8 manually, universal_newlines messes up bash encoding
logs = parse_log_lines_from_stderr(e.stderr)
log.error(f"Error calling Armbian command: {' '.join(exec_cmd)}")
log.error(f"Error details: params: {params} - return code: {e.returncode} - stderr: {'; '.join(logs[-5:])}")
return {"in": params, "out": {}, "logs": logs, "config_ok": False}
if result is not None:
if result.stderr:
logs = parse_log_lines_from_stderr(result.stderr)
# parse the result.stdout as json.
try:
parsed = json.loads(result.stdout.decode("utf8"))
info = {"in": params, "out": parsed, "config_ok": True}
info["logs"] = logs
return info
except json.decoder.JSONDecodeError as e:
log.error(f"Error parsing Armbian JSON: params: {params}, stderr: {'; '.join(logs[-5:])}")
# return {"in": params, "out": {}, "logs": logs, "config_ok": False}
raise e
def parse_log_lines_from_stderr(lines_stderr: bytes):
# parse list, split by newline
lines = lines_stderr.decode("utf8").split("\n")
# trim lines, remove empty ones
logs = [line.strip() for line in lines if line.strip()]
# each line, split at the first occurrence of two colons ("::")
result = []
for line in logs:
line = line.strip()
if not line:
continue
parts = line.split("::", 1)
if len(parts) != 2:
# very probably something that leaked out of the logging manager, grab it
result.append("[LEAKED]:" + line.strip())
continue
type = parts[0].strip()
msg = parts[1].strip()
# if type begins "err" or "warn" or "wrn":
if type.startswith("err") or type.startswith("warn") or type.startswith("wrn"):
# remove some redundant stuff we don't want
if ("Exiting with error " in msg) or ("please wait for cleanups to finish" in msg):
continue
result.append(f"{type}: {msg}")
return result
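# Example (illustrative): parse_log_lines_from_stderr(b"err :: something broke\nrandom bash noise\n")
# returns ["err: something broke", "[LEAKED]:random bash noise"].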
def gather_json_output_from_armbian(command: str, targets: list[dict]):
armbian_paths = find_armbian_src_path()
# now loop over gathered infos
every_info = []
use_parallel: bool = True
if use_parallel:
counter = 0
total = len(targets)
# get the number of processor cores on this machine
max_workers = multiprocessing.cpu_count() * 2 # use double the number of cpu cores, that's the sweet spot
log.info(f"Using {max_workers} workers for parallel processing.")
with concurrent.futures.ProcessPoolExecutor(max_workers=max_workers) as executor:
every_future = []
for target in targets:
counter += 1
future = executor.submit(get_info_for_one_build, armbian_paths, command, target, counter, total)
every_future.append(future)
log.info(f"Submitted {len(every_future)} jobs to the parallel executor. Waiting for them to finish...")
executor.shutdown(wait=True)
log.info(f"All jobs finished!")
for future in every_future:
info = future.result()
if info is not None:
every_info.append(info)
else:
counter = 0
total = len(targets)
for target in targets:
counter += 1
info = get_info_for_one_build(armbian_paths, command, target, counter, total)
if info is not None:
every_info.append(info)
return every_info
def get_info_for_one_build(armbian_paths: dict[str, str], command: str, params: dict, counter: int, total: int):
try:
try:
sh: str = armbian_paths["compile_sh_full_path"]
cmds: list[str] = ([sh] + [command] + map_to_armbian_params(params["vars"]) + params["configs"])
parsed = armbian_run_command_and_parse_json_from_stdout(cmds, params)
return parsed
except BaseException as e:
log.error(f"Failed get info for build '{command}' '{params}': '{e}'", exc_info=True)
return {"ARMBIAN_CONFIG_OK": False, "PYTHON_INFO_ERROR": "{}".format(e), "INPUT": params}
finally:
if counter % 10 == 0:
log.info(f"Processed {counter} / {total} targets.")

lib/tools/common/gha.py (new file, 176 lines)

@@ -0,0 +1,176 @@
import logging
import os
import uuid
log: logging.Logger = logging.getLogger("gha")
def wrap_with_gha_expression(value):
return "${{ " + value + " }}"
def set_gha_output(name, value):
if os.environ.get('GITHUB_OUTPUT') is None:
log.warning(f"Environment variable GITHUB_OUTPUT is not set. Cannot set output '{name}' to '{value}'")
return
with open(os.environ['GITHUB_OUTPUT'], 'a') as fh:
print(f'{name}={value}', file=fh)
log.info(f"Set GHA output '{name}' to '{value}'")
def set_multiline_gha_output(name, value):
if os.environ.get('GITHUB_OUTPUT') is None:
log.warning(f"Environment variable GITHUB_OUTPUT is not set. Cannot set multiline output '{name}'")
return
with open(os.environ['GITHUB_OUTPUT'], 'a') as fh:
delimiter = uuid.uuid1()
print(f'{name}<<{delimiter}', file=fh)
print(value, file=fh)
print(delimiter, file=fh)
class WorkflowJobCondition:
def __init__(self, condition):
self.condition = condition
# Warning: there are no real "job inputs" in GHA. this is just an abstraction to make it easier to work with
class WorkflowJobInput:
def __init__(self, value: str):
self.value = value
# The Job that holds this input
self.job: BaseWorkflowJob | None = None
class WorkflowJobOutput:
def __init__(self, name: str, value: str):
self.name = name
self.value = value
# The Job that produces this output
self.job: BaseWorkflowJob | None = None
# The step that produces this output (optional)
self.step: WorkflowJobStep | None = None
def render_yaml(self):
return wrap_with_gha_expression(f"{self.value}")
class WorkflowJobStep:
def __init__(self, id: str, name: str):
self.id = id
self.name = name
self.run: "str | None" = None
self.uses: "str | None" = None
self.withs: dict[str, str] = {}
def render_yaml(self):
all = {"id": self.id, "name": self.name}
if len(self.withs) > 0:
all["with"] = self.withs
if self.run is not None:
all["run"] = self.run
if self.uses is not None:
all["uses"] = self.uses
return all
class BaseWorkflowJob:
def __init__(self, job_id: str, job_name: str):
self.job_id: str = job_id
self.job_name: str = job_name
self.outputs: dict[str, WorkflowJobOutput] = {}
self.needs: set[BaseWorkflowJob] = set()
self.conditions: list[WorkflowJobCondition] = []
self.steps: list[WorkflowJobStep] = []
self.runs_on: list[str] | str = "ubuntu-latest"
self.envs: dict[str, str] = {}
def set_runs_on(self, runs_on):
self.runs_on = runs_on
return self
def add_step(self, step_id: str, step_name: str):
step = WorkflowJobStep(step_id, step_name)
self.steps.append(step)
return step
def add_job_output_from_step(self, step: WorkflowJobStep, output_name: str) -> WorkflowJobOutput:
job_wide_name = f"{step.id}_{output_name}"
output = WorkflowJobOutput(job_wide_name, f"steps.{step.id}.outputs.{output_name}")
output.step = step
output.job = self
self.outputs[job_wide_name] = output
return output
def add_job_output_from_input(self, name: str, input: WorkflowJobInput) -> WorkflowJobOutput:
output = WorkflowJobOutput(name, input.value)
output.job = self
self.outputs[name] = output
return output
def add_job_input_from_needed_job_output(self, job_output: WorkflowJobOutput) -> WorkflowJobInput:
# add referenced job as a 'needs' dependency, so we can read it.
self.needs.add(job_output.job)
input = WorkflowJobInput(f"needs.{job_output.job.job_id}.outputs.{job_output.name}")
input.job = self
return input
def add_condition_from_input(self, input: WorkflowJobInput, expression: str):
condition = WorkflowJobCondition(f"{input.value} {expression}")
self.conditions.append(condition)
return condition
def render_yaml(self) -> dict[str, object]:
job: dict[str, object] = {}
job["name"] = self.job_name
if len(self.envs) > 0:
job["env"] = self.envs
if len(self.needs) > 0:
job["needs"] = [n.job_id for n in self.needs]
if len(self.conditions) > 0:
job["if"] = wrap_with_gha_expression(f"always() && ( {' || '.join([c.condition for c in self.conditions])} ) ")
if len(self.outputs) > 0:
job["outputs"] = {o.name: o.render_yaml() for o in self.outputs.values()}
job["runs-on"] = self.runs_on
if len(self.steps) > 0:
job["steps"] = [s.render_yaml() for s in self.steps]
else:
raise Exception("No steps defined for job")
return job
class WorkflowFactory:
def __init__(self):
self.jobs: dict[str, BaseWorkflowJob] = {}
def add_job(self, job: BaseWorkflowJob) -> BaseWorkflowJob:
if job.job_id in self.jobs:
raise Exception(f"Double adding of job {job.job_id}")
self.jobs[job.job_id] = job
return job
def get_job(self, job_id: str) -> BaseWorkflowJob:
if job_id not in self.jobs:
raise Exception(f"Job {job_id} not found")
return self.jobs[job_id]
def render_yaml(self) -> dict[str, object]:
gha_workflow: dict[str, object] = dict()
gha_workflow["name"] = "build-targets"
gha_workflow["on"] = {"workflow_dispatch": {}}
gha_workflow["on"]["workflow_call"] = {}
# trigger when pushed to. wtf...
gha_workflow["on"]["push"] = {"branches": ["main"], "paths": [".github/workflows/build-targets.yaml"]}
jobs = {} # @TODO: maybe sort... maybe prepare...
for job in self.jobs.values():
jobs[job.job_id] = job.render_yaml()
gha_workflow["jobs"] = jobs
return gha_workflow
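A small usage sketch of the classes above (illustrative, not part of this commit; the job ids, step names and echo commands are made up; it assumes it is run from lib/tools so that common/ is importable, and that PyYAML is installed for to_yaml):

from common import armbian_utils, gha

factory = gha.WorkflowFactory()

prepare = factory.add_job(gha.BaseWorkflowJob("prepare", "Prepare build plan"))
plan_step = prepare.add_step("plan", "Generate matrix")
plan_step.run = "echo matrix=something >> $GITHUB_OUTPUT"
matrix_output = prepare.add_job_output_from_step(plan_step, "matrix")  # exposed as a job-level output

build = factory.add_job(gha.BaseWorkflowJob("build", "Build artifacts"))
matrix_input = build.add_job_input_from_needed_job_output(matrix_output)  # also adds 'needs: prepare'
build.add_condition_from_input(matrix_input, "!= ''")  # only run when the output is non-empty
build_step = build.add_step("build", "Run build")
build_step.run = "echo building"

print(armbian_utils.to_yaml(factory.render_yaml()))  # renders the complete workflow YAML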


@@ -1,185 +0,0 @@
#!/usr/bin/env python3
#
# SPDX-License-Identifier: GPL-2.0
#
# Copyright (c) 2013-2023 Igor Pecovnik, igor@armbian.com
#
# This file is a part of the Armbian Build Framework
# https://github.com/armbian/build/
#
import concurrent.futures
import glob
import json
import multiprocessing
import os
import re
import subprocess
import sys
import traceback
from pathlib import Path
def eprint(*args, **kwargs):
print(*args, file=sys.stderr, **kwargs)
def get_all_boards_list_from_armbian(src_path):
ret = {}
for file in glob.glob(src_path + "/config/boards/*.*"):
stem = Path(file).stem
if stem != "README":
ret[stem] = file
# return ret
return ret
def map_to_armbian_params(map_params):
ret = []
for param in map_params:
ret.append(param + "=" + map_params[param])
return ret
def run_armbian_compile_and_parse(path_to_compile_sh, armbian_src_path, compile_params):
exec_cmd = ([path_to_compile_sh] + ["config-dump-json"] + map_to_armbian_params(compile_params))
# eprint("Running command: '{}' ", exec_cmd)
result = None
logs = ["Not available"]
try:
result = subprocess.run(
exec_cmd,
stdout=subprocess.PIPE,
check=True,
universal_newlines=False, # universal_newlines messes up bash encoding, don't use, instead decode utf8 manually;
bufsize=-1, # full buffering
# Early (pre-param-parsing) optimizations for those in Armbian bash code, so use an ENV (not PARAM)
env={
"CONFIG_DEFS_ONLY": "yes", # Dont do anything. Just output vars.
"ANSI_COLOR": "none", # Do not use ANSI colors in logging output, don't write to log files
"WRITE_EXTENSIONS_METADATA": "no" # Not interested in ext meta here
},
stderr=subprocess.PIPE
)
except subprocess.CalledProcessError as e:
# decode utf8 manually, universal_newlines messes up bash encoding
lines_stderr = e.stderr.decode("utf8").split("\n")
eprint("Error calling Armbian: params: {}, return code: {}, stderr: {}".format(compile_params, e.returncode, "; ".join(lines_stderr[-5:])))
return {"in": compile_params, "out": {}, "logs": lines_stderr, "config_ok": False}
if result is not None:
if result.stderr:
# parse list, split by newline
lines = result.stderr.decode("utf8").split("\n")
# trim lines, remove empty ones
logs = [line.strip() for line in lines if line.strip()]
# parse the result.stdout as json
parsed = json.loads(result.stdout.decode("utf8"))
info = {"in": compile_params, "out": parsed, "config_ok": True}
# info["logs"] = logs
return info
# Find the location of compile.sh, relative to this Python script.
this_script_full_path = os.path.realpath(__file__)
# eprint("Real path to this script", this_script_full_path)
armbian_src_path = os.path.realpath(os.path.join(os.path.dirname(this_script_full_path), "..", ".."))
# eprint("Real path to Armbian SRC", armbian_src_path)
compile_sh_full_path = os.path.realpath(os.path.join(armbian_src_path, "compile.sh"))
# eprint("Real path to compile.sh", compile_sh_full_path)
# Make sure it exists
if not os.path.exists(compile_sh_full_path):
raise Exception("Can't find compile.sh")
common_compile_params = {
}
board_compile_params = {
}
# I've to read the first line from the board file, that's the hardware description in a pound comment.
# Also, 'KERNEL_TARGET="legacy,current,edge"' which we need to parse.
def parse_board_file_for_static_info(board_file, board_id):
file_handle = open(board_file, 'r')
file_lines = file_handle.readlines()
file_handle.close()
file_lines.reverse()
hw_desc_line = file_lines.pop()
hw_desc_clean = hw_desc_line.strip("# ").strip("\n")
# Parse KERNEL_TARGET line.
kernel_target_matches = re.findall(r"^(export )?KERNEL_TARGET=\"(.*)\"", "\n".join(file_lines), re.MULTILINE)
kernel_targets = kernel_target_matches[0][1].split(",")
# eprint("Possible kernel branches for board: ", board_id, " : ", kernel_targets)
return {
"BOARD_FILE_HARDWARE_DESC": hw_desc_clean,
"BOARD_POSSIBLE_BRANCHES": kernel_targets,
"BOARD_DESC_ID": board_id
}
def get_info_for_one_board(board_file, board_name, common_params, board_info, branch):
# eprint(
# "Getting info for board '{}' branch '{}' in file '{}'".format(
# board_name, common_params["BRANCH"], board_file
# )
# )
board_info = board_info | {"BOARD_DESC_ID": f"{board_name}-{branch}"}
# eprint("Running Armbian bash for board '{}'".format(board_name))
try:
parsed = run_armbian_compile_and_parse(compile_sh_full_path, armbian_src_path, common_params | {"BOARD": board_name})
return parsed | board_info
except BaseException as e:
eprint("Failed get info for board '{}': '{}'".format(board_name, e))
traceback.print_exc()
return board_info | {"ARMBIAN_CONFIG_OK": False, "PYTHON_INFO_ERROR": "{}".format(e)}
if True:
all_boards = get_all_boards_list_from_armbian(armbian_src_path)
# eprint(json.dumps(all_boards, indent=4, sort_keys=True))
# first, gather the board_info for every board. if any fail, stop.
info_for_board = {}
for board in all_boards.keys():
try:
board_info = parse_board_file_for_static_info(all_boards[board], board)
info_for_board[board] = board_info
except BaseException as e:
eprint("** Failed to parse board file {} static: {}".format(board, e))
raise e
# now loop over gathered infos
every_info = []
# get the number of processor cores on this machine
max_workers = multiprocessing.cpu_count() * 2 # use double the number of cpu cores, that's the sweet spot
eprint(f"Using {max_workers} workers for parallel processing.")
with concurrent.futures.ProcessPoolExecutor(max_workers=max_workers) as executor:
every_future = []
for board in all_boards.keys():
board_info = info_for_board[board]
for possible_branch in board_info["BOARD_POSSIBLE_BRANCHES"]:
all_params = common_compile_params | board_compile_params | {"BRANCH": possible_branch}
# eprint("Submitting future for board {} with BRANCH={}".format(board, possible_branch))
future = executor.submit(get_info_for_one_board, all_boards[board], board, all_params, board_info, possible_branch)
every_future.append(future)
eprint(f"Waiting for all {len(every_future)} configurations to be computed... this might take a long time.")
executor.shutdown(wait=True)
eprint("Done, all futures awaited")
for future in every_future:
info = future.result()
if info is not None:
every_info.append(info)
# info = get_info_for_one_board(board, all_params)
print(json.dumps(every_info, indent=4, sort_keys=True))


@@ -0,0 +1,111 @@
#!/usr/bin/env python3
#
# SPDX-License-Identifier: GPL-2.0
# Copyright (c) 2023 Ricardo Pardini <ricardo@pardini.net>
# This file is a part of the Armbian Build Framework https://github.com/armbian/build/
#
import json
import logging
import os
import sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from common import armbian_utils
# Prepare logging
armbian_utils.setup_logging()
log: logging.Logger = logging.getLogger("artifact-reducer")
# read the targets.json file passed as first argument as a json object
with open(sys.argv[1]) as f:
build_infos = json.load(f)
all_artifacts: list[dict] = []
# loop over the build infos. for each, construct a structure with the artifacts.
for build_info in build_infos:
if build_info["config_ok"] == False:
log.warning(f"Skipping failed config '{build_info['in']}'...")
continue
outvars = build_info["out"]
want_uppercase: list[str] = outvars["WANT_ARTIFACT_ALL_ARRAY"]
want_names: list[str] = outvars["WANT_ARTIFACT_ALL_NAMES_ARRAY"]
# create a dict with uppercase keys and names for values
want_dict: dict[str, str] = dict(zip(want_uppercase, want_names))
# loop over the uppercases
for uppercase in want_uppercase:
# if uppercase != "KERNEL":
# log.warning(f"Skipping artifact '{uppercase}'...")
# continue
inputs_keyname = f"WANT_ARTIFACT_{uppercase}_INPUTS_ARRAY"
inputs_raw_array = outvars[inputs_keyname]
artifact_name = want_dict[uppercase]
# check the pipeline config for artifacts...
if "pipeline" in build_info["in"]:
pipeline = build_info["in"]["pipeline"]
if "build-artifacts" in pipeline:
if pipeline["build-artifacts"] == False:
log.warning(f"Skipping artifact '{artifact_name}' (pipeline build-artifacts '{pipeline['build-artifacts']}' config)...")
continue
else:
log.warning(f"Keeping artifact '{artifact_name}' (pipeline build-artifacts '{pipeline['build-artifacts']}' config)...")
if "only-artifacts" in pipeline:
only_artifacts = pipeline["only-artifacts"]
if artifact_name not in only_artifacts:
log.warning(f"Skipping artifact '{artifact_name}' (pipeline only-artifacts '{','.join(only_artifacts)}' config)...")
continue
else:
log.warning(f"Keeping artifact '{artifact_name}' (pipeline only-artifacts '{','.join(only_artifacts)}' config)...")
inputs: dict[str, str] = {}
for input_raw in inputs_raw_array:
# de-quote the value. @TODO: fragile
input = input_raw[1:-1]
# split the input into a tuple
(key, value) = input.split("=", 1)
inputs[key] = value
# sort by key, join k=v again
inputs_sorted = "&".join([f"{k}={v}" for k, v in sorted(inputs.items())])
artifact_build_key = f"{artifact_name}?{inputs_sorted}"
all_artifacts.append({"artifact_name": artifact_name, "key": artifact_build_key, "inputs": inputs, "original_inputs": build_info["in"]})
log.info(f"Found {len(all_artifacts)} total artifacts... reducing...")
# deduplicate each artifact; keep a reference to the original input of one of the duplicates
deduplicated_artifacts: dict[str, dict] = {}
for artifact in all_artifacts:
artifact_build_key = artifact["key"]
if artifact_build_key not in deduplicated_artifacts:
deduplicated_artifacts[artifact_build_key] = artifact
deduplicated_artifacts[artifact_build_key]["needed_by"] = 0
deduplicated_artifacts[artifact_build_key]["wanted_by_targets"] = []
deduplicated_artifacts[artifact_build_key]["needed_by"] += 1
deduplicated_artifacts[artifact_build_key]["wanted_by_targets"].append(artifact["original_inputs"]["target_id"])
log.info(f"Found {len(deduplicated_artifacts)} unique artifacts combinations... reducing...")
# get a list of all the artifacts, sorted by how many needed_by
deduplicated_artifacts_sorted = sorted(deduplicated_artifacts.values(), key=lambda x: x["needed_by"], reverse=True)
# group again, this time by artifact name
artifacts_by_name: dict[str, list[dict]] = {}
for artifact in deduplicated_artifacts_sorted:
artifact_name = artifact["artifact_name"]
if artifact_name not in artifacts_by_name:
artifacts_by_name[artifact_name] = []
artifacts_by_name[artifact_name].append(artifact)
log.info(f"Found {len(artifacts_by_name)} unique artifacts... reducing...")
for artifact_name, artifacts in artifacts_by_name.items():
log.info(f"Reduced '{artifact_name}' artifact to: {len(artifacts)} instances.")
# dump as json
print(json.dumps(deduplicated_artifacts_sorted, indent=4, sort_keys=True))
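To make the reduction above concrete, a sketch of the dedup key it builds (illustrative; the input names are hypothetical): the same artifact name plus the same inputs, regardless of dict ordering, collapse to a single entry.

def artifact_key(artifact_name, inputs):
    # sort by key so that dict ordering never produces distinct keys for identical inputs
    return artifact_name + "?" + "&".join(f"{k}={v}" for k, v in sorted(inputs.items()))

a = artifact_key("kernel", {"LINUXFAMILY": "meson64", "BRANCH": "edge"})
b = artifact_key("kernel", {"BRANCH": "edge", "LINUXFAMILY": "meson64"})
assert a == b == "kernel?BRANCH=edge&LINUXFAMILY=meson64"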


@@ -0,0 +1,23 @@
#!/usr/bin/env python3
#
# SPDX-License-Identifier: GPL-2.0
# Copyright (c) 2023 Ricardo Pardini <ricardo@pardini.net>
# This file is a part of the Armbian Build Framework https://github.com/armbian/build/
#
import json
import logging
import os
import sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from common import armbian_utils
# Prepare logging
armbian_utils.setup_logging()
log: logging.Logger = logging.getLogger("board-inventory")
all = armbian_utils.armbian_get_all_boards_inventory()
log.info(f"Inventoried {len(all)} boards vs branch combinations.")
print(json.dumps(all, indent=4, sort_keys=True))


@@ -0,0 +1,37 @@
#!/usr/bin/env python3
#
# SPDX-License-Identifier: GPL-2.0
# Copyright (c) 2023 Ricardo Pardini <ricardo@pardini.net>
# This file is a part of the Armbian Build Framework https://github.com/armbian/build/
#
import json
import logging
import os
import sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from common import armbian_utils
# Prepare logging
armbian_utils.setup_logging()
log: logging.Logger = logging.getLogger("info-gatherer-artifact")
# read the targets.json file passed as first argument as a json object
with open(sys.argv[1]) as f:
targets = json.load(f)
# massage the targets into their full info invocations (sans-command)
artifacts = []
for target in targets:
one_artifact = target
one_artifact["vars"] = (target["original_inputs"]["vars"])
one_artifact["vars"]["WHAT"] = target["artifact_name"]
one_artifact["configs"] = (target["original_inputs"]["configs"])
artifacts.append(one_artifact)
every_info = armbian_utils.gather_json_output_from_armbian("artifact-config-dump-json", artifacts)
print(json.dumps(every_info, indent=4, sort_keys=True))


@@ -0,0 +1,28 @@
#!/usr/bin/env python3
#
# SPDX-License-Identifier: GPL-2.0
# Copyright (c) 2023 Ricardo Pardini <ricardo@pardini.net>
# This file is a part of the Armbian Build Framework https://github.com/armbian/build/
#
import json
import logging
import os
import sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from common import armbian_utils
# Prepare logging
armbian_utils.setup_logging()
log: logging.Logger = logging.getLogger("info-gatherer-image")
# read the targets.json file passed as first argument as a json object
with open(sys.argv[1]) as f:
targets = json.load(f)
every_info = armbian_utils.gather_json_output_from_armbian("config-dump-json", targets)
print(json.dumps(every_info, indent=4, sort_keys=True))


@@ -1,17 +1,24 @@
#!/usr/bin/env python3
#
# SPDX-License-Identifier: GPL-2.0
#
# Copyright (c) 2013-2023 Igor Pecovnik, igor@armbian.com
#
# This file is a part of the Armbian Build Framework
# https://github.com/armbian/build/
#
#!/usr/bin/env python3
#
# SPDX-License-Identifier: GPL-2.0
# Copyright (c) 2023 Ricardo Pardini <ricardo@pardini.net>
# This file is a part of the Armbian Build Framework https://github.com/armbian/build/
#
import collections.abc
import json
import logging
import os
import sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from common import armbian_utils
# Prepare logging
armbian_utils.setup_logging()
log: logging.Logger = logging.getLogger("info-gatherer-image")
def eprint(*args, **kwargs):
print(*args, file=sys.stderr, **kwargs)


@@ -0,0 +1,172 @@
#!/usr/bin/env python3
#
# SPDX-License-Identifier: GPL-2.0
# Copyright (c) 2023 Ricardo Pardini <ricardo@pardini.net>
# This file is a part of the Armbian Build Framework https://github.com/armbian/build/
#
import datetime
import hashlib
import json
import logging
import os
import oras.client
import oras.logger
import sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from common import armbian_utils
# Prepare logging
armbian_utils.setup_logging()
log: logging.Logger = logging.getLogger("mapper-oci-up-to-date")
# Extra logging for ORAS library
oras.logger.setup_logger(quiet=(not armbian_utils.is_debug()), debug=(armbian_utils.is_debug()))
# Prepare Armbian cache
armbian_paths = armbian_utils.find_armbian_src_path()
cache_dir = armbian_paths["armbian_src_path"] + "/cache"
oci_cache_dir_positive = cache_dir + "/oci/positive"
os.makedirs(oci_cache_dir_positive, exist_ok=True)
oci_cache_dir_positive = os.path.abspath(oci_cache_dir_positive)
oci_cache_dir_negative = cache_dir + "/oci/negative"
os.makedirs(oci_cache_dir_negative, exist_ok=True)
oci_cache_dir_negative = os.path.abspath(oci_cache_dir_negative)
client = oras.client.OrasClient(insecure=False)
log.info(f"OCI client version: {client.version()}")
# the cutoff time for negative (miss) cache files; keep it low. positive hits are cached forever
cutoff_mtime = (datetime.datetime.now().timestamp() - 60 * 5) # 5 minutes ago
# global counters for final stats
stats = {"lookups": 0, "skipped": 0, "hits": 0, "misses": 0, "hits_positive": 0, "hits_negative": 0, "late_misses": 0, "miss_positive": 0,
"miss_negative": 0}
def check_oci_up_to_date_cache(oci_target: str, really_check: bool = False):
# increment the stats counter
stats["lookups"] += 1
if not really_check:
# we're not really checking, so just report not-up-to-date without doing a lookup
stats["skipped"] += 1
return {"up-to-date": False, "reason": "oci-check-not-performed"}
log.info(f"Checking if '{oci_target}' is up-to-date...")
# init the returned obj
ret = {"up-to-date": False, "reason": "undetermined"}
# md5 hash of the oci_target. don't use any utils, just do it ourselves with standard python
md5_hash = hashlib.md5(oci_target.encode()).hexdigest()
cache_file_positive = f"{oci_cache_dir_positive}/{md5_hash}.json"
cache_file_negative = f"{oci_cache_dir_negative}/{md5_hash}.json"
cache_hit = False
if os.path.exists(cache_file_positive):
# increment the stats counter
stats["hits_positive"] += 1
cache_hit = True
log.debug(f"Found positive cache file for '{oci_target}'.")
with open(cache_file_positive) as f:
ret = json.load(f)
elif os.path.exists(cache_file_negative):
# increment the stats counter
stats["hits_negative"] += 1
cache_file_mtime = os.path.getmtime(cache_file_negative)
log.debug(f"Cache mtime: {cache_file_mtime} / Cutoff time: {cutoff_mtime}")
if cache_file_mtime > cutoff_mtime:
cache_hit = True
log.debug(f"Found still-valid negative cache file for '{oci_target}'.")
with open(cache_file_negative) as f:
ret = json.load(f)
else:
# increment the stats counter
stats["late_misses"] += 1
# remove the cache file
log.debug(f"Removing old negative cache file for '{oci_target}'.")
os.remove(cache_file_negative)
# increment the stats counter
stats["hits" if cache_hit else "misses"] += 1
if not cache_hit:
log.debug(f"No cache file for '{oci_target}'")
try:
container = client.remote.get_container(oci_target)
client.remote.load_configs(container)
manifest = client.remote.get_manifest(container)
log.debug(f"Got manifest for '{oci_target}'.")
ret["up-to-date"] = True
ret["reason"] = "manifest_exists"
ret["manifest"] = manifest
except Exception as e:
message: str = str(e)
ret["up-to-date"] = False
ret["reason"] = "exception"
ret["exception"] = message # don't store ValueError(e) as it's not json serializable
# A known-good cache miss.
if ": Not Found" in message:
ret["reason"] = "not_found"
else:
# log warning so we implement handling above. @TODO: some "unauthorized" errors pop up sometimes
log.warning(f"Failed to get manifest for '{oci_target}': {e}")
# increment stats counter
stats["miss_positive" if ret["up-to-date"] else "miss_negative"] += 1
# stamp it with milliseconds since epoch
ret["cache_timestamp"] = datetime.datetime.now().timestamp()
# write to cache, positive or negative.
cache_file = cache_file_positive if ret["up-to-date"] else cache_file_negative
with open(cache_file, "w") as f:
f.write(json.dumps(ret, indent=4, sort_keys=True))
return ret
# read the targets.json file passed as first argument as a json object
with open(sys.argv[1]) as f:
targets = json.load(f)
# Second argument is CHECK_OCI=yes/no, default no
check_oci = sys.argv[2] == "yes" if len(sys.argv) > 2 else False
# massage the targets into their full info invocations (sans-command)
uptodate_artifacts = []
oci_target_map = {}
for target in targets:
if not target["config_ok"]:
log.warning(f"Failed config up-to-date check target, ignoring: '{target}'")
# @TODO this probably should be a showstopper
continue
oci_target = target["out"]["artifact_full_oci_target"]
if oci_target in oci_target_map:
log.warning("Duplicate oci_target: {oci_target}")
continue
oci_target_map[oci_target] = target
# run through the targets and see if they are up-to-date.
oci_infos = []
for oci_target in oci_target_map:
orig_target = oci_target_map[oci_target]
orig_target["oci"] = {}
orig_target["oci"] = check_oci_up_to_date_cache(oci_target, check_oci)
oci_infos.append(orig_target)
# Go, Copilot!
log.info(
f"OCI cache stats 1: lookups={stats['lookups']} skipped={stats['skipped']} hits={stats['hits']} misses={stats['misses']} hits_positive={stats['hits_positive']} hits_negative={stats['hits_negative']} late_misses={stats['late_misses']} miss_positive={stats['miss_positive']} miss_negative={stats['miss_negative']}")
log.info(
f"OCI cache stats 2: hit_pct={stats['hits'] / stats['lookups'] * 100:.2f}% miss_pct={stats['misses'] / stats['lookups'] * 100:.2f}% late_miss_pct={stats['late_misses'] / stats['lookups'] * 100:.2f}%")
print(json.dumps(oci_infos, indent=4, sort_keys=True))
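A sketch of the cache layout used above (the OCI coordinate is a made-up example): the target string is hashed, positive results live indefinitely under cache/oci/positive, and negative results under cache/oci/negative are honored for only five minutes.

import hashlib

oci_target = "ghcr.io/armbian/os/some-artifact:1.2.3-abcdef"  # hypothetical coordinate
md5_hash = hashlib.md5(oci_target.encode()).hexdigest()
print(f"positive hit file: cache/oci/positive/{md5_hash}.json")  # trusted forever
print(f"negative hit file: cache/oci/negative/{md5_hash}.json")  # expires after 5 minutes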


@@ -0,0 +1,142 @@
#!/usr/bin/env python3
#
# SPDX-License-Identifier: GPL-2.0
# Copyright (c) 2023 Ricardo Pardini <ricardo@pardini.net>
# This file is a part of the Armbian Build Framework https://github.com/armbian/build/
#
import json
import logging
import os
import sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from common import armbian_utils
# Prepare logging
armbian_utils.setup_logging()
log: logging.Logger = logging.getLogger("outdated-artifact-image-reducer")
# read the outdated-artifacts json file passed as first argument as a json object
with open(sys.argv[1]) as f:
artifacts = json.load(f)
# read the full images info json file passed as second argument as a json object
with open(sys.argv[2]) as f:
images = json.load(f)
# give an id to each artifact, store in dict (which is also an output)
artifacts_by_id = {}
counter = 1
for artifact in artifacts:
# id is the counter left-padded with zeros to 10 digits
artifact["id"] = str(counter).zfill(10)
counter = counter + 1
artifacts_by_id[artifact["id"]] = artifact
# let's find artifacts that have the same name, but different versions
tags_by_oci_name: dict[str, list] = {}
for artifact in artifacts:
artifact_full_oci_target = artifact["out"]["artifact_full_oci_target"]
# split at the colon (:)
oci_split = artifact_full_oci_target.split(":")
oci_name = oci_split[0]
oci_tag = oci_split[1]
log.debug(f"OCI name '{oci_name}' has tag '{oci_tag}'")
# add it to the dict
if oci_name not in tags_by_oci_name:
tags_by_oci_name[oci_name] = []
tags_by_oci_name[oci_name].append(oci_tag)
# loop over the dict, and warn if any have more than one instance
for oci_name in tags_by_oci_name:
tags = tags_by_oci_name[oci_name]
if len(tags) > 1:
list_tags_escaped_quoted = ', '.join([f"'{tag}'" for tag in tags])
log.warning(
f"Artifact '{oci_name}' has {len(tags)} different tags: {list_tags_escaped_quoted} - this is certainly a problem, go fix the artifact.")
# map images to in.target_id
images_by_target_id = {}
for image in images:
if "config_ok" not in image or not image["config_ok"]:
log.warning(f"Image {image['in']['target_id']} did not config OK, skipping")
continue
if "out" not in image:
log.warning(f"Image {image['in']['target_id']} has no out field, skipping")
continue
if "IMAGE_FILE_ID" not in image["out"]:
log.warning(f"Image {image['in']['target_id']} has no IMAGE_FILE_ID field, skipping")
continue
image["image_file_id"] = image["out"]["IMAGE_FILE_ID"]
images_by_target_id[image["in"]["target_id"]] = image
# map artifacts to in.wanted_by_targets array
artifacts_by_target_id = {}
for artifact in artifacts:
# optional: if the artifact is up-to-date, skip?
artifact_wanted_targets = artifact["in"]["wanted_by_targets"]
for linked_to_target in artifact_wanted_targets:
if linked_to_target not in artifacts_by_target_id:
artifacts_by_target_id[linked_to_target] = []
artifacts_by_target_id[linked_to_target].append(artifact)
artifacts_by_artifact_name = {}
for artifact in artifacts:
if artifact["in"]["artifact_name"] not in artifacts_by_artifact_name:
artifacts_by_artifact_name[artifact["in"]["artifact_name"]] = []
artifacts_by_artifact_name[artifact["in"]["artifact_name"]].append(artifact["id"])
outdated_artifacts_by_artifact_name = {}
for artifact in artifacts:
if artifact["oci"]["up-to-date"]:
continue
if artifact["in"]["artifact_name"] not in outdated_artifacts_by_artifact_name:
outdated_artifacts_by_artifact_name[artifact["in"]["artifact_name"]] = []
outdated_artifacts_by_artifact_name[artifact["in"]["artifact_name"]].append(artifact["id"])
images_with_artifacts = {}
for target_id, image in images_by_target_id.items():
# skip the images with such pipeline config. only their artifacts are relevant.
if "pipeline" in image["in"]:
if "build-image" in image["in"]["pipeline"]:
if not image["in"]["pipeline"]["build-image"]:
log.warning(f"Image {image['in']['target_id']} has a pipeline build-image false, skipping")
continue
else:
log.warning(f"Image {image['in']['target_id']} has a pipeline build-image true, processing")
if target_id not in artifacts_by_target_id:
continue
image_artifacts = artifacts_by_target_id[target_id]
image["artifact_ids"] = []
for artifact in artifacts_by_target_id[target_id]:
image["artifact_ids"].append(artifact["id"])
image["outdated_artifacts_count"] = 0
image["outdated_artifact_ids"] = []
for artifact in image_artifacts:
if not artifact["oci"]["up-to-date"]:
image["outdated_artifact_ids"].append(artifact["id"])
image["outdated_artifacts_count"] = image["outdated_artifacts_count"] + 1
images_with_artifacts[target_id] = image
# images with outdated artifacts
images_with_outdated_artifacts = []
for target_id, image in images_with_artifacts.items():
if image["outdated_artifacts_count"] > 0:
images_with_outdated_artifacts.append(target_id)
result = {"images": images_with_artifacts, "artifacts": artifacts_by_id,
"artifacts_by_artifact_name": artifacts_by_artifact_name,
"outdated_artifacts_by_artifact_name": outdated_artifacts_by_artifact_name,
"images_with_outdated_artifacts": images_with_outdated_artifacts}
print(json.dumps(result, indent=4, sort_keys=False))
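The multi-tag warning above catches artifacts whose inputs should all resolve to one version but do not; a minimal sketch with made-up coordinates:

targets = [
    "ghcr.io/armbian/os/uboot-odroidn2-edge:2023.01-aaaa",  # hypothetical
    "ghcr.io/armbian/os/uboot-odroidn2-edge:2023.01-bbbb",  # same name, different tag: a problem
]
tags_by_oci_name = {}
for full_target in targets:
    oci_name, oci_tag = full_target.split(":")
    tags_by_oci_name.setdefault(oci_name, []).append(oci_tag)
for oci_name, tags in tags_by_oci_name.items():
    if len(tags) > 1:
        print(f"{oci_name} has {len(tags)} tags: {tags}")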


@@ -0,0 +1,151 @@
#!/usr/bin/env python3
#
# SPDX-License-Identifier: GPL-2.0
# Copyright (c) 2023 Ricardo Pardini <ricardo@pardini.net>
# This file is a part of the Armbian Build Framework https://github.com/armbian/build/
#
import json
import logging
import os
import sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from common import armbian_utils
# Prepare logging
armbian_utils.setup_logging()
log: logging.Logger = logging.getLogger("output-gha-matrix")
def generate_matrix_images(info) -> list[dict]:
# each image
matrix = []
for image_id in info["images"]:
image = info["images"][image_id]
if armbian_utils.get_from_env("IMAGES_ONLY_OUTDATED_ARTIFACTS") == "yes":
skip = image["outdated_artifacts_count"] == 0
if skip:
log.info(f"Skipping image {image_id} because it has no outdated artifacts")
continue
if armbian_utils.get_from_env("SKIP_IMAGES") == "yes":
log.warning(f"Skipping image {image_id} because SKIP_IMAGES=yes")
continue
desc = f"{image['image_file_id']} {image_id}"
runs_on = "ubuntu-latest"
image_arch = image['out']['ARCH']
if image_arch in ["arm64"]: # , "armhf"
runs_on = ["self-hosted", "Linux", 'armbian', f"image-{image_arch}"]
inputs = image['in']
cmds = (armbian_utils.map_to_armbian_params(inputs["vars"]) + inputs["configs"]) # image build is "build" command, omitted here
invocation = " ".join(cmds)
item = {"desc": desc, "runs_on": runs_on, "invocation": invocation}
matrix.append(item)
return matrix
def generate_matrix_artifacts(info):
# each artifact
matrix = []
for artifact_id in info["artifacts"]:
artifact = info["artifacts"][artifact_id]
skip = not not artifact["oci"]["up-to-date"]
if skip:
continue
artifact_name = artifact['in']['artifact_name']
desc = f"{artifact['out']['artifact_final_file_basename']}"
# runs_in = ["self-hosted", "Linux", 'armbian', f"artifact-{artifact_name}"]
runs_on = "ubuntu-latest"
# @TODO: externalize this logic.
# rootfs artifacts for arm64 are built on self-hosted runners tagged with "rootfs-<arch>"
if artifact_name in ["rootfs"]:
rootfs_arch = artifact['in']['inputs']['ARCH'] # @TODO we should resolve arch _much_ earlier in the pipeline and make it standard
if rootfs_arch in ["arm64"]: # (future: add armhf)
runs_on = ["self-hosted", "Linux", 'armbian', f"rootfs-{rootfs_arch}"]
# all kernels are built on self-hosted runners.
if artifact_name in ["kernel"]:
runs_on = ["self-hosted", "Linux", 'armbian', f"artifact-{artifact_name}"]
inputs = artifact['in']['original_inputs']
cmds = (["artifact"] + armbian_utils.map_to_armbian_params(inputs["vars"]) + inputs["configs"])
invocation = " ".join(cmds)
item = {"desc": desc, "runs_on": runs_on, "invocation": invocation}
matrix.append(item)
return matrix
# generate images or artifacts?
type_gen = sys.argv[1]
# read the outdated artifacts+images json file passed as second argument as a json object
with open(sys.argv[2]) as f:
info = json.load(f)
matrix = None
if type_gen == "artifacts":
matrix = generate_matrix_artifacts(info)
elif type_gen == "images":
matrix = generate_matrix_images(info)
else:
log.error(f"Unknown type: {type_gen}")
sys.exit(1)
# optional third argument is the number of chunks wanted.
ideal_chunk_size = 150
max_chunk_size = 250
# check if sys.argv[3] exists...
if len(sys.argv) >= 4:
num_chunks = int(sys.argv[3])
else:
log.warning(f"Number of chunks not specified. Calculating automatically, matrix: {len(matrix)} chunk ideal: {ideal_chunk_size}.")
# calculate num_chunks: floor-divide the matrix size by the ideal chunk size and add one, so every chunk stays at or below the ideal size.
num_chunks = int(len(matrix) / ideal_chunk_size) + 1
log.warning(f"Number of chunks: {num_chunks}")
# distribute the matrix items equally along the chunks. try to keep every chunk the same size.
chunks = []
for i in range(num_chunks):
chunks.append([])
for i, item in enumerate(matrix):
chunks[i % num_chunks].append(item)
# ensure chunks are not too big
for i, chunk in enumerate(chunks):
if len(chunk) > ideal_chunk_size:
log.warning(f"Chunk '{i + 1}' is bigger than ideal: {len(chunk)}")
if len(chunk) > max_chunk_size:
log.error(f"Chunk '{i + 1}' is too big: {len(chunk)}")
sys.exit(1)
# Ensure matrix is sane...
if len(chunk) == 0:
log.warning(f"Chunk '{i + 1}' for '{type_gen}' is empty, adding fake invocation.")
chunks[i] = [{"desc": "Fake matrix element so matrix is not empty", "runs_on": "ubuntu-latest", "invocation": "none", "really": "no"}]
else:
for item in chunk:
item["really"] = "yes"
# massage the chunks so they're objects with "include" key, the way GHA likes it.
all_chunks = {}
for i, chunk in enumerate(chunks):
log.info(f"Chunk {i + 1} has {len(chunk)} elements.")
all_chunks[f"chunk{i + 1}"] = {"include": chunk}
print(json.dumps(all_chunks))
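The chunk distribution above is plain round-robin (item i lands in chunk i % num_chunks), so chunk sizes never differ by more than one. A minimal standalone sketch of that behaviour, separate from the tool itself:

# Standalone illustration of the round-robin chunking used above; not part of the pipeline.
def round_robin_chunks(items: list, num_chunks: int) -> list[list]:
    chunks: list[list] = [[] for _ in range(num_chunks)]
    for i, item in enumerate(items):
        chunks[i % num_chunks].append(item)
    return chunks


if __name__ == "__main__":
    sizes = [len(c) for c in round_robin_chunks(list(range(7)), 3)]
    print(sizes)  # [3, 2, 2] -- sizes differ by at most one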

View File

@@ -0,0 +1,222 @@
#!/usr/bin/env python3
#
# SPDX-License-Identifier: GPL-2.0
# Copyright (c) 2023 Ricardo Pardini <ricardo@pardini.net>
# This file is a part of the Armbian Build Framework https://github.com/armbian/build/
#
import json
import logging
import os
import sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from common import armbian_utils
from common import gha
# Prepare logging
armbian_utils.setup_logging()
log: logging.Logger = logging.getLogger("output-gha-matrix")
class BuildJob(gha.BaseWorkflowJob):
def __init__(self, id: str, name: str):
super().__init__(id, name)
self.add_default_envs()
self.add_ghcr_login_step()
def add_default_envs(self):
self.envs["OCI_TARGET_BASE"] = "ghcr.io/${{ github.repository }}/" # This is picked up by the Docker launcher automatically
self.envs["DOCKER_ARMBIAN_BASE_COORDINATE_PREFIX"] = "ghcr.io/${{ github.repository }}:armbian-next-" # Use Docker image in same repo
self.envs[
"DOCKER_SKIP_UPDATE"] = "yes" # Do not apt update/install/requirements/etc during Dockerfile build, trust DOCKER_ARMBIAN_BASE_COORDINATE_PREFIX's images are up-to-date
def add_ghcr_login_step(self):
# Login to ghcr.io, we're gonna do a lot of OCI lookups.
login_step = self.add_step("docker-login-ghcr", "Docker Login to GitHub Container Registry")
login_step.uses = "docker/login-action@v2"
login_step.withs["registry"] = "ghcr.io"
login_step.withs["username"] = "${{ github.repository_owner }}" # GitHub username or org
login_step.withs["password"] = "${{ secrets.GITHUB_TOKEN }}" # GitHub actions builtin token. repo has to have pkg access.
# # Login to ghcr.io, we're gonna do a lot of OCI lookups.
# - name: Docker Login to GitHub Container Registry
# uses: docker/login-action@v2
# with:
# registry: ghcr.io
# username: ${{ github.repository_owner }} # GitHub username or org
# password: ${{ secrets.GITHUB_TOKEN }} # GitHub actions builtin token. repo has to have pkg access.
class ArtifactJob(BuildJob):
def __init__(self, id: str, name: str):
super().__init__(id, name)
class ImageJob(BuildJob):
def __init__(self, id: str, name: str):
super().__init__(id, name)
class PrepareJob(BuildJob):
def __init__(self, id: str, name: str):
super().__init__(id, name)
def add_initial_checkout(self):
# Checkout the build repo
checkout_step = self.add_step("checkout-build-repo", "Checkout build repo")
checkout_step.uses = "actions/checkout@v3"
checkout_step.withs["repository"] = "${{ github.repository_owner }}/armbian-build"
checkout_step.withs["ref"] = "extensions"
checkout_step.withs["fetch-depth"] = 1
checkout_step.withs["clean"] = "false"
# Now grab the SHA1 from the checked out copy
grab_sha1_step = self.add_step("git-info", "Grab SHA1")
grab_sha1_step.run = 'echo "sha1=$(git rev-parse HEAD)" >> $GITHUB_OUTPUT'
self.add_job_output_from_step(grab_sha1_step, "sha1")
def add_cache_restore_step(self):
# Restore the cache
restore_cache_step = self.add_step("restore-cache", "Restore cache")
restore_cache_step.uses = "actions/cache@v3"
restore_cache_step.withs["path"] = "cache/memoize\ncache/oci/positive"
restore_cache_step.withs["key"] = '${{ runner.os }}-cache-${{ github.sha }}-${{ steps.git-info.outputs.sha1 }}'
restore_cache_step.withs["restore-keys"] = '${{ runner.os }}-matrix-cache-'
def add_cache_chown_step(self):
# chown the cache back to normal user
chown_cache_step = self.add_step("chown-cache", "Chown cache")
chown_cache_step.run = 'sudo chown -R $USER:$USER cache/memoize cache/oci/positive'
def prepare_gh_releases_step(self):
# @TODO this is outdated, needs replacement. Also it deletes the release if it already exists, which is not what we want. Might be necessary to move the tag.
gh_releases_step = self.add_step("gh-releases", "Prepare GitHub Releases")
gh_releases_step.uses = "marvinpinto/action-automatic-releases@latest"
gh_releases_step.withs["repo_token"] = "${{ secrets.GITHUB_TOKEN }}"
gh_releases_step.withs["automatic_release_tag"] = "latest-images"
gh_releases_step.withs["prerelease"] = "false"
gh_releases_step.withs["title"] = "Latest images"
# read the outdated artifacts+images json file, passed as the first argument, as a json object
with open(sys.argv[1]) as f:
info = json.load(f)
# Create a WorkflowFactory
wfFactory: gha.WorkflowFactory = gha.WorkflowFactory()
# Create prepare job
pJob: PrepareJob = PrepareJob(f"prepare", f"prepare all")
pJob.set_runs_on(["self-hosted", "Linux", 'armbian', "matrix-prepare"]) # @TODO: de-hardcode?
pJob.add_initial_checkout()
pJob.add_cache_restore_step()
pJobUpToDateStep = pJob.add_step(f"check-up-to-date", f"Check up to date")
pJobUpToDateStep.run = f'rm -rfv output/info; bash ./compile.sh workflow rpardini-generic # DEBUG=yes'
# The outputs are added later, for each artifact.
pJob.add_cache_chown_step()
pJob.prepare_gh_releases_step()
wfFactory.add_job(pJob)
all_artifact_jobs = {}
u2date_artifact_outputs = {}
for artifact_id in info["artifacts"]:
artifact = info["artifacts"][artifact_id]
skip = bool(artifact["oci"]["up-to-date"])  # coerce to a real boolean
# if skip:
# continue
artifact_name = artifact['in']['artifact_name']
# desc = f"{artifact['out']['artifact_final_file_basename']}"
desc = f"{artifact['out']['artifact_name']}"
# runs_in = ["self-hosted", "Linux", 'armbian', f"artifact-{artifact_name}"]
runs_on = "ubuntu-latest"
# @TODO: externalize this logic.
# rootfs artifacts for arm64 are built on self-hosted runners tagged with "rootfs-<arch>"
if artifact_name in ["rootfs"]:
rootfs_arch = artifact['in']['inputs']['ARCH'] # @TODO we should resolve arch _much_ earlier in the pipeline and make it standard
if rootfs_arch in ["arm64"]: # (future: add armhf)
runs_on = ["self-hosted", "Linux", 'armbian', f"rootfs-{rootfs_arch}"]
# all kernels are built on self-hosted runners.
if artifact_name in ["kernel"]:
runs_on = ["self-hosted", "Linux", 'armbian', f"artifact-{artifact_name}"]
inputs = artifact['in']['original_inputs']
cmds = (["artifact"] + armbian_utils.map_to_armbian_params(inputs["vars"]) + inputs["configs"])
invocation = " ".join(cmds)
item = {"desc": desc, "runs_on": runs_on, "invocation": invocation}
aJob: ArtifactJob = ArtifactJob(f"artifact-{artifact_id}", f"{desc}")
aJob.set_runs_on(runs_on)
build_step = aJob.add_step(f"build-artifact", f"Build artifact {desc}")
build_step.run = f'echo "fake artifact: {invocation}"'
# Add output to prepare job... & set the GHA output, right here. Hey us, it's us from the future. We're so smart.
# write to a github actions output variable. use the filesystem.
gha.set_gha_output(f"u2d-{artifact_id}", ("yes" if skip else "no"))
output: gha.WorkflowJobOutput = pJob.add_job_output_from_step(pJobUpToDateStep, f"u2d-{artifact_id}")
input: gha.WorkflowJobInput = aJob.add_job_input_from_needed_job_output(output)
aJob.add_condition_from_input(input, "== 'no'")
u2date_output: gha.WorkflowJobOutput = aJob.add_job_output_from_input(f"up-to-date-artifact", input)
all_artifact_jobs[artifact_id] = aJob
u2date_artifact_outputs[artifact_id] = u2date_output
wfFactory.add_job(aJob)
# Ok now the images...
for image_id in info["images"]:
image = info["images"][image_id]
# skip = image["outdated_artifacts_count"] == 0
# if skip:
# continue
desc = f"{image['image_file_id']} {image_id}"
runs_on = "ubuntu-latest"
image_arch = image['out']['ARCH']
if image_arch in ["arm64"]: # , "armhf"
runs_on = ["self-hosted", "Linux", 'armbian', f"image-{image_arch}"]
inputs = image['in']
cmds = (armbian_utils.map_to_armbian_params(inputs["vars"]) + inputs["configs"]) # image build is "build" command, omitted here
invocation = " ".join(cmds)
iJob: ImageJob = ImageJob(f"image-{image_id}", f"{desc}")
iJob.set_runs_on(runs_on)
build_step = iJob.add_step(f"build-image", f"Build image {desc}")
build_step.run = f'echo "fake image: {invocation}"'
# Make it use the outputs from the artifacts needed for this image
for artifact_id in image["artifact_ids"]:
log.info(f"Image {image_id} wants artifact {artifact_id}")
aJob = all_artifact_jobs[artifact_id]
aJobU2dOutput = u2date_artifact_outputs[artifact_id]
u2dinput = iJob.add_job_input_from_needed_job_output(aJobU2dOutput)
iJob.add_condition_from_input(u2dinput, "== 'no'")
wfFactory.add_job(iJob)
# Convert the workflow to YAML
gha_workflow_yaml = armbian_utils.to_yaml(wfFactory.render_yaml())
# Write the YAML to the target file
with open(sys.argv[2], "w") as f:
f.write(gha_workflow_yaml)
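The artifact/image gating above hinges on GitHub Actions job outputs: gha.set_gha_output() presumably appends name=value pairs to the file GitHub exposes through $GITHUB_OUTPUT, and the generated jobs read them back via needs.<job>.outputs. The helper lives in the common.gha module imported above and is not part of this diff; a minimal sketch of what it is assumed to do:

# Hedged sketch of an output helper, assuming the standard $GITHUB_OUTPUT mechanism;
# the real implementation is in the (not shown) common.gha module.
import os


def set_gha_output_sketch(name: str, value: str) -> None:
    output_file = os.environ.get("GITHUB_OUTPUT")
    if output_file is None:  # not running under GitHub Actions
        print(f"{name}={value}")
        return
    with open(output_file, "a") as out:
        out.write(f"{name}={value}\n")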

View File

@@ -0,0 +1,135 @@
#!/usr/bin/env python3
#
# SPDX-License-Identifier: GPL-2.0
# Copyright (c) 2023 Ricardo Pardini <ricardo@pardini.net>
# This file is a part of the Armbian Build Framework https://github.com/armbian/build/
#
import json
import logging
import os
import sys
import yaml
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from common import armbian_utils
# Prepare logging
armbian_utils.setup_logging()
log: logging.Logger = logging.getLogger("targets-compositor")
# if targets.yaml is not present, process the board inventory:
# - if userpatched boards present, only include those, in all branches. use a fixed RELEASE.
# - if no userpatched boards present, include all core boards, in all branches. use a fixed RELEASE.
# if targets.yaml is present, process it. load the templates, the items in each, and produce a list of invocations to build.
# get the first argv, which is the board inventory file.
board_inventory_file = sys.argv[1]
# read it as json, modern way
with open(board_inventory_file, 'r') as f:
board_inventory = json.load(f)
# Let's resolve the all-boards-all-branches list
all_boards_all_branches = []
boards_by_support_level_and_branches = {}
not_eos_boards_all_branches = []
for board in board_inventory:
for branch in board_inventory[board]["BOARD_POSSIBLE_BRANCHES"]:
all_boards_all_branches.append({"BOARD": board, "BRANCH": branch})
if board_inventory[board]["BOARD_SUPPORT_LEVEL"] not in boards_by_support_level_and_branches:
boards_by_support_level_and_branches[board_inventory[board]["BOARD_SUPPORT_LEVEL"]] = []
boards_by_support_level_and_branches[board_inventory[board]["BOARD_SUPPORT_LEVEL"]].append({"BOARD": board, "BRANCH": branch})
if board_inventory[board]["BOARD_SUPPORT_LEVEL"] != "eos":
not_eos_boards_all_branches.append({"BOARD": board, "BRANCH": branch})
# get the third argv, which is the targets.yaml file.
targets_yaml_file = sys.argv[3]
# read it as yaml, modern way
with open(targets_yaml_file, 'r') as f:
targets = yaml.load(f, Loader=yaml.FullLoader)
# Keep a running list of all the invocations we want to make.
invocations_dict: list[dict] = []  # a list, despite the name; it gets de-duplicated via a dict further down
# Loop over targets
for target_name in targets["targets"]:
target_obj = targets["targets"][target_name]
all_items = []
all_expansions = []
if "expand" in target_obj:
for one_expand_name in target_obj["expand"]:
one_expand = target_obj["expand"][one_expand_name]
one_expansion = {"vars": {}, "configs": (target_obj["configs"] if "configs" in target_obj else []),
"pipeline": (target_obj["pipeline"] if "pipeline" in target_obj else {})}
one_expansion["vars"].update(target_obj["vars"])
one_expansion["vars"].update(one_expand)
all_expansions.append(one_expansion)
else: # single expansion with the vars
one_expansion = {"vars": {}, "configs": (target_obj["configs"] if "configs" in target_obj else []),
"pipeline": (target_obj["pipeline"] if "pipeline" in target_obj else {})}
one_expansion["vars"].update(target_obj["vars"])
all_expansions.append(one_expansion)
# loop over the items, which can themselves be lists
if "items" in target_obj:
for item in target_obj["items"]:
if isinstance(item, list):
for item_item in item:
all_items.append(item_item)
else:
all_items.append(item)
# Now add to all_items by resolving the "items-from-inventory" key
if "items-from-inventory" in target_obj:
# loop over the keys
for key in target_obj["items-from-inventory"]:
to_add = []
if key == "all":
to_add.extend(all_boards_all_branches)
elif key == "not-eos":
to_add.extend(not_eos_boards_all_branches)
else:
to_add.extend(boards_by_support_level_and_branches[key])
log.info(f"Adding '{key}' from inventory to target '{target_name}': {len(to_add)} targets")
all_items.extend(to_add)
for one_expansion in all_expansions:
# loop over the items
for item in all_items:
one_invocation_vars = {}
one_invocation_vars.update(one_expansion["vars"])
one_invocation_vars.update(item)
expanded = {"vars": one_invocation_vars, "configs": one_expansion["configs"], "pipeline": one_expansion["pipeline"]}
invocations_dict.append(expanded)
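# Worked example (illustrative only, not from a real targets file): a target with vars {RELEASE: jammy},
# an "expand" of {desktop: {BUILD_DESKTOP: "yes"}, cli: {BUILD_DESKTOP: "no"}} and two inventory items
# [{BOARD: a, BRANCH: edge}, {BOARD: b, BRANCH: current}] yields 2 expansions x 2 items = 4 invocations,
# each with the item's BOARD/BRANCH merged over that expansion's vars.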
# de-duplicate invocations_dict
invocations_unique = {}
for invocation in invocations_dict:
invocation_key = json.dumps(invocation, sort_keys=True) # this sorts the keys, so that the order of the keys doesn't matter. also, heavy.
invocations_unique[invocation_key] = invocation
log.info(
f"Generated {len(invocations_dict)} invocations from {len(targets['targets'])} target groups, de-duped to {len(invocations_unique)} invocations.")
if len(invocations_dict) != len(invocations_unique):
log.warning(f"Duplicate invocations found, de-duped from {len(invocations_dict)} to {len(invocations_unique)}")
all_invocations = list(invocations_unique.values())
counter = 1
for one_invocation in all_invocations:
# target_id is the counter left-padded with zeros to 10 digits, plus the total number of invocations, left-padded with zeros to 10 digits.
one_invocation["target_id"] = f"{counter:010d}" + f"{len(all_invocations):010d}"
counter += 1
# dump invocation list as json
invocations_json = json.dumps(all_invocations, indent=4, sort_keys=True)
print(invocations_json)
# enough
sys.exit(0)
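To make the de-duplication and target_id scheme above concrete: invocations are keyed by their sorted-keys JSON serialization (so key order is irrelevant), and target_id concatenates two zero-padded 10-digit fields, position then total. A small standalone example:

# Standalone illustration of the de-dup + target_id scheme used above; not part of the tool.
import json

invocations = [
    {"vars": {"BOARD": "someboard", "BRANCH": "edge"}, "configs": [], "pipeline": {}},
    {"vars": {"BRANCH": "edge", "BOARD": "someboard"}, "configs": [], "pipeline": {}},  # same content, different key order
]
unique = {json.dumps(inv, sort_keys=True): inv for inv in invocations}
deduped = list(unique.values())
print(len(deduped))  # 1 -- duplicates collapse regardless of key order

for counter, inv in enumerate(deduped, start=1):
    inv["target_id"] = f"{counter:010d}" + f"{len(deduped):010d}"
print(deduped[0]["target_id"])  # 00000000010000000001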