armbian-next: json-info: cleanup info/json/csv/opensearch, add logging, add some very basic instructions; add Kibana dashboards & docker-compose to bring OS+Kibana up

Ricardo Pardini
2023-01-30 16:51:54 +01:00
parent e3a0f949e1
commit 1fc13a57a7
6 changed files with 104 additions and 30 deletions


@@ -6,12 +6,28 @@ function cli_json_info_pre_run() {
 function cli_json_info_run() {
     display_alert "Generating JSON info" "for all boards; wait" "info"
     obtain_and_check_host_release_and_arch # sets HOSTRELEASE
-    prepare_python_and_pip # requires HOSTRELEASE
+    prep_conf_main_minimal_ni
 
-    # The info extractor itself...
-    run_host_command_logged "${PYTHON3_VARS[@]}" "${PYTHON3_INFO[BIN]}" "${SRC}"/lib/tools/info.py ">" "${SRC}/output/info.json"
+    function json_info_only() {
+        prepare_python_and_pip # requires HOSTRELEASE
+        # The info extractor itself...
+        run_host_command_logged "${PYTHON3_VARS[@]}" "${PYTHON3_INFO[BIN]}" "${SRC}"/lib/tools/info.py ">" "${SRC}/output/info.json"
+        # Also convert output to CSV for easy import into Google Sheets etc
+        run_host_command_logged "${PYTHON3_VARS[@]}" "${PYTHON3_INFO[BIN]}" "${SRC}"/lib/tools/json2csv.py "<" "${SRC}/output/info.json" ">" "${SRC}/output/info.csv"
+    }
+
+    do_with_default_build do_with_logging json_info_only
+
+    display_alert "JSON info generated" "in output/info.json" "info"
+    display_alert "CSV info generated" "in output/info.csv" "info"
+    display_alert "To load the OpenSearch dashboards:" "
+    pip3 install opensearch-py # install needed lib to talk to OS
+    docker-compose --file tools/dashboards/docker-compose-opensearch.yaml up -d # start up OS in docker-compose
+    python3 lib/tools/index-opensearch.py < output/info.json # index the info.json into OS
+    # go check out http://localhost:5601
+    docker-compose --file tools/dashboards/docker-compose-opensearch.yaml down # shut down OS when you're done
+    " "info"
 
-    # Also convert output to CSV for easy import into Google Sheets etc
-    run_host_command_logged "${PYTHON3_VARS[@]}" "${PYTHON3_INFO[BIN]}" "${SRC}"/lib/tools/json2csv.py "<" "${SRC}/output/info.json" ">" "${SRC}/output/info.csv"
 }
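The recipe printed by that last display_alert can be smoke-tested before indexing. A minimal sketch, assuming output/info.json was already generated by the extractor above and follows the array-of-objects shape that lib/tools/index-opensearch.py expects:

# Sanity-check output/info.json before feeding it to index-opensearch.py.
import json

with open("output/info.json") as f:
    data = json.load(f)

# The indexer assumes an array of objects, one per board/branch combination.
assert isinstance(data, list), "info.json should be an array of objects"
print(f"info.json contains {len(data)} objects")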

lib/tools/index-opensearch.py

@@ -2,20 +2,15 @@
 import json
 import sys
-from opensearchpy import OpenSearch # pip install opensearch-py
+from opensearchpy import OpenSearch # pip3 install opensearch-py
 
 def eprint(*args, **kwargs):
     print(*args, file=sys.stderr, **kwargs)
 
-# info = get_info_for_one_board(board, all_params)
-print(json.dumps({}, indent=4, sort_keys=True))
-eprint("Hello")
-
 # Read JSON from stdin
-# - should be array of objects
+# - should be an array of objects
 # - loop over array and index each obj into OS, into the passed index
-# read_from_stdin = sys.stdin.read()
@@ -23,8 +18,8 @@ json_object = json.load(sys.stdin)
 eprint("Loaded {} objects from stdin...".format(len(json_object)))
 
-host = '192.168.66.55'
-port = 31920
+host = '127.0.0.1'
+port = 9200
 
 # Create the OpenSearch client.
 client = OpenSearch(hosts=[{'host': host, 'port': port}], http_compress=False, use_ssl=False)
@@ -35,18 +30,21 @@ index_body = {'settings': {'index': {'number_of_shards': 1, 'number_of_replicas'
 # Delete the index; remove old data.
 try:
-    response = client.indices.delete(index=index_name)
-    print('\nDeleting index:')
-    print(response)
+    delete_response = client.indices.delete(index=index_name)
+    eprint('\nDeleting index...')
+    # print(delete_response)
 except:
     eprint("Failed to delete index {}".format(index_name))
 
-response = client.indices.create(index_name, body=index_body)
-print('\nCreating index:')
-print(response)
+eprint('\nCreating index...')
+response_create = client.indices.create(index_name, body=index_body)
+# print(response_create)
 
 for obj in json_object:
     # print(obj)
-    response = client.index(index=index_name, body=obj, refresh=True)
-    print('\nAdding document:')
-    print(response)
+    response = client.index(index=index_name, body=obj)
+
+eprint("\nRefreshing index...")
+client.indices.refresh(index=index_name)
+eprint("\nDone.")

lib/tools/info.py

@@ -112,13 +112,13 @@ def run_armbian_compile_and_parse(path_to_compile_sh, armbian_src_path, compile_
 # Find the location of compile.sh, relative to this Python script.
 this_script_full_path = os.path.realpath(__file__)
-eprint("Real path to this script", this_script_full_path)
+# eprint("Real path to this script", this_script_full_path)
 
 armbian_src_path = os.path.realpath(os.path.join(os.path.dirname(this_script_full_path), "..", ".."))
-eprint("Real path to Armbian SRC", armbian_src_path)
+# eprint("Real path to Armbian SRC", armbian_src_path)
 
 compile_sh_full_path = os.path.realpath(os.path.join(armbian_src_path, "compile.sh"))
-eprint("Real path to compile.sh", compile_sh_full_path)
+# eprint("Real path to compile.sh", compile_sh_full_path)
 
 # Make sure it exists
 if not os.path.exists(compile_sh_full_path):
@@ -213,7 +213,7 @@ if True:
             board_info, possible_branch)
         every_future.append(future)
 
-    eprint("Waiting for all futures...")
+    eprint(f"Waiting for all {len(every_future)} configurations to be computed... this might take a long time.")
     executor.shutdown(wait=True)
     eprint("Done, all futures awaited")

lib/tools/json2csv.py

@@ -51,7 +51,7 @@ for column in columns:
     if len(set(values)) == 1:
         columns_to_remove.append(column)
 
-eprint("columns with all-identical values: {}: '{}'".format(len(columns_to_remove), columns_to_remove))
+# eprint("columns with all-identical values: {}: '{}'".format(len(columns_to_remove), columns_to_remove))
 
 # Now actually filter columns, removing columns_to_remove
 columns = [column for column in columns if column not in columns_to_remove]
@@ -64,4 +64,4 @@ writer.writeheader()
 for obj in flat:
     writer.writerow(obj)
 
-eprint("Done writing to stdout.")
+eprint("Done writing CSV to stdout.")

File diff suppressed because one or more lines are too long (likely the Kibana dashboards export added by this commit)

tools/dashboards/docker-compose-opensearch.yaml

@@ -0,0 +1,43 @@
+version: '3'
+
+services:
+  opensearch-node1:
+    image: ghcr.io/rpardini/opensearch-minimal-multiarch:1.3
+    container_name: opensearch-node1
+    environment:
+      - cluster.name=opensearch-cluster
+      - node.name=opensearch-node1
+      - discovery.seed_hosts=opensearch-node1
+      - cluster.initial_master_nodes=opensearch-node1
+      - bootstrap.memory_lock=true # along with the memlock settings below, disables swapping
+      - "OPENSEARCH_JAVA_OPTS=-Xms2048m -Xmx2048m" # minimum and maximum Java heap size; recommend setting both to 50% of system RAM
+    ulimits:
+      memlock:
+        soft: -1
+        hard: -1
+      nofile:
+        soft: 65536 # maximum number of open files for the OpenSearch user; set to at least 65536 on modern systems
+        hard: 65536
+    volumes:
+      - opensearch-data1:/usr/share/opensearch/data
+    ports:
+      - 9200:9200
+      - 9300:9300
+    networks:
+      - opensearch-net
+  opensearch-dashboards:
+    image: ghcr.io/rpardini/opensearch-dashboards-minimal-multiarch:1.3
+    container_name: opensearch-dashboards
+    ports:
+      - 5601:5601
+    expose:
+      - "5601"
+    environment:
+      OPENSEARCH_HOSTS: '["http://opensearch-node1:9200"]'
+    networks:
+      - opensearch-net
+
+volumes:
+  opensearch-data1:
+
+networks:
+  opensearch-net:
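Once this stack is up (docker-compose --file tools/dashboards/docker-compose-opensearch.yaml up -d), a quick way to confirm the node answers before running the indexer; this reuses the same plain-HTTP client settings as lib/tools/index-opensearch.py:

# Connectivity check against the compose stack above.
from opensearchpy import OpenSearch  # pip3 install opensearch-py

client = OpenSearch(hosts=[{'host': '127.0.0.1', 'port': 9200}], http_compress=False, use_ssl=False)
if client.ping():
    print(client.info())  # cluster name and version; the dashboards UI is at http://localhost:5601
else:
    print("OpenSearch is not answering yet; the containers may still be starting")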