From 534f8f81cc818ebcf65566191e2412653978df3b Mon Sep 17 00:00:00 2001
From: Edwin Joassart
Date: Fri, 11 Nov 2022 11:22:02 +0100
Subject: [PATCH] first working draft of .etch / preloading lib

w/ forked gunzip-maybe and @types/gunzip-maybe so we don't need esinterop

change-type: minor
---
 lib/dotetch-preloading/appsJson.ts           |  79 ++++
 lib/dotetch-preloading/baseImage.ts          |  31 ++
 lib/dotetch-preloading/digestStream.ts       |  30 ++
 lib/dotetch-preloading/docker-parse-image.ts |  47 +++
 lib/dotetch-preloading/expandImg.mjs         | 170 ++++++++
 lib/dotetch-preloading/images.ts             |  23 +
 lib/dotetch-preloading/index.ts              |   3 +
 lib/dotetch-preloading/interface-manifest.ts | 126 ++++++
 lib/dotetch-preloading/layers.ts             | 364 ++++++++++++++++
 lib/dotetch-preloading/packer.ts             |  42 ++
 lib/dotetch-preloading/registry.ts           | 332 +++++++++++++++
 lib/dotetch-preloading/repositoriesjson.ts   |  64 +++
 .../streamPreloadingAssets.ts                | 161 +++++++
 lib/dotetch-preloading/supervisor.ts         |  61 +++
 lib/index.ts                                 |   6 +-
 package-lock.json                            | 394 +++++++++++++++++-
 package.json                                 |   5 +
 tsconfig.json                                |  31 +-
 typings/gunzip-maybe/index.d.ts              |   1 +
 19 files changed, 1941 insertions(+), 29 deletions(-)
 create mode 100644 lib/dotetch-preloading/appsJson.ts
 create mode 100644 lib/dotetch-preloading/baseImage.ts
 create mode 100644 lib/dotetch-preloading/digestStream.ts
 create mode 100644 lib/dotetch-preloading/docker-parse-image.ts
 create mode 100644 lib/dotetch-preloading/expandImg.mjs
 create mode 100644 lib/dotetch-preloading/images.ts
 create mode 100644 lib/dotetch-preloading/index.ts
 create mode 100644 lib/dotetch-preloading/interface-manifest.ts
 create mode 100644 lib/dotetch-preloading/layers.ts
 create mode 100644 lib/dotetch-preloading/packer.ts
 create mode 100644 lib/dotetch-preloading/registry.ts
 create mode 100644 lib/dotetch-preloading/repositoriesjson.ts
 create mode 100755 lib/dotetch-preloading/streamPreloadingAssets.ts
 create mode 100644 lib/dotetch-preloading/supervisor.ts
 create mode 100644 typings/gunzip-maybe/index.d.ts

diff --git a/lib/dotetch-preloading/appsJson.ts b/lib/dotetch-preloading/appsJson.ts
new file mode 100644
index 00000000..3ef37690
--- /dev/null
+++ b/lib/dotetch-preloading/appsJson.ts
@@ -0,0 +1,79 @@
+/**
+ * `Apps.json` is the file that informs the supervisor of what has been preloaded, which services should be started, and with which config.
+ *
+ * `Apps.json` content is a subset of the `target state` for a device in a fleet running a given release.
+ * Once we have that target state for the fleet, we need to go down one level to `apps` and keep only that element.
+ *
+ * In Apps.json we have the list of all the images that make up a release.
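+ *
+ * The shape that `getImageIds` below relies on is roughly the following (illustrative, trimmed) :
+ *   {
+ *     "apps": {
+ *       "<appId>": {
+ *         "releases": {
+ *           "<releaseId>": {
+ *             "services": {
+ *               "<serviceKey>": { "image": "<image_name>@<image_hash>" }
+ *             }
+ *           }
+ *         }
+ *       }
+ *     }
+ *   }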
+ */
+
+import axios from "axios"
+
+/**
+ * Derives Apps.json from the target state obtained from the api
+ *
+ * This requires merge of https://github.com/balena-io/open-balena-api/pull/1081 in open-balena-api
+ *
+ * @param {string} app_id - app_id === fleet_id /!\ fleet_id != fleet_uuid
+ * @param {string} api - api server url
+ * @param {string} token - token to access api
+ * @returns {json} - apps.json object
+ */
+const getAppsJson = async ({ app_id, api, token }: any) => {
+  const headers = {
+    headers: {
+      "Content-Type": "application/json",
+      Authorization: `${token}`,
+    },
+  }
+
+  try {
+    const appRes = await axios({
+      url: `${api}/v6/application(${app_id})`,
+      ...headers,
+    })
+    const uuid = appRes?.data?.d?.[0]?.uuid
+    console.log("GOT UUID ", uuid)
+    // if (!uuid) throw Error(`Error: can't find fleet (${app_id}`);
+
+    const { data } = await axios({
+      url: `${api}/device/v3/fleet/${uuid}/state`,
+      ...headers,
+    })
+
+    return data[uuid] // go down one level to transform the target state into a valid Apps.json
+  } catch (error) {
+    console.error("\n\n==> getAppsJson error:", error)
+  }
+}
+
+/**
+ * Takes an apps.json and returns the list of images for an app & release.
+ * If app_id and/or release_id is unknown it will return the first one found.
+ * // TODO: return all instead of first when no app or release is specified.
+ */
+interface ImageIdsInput {
+  appsJson: any // TODO: get proper type for appsJson V3
+}
+
+interface Image {
+  image_name: string
+  image_hash: string
+}
+
+const getImageIds = ({ appsJson }: ImageIdsInput): Image[] => {
+  // TODO: prepare for multiapps and loop on apps instead of getting only the 1st
+  const appId = Object.keys(appsJson.apps)[0]
+  const releaseId = Object.keys(appsJson.apps?.[appId]?.releases)[0]
+  console.log(`==> appId: ${appId} & releaseId: ${releaseId}`)
+  const imageKeys = Object.keys(appsJson.apps?.[appId]?.releases?.[releaseId]?.services)
+  const imageNames = imageKeys.map((key) => appsJson.apps?.[appId]?.releases?.[releaseId]?.services[key].image)
+  return imageNames.map((image) => {
+    const [image_name, image_hash] = image.split("@")
+    return { image_name, image_hash }
+  })
+}
+
+export { getAppsJson, getImageIds }
diff --git a/lib/dotetch-preloading/baseImage.ts b/lib/dotetch-preloading/baseImage.ts
new file mode 100644
index 00000000..724b49c2
--- /dev/null
+++ b/lib/dotetch-preloading/baseImage.ts
@@ -0,0 +1,31 @@
+/**
+ * Get the base image we're going to preload assets in (balenaos.img)
+ * */
+
+interface StreamBaseImageIn {
+  pipeStreamFrom: NodeJS.ReadableStream
+  pipeStreamTo: NodeJS.WritableStream
+}
+
+/**
+ * Awaitable pipe stream from input to output
+ */
+const streamBaseImage = ({ pipeStreamFrom, pipeStreamTo }: StreamBaseImageIn): Promise<boolean> =>
+  new Promise((resolve, reject) => {
+    console.log("== Start streaming base image (balenaOs) @streamBaseImage ==")
+
+    pipeStreamFrom.pipe(pipeStreamTo)
+
+    pipeStreamFrom.on("end", function () {
+      // we're good, we can continue the process
+      console.log("== End of base image streaming (balenaOs) @streamBaseImage ==")
+      resolve(true)
+    })
+
+    pipeStreamFrom.on("error", function (error) {
+      // something went wrong
+      reject(error)
+    })
+  })
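+
+// Usage sketch (illustrative, mirrors how streamPreloadingAssets drives it) :
+//   const entry = packStream.entry({ name: `${balenaosRef}.gz`, mode: 644, size: balenaosSize })
+//   await streamBaseImage({ pipeStreamFrom: balenaosStream, pipeStreamTo: entry })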
+
+export { streamBaseImage }
diff --git a/lib/dotetch-preloading/digestStream.ts b/lib/dotetch-preloading/digestStream.ts
new file mode 100644
index 00000000..cb05de91
--- /dev/null
+++ b/lib/dotetch-preloading/digestStream.ts
@@ -0,0 +1,30 @@
+/**
+ * minimal typescript reimplementation of https://github.com/jeffbski/digest-stream/blob/master/lib/digest-stream.js
+ *
+ * This lets a stream pass through, then reports the sha256 hash + size of the content.
+ *
+ */
+import { Transform } from "stream";
+import { createHash } from "crypto";
+
+const digestStream = (exfiltrate: Function): Transform => {
+  const digester = createHash("sha256");
+  let length = 0;
+
+  const hashThrough = new Transform({
+    transform(chunk: Buffer, _, callback) {
+      digester.update(chunk);
+      length += chunk.length;
+      this.push(chunk);
+      callback();
+    },
+  });
+
+  hashThrough.on("end", () => {
+    exfiltrate(digester.digest("hex"), length);
+  });
+
+  return hashThrough;
+};
+
+export { digestStream };
diff --git a/lib/dotetch-preloading/docker-parse-image.ts b/lib/dotetch-preloading/docker-parse-image.ts
new file mode 100644
index 00000000..1579f49e
--- /dev/null
+++ b/lib/dotetch-preloading/docker-parse-image.ts
@@ -0,0 +1,47 @@
+/**
+ * Typescript version of
+ * https://github.com/mafintosh/docker-parse-image/blob/master/index.js
+ */
+
+export interface DockerParsedImage {
+  registry: string | null
+  namespace?: string | null
+  repository: string | null
+  tag?: string | null
+  name: string
+  fullname: string
+}
+
+const dockerParseImage = (image: string): DockerParsedImage => {
+  const registryArray = image.split("/")
+
+  let registry = registryArray[0]
+  let namespace = registryArray[1]
+  const repository = registryArray[2].split("@")[0]
+  let tag = registryArray[2].split("@")[1]
+
+  if (!namespace && registry && !registry.includes(":") && !registry.includes(".")) {
+    namespace = registry
+    registry = ""
+  }
+
+  registry = registry ? `${registry}` : ""
+  namespace = namespace && namespace !== "library" ? `${namespace}` : ""
+  tag = tag && tag !== "latest" ? `:${tag}` : ""
+
+  // note: the upstream library joins the parts with `/` separators; keep them here
+  // so `name` / `fullname` stay valid image references
+  const name = `${registry ? `${registry}/` : ""}${namespace ? `${namespace}/` : ""}${repository}${tag}`
+  const fullname = `${registry ? `${registry}/` : ""}${namespace ? `${namespace}/` : "library/"}${repository}${tag || ":latest"}`
+
+  const result = {
+    registry: registry || null,
+    namespace: namespace || null,
+    repository: repository || null,
+    tag: tag || null,
+    name,
+    fullname,
+  }
+
+  return result
+}
+
+export { dockerParseImage }
diff --git a/lib/dotetch-preloading/expandImg.mjs b/lib/dotetch-preloading/expandImg.mjs
new file mode 100644
index 00000000..003cb243
--- /dev/null
+++ b/lib/dotetch-preloading/expandImg.mjs
@@ -0,0 +1,170 @@
+import fs from 'fs';
+import path from 'path';
+import { spawn, spawnSync } from 'child_process';
+import { pipeline } from 'stream';
+// import pLimit from 'p-limit';
+// import gunzip from "extract-zip";
+import gunzip from "gunzip-maybe"
+import { promisify } from "util";
+
+const IMAGES_BASE = path.resolve('images-base');
+const IMAGES_EXPANDED = path.resolve('images-expanded');
+console.log(`IMAGES_BASE: ${IMAGES_BASE}`);
+console.log(`IMAGES_EXPANDED: ${IMAGES_EXPANDED}`);
+
+// In bytes:
+const SECTOR_SIZE = 512
+const MBR_SIZE = 512
+const GPT_SIZE = SECTOR_SIZE * 34
+const MBR_BOOTSTRAP_CODE_SIZE = 446
+
+/**
+ * createDDArgs
+ * dd helper
+ * @param {string} inImageName
+ * @param {string} outImageName
+ * @param {number} resizeMultiplier
+ * @param {number} partitionStartBytes
+ * @returns {Array} argsList
+ * // obs is the output block size and ibs is the input block size. If you specify bs without ibs or obs this is used for both.
+ * // Seek will just "inflate" the output file.
+ * // Seek=7 means that at the beginning of the output file,
+ * // 7 "empty" blocks of output block size=obs will be inserted.
+ * // This is a way to create very big files quickly.
+ * // Or to skip over data at the start which you do not want to alter.
+ * // Empty blocks only result if the output file initially did not have that much data.
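+ * //
+ * // For a GPT image the generated invocation looks roughly like this (illustrative numbers:
+ * // resizeMultiplier=7, partitionStartBytes=50000000, and GPT_SIZE=512*34=17408) :
+ * //   dd if=in.img of=out.img ibs=7168 obs=1024 conv=notrunc status=progress skip=17408 seek=17408 count=49982592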
+ */
+function createDDArgs(inImageName, outImageName, resizeMultiplier, partitionStartBytes) {
+  // FIXME: hardcoded for now ('GPT' || 'DOS' always evaluated to 'GPT'); the DOS branch below is kept for later
+  const partitionTableLabel = 'GPT';
+  const argsListMore = {}
+  argsListMore.sizing = [`count=${MBR_BOOTSTRAP_CODE_SIZE}`, 'seek=5'];
+  if (partitionTableLabel === 'DOS') {
+    argsListMore.sizing = [`skip=${MBR_SIZE}`, `seek=${MBR_SIZE}`, `count=${partitionStartBytes - MBR_SIZE}`];
+  }
+  if (partitionTableLabel === 'GPT') {
+    argsListMore.sizing = [`skip=${GPT_SIZE}`, `seek=${GPT_SIZE}`, `count=${partitionStartBytes - GPT_SIZE}`];
+  }
+  console.log(partitionTableLabel, 'partitionTableLabel', argsListMore.sizing, 'partitionStartBytes', partitionStartBytes);
+
+  const argsList = [
+    `if=${inImageName}`,
+    `of=${outImageName}`,
+    `ibs=${1024 * resizeMultiplier}`,
+    // `bs=${resizeMultiplier}M`, // one MiB * resizeMultiplier
+    `obs=1024`,
+    'conv=notrunc',
+    'status=progress',
+    // `iflag=count_bytes, skip_bytes`, // count and skip in bytes
+    // `oflag=seek_bytes` // seek in bytes
+    ...argsListMore.sizing
+  ];
+  return argsList;
+}
+
+// fork() exec() spawn() spawnSync()
+// https://github.com/adriano-di-giovanni/node-df/blob/master/lib/index.js
+const getPartitions = async (image) => {
+  // const diskutilResults = await spawn('diskutil', ['list']);
+  // console.log('diskutil', await diskutilResults);
+  const partitions = spawn('df', ['-hkP'], {
+    // cwd: '/',
+    // windowsHide: true,
+    stdio: [
+      /* Standard: stdin, stdout, stderr */
+      // 'inherit',
+      'ignore',
+      /* Custom: pipe:3, pipe:4, pipe:5 */
+      'pipe', process.stderr
+    ]});
+  const partitionsResults = {partitions: [], partitionsLength: 0};
+
+  partitions.stdout.on('data', data => {
+    const parsedDf = parseDf(data);
+    // const strData = splitDf(data);
+    // partitionsResults.partitionArrayLength = strData.length;
+    // // console.log('strData.length', strData.length);
+    // const columnHeaders = strData.shift();
+    // console.log('columnHeaders', columnHeaders);
+    // const formatted = formatDf(strData, columnHeaders);
+    partitionsResults.partitions = parsedDf.partitions;
+    partitionsResults.partitionsLength = parsedDf.partitionsLength;
+  });
+
+  // partitions.stderr.on('data', data => {
+  //   assert(false, 'NOPE stderr');
+  // });
+
+  // resolve once `df` exits; otherwise we'd return before stdout has been parsed
+  return new Promise((resolve) => {
+    partitions.on('close', code => {
+      console.log('Child exited with', code, 'and stdout has been saved');
+      console.log('partitionsResults', partitionsResults);
+      resolve(partitionsResults);
+    });
+  });
+}
+
+const parseDf = (data) => {
+  const strData = splitDf(data);
+  const columnHeaders = strData.shift();
+  const formatted = formatDf(strData, columnHeaders);
+  return {partitions: formatted, partitionsLength: formatted.length};
+}
+
+const splitDf = (data) => {
+  return data.toString()
+    .replace(/ +(?= )/g,'') // replace multiple spaces between device parameters with one space
+    .split('\n') // split by newline
+    .map((line) => line.split(' ')); // split each device by one space
+}
+
+const formatDf = (strData, columnHeaders) => {
+  return strData.map((devDisk) => {
+    const partitionObj = {};
+    for (const [index, value] of devDisk.entries()) {
+      partitionObj[columnHeaders[index]] = value
+    }
+    return partitionObj;
+  });
+}
+
+export const expandImg = async (img, partitionSizeStart = 1) => {
+  if (!img) {
+    throw new Error(`No img: "${img}"`);
+  }
+  const unzippedPath = `${IMAGES_BASE}/unzipped/`
+  if (img.includes("zip")) {
+    // NOTE: gunzip-maybe is a plain transform stream (it inflates gzip and passes anything else
+    // through); it is not extract-zip's extract-to-directory API, so pipe through it explicitly
+    // await gunzip(img, {dir: `${IMAGES_BASE}/unzipped/`});
+    await promisify(pipeline)(
+      fs.createReadStream(`${IMAGES_BASE}/zipped/${img}`),
+      gunzip(),
+      fs.createWriteStream(`${unzippedPath}${img.split('.').slice(0, -1).join('.')}`)
+    );
+  }
+  // else {
+
+  // const diskutilResults = await spawn('diskutil', ['list']);
+  // console.log('diskutil', await diskutilResults);
+  const generateRandomName = Math.random().toString(36).substring(2, 15);
+
+  const inImageName = `${unzippedPath}${img.split('.').slice(0, -1).join('.')}`;
+  const outImageName = `${IMAGES_EXPANDED}/${generateRandomName}.img`;
+  const argsList = createDDArgs(inImageName, outImageName, 7, partitionSizeStart);
+  const dd = spawn('dd', argsList, {
+    cwd: '/',
+    windowsHide: true,
+    stdio: [
+      /* Standard: stdin, stdout, stderr */
+      'ignore',
+      /* Custom: pipe:3, pipe:4, pipe:5 */
+      'pipe', process.stderr
+    ]});
+  // wait for dd to finish before handing the image name back
+  await new Promise((resolve, reject) => {
+    dd.on('close', (code) => (code === 0 ? resolve(code) : reject(new Error(`dd exited with code ${code}`))));
+    dd.on('error', reject);
+  });
+  return generateRandomName;
+};
+
+// strace dd if=/dev/disk5 of=./images-expanded/tuckers.img bs=4M conv=notrunc
+// dd if=/dev/disk5 of=./images-expanded/tuckers.img bs=4M conv="notrunc"
+// bs=4M
+
+const getImages = async () => {
+  const image = 'balena-cloud-preloaded-raspberrypi4-64-2022.1.1-v12.11.0.img.zip'
+  const {partitions, partitionsLength} = await getPartitions(image);
+  console.log('partitions', partitions, 'partitionsLength', partitionsLength);
+  const imageName = await expandImg(image)
+  console.log('imageName', imageName);
+}
+getImages()
\ No newline at end of file
diff --git a/lib/dotetch-preloading/images.ts b/lib/dotetch-preloading/images.ts
new file mode 100644
index 00000000..f03a0961
--- /dev/null
+++ b/lib/dotetch-preloading/images.ts
@@ -0,0 +1,23 @@
+/** Prepare injectable files for all images */
+
+const getImagesConfigurationFiles = (manifests: any) => {
+  const dockerImageOverlay2Imagedb = "docker/image/overlay2/imagedb"
+  console.log("MANIFESTS => ", manifests)
+  return manifests
+    .map(({ configManifestV2, imageId }: any) => {
+      const shortImage_id = imageId.split(":")[1]
+      return [
+        {
+          header: { name: `${dockerImageOverlay2Imagedb}/content/sha256/${shortImage_id}`, mode: 644 },
+          content: JSON.stringify(configManifestV2),
+        },
+        {
+          header: { name: `${dockerImageOverlay2Imagedb}/metadata/sha256/${shortImage_id}/lastUpdated`, mode: 644 },
+          content: new Date().toISOString(),
+        },
+      ]
+    })
+    .flat()
+}
+
+export { getImagesConfigurationFiles }
diff --git a/lib/dotetch-preloading/index.ts b/lib/dotetch-preloading/index.ts
new file mode 100644
index 00000000..72d45a98
--- /dev/null
+++ b/lib/dotetch-preloading/index.ts
@@ -0,0 +1,3 @@
+import { streamPreloadingAssets } from "./streamPreloadingAssets";
+
+export { streamPreloadingAssets };
diff --git a/lib/dotetch-preloading/interface-manifest.ts b/lib/dotetch-preloading/interface-manifest.ts
new file mode 100644
index 00000000..58b41c3b
--- /dev/null
+++ b/lib/dotetch-preloading/interface-manifest.ts
@@ -0,0 +1,126 @@
+export interface Manifests {
+  manifests: Manifest[]
+}
+
+export interface Manifest {
+  schemaVersion: number
+  mediaType: string
+  config: ManifestConfig
+  layers: ManifestConfig[]
+}
+
+export interface ManifestConfig {
+  mediaType: MediaType
+  size: number
+  digest: string
+}
+
+export enum MediaType {
+  ApplicationVndDockerContainerImageV1JSON = "application/vnd.docker.container.image.v1+json",
+  ApplicationVndDockerImageRootfsDiffTarGzip = "application/vnd.docker.image.rootfs.diff.tar.gzip",
+}
+
+export interface ConfigManifestV2 {
+  architecture: string
+  config: ContainerConf
+  container: string
+  container_config: ContainerConf
+  created: string
+  docker_version: string
+  history: History[]
+  os: string
+  rootfs: Rootfs
+}
+
+export interface ContainerConf {
+  Hostname: string
+  Domainname: string
+  User: string
+  AttachStdin: boolean
+  AttachStdout: boolean
+  AttachStderr: boolean
+  Tty: boolean
+  OpenStdin: boolean
+  StdinOnce: boolean
+  Env: string[]
+  Cmd: string[]
+  Image: string
+  Volumes: null
+  WorkingDir: string
+  Entrypoint: string[]
+  OnBuild: null
+  Labels: Labels
+}
+
+export interface Labels {
+  "io.balena.architecture": string
+  "io.balena.device-type": string
+  "io.balena.qemu.version": string
+}
+
+export interface History {
+  created: string
+  created_by: string
+  empty_layer?: boolean
+}
+
+export interface Rootfs {
+  type: string
+  diff_ids: string[]
+}
+
+export interface ManifestsAll {
+  manifestInfosFromRegistry: ManifestInfosFromRegistry[]
+}
+
+export interface ManifestInfosFromRegistry {
+  manifest: Manifest
+  digests: ManifestConfig[]
+  configDigest: string
+  configManifestV2: ConfigManifestV2
+  imageId: string
+  imageName: string
+  diffIds: string[]
+  imageUrl: string
+  token: string
+}
+
+export interface ManifestInfosRepos extends ManifestInfosFromRegistry {
+  imageHash: string
+  imageName: string
+  isSupervisor: boolean
+  supervisorVersion: string
+}
+
+export interface Image {
+  imageName: string
+  imageHash: string
+}
+
+export interface ImagesbaseAndPreload {
+  imageHash: string
+  imageName: string
+  isSupervisor?: boolean
+  supervisorVersion?: string
+}
+
+export interface RepositoriesBase {
+  Repositories: Repositories
+}
+
+export interface Repositories {
+  [key: string]: { [key: string]: string }
+}
diff --git a/lib/dotetch-preloading/layers.ts b/lib/dotetch-preloading/layers.ts
new file mode 100644
index 00000000..686924e1
--- /dev/null
+++ b/lib/dotetch-preloading/layers.ts
@@ -0,0 +1,364 @@
+import * as crypto from "crypto"
+import * as gunzip from "gunzip-maybe"
+import { digestStream } from "./digestStream"
+import { getUrls, getBlob } from "./registry"
+import { inspect } from "util"
+import * as tar from "tar-stream"
+import { Pack, Headers } from "tar-stream"
+
+interface LayerMeta {
+  size: number
+  diff_id: string | null
+}
+
+interface Layer {
+  diff_id: string
+  chain_id: string
+  isDuplicate: boolean
+  token: string
+  parent: string | null
+  link: string
+  lower?: string
+  size?: number
+  cache_id?: string
+}
+
+interface LayerRaw {
+  size: number
+  digest: string
+}
+
+/**
+ * Precompute _Layers_ array
+ * A flat array of objects representing all layers from all images for all the apps/releases to preload
+ *
+ * Object contains :
+ * - `token` used to auth to the registry
+ * - `diff_id` from the image config manifest
+ * - `chain_id` computed for the layer
+ * - `isDuplicate` boolean, true if an identical layer has been found
+ * - `parent` chain id of the parent layer (if not first layer in the chain)
+ * - `link` a random uppercase string (13 random bytes, hex encoded); a duplicated layer reuses the link of its first encounter
+ * - `lower` chain of all links up to the topmost layer in the chain
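+ *
+ * e.g. one computed entry (illustrative values) :
+ *   {
+ *     token: "...",
+ *     diff_id: "sha256:573a4eb582cc8a741363bc2f323baf020649960822435922c50d956e1b22a787",
+ *     chain_id: "e265835b28ac16782ef429b44427c7a72cdefc642794515d78a390a72a2eab42",
+ *     parent: null,
+ *     isDuplicate: false,
+ *     link: "A1B2C3D4E5F60718293A4B5C6D",
+ *     lower: null
+ *   }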
+ *
+ * Note : here we precompute almost all values we'll need later to create the files/folders for the layer.
+ * `cache_id` is not precomputed at this stage for performance reasons.
+ *
+ * `cache_id` will be randomly assigned to each layer when downloading.
+ * While downloading we'll compute the `diff_id` (sha256 hash of gunzipped data) of the layer we're downloading
+ * and find the matching pre-computed metadata in this `layers` array.
+ *
+ * @param {[Object]} manifests - array of image config manifests
+ * */
+
+async function getLayers(manifests: any) {
+  console.log(`== getting Layers @getLayers ==`)
+  return manifests
+    .map(({ diff_ids, token }: { diff_ids: string[]; token: string }) => {
+      // loop on images and compute / generate values for all layers
+      // use same `cache` and `link` in case of duplicated layers (layers with same chain_id in two images)
+      // note : we'll generate `cache_id` later when processing the layer and link back then
+      const computedLayers: Layer[] = []
+      for (const key in diff_ids) {
+        const diff_id = diff_ids[parseInt(key)]
+        const chain_id =
+          parseInt(key) == 0
+            ? diff_id.split(":")[1]
+            : computeChainId({
+                previousChainId: computedLayers[parseInt(key) - 1].chain_id,
+                diff_id,
+              })
+        const duplicateOf = computedLayers.find((layer) => layer.chain_id === chain_id)
+        computedLayers.push({
+          token,
+          diff_id,
+          chain_id,
+          parent: parseInt(key) > 0 ? computedLayers[parseInt(key) - 1].chain_id : null,
+          isDuplicate: Boolean(duplicateOf),
+          link: duplicateOf ? duplicateOf.link : crypto.randomBytes(13).toString("hex").toUpperCase(),
+        })
+      }
+      return computedLayers
+    })
+    .map((layers: Layer[]) => {
+      // 7. compute the lower link chain
+      // `lower` chain is a string composed of the path to the `link` of all lower layers in the chain
+      // i.e. : `l/*sublayer1link*:l/*sublayer2link:l/*sublayer3link`
+      // lowest layer doesn't have any (empty lower)
+      const chain = layers.map((layer) => `l/${layer.link}`)
+      return layers.map((layer, key) => ({
+        ...layer,
+        lower: key > 0 ? chain.slice(0, key).join(":") : null,
+      }))
+    })
+    .flat()
+}
+
+/**
+ * Given a list of distribution manifests, return a flattened list of deduped layer blob digests ready to be downloaded
+ * Note: these digests are not `diff_id` as these are from *compressed* layers (tar.gz) while diff_id are from *uncompressed* (tar)
+ * @param {[Object]} manifests - array of distribution manifests with auth
+ * @return {[Object]} layerUrls - array of layer blob digests with authentication token
+ */
+const getLayerDistributionDigests = (manifests: any) => {
+  return manifests
+    .map(({ manifest, image_name, token }: any) =>
+      manifest.layers.map((layer: LayerRaw) => ({
+        image_name,
+        token,
+        compressedSize: layer.size,
+        layer: layer.digest.split(":")[1],
+      }))
+    )
+    .flat()
+    // dedupe on the digest (not object identity) to prevent downloading layers shared across images twice
+    .filter((layer: any, index: number, layers: any[]) => layers.findIndex((l) => l.layer === layer.layer) === index)
+}
+
+/**
+ * Generate a random 64-char lowercase hex id (used as `cache_id`)
+ *
+ * we generate the random cache_id for the layer here (instead of doing so when pre-computing layer infos)
+ * so we can stream the layer prior to knowing its `diff_id`
+ * getting the diff_id requires hashing (sha256) the layer's `tarball`, which means we need the whole layer first
+ *
+ * As we don't want to keep the whole layer in memory, we'll hash while streaming (on the wire)
+ * and link the `cache` with all layers having a matching `diff_id` (there might be duplicate layers with same `diff_id` but different `chain_id`)
+ */
+const getRandomDiffId = (): string => crypto.randomBytes(32).toString("hex")
+
+/**
+ * Prepare files from layer metadata
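+ *
+ * Sketch of what gets generated per layer (paths as used below) :
+ *   docker/overlay2/l/<link>                          (symlink -> ../<cache_id>/diff)
+ *   docker/overlay2/<cache_id>/{commited,work,link,lower}
+ *   docker/image/overlay2/layerdb/sha256/<chain_id>/{diff,cache-id,size,parent}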
+ * */
+const generateFilesForLayer = ({ chain_id, diff_id, parent, lower, link, size, cache_id }: Layer) => {
+  // compute useful paths
+  const dockerOverlay2CacheId = `docker/overlay2/${cache_id}`
+  const dockerOverlay2l = "docker/overlay2/l"
+  const dockerImageOverlay2LayerdbSha256 = "docker/image/overlay2/layerdb/sha256"
+  const dockerImageOverlay2LayerdbSha256ChainId = `${dockerImageOverlay2LayerdbSha256}/${chain_id}`
+
+  const files = [
+    // `link` symlink from `l/_link_` to `../_cache_id_/diff`
+    {
+      header: {
+        name: `${dockerOverlay2l}/${link}`,
+        type: "symlink",
+        linkname: `../${cache_id}/diff`,
+      },
+    },
+    // empty `commited` file
+    {
+      header: { name: `${dockerOverlay2CacheId}/commited`, mode: 600 },
+      content: "",
+    },
+    // empty `work` directory
+    {
+      header: {
+        name: `${dockerOverlay2CacheId}/work`,
+        mode: 777,
+        type: "directory",
+      },
+    },
+    // `link` file
+    {
+      header: { name: `${dockerOverlay2CacheId}/link`, mode: 644 },
+      content: link,
+    },
+    // `diff` file
+    {
+      header: {
+        name: `${dockerImageOverlay2LayerdbSha256ChainId}/diff`,
+        mode: 755,
+      },
+      content: diff_id,
+    },
+    // `cache_id` file
+    {
+      header: {
+        name: `${dockerImageOverlay2LayerdbSha256ChainId}/cache-id`,
+        mode: 755,
+      },
+      content: cache_id,
+    },
+    // `size` file
+    {
+      header: {
+        name: `${dockerImageOverlay2LayerdbSha256ChainId}/size`,
+        mode: 755,
+      },
+      content: String(size),
+    },
+  ]
+
+  // `parent` file; the first layer doesn't have any parent
+  if (parent)
+    files.push({
+      header: {
+        name: `${dockerImageOverlay2LayerdbSha256ChainId}/parent`,
+        mode: 755,
+      },
+      content: parent,
+    })
+
+  // `lower` chain; the lowest layer doesn't have any lower
+  if (lower)
+    files.push({
+      header: { name: `${dockerOverlay2CacheId}/lower`, mode: 644 },
+      content: lower,
+    })
+
+  return files
+}
+
+/** DownloadProcessLayers
+ * // 8. download and process layers
+ *
+ * This is the meaty part of the process.
+ * For each layer it will (on stream) :
+ * - stream from registry
+ * - gunzip
+ * - digest (on the fly)
+ * - untar
+ * - rename files to match the destination directory
+ * - tar (`pack`)
+ * - stream to output
+ *
+ * Then create all metadata files, `tar` them and stream them to output using `packFile`
+ */
+
+interface ProcessLayerIn {
+  manifests: any[]
+  layers: Layer[]
+  packStream: Pack
+  injectPath: string
+}
+
+const downloadProcessLayers = async ({ manifests, layers, packStream, injectPath }: ProcessLayerIn) => {
+  console.log(`== Processing Layers @downloadProcessLayers ==`)
+
+  const processingLayers = getLayerDistributionDigests(manifests)
+  const injectableFiles = []
+
+  for (const key in processingLayers) {
+    const { layer, image_name, compressedSize, token } = processingLayers[key]
+    console.log(`=> ${parseInt(key) + 1} / ${processingLayers.length} : ${layer}`)
+
+    try {
+      const cache_id = getRandomDiffId()
+
+      // get the url
+      const { imageUrl } = getUrls(image_name)
+
+      // get the stream
+      const layerStream: any = await getBlob(imageUrl, token, {
+        digest: `sha256:${layer}`,
+        size: compressedSize,
+      })
+
+      // process the stream and get back `size` (uncompressed) and `diff_id` (digest)
+      const { size, diff_id }: LayerMeta = await layerStreamProcessing({
+        layerStream,
+        packStream,
+        cache_id,
+        injectPath,
+      })
+
+      // find all layers related to this archive
+      const relatedLayers = layers.filter((layer: Layer) => layer.diff_id === diff_id)
+
+      // create the metadata and link files for all related layers
+      for (const layer of relatedLayers) {
+        injectableFiles.push(generateFilesForLayer({ ...layer, size, cache_id }))
+      }
+    } catch (error) {
+      console.log("downloadProcessLayers CATCH", error)
+    }
+  }
+  return injectableFiles.flat()
+}
+
+interface ComputeChainInput {
+  previousChainId: string
+  diff_id: string
+}
+/** Compute Chain Id
+ *
+ * formula is : sha256 of a string composed of the chain_id of the parent layer, a space, and the diff_id of the layer
+ * i.e. sha256("sha256:e265835b28ac16782ef429b44427c7a72cdefc642794515d78a390a72a2eab42 sha256:573a4eb582cc8a741363bc2f323baf020649960822435922c50d956e1b22a787")
+ *
+ */
+const computeChainId = ({ previousChainId, diff_id }: ComputeChainInput): string =>
+  crypto.createHash("sha256").update(`sha256:${previousChainId} ${diff_id}`).digest("hex")
+
+interface LayerStreamInput {
+  layerStream: NodeJS.ReadableStream
+  packStream: Pack
+  cache_id: string
+  injectPath: string
+}
+
+/**
+ * Promise : Layer Stream Processing
+ */
+async function layerStreamProcessing({ layerStream, packStream, cache_id, injectPath }: LayerStreamInput): Promise<LayerMeta> {
+  const extract = tar.extract()
+
+  // Promisify the event based control flow
+  return new Promise((resolve) => {
+    // 0. Setup the digester
+    const layerMeta: LayerMeta = {
+      diff_id: null,
+      size: -1,
+    }
+
+    const digesterCb = (resultDigest: string, length: number): void => {
+      // logger.log(`=> digesterCb resultDigest: ${resultDigest}, ${length}`, packStream, cache_id)
+      layerMeta.diff_id = `sha256:${resultDigest}`
+      layerMeta.size = length
+    }
+
+    const digester = digestStream(digesterCb)
+
+    // 4. tar extract happens here
+    extract.on("entry", (header: Headers & { pax: any }, stream: NodeJS.ReadableStream, next: Function) => {
+      if (header.pax) {
+        /**
+         * DELETE header.pax here, if it exists, as it is causing problems with the symlink handling.
+         * header.pax overrides the from/to name paths for symlinks, so they would end up at root level
+         */
+        console.log(`=> @layerStreamProcessing header ${inspect(header, true, 2, true)}`)
+        delete header.pax
+      }
+
+      // change the name of the file to place it at the right position in the tar archive folder tree
+      const headerNewName = {
+        ...header,
+        name: `${injectPath}/docker/overlay2/${cache_id}/diff/${header.name}`,
+      }
+
+      // 5. change header name to give the file its destination folder in the output tarball
+      const filePack = packStream.entry(headerNewName)
+
+      stream.pipe(filePack)
+
+      // TODO: better error handling
+
+      // we cannot just wait for the readable stream (stream) to end; we have to wait for the writable one to finish before processing the next file
+      filePack.on("finish", () => {
+        next()
+      })
+    })
+
+    // 7. when this layer finishes extraction, we get the digest (diff_id) and size from the digester
+    // then resolve the promise to allow moving on to the next layer
+    extract.on("finish", () => {
+      resolve(layerMeta)
+    })
+
+    layerStream
+      .pipe(gunzip()) // 1. uncompress if necessary, will pass thru if it's not gzipped
+      .pipe(digester) // 2. compute hash and forward (this is a continuous process, we'll get the result at the end)
+      .pipe(extract) // 3. extract from the layer tar archive (generates `entry` events cf 4.)
+  })
}

Apologies: the stray unprefixed brace above should read as part of the diff; correcting in place:
+}
+
+export { getLayers, downloadProcessLayers }
diff --git a/lib/dotetch-preloading/packer.ts b/lib/dotetch-preloading/packer.ts
new file mode 100644
index 00000000..0eecfd9f
--- /dev/null
+++ b/lib/dotetch-preloading/packer.ts
@@ -0,0 +1,42 @@
+import * as tar from "tar-stream"
+// import { Pack, Headers } from "tar-stream";
+
+/**
+ * PromisePacker
+ * Promisify tar-stream.pack.entry ( https://www.npmjs.com/package/tar-stream )
+ *
+ * @param {tar-stream.pack} pack - tar-stream pack instance
+ * @param {string} injectFolder - optional root folder to prepend to entry names
+ * @returns {Function} packer - the promisified packer, taking :
+ *
+ * @param {object} header - tar-stream.pack.entry header
+ * @param {string} value - tar-stream.pack.entry value
+ * @returns {Promise}
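+ *
+ * Usage sketch (illustrative, mirrors streamPreloadingAssets) :
+ *   const packFile = promisePacker(packStream, "inject/6")
+ *   await packFile({ name: "apps.json", mode: 644 }, JSON.stringify(appsJson))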
+ * */
+const promisePacker = (pack: any, injectFolder?: string) => (header: any, value: any) =>
+  new Promise((resolve, reject) => {
+    if (header.name.includes("sha256:")) {
+      console.log(`=> FIXME!! pack header.name: ${header.name}`)
+    }
+    // add the root injectable folder in front of the name when injecting files
+    if (injectFolder) header.name = `${injectFolder}/${header.name}`
+    pack.entry(header, value, (error: any) => {
+      if (error) reject(error)
+      resolve(true)
+    })
+  })
+
+/**
+ * Streamable tar packer
+ * // TODO : add compression to the stream
+ * @param {Stream} outputStream
+ * @returns Streamable tar packer
+ */
+const getTarballStream = (outputStream: NodeJS.WritableStream): any => {
+  // logger.log(`=> prepareTarball outputStream: ${inspect(outputStream,true,5,true)}`)
+  const pack = tar.pack()
+  pack.pipe(outputStream)
+  return pack
+}
+
+export { promisePacker, getTarballStream }
diff --git a/lib/dotetch-preloading/registry.ts b/lib/dotetch-preloading/registry.ts
new file mode 100644
index 00000000..b57b4d01
--- /dev/null
+++ b/lib/dotetch-preloading/registry.ts
@@ -0,0 +1,332 @@
+import axios, { AxiosRequestConfig } from "axios"
+import { dockerParseImage, DockerParsedImage } from "./docker-parse-image"
+import { Manifest, ManifestInfosFromRegistry, ConfigManifestV2 } from "./interface-manifest"
+// import { ManifestInfosRepos } from "./appsJson";
+import { inspect } from "util"
+
+/**
+ * This authenticates to the registry api, gets a token,
+ * gets the distribution manifest and the config manifest from the registry, and gets the blobs.
+ * How to run:
+ * npm i
+ * node getManifest.mjs
+ * You could also replace skopeo with this.
+ */
+
+/**
+ * /v2/<name>/manifests/<reference>
+ * /v2/<name>/blobs/<digest>
+ *
+ * Proof of Concept
+ * Used a reverse proxy to catch the HTTP requests made by the
+ * docker client when pulling a docker image from the registry, here is the output:
+ *
+ * 1. Get a valid token and make a HEAD request to verify that the manifest exists. Checking manifest
+ * Notice how we get back the image Id in the Docker-Content-Digest header, as specified in the standard.
+ *
+ * 2. After validating that the manifest exists, just request a token for each of the layers and their data: First layer
+ */
+
+/**
+ * Helpful links to understand the registry and other versions of registry code:
+ * https://docs.docker.com/registry/spec/api/
+ * https://github.com/dlgmltjr0925/docker-registry-web/blob/cbab3f214d3d47be3c93d1b5ab969f7b711663fc/utils/dockerRegistry.ts
+ * https://github.com/TritonDataCenter/node-docker-registry-client/blob/master/lib/registry-client-v2.js
+ * https://github.com/moby/moby/issues/9015
+ * https://github.com/containers/skopeo/blob/main/cmd/skopeo/copy.go
+ * https://github.com/mafintosh/docker-parse-image/blob/master/index.js
+ * https://gist.github.com/leodotcloud/9cd3dabdc73ccb498777073a0c8df64a
+ * https://github.com/moby/moby/blob/0910306bf970603ce787466a98e4294ba81af841/layer/layer_store.go#L102
+ * https://programmer.ink/think/container-principle-understand-layerid-diffid-chainid-cache-id.html
+ * https://github.com/productionwentdown/dri/blob/e7a85c5666f45b716be47d112be2578638143fbf/src/api.js
+ * https://github.com/viraja1/decentralized_docker_hub_registry/blob/782de6b84532c70c51049b3aec35a177998f089a/daemon/server.js
+ * https://github.com/bmonty/docker-manifest
+ * https://github.com/viraja1/decentralized_docker_hub_registry/blob/782de6b84532c70c51049b3aec35a177998f089a/hub/server.js
+ * https://github.com/plurid/hypod/blob/c69c53ef8c9aa41741144b416d2109c55a5eb7e1/packages/hypod-server/source/server/data/constants/docker/index.ts
+ * https://stackoverflow.com/questions/71534322/http-stream-using-axios-node-js
+ */
+
+/**
+ * getRegistryUrl
+ * @param {string} registry
+ * @param {string} namespace
+ * @returns {string} registryUrl
+ */
+function getRegistryUrl({ registry, namespace }: DockerParsedImage): string {
+  if (!registry) {
+    return `https://registry2.balena-cloud.com/${namespace}/`
+  }
+  return `https://${registry}/${namespace}/`
+}
+
+/**
+ * getImageUrl
+ * @param {string} registry
+ * @param {string} namespace
+ * @param {string} repository
+ * @returns {string} imageUrl
+ */
+// NOTE the double `v2` in the resulting url: the 1st v2 is for docker API Version 2, the second (the namespace) is for image release Version 2
+// Not sure how to get the image rel
+function getImageUrl({ registry, namespace, repository }: DockerParsedImage): string {
+  // we're only supporting docker api v2 for now
+  return `https://${registry}/v2/${namespace}/${repository}`
+}
+
+/** getAllBlobs
+ * /v2/<name>/blobs/<digest>
+ */
+
+/**
+ * getBlob
+ * @param imageUrl
+ * @param token
+ * @param layer
+ * @returns
+ */
+export async function getBlob(imageUrl: string, token: string, layer: { [key: string]: number | string }): Promise<any> {
+  const options: AxiosRequestConfig = {
+    method: "GET",
+    responseType: "stream",
+    headers: {
+      Authorization: `Bearer ${token}`,
+      Accept: "application/vnd.docker.image.rootfs.diff.tar.gzip",
+      "Accept-Encoding": "gzip",
+      "Docker-Distribution-API-Version": "registry/2.0",
+    },
+    url: `${imageUrl}/blobs/${layer.digest}`,
+  }
+  try {
+    const { data, headers } = await axios(options)
+    if (parseInt(headers["content-length"]!, 10) === layer.size && headers["docker-content-digest"] === layer.digest) {
+      console.log("==> getBlob stream header/layer sizes match", parseInt(headers["content-length"]!, 10), layer.size)
+      console.log("==> getBlob stream header/layer digests match", headers["docker-content-digest"], layer.digest)
+    }
+    return data
+  } catch (error) {
+    console.error("\n\n==> getBlob error:", inspect(error, true, 2, true))
+    throw error
+  }
+}
+
+/**
+ * getAllBlobs
+ * Iterate through the layers and get the blobs.
+ * This is getting moved over to the mainland. Should it, though?
+ * @param {object} imageUrl - imageUrl per image
+ * @param {string} token - token per image
+ * @param {string} manifest - manifest per image
+ * @returns {Promise}
+ */
+// async function getAllBlobs(imageUrl: string, token: string, manifest: Manifest): Promise<any[]> {
+//   try {
+//     const tgzLayersDigest = await Promise.all(
+//       manifest.layers.map(async (layer) => {
+//         const dataBlob = await getBlob(imageUrl, token, layer)
+//         return dataBlob
+//       })
+//     )
+//     return tgzLayersDigest
+//   } catch (error) {
+//     console.error("\n\n==> getAllBlob error:", inspect(error, true, 2, true))
+//     throw error
+//   }
// }
+
+/**
+ * getConfigManifest
+ * This pulls the config blob from the registry after checking the head.
+ * GET /v2/<name>/blobs/<digest>
+ * GET example /v2/53b00bed7a4c6897db23eb0e4cf620e3/blobs/sha256:1aa86408ad62437344cee93c2be884ad802fc63e05795876acec6de0bb21f3cc
+ * @param imageUrl
+ * @param token
+ * @param digest
+ * @returns
+ */
+async function getConfigManifest(imageUrl: string, token: string, digest: string): Promise<ConfigManifestV2> {
+  const options = {
+    method: "GET",
+    url: `${imageUrl}/blobs/${digest}`,
+    headers: {
+      Authorization: `Bearer ${token}`,
+      Accept: "application/vnd.docker.image.rootfs.diff.tar.gzip",
+      "Docker-Distribution-API-Version": "registry/2.0",
+    },
+  }
+  try {
+    const { data } = await axios(options)
+    return data
+  } catch (error) {
+    console.error("==> getConfigManifest error", error)
+    throw error
+  }
+}
+
+// /**
+//  * GET /v2/<name>/blobs/<digest>
+//  * @param imageUrl
+//  * @param token
+//  * @param digest
+//  * @returns
+//  */
+// async function getHeadBlob(imageUrl: string, token: string, digest: string): Promise<number> {
+//   const options = {
+//     method: "HEAD",
+//     url: `${imageUrl}/blobs/${digest}`,
+//     headers: {
+//       Accept: "application/vnd.docker.distribution.manifest.v2+json",
+//       Authorization: `Bearer ${token}`,
+//       "Docker-Distribution-API-Version": "registry/2.0",
+//     },
+//   }
+//   try {
+//     const { data, headers } = await axios(options)
+//     // Not possible to check `headers['docker-content-digest'] === digest`
+//     // since the cloudfront frontend doesn't forward docker's headers
+//     return headers["content-length"] ?? 0
+//   } catch (error) {
+//     throw new Error(`==> getHeadBlob CATCH: ${error}`)
+//   }
// }
+
+/**
+ * getManifest
+ * @param imageUrl
+ * @param token
+ * @returns
+ */
+async function getManifest(imageUrl: string, token: string): Promise<Manifest> {
+  const options = {
+    method: "GET",
+    url: `${imageUrl}/manifests/latest`,
+    withCredentials: true,
+    credentials: "include",
+    headers: {
+      Accept: "application/vnd.docker.distribution.manifest.v2+json",
+      Authorization: `Bearer ${token}`,
+      "Docker-Distribution-API-Version": "registry/2.0",
+    },
+  }
+
+  console.log("options", options)
+
+  try {
+    const { data } = await axios(options)
+    return data
+  } catch (error) {
+    console.error("==> getManifest error", error)
+    throw new Error(`==> NOPE did not get registry manifest. CATCH: ${error}`)
+  }
+}
+
+/**
+ * getUrls
+ * @param image
+ * @returns
+ */
+export function getUrls(image: string) {
+  console.log("get URLS ", image)
+  const parsedImage = dockerParseImage(image)
+  const registryUrl = getRegistryUrl(parsedImage)
+  const imageUrl = getImageUrl(parsedImage)
+  return { registryUrl, imageUrl, parsedImage }
+}
+
+/**
+ * pullManifestsFromRegistry
+ * @param image
+ * @param api
+ * @param bearer
+ * @returns
+ */
+export async function pullManifestsFromRegistry(
+  image: string,
+  api: string,
+  bearer: string,
+  isSupervisor: boolean
+): Promise<ManifestInfosFromRegistry> {
+  const { registryUrl, imageUrl, parsedImage } = getUrls(image)
+
+  // const authRealmServiceResponse = await getRealmResponse(registryUrl, authHeaders)
+
+  const token = await getToken(parsedImage, registryUrl, api, bearer, isSupervisor)
+
+  const manifest = await getManifest(imageUrl, token)
+  console.log("==> manifest", inspect(manifest, true, 10, true))
+  const configDigest = manifest?.config?.digest
+  const digests = manifest?.layers
+
+  // We are using this manifest as it's V2
+  const configManifestV2 = await getConfigManifest(imageUrl, token, configDigest)
+  const diffIds = configManifestV2.rootfs.diff_ids
+
+  // if (featureFlags.justDownload) {
+  //   const allBlobAlltheTime = await getAllBlobs(imageUrl, token, manifest)
+  // }
+
+  const imageId = configDigest
+  const imageName = image
+
+  return {
+    manifest,
+    digests,
+    configDigest,
+    configManifestV2,
+    imageId,
+    imageName,
+    diffIds,
+    imageUrl,
+    token,
+  }
+}
+
+/**
+ * Download Distribution Manifests
+ * @param images - array of images
+ * @param api - api server url
+ * @param token - token to access api
+ * @returns - array of distribution manifests
+ */
+export const getManifests = async (images: any[], api: string, token: string): Promise<ManifestInfosFromRegistry[]> => {
+  const manifests: ManifestInfosFromRegistry[] = []
+  console.log(`== Downloading Manifests @getManifests ==`)
+  for (const image in images) {
+    if (Object.prototype.hasOwnProperty.call(images, image)) {
+      const imageName = images[image].image_name
+      const isSupervisor = images[image].isSupervisor
+      console.log(`=> ${parseInt(image, 10) + 1} / ${images.length} : ${imageName}`)
+      const manifestInfo = await pullManifestsFromRegistry(imageName, api, token, isSupervisor)
+      manifests.push({
+        ...manifestInfo,
+        ...images[image],
+      })
+    }
+  }
+  console.log(`== Downloading Manifests @getManifests DONE ==`)
+  return manifests
+}
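+
+// The token exchange below boils down to (illustrative) :
+//   GET {api}/auth/v1/token?service={registryUrl}&scope=repository:{namespace}/{repository}:pull
+//   Authorization: <bearer>
+// and subsequent registry calls send the returned `data.token` as a `Bearer` token.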
+
+async function getToken(parsedImage: DockerParsedImage, registryUrl: string, api: string, bearer: string, isSupervisor: boolean, tag?: string) {
+  try {
+    const { repository, namespace } = parsedImage
+    const options = {
+      method: "GET",
+      url: `${api}/auth/v1/token`,
+      params: {
+        service: registryUrl,
+        scope: `repository:${namespace ? `${namespace}/` : "library/"}${repository}:${tag || "pull"}`,
+      },
+      headers: {
+        Authorization: bearer,
+      },
+    }
+    // FIXME: this workaround to get the supervisor from prod needs to be refactored out (when no longer needed)
+    if (isSupervisor) {
+      options.url = "https://api.balena-cloud.com/auth/v1/token"
+      options.headers.Authorization = ""
+    }
+    const { data } = await axios(options)
+    if (!data.token) {
+      throw new Error("token registry fail.")
+    }
+    return data.token
+  } catch (error) {
+    throw new Error(`Failed to get authentication token from registry : ${error}`)
+  }
+}
diff --git a/lib/dotetch-preloading/repositoriesjson.ts b/lib/dotetch-preloading/repositoriesjson.ts
new file mode 100644
index 00000000..9fb44957
--- /dev/null
+++ b/lib/dotetch-preloading/repositoriesjson.ts
@@ -0,0 +1,64 @@
+/**
+ * Deals with /var/lib/docker/image/overlay2/repositories.json
+ * That file informs balena-engine of what images are available in its local store
+ * and maps image name(s) (including tag) to an image digest.
+ *
+ * Here we generate a complete repositories.json for all the preloaded images, including the supervisor.
+ *
+ * We will overwrite the original repositories.json which was created when balenaos was built.
+ *
+ * One small difference between the original and the one we create is that we don't tag the supervisor with its hash.
+ * Which shouldn't have any impact, but is worth noting just in case.
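+ *
+ * The structure we generate looks like (illustrative) :
+ *   {
+ *     "Repositories": {
+ *       "<image_name>": {
+ *         "<image_name>:latest": "sha256:<image_id>",
+ *         "<image_name>:@<image_hash>": "sha256:<image_id>"
+ *       }
+ *     }
+ *   }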
+ */
+
+/**
+ * Relative path of repositories.json as injected in the resin-data partition
+ * On a running device it would be /var/lib/docker/image/overlay2/repositories.json
+ */
+const repositoriesJsonInjectionPath = "docker/image/overlay2/repositories.json"
+
+// TODO repositories.json types
+// interface Repositories {
+//   [image_name: string]:
+// }
+
+/**
+ * createAllRepositoriesFragments
+ */
+const createAllRepositoriesFragments = (manifests: any) => {
+  const repositories: any = {}
+  for (const { image_id, image_name, image_hash, isSupervisor, supervisorVersion } of manifests) {
+    // prepare repositories
+    repositories[image_name] = {
+      [`${image_name}:latest`]: `sha256:${image_id}`,
+    }
+    if (image_hash !== "latest") repositories[image_name][`${image_name}:@${image_hash}`] = `sha256:${image_id}`
+
+    if (isSupervisor)
+      repositories["balena_supervisor"] = {
+        [`balena_supervisor:${supervisorVersion}`]: image_id,
+      }
+  }
+  console.log("==> @createAllRepositoriesFragments repositories")
+  return repositories
+}
+
+/**
+ * Return a repositories.json augmented by fragments for all images
+ * @param {Array} manifests - images manifests
+ * @param {JSON} repositoriesJson - original repositories.json
+ */
+const buildRepositories = ({ manifests }: any) => {
+  console.log("== Build Repositories @buildRepositories ==")
+
+  // generate repositories fragments for preloaded images
+  const repositories = {
+    Repositories: createAllRepositoriesFragments(manifests),
+  }
+
+  console.log("repositories.json", repositories)
+
+  return repositories
+}
+
+export { buildRepositories, repositoriesJsonInjectionPath }
diff --git a/lib/dotetch-preloading/streamPreloadingAssets.ts b/lib/dotetch-preloading/streamPreloadingAssets.ts
new file mode 100755
index 00000000..8b824549
--- /dev/null
+++ b/lib/dotetch-preloading/streamPreloadingAssets.ts
@@ -0,0 +1,161 @@
+import { getManifests } from "./registry"
+import { buildRepositories, repositoriesJsonInjectionPath } from "./repositoriesjson"
+import { streamBaseImage } from "./baseImage"
+import { getAppsJson, getImageIds } from "./appsJson"
+import { getLayers, downloadProcessLayers } from "./layers"
+import { promisePacker, getTarballStream } from "./packer"
+import { getImagesConfigurationFiles } from "./images"
+import { getSupervisorImageNameFor } from "./supervisor"
+
+interface PreloadOptions {
+  outputStream: NodeJS.WritableStream
+  balenaosStream: NodeJS.ReadableStream
+  balenaosSize: number
+  app_id: string
+  api: string
+  registry: string
+  token: string
+  arch: string
+  balenaosRef: string
+  dataPartition: number
+  supervisorVersion: string
+  password?: string
+  callback?: Function
+}
+
+/**
+ * Main processing function
+ *
+ * Beware that, as we're outputting to a tar stream, the order of operations is important.
+ * Most operations therefore can't be made async; some could run in parallel to speed things up,
+ * but their output would still have to be written to the stream sequentially.
+ *
+ * Order of files in an `.etch` tar stream is :
+ * 0. manifest.json - contains the name of the base image and a list of the inject folders (see the sketch below)
+ * 1. base os image - can be zipped; name of the file should match the one specified in the manifest
+ * 2. /inject/_partitions_/_foldersOrFilesToInject_ - injectable assets
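+ *
+ * e.g. manifest.json for the default data partition (illustrative) :
+ *   {
+ *     "image": "<balenaosRef>",
+ *     "inject": [ { "partition": 6, "partitionName": "resin-data", "inject": 6 } ]
+ *   }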
+ */
+
+const streamPreloadingAssets = async ({
+  outputStream,
+  balenaosStream,
+  balenaosSize,
+  supervisorVersion,
+  arch,
+  app_id,
+  balenaosRef,
+  dataPartition = 6,
+  api,
+  token,
+  callback,
+}: PreloadOptions): Promise<void> => {
+  // ##############
+  // Processing
+  // ##############
+  console.log("==> STARTING @streamPreloadingAssets")
+
+  // prepare tarball packer
+  const injectPath = `inject/${dataPartition}`
+  const packStream = getTarballStream(outputStream) // streamable
+  const packFile = promisePacker(packStream, injectPath) // promise
+  const packManifest = promisePacker(packStream) // promise
+
+  // 0. create and stream a manifest
+  const manifest = {
+    image: balenaosRef,
+    inject: [
+      {
+        partition: dataPartition,
+        partitionName: "resin-data",
+        inject: dataPartition,
+      },
+    ],
+  }
+
+  await packManifest({ name: "manifest.json", mode: 644 }, JSON.stringify(manifest))
+
+  // Beware that knowing the file size in advance is mandatory
+  const baseImageStreamEntry = packStream.entry({
+    // TODO: name: `${balenaosRef}.img`, // switch when inject.mjs selects the base image from the manifest (currently hardcoded)
+    name: `${balenaosRef}.gz`,
+    mode: 644,
+    size: balenaosSize,
+  })
+
+  // TODO: optimization : // run streamBaseImage in parallel with all the metadata retrieval and processing (up to getLayers)
+  await streamBaseImage({
+    pipeStreamFrom: balenaosStream,
+    pipeStreamTo: baseImageStreamEntry,
+  })
+
+  // get apps.json
+  const appsJson = await getAppsJson({ app_id, api, token })
+
+  // extract image_ids from appsJson
+  const images = getImageIds({ appsJson })
+
+  // FIXME: BROKEN ON BOB BECAUSE WE CANNOT GET THE SV FROM BOB's API
+  // get the supervisor image
+  const baseImages = [
+    {
+      image_name: await getSupervisorImageNameFor({
+        version: supervisorVersion,
+        arch,
+        api,
+        token,
+      }),
+      image_hash: "latest",
+      isSupervisor: true,
+      supervisorVersion,
+    },
+  ]
+
+  // get manifests from registry for all images including pre-pre-loaded images (the ones inside the base image)
+  const imagesbaseAndPreload = [...baseImages, ...images]
+
+  const manifests = await getManifests(imagesbaseAndPreload, api, token)
+
+  // precompute layers metadata for all layers
+  const layers = await getLayers(manifests)
+
+  // download and process layers; this is where most of the work happens
+  const layersInjectableFiles = await downloadProcessLayers({
+    manifests,
+    layers,
+    packStream,
+    injectPath,
+  })
+
+  // prepare images files
+  const imagesInjectableFiles = getImagesConfigurationFiles(manifests)
+
+  // generate repositories.json snippets for each image, merge everything and inject the result
+  const newRepositoriesJson = buildRepositories({ manifests })
+
+  // prepare global metadata files
+  const globalInjectable = [
+    {
+      header: { name: repositoriesJsonInjectionPath, mode: 644 },
+      content: JSON.stringify(newRepositoriesJson),
+    },
+    {
+      header: { name: "apps.json", mode: 644 },
+      content: JSON.stringify(appsJson),
+    },
+  ]
+
+  // inject all metadata files and folders
+  for (const { header, content } of [...layersInjectableFiles, ...imagesInjectableFiles, ...globalInjectable]) {
+    await packFile(header, content)
+  }
+
+  // close tarball
+  packStream.finalize()
+  console.log("==> FINISHED @streamPreloadingAssets")
+  console.log("==> change consoleLevel log levels in logger.mjs for less verbose logging")
+
+  if (callback) callback()
+}
+
+export { streamPreloadingAssets }
diff --git a/lib/dotetch-preloading/supervisor.ts b/lib/dotetch-preloading/supervisor.ts
new file mode 100644
index 00000000..eaf023aa
--- /dev/null
+++ b/lib/dotetch-preloading/supervisor.ts
@@ -0,0 +1,61 @@
+/**
+ * Supervisor is currently the only image pre-loaded on HOST OS at build time
+ *
+ * We need to know which supervisor image is installed on the device.
+ * Ideally we'd also know the `cache_id` for each layer so we can dedupe when downloading / installing users' images.
+ *
+ * As we cannot currently do that easily(*), the next best thing is to preload the supervisor again (overwriting the layerdb and duplicating the cache (overlay2 folder)).
+ * This is wasteful, as all the layers for the supervisor will be transferred twice, but it's the safest option.
+ *
+ * To make it less wasteful, we could easily optimise by first checking whether any layers are duplicated between the SV and the user apps, and only re-preload the SV if that's the case. ( //TODO )
+ *
+ * To make the call we need :
+ * - supervisor version -> from S3 (VERSION)
+ * - device arch -> from S3 (device-type.json)
+ * - api env (bob, balena-machine, prod, ...) -> from env
+ * - auth token -> from env; //TODO: retrieve from user / password
+ *
+ * We retrieve the SV image name from the api using the [same method as the one used by meta-balena when building balenaos.img](https://github.com/balena-os/meta-balena/blob/a1084cfc437eb601cbb9f3de7822d916e08351de/meta-balena-common/recipes-containers/balena-supervisor/balena-supervisor.bb#L68)
+ * curl "https://api.${_api_env}/v6/supervisor_release?\$top=1&\$select=image_name&\$filter=(supervisor_version%20eq%20%27${_version}%27)%20and%20(is_for__device_type/any(ifdt:ifdt/is_of__cpu_architecture/any(ioca:ioca/slug%20eq%20%27${_arch}%27)))" \ -H "Content-Type: application/json" -H "Authorization: Bearer `cat ~/.balena/token`"
+ *
+ * Note that what we do here will be deprecated and should only be used for the PoC : https://jel.ly.fish/facc0db4-57a6-4e5d-a45a-cbda5c9e7a76?event=f79c9c8d-d2b1-4470-9d8a-cad7c7c9f2f0
+ *
+ */
+
+import axios from "axios"
+
+interface supervisorImageNameIn {
+  version: string
+  arch: string
+  api?: string
+  token: string
+}
+
+// FIXME: WE CURRENTLY CANNOT RETRIEVE THE SV IMAGE FROM BOB AS WE DON'T HAVE A PROPER WAY TO LOAD IT IN
+// @cmcruz is working on an improvement of the current os loading in bob / bM to fix this
+// In the meantime we'll pull the image directly from prod
+// THIS WILL BE AN ISSUE WITH BALENAMACHINES !!
+
+/**
+ * Fetch the supervisor image name (pullable url) from the api for a version and arch
+ *
+ */
+const getSupervisorImageNameFor = async ({ version, arch, api, token }: supervisorImageNameIn): Promise<string> => {
+  console.log(api)
+  const options = {
+    headers: {
+      Authorization: token!,
+      ContentType: "application/json",
+    },
+    url: `https://api.balena-cloud.com/v6/supervisor_release?\$top=1&\$select=image_name&\$filter=(supervisor_version%20eq%20%27${version}%27)%20and%20(is_for__device_type/any(ifdt:ifdt/is_of__cpu_architecture/any(ioca:ioca/slug%20eq%20%27${arch}%27)))`,
+  }
+  try {
+    const { data } = await axios(options)
+
+    return data.d[0].image_name
+  } catch (error) {
+    console.error("\n\n==> get SV image Name error:", error)
+  }
+}
+
+export { getSupervisorImageNameFor }
diff --git a/lib/index.ts b/lib/index.ts
index 75b7126f..fd996cbf 100644
--- a/lib/index.ts
+++ b/lib/index.ts
@@ -1,14 +1,14 @@
 /*
  * Copyright 2018 balena.io
  *
- * Licensed under the Apache License, Version 2.0 (the "License");
+ * Licensed under the Apache License, Version 2.0 (the 'License');
  * you may not use this file except in compliance with the License.
  * You may obtain a copy of the License at
  *
  *    http://www.apache.org/licenses/LICENSE-2.0
  *
  * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
+ * distributed under the License is distributed on an 'AS IS' BASIS,
  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  * See the License for the specific language governing permissions and
  * limitations under the License.
@@ -19,6 +19,7 @@ import * as errors from './errors'; import * as multiWrite from './multi-write'; import * as scanner from './scanner'; import * as sourceDestination from './source-destination'; +import { streamPreloadingAssets } from './dotetch-preloading'; import * as sparseStream from './sparse-stream'; import * as tmp from './tmp'; import * as utils from './utils'; @@ -32,4 +33,5 @@ export { sparseStream, tmp, utils, + streamPreloadingAssets, }; diff --git a/package-lock.json b/package-lock.json index 9f133e34..c87a3004 100644 --- a/package-lock.json +++ b/package-lock.json @@ -19,9 +19,11 @@ "check-disk-space": "^2.1.0", "cyclic-32": "^1.1.0", "debug": "^3.1.0", + "docker-parse-image": "^3.0.1", "drivelist": "^11.0.4", "file-disk": "^8.0.1", "file-type": "^8.0.0", + "gunzip-maybe": "^1.4.2", "gzip-stream": "^1.1.2", "lzma-native": "^8.0.6", "mountutils": "^1.3.20", @@ -29,6 +31,7 @@ "outdent": "^0.8.0", "partitioninfo": "^6.0.2", "rwmutex": "^1.0.0", + "tar-stream": "^2.2.0", "tslib": "^2.0.0", "unbzip2-stream": "^1.4.2", "unzip-stream": "^0.3.0", @@ -44,11 +47,13 @@ "@types/crc": "^3.4.0", "@types/debug": "0.0.31", "@types/file-type": "^5.2.1", + "@types/gunzip-maybe": "^1.4.0", "@types/lodash": "^4.14.108", "@types/mocha": "^5.2.4", "@types/node": "^10.17.60", "@types/progress": "^2.0.1", "@types/sinon": "^5.0.1", + "@types/tar-stream": "^2.2.2", "@types/yargs": "^11.0.0", "@types/yauzl": "^2.9.0", "chai": "^4.1.2", @@ -1281,6 +1286,15 @@ "@types/node": "*" } }, + "node_modules/@types/gunzip-maybe": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/@types/gunzip-maybe/-/gunzip-maybe-1.4.0.tgz", + "integrity": "sha512-dFP9GrYAR9KhsjTkWJ8q8Gsfql75YIKcg9DuQOj/IrlPzR7W+1zX+cclw1McV82UXAQ+Lpufvgk3e9bC8+HzgA==", + "dev": true, + "dependencies": { + "@types/node": "*" + } + }, "node_modules/@types/lodash": { "version": "4.14.181", "resolved": "https://registry.npmjs.org/@types/lodash/-/lodash-4.14.181.tgz", @@ -1331,6 +1345,15 @@ "integrity": "sha512-opwMHufhUwkn/UUDk35LDbKJpA2VBsZT8WLU8NjayvRLGPxQkN+8XmfC2Xl35MAscBE8469koLLBjaI3XLEIww==", "dev": true }, + "node_modules/@types/tar-stream": { + "version": "2.2.2", + "resolved": "https://registry.npmjs.org/@types/tar-stream/-/tar-stream-2.2.2.tgz", + "integrity": "sha512-1AX+Yt3icFuU6kxwmPakaiGrJUwG44MpuiqPg4dSolRFk6jmvs4b3IbUol9wKDLIgU76gevn3EwE8y/DkSJCZQ==", + "dev": true, + "dependencies": { + "@types/node": "*" + } + }, "node_modules/@types/w3c-web-usb": { "version": "1.0.6", "resolved": "https://registry.npmjs.org/@types/w3c-web-usb/-/w3c-web-usb-1.0.6.tgz", @@ -1797,6 +1820,14 @@ "integrity": "sha512-qhAVI1+Av2X7qelOfAIYwXONood6XlZE/fXaBSmW/T5SzLAmCgzi+eiWE7fUvbHaeNBQH13UftjpXxsfLkMpgw==", "dev": true }, + "node_modules/browserify-zlib": { + "version": "0.1.4", + "resolved": "https://registry.npmjs.org/browserify-zlib/-/browserify-zlib-0.1.4.tgz", + "integrity": "sha512-19OEpq7vWgsH6WkvkBJQDFvJS1uPcbFOQ4v9CU839dO+ZZXUZO6XpE6hNCqvlIIj+4fZvRiJ6DsAQ382GwiyTQ==", + "dependencies": { + "pako": "~0.2.0" + } + }, "node_modules/buffer": { "version": "5.7.1", "resolved": "https://registry.npmjs.org/buffer/-/buffer-5.7.1.tgz", @@ -1850,8 +1881,7 @@ "node_modules/buffer-from": { "version": "1.1.2", "resolved": "https://registry.npmjs.org/buffer-from/-/buffer-from-1.1.2.tgz", - "integrity": "sha512-E+XQCRwSbaaiChtv6k6Dwgc+bx+Bs6vuKJHHl5kox/BaKbhiXzqQOwK4cO22yElGp2OCmjwVhT3HmxgyPGnJfQ==", - "dev": true + "integrity": "sha512-E+XQCRwSbaaiChtv6k6Dwgc+bx+Bs6vuKJHHl5kox/BaKbhiXzqQOwK4cO22yElGp2OCmjwVhT3HmxgyPGnJfQ==" }, 
"node_modules/buffers": { "version": "0.1.1", @@ -2406,6 +2436,11 @@ "node": ">=0.3.1" } }, + "node_modules/docker-parse-image": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/docker-parse-image/-/docker-parse-image-3.0.1.tgz", + "integrity": "sha512-vyR7dtMfJabw/gJGjUvFspsWXlHDzQt24aSaHac92OggNXdvkAb9jGunsH77lprqExox2esbrrOHfDRe+otL7Q==" + }, "node_modules/drivelist": { "version": "11.0.4", "resolved": "https://registry.npmjs.org/drivelist/-/drivelist-11.0.4.tgz", @@ -2548,6 +2583,49 @@ "simple-concat": "^1.0.0" } }, + "node_modules/duplexify": { + "version": "3.7.1", + "resolved": "https://registry.npmjs.org/duplexify/-/duplexify-3.7.1.tgz", + "integrity": "sha512-07z8uv2wMyS51kKhD1KsdXJg5WQ6t93RneqRxUHnskXVtlYYkLqM0gqStQZ3pj073g687jPCHrqNfCzawLYh5g==", + "dependencies": { + "end-of-stream": "^1.0.0", + "inherits": "^2.0.1", + "readable-stream": "^2.0.0", + "stream-shift": "^1.0.0" + } + }, + "node_modules/duplexify/node_modules/isarray": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/isarray/-/isarray-1.0.0.tgz", + "integrity": "sha512-VLghIWNM6ELQzo7zwmcg0NmTVyWKYjvIeM83yjp0wRDTmUnrM678fQbcKBo6n2CJEF0szoG//ytg+TKla89ALQ==" + }, + "node_modules/duplexify/node_modules/readable-stream": { + "version": "2.3.7", + "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-2.3.7.tgz", + "integrity": "sha512-Ebho8K4jIbHAxnuxi7o42OrZgF/ZTNcsZj6nRKyUmkhLFq8CHItp/fy6hQZuZmP/n3yZ9VBUbp4zz/mX8hmYPw==", + "dependencies": { + "core-util-is": "~1.0.0", + "inherits": "~2.0.3", + "isarray": "~1.0.0", + "process-nextick-args": "~2.0.0", + "safe-buffer": "~5.1.1", + "string_decoder": "~1.1.1", + "util-deprecate": "~1.0.1" + } + }, + "node_modules/duplexify/node_modules/safe-buffer": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.2.tgz", + "integrity": "sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g==" + }, + "node_modules/duplexify/node_modules/string_decoder": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.1.1.tgz", + "integrity": "sha512-n/ShnvDi6FHbbVfviro+WojiFzv+s8MPMHBczVePfUpDJLwoLT0ht1l4YwBCbi8pJAveEEdnkHyPyTP/mzRfwg==", + "dependencies": { + "safe-buffer": "~5.1.0" + } + }, "node_modules/emoji-regex": { "version": "8.0.0", "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", @@ -2906,6 +2984,22 @@ "node": ">=4.x" } }, + "node_modules/gunzip-maybe": { + "version": "1.4.2", + "resolved": "https://registry.npmjs.org/gunzip-maybe/-/gunzip-maybe-1.4.2.tgz", + "integrity": "sha512-4haO1M4mLO91PW57BMsDFf75UmwoRX0GkdD+Faw+Lr+r/OZrOCS0pIBwOL1xCKQqnQzbNFGgK2V2CpBUPeFNTw==", + "dependencies": { + "browserify-zlib": "^0.1.4", + "is-deflate": "^1.0.0", + "is-gzip": "^1.0.0", + "peek-stream": "^1.1.0", + "pumpify": "^1.3.3", + "through2": "^2.0.3" + }, + "bin": { + "gunzip-maybe": "bin.js" + } + }, "node_modules/gzip-stream": { "version": "1.1.2", "resolved": "https://registry.npmjs.org/gzip-stream/-/gzip-stream-1.1.2.tgz", @@ -3117,6 +3211,11 @@ "url": "https://github.com/sponsors/ljharb" } }, + "node_modules/is-deflate": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/is-deflate/-/is-deflate-1.0.0.tgz", + "integrity": "sha512-YDoFpuZWu1VRXlsnlYMzKyVRITXj7Ej/V9gXQ2/pAe7X1J7M/RNOqaIYi6qUn+B7nGyB9pDXrv02dsB58d2ZAQ==" + }, "node_modules/is-extglob": { "version": "2.1.1", "resolved": "https://registry.npmjs.org/is-extglob/-/is-extglob-2.1.1.tgz", @@ -3149,6 +3248,14 @@ 
"node": ">=0.10.0" } }, + "node_modules/is-gzip": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/is-gzip/-/is-gzip-1.0.0.tgz", + "integrity": "sha512-rcfALRIb1YewtnksfRIHGcIY93QnK8BIQ/2c9yDYcG/Y6+vRoJuTWBmmSEbyLLYtXm7q35pHOHbZFQBaLrhlWQ==", + "engines": { + "node": ">=0.10.0" + } + }, "node_modules/is-number": { "version": "7.0.0", "resolved": "https://registry.npmjs.org/is-number/-/is-number-7.0.0.tgz", @@ -4092,6 +4199,11 @@ "url": "https://github.com/sponsors/sindresorhus" } }, + "node_modules/pako": { + "version": "0.2.9", + "resolved": "https://registry.npmjs.org/pako/-/pako-0.2.9.tgz", + "integrity": "sha512-NUcwaKxUxWrZLpDG+z/xZaCgQITkA/Dv4V/T6bw7VON6l1Xz/VnrBqrYjZQ12TamKHzITTfOEIYUj48y2KXImA==" + }, "node_modules/parent-module": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/parent-module/-/parent-module-1.0.1.tgz", @@ -4185,6 +4297,16 @@ "node": "*" } }, + "node_modules/peek-stream": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/peek-stream/-/peek-stream-1.1.3.tgz", + "integrity": "sha512-FhJ+YbOSBb9/rIl2ZeE/QHEsWn7PqNYt8ARAY3kIgNGOk13g9FGyIY6JIl/xB/3TFRVoTv5as0l11weORrTekA==", + "dependencies": { + "buffer-from": "^1.0.0", + "duplexify": "^3.5.0", + "through2": "^2.0.3" + } + }, "node_modules/pend": { "version": "1.2.0", "resolved": "https://registry.npmjs.org/pend/-/pend-1.2.0.tgz", @@ -4283,10 +4405,13 @@ } }, "node_modules/prebuild-install/node_modules/minimist": { - "version": "1.2.6", - "resolved": "https://registry.npmjs.org/minimist/-/minimist-1.2.6.tgz", - "integrity": "sha512-Jsjnk4bw3YJqYzbdyBiNsPWHPfO++UGG749Cxs6peCu5Xg4nrena6OVxOYxrQTqww0Jmwt+Ref8rggumkTLz9Q==", - "optional": true + "version": "1.2.7", + "resolved": "https://registry.npmjs.org/minimist/-/minimist-1.2.7.tgz", + "integrity": "sha512-bzfL1YUZsP41gmu/qjrEk0Q6i2ix/cVeAhbCbqH9u3zYutS1cLg00qhrD0M2MVdCcx4Sc0UpP2eBWo9rotpq6g==", + "optional": true, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } }, "node_modules/prettier": { "version": "2.6.2", @@ -4326,6 +4451,25 @@ "once": "^1.3.1" } }, + "node_modules/pumpify": { + "version": "1.5.1", + "resolved": "https://registry.npmjs.org/pumpify/-/pumpify-1.5.1.tgz", + "integrity": "sha512-oClZI37HvuUJJxSKKrC17bZ9Cu0ZYhEAGPsPUy9KlMUmv9dKX2o77RUmq7f3XjIxbwyGwYzbzQ1L2Ks8sIradQ==", + "dependencies": { + "duplexify": "^3.6.0", + "inherits": "^2.0.3", + "pump": "^2.0.0" + } + }, + "node_modules/pumpify/node_modules/pump": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/pump/-/pump-2.0.1.tgz", + "integrity": "sha512-ruPMNRkN3MHP1cWJc9OWr+T/xDP0jhXYCLfJcBuX54hhfIBnaQmAUMfDcG4DM5UMWByBbJY69QSphm3jtDKIkA==", + "dependencies": { + "end-of-stream": "^1.1.0", + "once": "^1.3.1" + } + }, "node_modules/query-ast": { "version": "1.0.4", "resolved": "https://registry.npmjs.org/query-ast/-/query-ast-1.0.4.tgz", @@ -4706,6 +4850,11 @@ "integrity": "sha1-BOaSb2YolTVPPdAVIDYzuFcpfiw=", "dev": true }, + "node_modules/stream-shift": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/stream-shift/-/stream-shift-1.0.1.tgz", + "integrity": "sha512-AiisoFqQ0vbGcZgQPY1cdP2I76glaVA/RauYR4G4thNFgkTqr90yXTo4LYX60Jl+sIlPNHHdGSwo01AvbKUSVQ==" + }, "node_modules/string_decoder": { "version": "1.3.0", "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.3.0.tgz", @@ -4810,6 +4959,47 @@ "resolved": "https://registry.npmjs.org/through/-/through-2.3.8.tgz", "integrity": "sha1-DdTJ/6q8NXlgsbckEV1+Doai4fU=" }, + "node_modules/through2": { + "version": "2.0.5", + "resolved": 
"https://registry.npmjs.org/through2/-/through2-2.0.5.tgz", + "integrity": "sha512-/mrRod8xqpA+IHSLyGCQ2s8SPHiCDEeQJSep1jqLYeEUClOFG2Qsh+4FU6G9VeqpZnGW/Su8LQGc4YKni5rYSQ==", + "dependencies": { + "readable-stream": "~2.3.6", + "xtend": "~4.0.1" + } + }, + "node_modules/through2/node_modules/isarray": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/isarray/-/isarray-1.0.0.tgz", + "integrity": "sha512-VLghIWNM6ELQzo7zwmcg0NmTVyWKYjvIeM83yjp0wRDTmUnrM678fQbcKBo6n2CJEF0szoG//ytg+TKla89ALQ==" + }, + "node_modules/through2/node_modules/readable-stream": { + "version": "2.3.7", + "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-2.3.7.tgz", + "integrity": "sha512-Ebho8K4jIbHAxnuxi7o42OrZgF/ZTNcsZj6nRKyUmkhLFq8CHItp/fy6hQZuZmP/n3yZ9VBUbp4zz/mX8hmYPw==", + "dependencies": { + "core-util-is": "~1.0.0", + "inherits": "~2.0.3", + "isarray": "~1.0.0", + "process-nextick-args": "~2.0.0", + "safe-buffer": "~5.1.1", + "string_decoder": "~1.1.1", + "util-deprecate": "~1.0.1" + } + }, + "node_modules/through2/node_modules/safe-buffer": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.2.tgz", + "integrity": "sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g==" + }, + "node_modules/through2/node_modules/string_decoder": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.1.1.tgz", + "integrity": "sha512-n/ShnvDi6FHbbVfviro+WojiFzv+s8MPMHBczVePfUpDJLwoLT0ht1l4YwBCbi8pJAveEEdnkHyPyTP/mzRfwg==", + "dependencies": { + "safe-buffer": "~5.1.0" + } + }, "node_modules/to-buffer": { "version": "1.1.1", "resolved": "https://registry.npmjs.org/to-buffer/-/to-buffer-1.1.1.tgz", @@ -6582,6 +6772,15 @@ "@types/node": "*" } }, + "@types/gunzip-maybe": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/@types/gunzip-maybe/-/gunzip-maybe-1.4.0.tgz", + "integrity": "sha512-dFP9GrYAR9KhsjTkWJ8q8Gsfql75YIKcg9DuQOj/IrlPzR7W+1zX+cclw1McV82UXAQ+Lpufvgk3e9bC8+HzgA==", + "dev": true, + "requires": { + "@types/node": "*" + } + }, "@types/lodash": { "version": "4.14.181", "resolved": "https://registry.npmjs.org/@types/lodash/-/lodash-4.14.181.tgz", @@ -6632,6 +6831,15 @@ "integrity": "sha512-opwMHufhUwkn/UUDk35LDbKJpA2VBsZT8WLU8NjayvRLGPxQkN+8XmfC2Xl35MAscBE8469koLLBjaI3XLEIww==", "dev": true }, + "@types/tar-stream": { + "version": "2.2.2", + "resolved": "https://registry.npmjs.org/@types/tar-stream/-/tar-stream-2.2.2.tgz", + "integrity": "sha512-1AX+Yt3icFuU6kxwmPakaiGrJUwG44MpuiqPg4dSolRFk6jmvs4b3IbUol9wKDLIgU76gevn3EwE8y/DkSJCZQ==", + "dev": true, + "requires": { + "@types/node": "*" + } + }, "@types/w3c-web-usb": { "version": "1.0.6", "resolved": "https://registry.npmjs.org/@types/w3c-web-usb/-/w3c-web-usb-1.0.6.tgz", @@ -7033,6 +7241,14 @@ "integrity": "sha512-qhAVI1+Av2X7qelOfAIYwXONood6XlZE/fXaBSmW/T5SzLAmCgzi+eiWE7fUvbHaeNBQH13UftjpXxsfLkMpgw==", "dev": true }, + "browserify-zlib": { + "version": "0.1.4", + "resolved": "https://registry.npmjs.org/browserify-zlib/-/browserify-zlib-0.1.4.tgz", + "integrity": "sha512-19OEpq7vWgsH6WkvkBJQDFvJS1uPcbFOQ4v9CU839dO+ZZXUZO6XpE6hNCqvlIIj+4fZvRiJ6DsAQ382GwiyTQ==", + "requires": { + "pako": "~0.2.0" + } + }, "buffer": { "version": "5.7.1", "resolved": "https://registry.npmjs.org/buffer/-/buffer-5.7.1.tgz", @@ -7069,8 +7285,7 @@ "buffer-from": { "version": "1.1.2", "resolved": "https://registry.npmjs.org/buffer-from/-/buffer-from-1.1.2.tgz", - "integrity": 
"sha512-E+XQCRwSbaaiChtv6k6Dwgc+bx+Bs6vuKJHHl5kox/BaKbhiXzqQOwK4cO22yElGp2OCmjwVhT3HmxgyPGnJfQ==", - "dev": true + "integrity": "sha512-E+XQCRwSbaaiChtv6k6Dwgc+bx+Bs6vuKJHHl5kox/BaKbhiXzqQOwK4cO22yElGp2OCmjwVhT3HmxgyPGnJfQ==" }, "buffers": { "version": "0.1.1", @@ -7482,6 +7697,11 @@ "integrity": "sha512-/VTCrvm5Z0JGty/BWHljh+BAiw3IK+2j87NGMu8Nwc/f48WoDAC395uomO9ZD117ZOBaHmkX1oyLvkVM/aIT3w==", "dev": true }, + "docker-parse-image": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/docker-parse-image/-/docker-parse-image-3.0.1.tgz", + "integrity": "sha512-vyR7dtMfJabw/gJGjUvFspsWXlHDzQt24aSaHac92OggNXdvkAb9jGunsH77lprqExox2esbrrOHfDRe+otL7Q==" + }, "drivelist": { "version": "11.0.4", "resolved": "https://registry.npmjs.org/drivelist/-/drivelist-11.0.4.tgz", @@ -7573,6 +7793,51 @@ } } }, + "duplexify": { + "version": "3.7.1", + "resolved": "https://registry.npmjs.org/duplexify/-/duplexify-3.7.1.tgz", + "integrity": "sha512-07z8uv2wMyS51kKhD1KsdXJg5WQ6t93RneqRxUHnskXVtlYYkLqM0gqStQZ3pj073g687jPCHrqNfCzawLYh5g==", + "requires": { + "end-of-stream": "^1.0.0", + "inherits": "^2.0.1", + "readable-stream": "^2.0.0", + "stream-shift": "^1.0.0" + }, + "dependencies": { + "isarray": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/isarray/-/isarray-1.0.0.tgz", + "integrity": "sha512-VLghIWNM6ELQzo7zwmcg0NmTVyWKYjvIeM83yjp0wRDTmUnrM678fQbcKBo6n2CJEF0szoG//ytg+TKla89ALQ==" + }, + "readable-stream": { + "version": "2.3.7", + "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-2.3.7.tgz", + "integrity": "sha512-Ebho8K4jIbHAxnuxi7o42OrZgF/ZTNcsZj6nRKyUmkhLFq8CHItp/fy6hQZuZmP/n3yZ9VBUbp4zz/mX8hmYPw==", + "requires": { + "core-util-is": "~1.0.0", + "inherits": "~2.0.3", + "isarray": "~1.0.0", + "process-nextick-args": "~2.0.0", + "safe-buffer": "~5.1.1", + "string_decoder": "~1.1.1", + "util-deprecate": "~1.0.1" + } + }, + "safe-buffer": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.2.tgz", + "integrity": "sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g==" + }, + "string_decoder": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.1.1.tgz", + "integrity": "sha512-n/ShnvDi6FHbbVfviro+WojiFzv+s8MPMHBczVePfUpDJLwoLT0ht1l4YwBCbi8pJAveEEdnkHyPyTP/mzRfwg==", + "requires": { + "safe-buffer": "~5.1.0" + } + } + } + }, "emoji-regex": { "version": "8.0.0", "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", @@ -7842,6 +8107,19 @@ "integrity": "sha512-qBr4OuELkhPenW6goKVXiv47US3clb3/IbuWF9KNKEijAy9oeHxU9IgzjvJhHkUzhaj7rOUD7+YGWqUjLp5oSA==", "dev": true }, + "gunzip-maybe": { + "version": "1.4.2", + "resolved": "https://registry.npmjs.org/gunzip-maybe/-/gunzip-maybe-1.4.2.tgz", + "integrity": "sha512-4haO1M4mLO91PW57BMsDFf75UmwoRX0GkdD+Faw+Lr+r/OZrOCS0pIBwOL1xCKQqnQzbNFGgK2V2CpBUPeFNTw==", + "requires": { + "browserify-zlib": "^0.1.4", + "is-deflate": "^1.0.0", + "is-gzip": "^1.0.0", + "peek-stream": "^1.1.0", + "pumpify": "^1.3.3", + "through2": "^2.0.3" + } + }, "gzip-stream": { "version": "1.1.2", "resolved": "https://registry.npmjs.org/gzip-stream/-/gzip-stream-1.1.2.tgz", @@ -8003,6 +8281,11 @@ "has": "^1.0.3" } }, + "is-deflate": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/is-deflate/-/is-deflate-1.0.0.tgz", + "integrity": "sha512-YDoFpuZWu1VRXlsnlYMzKyVRITXj7Ej/V9gXQ2/pAe7X1J7M/RNOqaIYi6qUn+B7nGyB9pDXrv02dsB58d2ZAQ==" + }, "is-extglob": { "version": "2.1.1", 
"resolved": "https://registry.npmjs.org/is-extglob/-/is-extglob-2.1.1.tgz", @@ -8026,6 +8309,11 @@ "is-extglob": "^2.1.1" } }, + "is-gzip": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/is-gzip/-/is-gzip-1.0.0.tgz", + "integrity": "sha512-rcfALRIb1YewtnksfRIHGcIY93QnK8BIQ/2c9yDYcG/Y6+vRoJuTWBmmSEbyLLYtXm7q35pHOHbZFQBaLrhlWQ==" + }, "is-number": { "version": "7.0.0", "resolved": "https://registry.npmjs.org/is-number/-/is-number-7.0.0.tgz", @@ -8779,6 +9067,11 @@ "p-limit": "^3.0.2" } }, + "pako": { + "version": "0.2.9", + "resolved": "https://registry.npmjs.org/pako/-/pako-0.2.9.tgz", + "integrity": "sha512-NUcwaKxUxWrZLpDG+z/xZaCgQITkA/Dv4V/T6bw7VON6l1Xz/VnrBqrYjZQ12TamKHzITTfOEIYUj48y2KXImA==" + }, "parent-module": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/parent-module/-/parent-module-1.0.1.tgz", @@ -8851,6 +9144,16 @@ "integrity": "sha512-Dp6zGqpTdETdR63lehJYPeIOqpiNBNtc7BpWSLrOje7UaIsE5aY92r/AunQA7rsXvet3lrJ3JnZX29UPTKXyKQ==", "dev": true }, + "peek-stream": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/peek-stream/-/peek-stream-1.1.3.tgz", + "integrity": "sha512-FhJ+YbOSBb9/rIl2ZeE/QHEsWn7PqNYt8ARAY3kIgNGOk13g9FGyIY6JIl/xB/3TFRVoTv5as0l11weORrTekA==", + "requires": { + "buffer-from": "^1.0.0", + "duplexify": "^3.5.0", + "through2": "^2.0.3" + } + }, "pend": { "version": "1.2.0", "resolved": "https://registry.npmjs.org/pend/-/pend-1.2.0.tgz", @@ -8920,9 +9223,9 @@ }, "dependencies": { "minimist": { - "version": "1.2.6", - "resolved": "https://registry.npmjs.org/minimist/-/minimist-1.2.6.tgz", - "integrity": "sha512-Jsjnk4bw3YJqYzbdyBiNsPWHPfO++UGG749Cxs6peCu5Xg4nrena6OVxOYxrQTqww0Jmwt+Ref8rggumkTLz9Q==", + "version": "1.2.7", + "resolved": "https://registry.npmjs.org/minimist/-/minimist-1.2.7.tgz", + "integrity": "sha512-bzfL1YUZsP41gmu/qjrEk0Q6i2ix/cVeAhbCbqH9u3zYutS1cLg00qhrD0M2MVdCcx4Sc0UpP2eBWo9rotpq6g==", "optional": true } } @@ -8953,6 +9256,27 @@ "once": "^1.3.1" } }, + "pumpify": { + "version": "1.5.1", + "resolved": "https://registry.npmjs.org/pumpify/-/pumpify-1.5.1.tgz", + "integrity": "sha512-oClZI37HvuUJJxSKKrC17bZ9Cu0ZYhEAGPsPUy9KlMUmv9dKX2o77RUmq7f3XjIxbwyGwYzbzQ1L2Ks8sIradQ==", + "requires": { + "duplexify": "^3.6.0", + "inherits": "^2.0.3", + "pump": "^2.0.0" + }, + "dependencies": { + "pump": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/pump/-/pump-2.0.1.tgz", + "integrity": "sha512-ruPMNRkN3MHP1cWJc9OWr+T/xDP0jhXYCLfJcBuX54hhfIBnaQmAUMfDcG4DM5UMWByBbJY69QSphm3jtDKIkA==", + "requires": { + "end-of-stream": "^1.1.0", + "once": "^1.3.1" + } + } + } + }, "query-ast": { "version": "1.0.4", "resolved": "https://registry.npmjs.org/query-ast/-/query-ast-1.0.4.tgz", @@ -9243,6 +9567,11 @@ "integrity": "sha1-BOaSb2YolTVPPdAVIDYzuFcpfiw=", "dev": true }, + "stream-shift": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/stream-shift/-/stream-shift-1.0.1.tgz", + "integrity": "sha512-AiisoFqQ0vbGcZgQPY1cdP2I76glaVA/RauYR4G4thNFgkTqr90yXTo4LYX60Jl+sIlPNHHdGSwo01AvbKUSVQ==" + }, "string_decoder": { "version": "1.3.0", "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.3.0.tgz", @@ -9323,6 +9652,49 @@ "resolved": "https://registry.npmjs.org/through/-/through-2.3.8.tgz", "integrity": "sha1-DdTJ/6q8NXlgsbckEV1+Doai4fU=" }, + "through2": { + "version": "2.0.5", + "resolved": "https://registry.npmjs.org/through2/-/through2-2.0.5.tgz", + "integrity": "sha512-/mrRod8xqpA+IHSLyGCQ2s8SPHiCDEeQJSep1jqLYeEUClOFG2Qsh+4FU6G9VeqpZnGW/Su8LQGc4YKni5rYSQ==", + "requires": { + 
"readable-stream": "~2.3.6", + "xtend": "~4.0.1" + }, + "dependencies": { + "isarray": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/isarray/-/isarray-1.0.0.tgz", + "integrity": "sha512-VLghIWNM6ELQzo7zwmcg0NmTVyWKYjvIeM83yjp0wRDTmUnrM678fQbcKBo6n2CJEF0szoG//ytg+TKla89ALQ==" + }, + "readable-stream": { + "version": "2.3.7", + "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-2.3.7.tgz", + "integrity": "sha512-Ebho8K4jIbHAxnuxi7o42OrZgF/ZTNcsZj6nRKyUmkhLFq8CHItp/fy6hQZuZmP/n3yZ9VBUbp4zz/mX8hmYPw==", + "requires": { + "core-util-is": "~1.0.0", + "inherits": "~2.0.3", + "isarray": "~1.0.0", + "process-nextick-args": "~2.0.0", + "safe-buffer": "~5.1.1", + "string_decoder": "~1.1.1", + "util-deprecate": "~1.0.1" + } + }, + "safe-buffer": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.2.tgz", + "integrity": "sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g==" + }, + "string_decoder": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.1.1.tgz", + "integrity": "sha512-n/ShnvDi6FHbbVfviro+WojiFzv+s8MPMHBczVePfUpDJLwoLT0ht1l4YwBCbi8pJAveEEdnkHyPyTP/mzRfwg==", + "requires": { + "safe-buffer": "~5.1.0" + } + } + } + }, "to-buffer": { "version": "1.1.1", "resolved": "https://registry.npmjs.org/to-buffer/-/to-buffer-1.1.1.tgz", diff --git a/package.json b/package.json index cf2afab1..ec76efa2 100644 --- a/package.json +++ b/package.json @@ -57,9 +57,11 @@ "check-disk-space": "^2.1.0", "cyclic-32": "^1.1.0", "debug": "^3.1.0", + "docker-parse-image": "^3.0.1", "drivelist": "^11.0.4", "file-disk": "^8.0.1", "file-type": "^8.0.0", + "gunzip-maybe": "^1.4.2", "gzip-stream": "^1.1.2", "lzma-native": "^8.0.6", "mountutils": "^1.3.20", @@ -67,6 +69,7 @@ "outdent": "^0.8.0", "partitioninfo": "^6.0.2", "rwmutex": "^1.0.0", + "tar-stream": "^2.2.0", "tslib": "^2.0.0", "unbzip2-stream": "^1.4.2", "unzip-stream": "^0.3.0", @@ -85,11 +88,13 @@ "@types/crc": "^3.4.0", "@types/debug": "0.0.31", "@types/file-type": "^5.2.1", + "@types/gunzip-maybe": "^1.4.0", "@types/lodash": "^4.14.108", "@types/mocha": "^5.2.4", "@types/node": "^10.17.60", "@types/progress": "^2.0.1", "@types/sinon": "^5.0.1", + "@types/tar-stream": "^2.2.2", "@types/yargs": "^11.0.0", "@types/yauzl": "^2.9.0", "chai": "^4.1.2", diff --git a/tsconfig.json b/tsconfig.json index 70b546d7..5a0eaa42 100644 --- a/tsconfig.json +++ b/tsconfig.json @@ -1,18 +1,17 @@ { - "compilerOptions": { - "typeRoots": ["typings", "node_modules/@types"], - "module": "commonjs", - "outDir": "build", - "sourceMap": true, - "declaration": true, - "target": "es2018", - "noImplicitAny": true, - "noUnusedLocals": true, - "noUnusedParameters": true, - "importHelpers": true, - "strictNullChecks": true - }, - "include": [ - "lib" - ] + "compilerOptions": { + "typeRoots": ["typings", "node_modules/@types"], + "module": "commonjs", + "outDir": "build", + "sourceMap": true, + "declaration": true, + "target": "es2019", + "noImplicitAny": true, + "noUnusedLocals": true, + "noUnusedParameters": true, + "importHelpers": true, + "strictNullChecks": true, + "allowSyntheticDefaultImports": true + }, + "include": ["lib"] } diff --git a/typings/gunzip-maybe/index.d.ts b/typings/gunzip-maybe/index.d.ts new file mode 100644 index 00000000..9366f812 --- /dev/null +++ b/typings/gunzip-maybe/index.d.ts @@ -0,0 +1 @@ +declare module "gunzip-maybe";