diff --git a/CHANGES/1509.removal b/CHANGES/1509.removal
new file mode 100644
index 000000000..8e858372f
--- /dev/null
+++ b/CHANGES/1509.removal
@@ -0,0 +1,4 @@
+Removed the manifest schema conversion machinery. If a manifest is stored locally in the newer
+format and an old client requests the v2 schema 1 manifest, the client will receive a 404. A v2
+schema 1 manifest is still mirrored from the remote source during sync, if available, and is
+served to old clients on request.
diff --git a/docs/workflows/host.rst b/docs/workflows/host.rst
index 4dbdbc358..7e2760233 100644
--- a/docs/workflows/host.rst
+++ b/docs/workflows/host.rst
@@ -113,10 +113,8 @@ Docker Output::
 
 .. note::
     When using a container client that cannot handle requested manifests in the new format
-    (schema 2), the manifests are rewritten into the old format (schema 1) on-the-fly by Pulp.
-    In general, the automatic conversion cannot be performed when the content is not available
-    in the storage. Therefore, it may be successful only if the content was previously synced
-    with the ``immediate`` policy.
+    (schema 2), the manifests will **not** be rewritten into the old format (schema 1); instead,
+    Pulp will raise a 404 (HTTP Not Found) error.
 
 
 Pull-Through Caching
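The behavior change documented above is observable directly from a client. A minimal sketch, assuming a Pulp registry reachable at `localhost:24817` with a distribution at `library/busybox` (both hypothetical) whose manifest is stored only in the schema 2 format:

```python
import requests

# Hypothetical registry host and repository path; adjust for a real deployment.
url = "http://localhost:24817/v2/library/busybox/manifests/latest"
# An old client advertises only the legacy schema 1 media type.
headers = {"Accept": "application/vnd.docker.distribution.manifest.v1+json"}

response = requests.get(url, headers=headers)
# With the conversion machinery removed, Pulp no longer rewrites the stored
# schema 2 manifest on the fly; expect 404 unless a schema 1 manifest was
# mirrored from the remote source during sync.
print(response.status_code)  # 404
```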
""" manifest_media_type = tag.tagged_manifest.media_type if manifest_media_type in get_accepted_media_types(self.request.headers): @@ -70,8 +79,7 @@ def issue_tag_redirect(self, tag): tag.name, tag.tagged_manifest, MEDIA_TYPE.MANIFEST_V1_SIGNED ) else: - # execute the schema conversion - return self.redirect_to_content_app("manifests", tag.name) + raise ManifestNotFound(reference=tag.name) def issue_manifest_redirect(self, manifest): """ diff --git a/pulp_container/app/registry.py b/pulp_container/app/registry.py index c93723ec2..dee5a33c1 100644 --- a/pulp_container/app/registry.py +++ b/pulp_container/app/registry.py @@ -24,7 +24,6 @@ from pulp_container.app.cache import RegistryContentCache from pulp_container.app.models import ContainerDistribution, Tag, Blob, Manifest, BlobManifest -from pulp_container.app.schema_convert import Schema2toSchema1ConverterWrapper from pulp_container.app.tasks import download_image_data from pulp_container.app.utils import ( calculate_digest, @@ -212,8 +211,8 @@ async def get_tag(self, request): } return await self.dispatch_tag(request, tag, response_headers) - # convert if necessary - return await Registry.dispatch_converted_schema(tag, accepted_media_types, path) + # return 404 in case the client is requesting docker manifest v2 schema 1 + raise PathNotResolved(tag_name) async def dispatch_tag(self, request, tag, response_headers): """ @@ -239,40 +238,6 @@ async def dispatch_tag(self, request, tag, response_headers): else: return await Registry._dispatch(artifact, response_headers) - @staticmethod - async def dispatch_converted_schema(tag, accepted_media_types, path): - """ - Convert a manifest from the format schema 2 to the format schema 1. - - The format is converted on-the-go and created resources are not stored for further uses. - The conversion is made after each request which does not accept the format for schema 2. - - Args: - tag: A tag object which contains reference to tagged manifests and config blobs. - accepted_media_types: Accepted media types declared in the accept header. - path: A path of a repository. - - Raises: - PathNotResolved: There was not found a valid conversion for the specified tag. - - Returns: - :class:`aiohttp.web.StreamResponse` or :class:`aiohttp.web.Response`: The response - streamed back to the client. 
diff --git a/pulp_container/app/registry.py b/pulp_container/app/registry.py
index c93723ec2..dee5a33c1 100644
--- a/pulp_container/app/registry.py
+++ b/pulp_container/app/registry.py
@@ -24,7 +24,6 @@
 
 from pulp_container.app.cache import RegistryContentCache
 from pulp_container.app.models import ContainerDistribution, Tag, Blob, Manifest, BlobManifest
-from pulp_container.app.schema_convert import Schema2toSchema1ConverterWrapper
 from pulp_container.app.tasks import download_image_data
 from pulp_container.app.utils import (
     calculate_digest,
@@ -212,8 +211,8 @@ async def get_tag(self, request):
             }
             return await self.dispatch_tag(request, tag, response_headers)
 
-        # convert if necessary
-        return await Registry.dispatch_converted_schema(tag, accepted_media_types, path)
+        # return 404 in case the client is requesting docker manifest v2 schema 1
+        raise PathNotResolved(tag_name)
 
     async def dispatch_tag(self, request, tag, response_headers):
         """
@@ -239,40 +238,6 @@ async def dispatch_tag(self, request, tag, response_headers):
         else:
             return await Registry._dispatch(artifact, response_headers)
 
-    @staticmethod
-    async def dispatch_converted_schema(tag, accepted_media_types, path):
-        """
-        Convert a manifest from the format schema 2 to the format schema 1.
-
-        The format is converted on-the-go and created resources are not stored for further uses.
-        The conversion is made after each request which does not accept the format for schema 2.
-
-        Args:
-            tag: A tag object which contains reference to tagged manifests and config blobs.
-            accepted_media_types: Accepted media types declared in the accept header.
-            path: A path of a repository.
-
-        Raises:
-            PathNotResolved: There was not found a valid conversion for the specified tag.
-
-        Returns:
-            :class:`aiohttp.web.StreamResponse` or :class:`aiohttp.web.Response`: The response
-                streamed back to the client.
-
-        """
-        schema1_converter = Schema2toSchema1ConverterWrapper(tag, accepted_media_types, path)
-        try:
-            result = await sync_to_async(schema1_converter.convert)()
-        except RuntimeError:
-            raise PathNotResolved(tag.name)
-
-        response_headers = {
-            "Docker-Content-Digest": result.digest,
-            "Content-Type": result.content_type,
-            "Docker-Distribution-API-Version": "registry/2.0",
-        }
-        return web.Response(text=result.text, headers=response_headers)
-
     @RegistryContentCache(
         base_key=lambda req, cac: Registry.find_base_path_cached(req, cac),
         auth=lambda req, cac, bk: Registry.auth_cached(req, cac, bk),
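On the content-app side, `get_tag` now takes the same short path: if none of the accepted media types matches the stored manifest, the request fails immediately instead of falling back to conversion. A condensed, self-contained sketch of the decision — the helper and exception below are stand-ins, not the actual `Registry` internals:

```python
import asyncio

class PathNotResolved(Exception):
    """Stand-in for Pulp's PathNotResolved, which surfaces as HTTP 404."""

async def get_tag_sketch(stored_media_type, accepted_media_types, tag_name):
    if stored_media_type in accepted_media_types:
        return f"dispatching stored manifest for {tag_name!r}"
    # Previously the registry fell back to Schema2toSchema1ConverterWrapper
    # here; now the lookup fails outright.
    raise PathNotResolved(tag_name)

V1 = "application/vnd.docker.distribution.manifest.v1+json"
V2 = "application/vnd.docker.distribution.manifest.v2+json"

try:
    asyncio.run(get_tag_sketch(V2, [V1], "latest"))
except PathNotResolved as exc:
    print(f"404 for tag {exc}")
```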
diff --git a/pulp_container/app/schema_convert.py b/pulp_container/app/schema_convert.py
deleted file mode 100644
index f8adbf8b0..000000000
--- a/pulp_container/app/schema_convert.py
+++ /dev/null
@@ -1,284 +0,0 @@
-import datetime
-import hashlib
-import json
-import logging
-
-from collections import namedtuple
-from jwkest import jws, jwk, ecc
-
-from django.core.exceptions import ObjectDoesNotExist
-
-from pulp_container.constants import MEDIA_TYPE
-
-log = logging.getLogger(__name__)
-
-FS_Layer = namedtuple("FS_Layer", "layer_id uncompressed_digest history")
-ConversionResult = namedtuple("ConversionResult", "text digest content_type")
-
-
-class Schema2toSchema1ConverterWrapper:
-    """An abstraction around creating new manifests of the format schema 1."""
-
-    def __init__(self, tag, accepted_media_types, path):
-        """Store a tag object, accepted media type, and path."""
-        self.path = path
-        self.tag = tag
-        self.accepted_media_types = accepted_media_types
-        self.name = path
-
-    def convert(self):
-        """Convert a manifest to schema 1.
-
-        Raises:
-            RuntimeError: If the conversion was not successful.
-
-        Returns:
-            ConversionResult: A converted manifest, corresponding digest, and content type.
-
-        """
-        if self.tag.tagged_manifest.media_type == MEDIA_TYPE.MANIFEST_V2:
-            schema_with_signature, digest = self._convert_schema(self.tag.tagged_manifest)
-            return ConversionResult(schema_with_signature, digest, MEDIA_TYPE.MANIFEST_V1_SIGNED)
-        elif self.tag.tagged_manifest.media_type == MEDIA_TYPE.MANIFEST_LIST:
-            legacy = self._get_legacy_manifest()
-            if legacy.media_type in self.accepted_media_types:
-                # return legacy without conversion
-                legacy_schema = _jsonDumps(_get_manifest_dict(legacy))
-                return ConversionResult(legacy_schema, legacy.digest, legacy.media_type)
-            elif legacy.media_type == MEDIA_TYPE.MANIFEST_V2:
-                schema_with_signature, digest = self._convert_schema(legacy)
-                return ConversionResult(
-                    schema_with_signature, digest, MEDIA_TYPE.MANIFEST_V1_SIGNED
-                )
-            else:
-                raise RuntimeError()
-
-    def _convert_schema(self, manifest):
-        config_dict = _get_config_dict(manifest)
-        manifest_dict = _get_manifest_dict(manifest)
-
-        try:
-            converter = Schema2toSchema1Converter(
-                manifest_dict, config_dict, name=self.name, tag=self.tag.name
-            )
-        except ValueError:
-            raise RuntimeError()
-
-        converted_schema, schema_with_signature = converter.convert()
-
-        # According to the docs https://docs.docker.com/registry/spec/api/#content-digests,
-        # the digest header is deduced from the manifest body without the signature content.
-        # Therefore, the digest is computed from the formatted and converted manifest here.
-        digest = compute_digest(converted_schema)
-        return schema_with_signature, digest
-
-    def _get_legacy_manifest(self):
-        ml = self.tag.tagged_manifest.listed_manifests.all()
-        for manifest in ml:
-            m = manifest.manifest_lists.first()
-            if m.architecture == "amd64" and m.os == "linux":
-                return m.manifest_list
-
-        raise RuntimeError()
-
-
-class Schema2toSchema1Converter:
-    """
-    Converter class from schema 2 to schema 1.
-
-    Initialize it with a manifest and a config layer JSON documents,
-    and call convert() to obtain the signed manifest, as a JSON-encoded string.
-    """
-
-    EMPTY_BLOB = "sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4"
-
-    def __init__(self, manifest, config_layer, name, tag):
-        """
-        Initializer needs a manifest and a config layer as JSON documents.
-        """
-        self.name = name
-        self.tag = tag
-        self.manifest = manifest
-        self.config_layer = config_layer
-        self.fs_layers = []
-        self.history = []
-
-        self._assert_foreign_layers()
-
-    def _assert_foreign_layers(self):
-        for layer in self.manifest["layers"]:
-            if layer.get("mediaType") == MEDIA_TYPE.FOREIGN_BLOB:
-                raise ValueError("The conversion cannot be applied for foreign layers.")
-
-    def convert(self):
-        """
-        Convert manifest from schema 2 to schema 1
-        """
-        self.compute_layers()
-        manifest = dict(
-            name=self.name,
-            tag=self.tag,
-            architecture=self.config_layer["architecture"],
-            schemaVersion=1,
-            fsLayers=self.fs_layers,
-            history=self.history,
-        )
-
-        key = jwk.ECKey().load_key(ecc.P256)
-        manifest_data = _jsonDumps(manifest)
-        signed_manifest_data = sign(manifest_data, key)
-        return manifest_data, signed_manifest_data
-
-    def compute_layers(self):
-        """
-        Compute layers to be present in the converted image.
-        Empty (throwaway) layers will be created to store image metadata
-        """
-        # Layers in v2s1 are in reverse order from v2s2
-        fs_layers = self._compute_fs_layers()
-        self.fs_layers = [dict(blobSum=x[0]) for x in fs_layers]
-        # Compute v1 compatibility
-        parent = None
-        history_entries = self.history = []
-
-        fs_layers_count = len(fs_layers)
-        # Reverse list so we can compute parent/child properly
-        fs_layers.reverse()
-        for i, fs_layer in enumerate(fs_layers):
-            layer_id = self._compute_layer_id(fs_layer.layer_id, fs_layer.uncompressed_digest, i)
-            config = self._compute_v1_compatibility_config(
-                layer_id, fs_layer, last_layer=(i == fs_layers_count - 1)
-            )
-            if parent is not None:
-                config["parent"] = parent
-            parent = layer_id
-            history_entries.append(dict(v1Compatibility=_jsonDumpsCompact(config)))
-        # Reverse again for proper order
-        history_entries.reverse()
-
-    def _compute_fs_layers(self):
-        """Utility function to return a list of FS_Layer objects"""
-        layers = reversed(self.manifest["layers"])
-        config_layer_history = reversed(self.config_layer["history"])
-        diff_ids = reversed(self.config_layer["rootfs"]["diff_ids"])
-        fs_layers = []
-        curr_compressed_dig = next(layers)["digest"]
-        curr_uncompressed_dig = next(diff_ids)
-        for curr_hist in config_layer_history:
-            if curr_hist.get("empty_layer"):
-                layer_id = self.EMPTY_BLOB
-                uncompressed_dig = None
-            else:
-                layer_id = curr_compressed_dig
-                uncompressed_dig = curr_uncompressed_dig
-                try:
-                    curr_compressed_dig = next(layers)["digest"]
-                    curr_uncompressed_dig = next(diff_ids)
-                except StopIteration:
-                    curr_compressed_dig = self.EMPTY_BLOB
-                    curr_uncompressed_dig = None
-            fs_layers.append(FS_Layer(layer_id, uncompressed_dig, curr_hist))
-        return fs_layers
-
-    def _compute_v1_compatibility_config(self, layer_id, fs_layer, last_layer=False):
-        """Utility function to compute the v1 compatibility"""
-        if last_layer:
-            # The whole config layer becomes part of the v1compatibility
-            # (minus history and rootfs)
-            config = dict(self.config_layer)
-            config.pop("history", None)
-            config.pop("rootfs", None)
-        else:
-            # both `created` and `created_by` are optional according to the OCI specs
-            container_config = dict(Cmd=[fs_layer.history.get("created_by", "")])
-            created = fs_layer.history.get("created", "")
-            config = dict(
-                created=created,
-                container_config=container_config,
-            )
-        if fs_layer.uncompressed_digest is None:
-            config["throwaway"] = True
-        config["id"] = layer_id
-        return config
-
-    @classmethod
-    def _compute_layer_id(cls, compressed_dig, uncompressed_dig, layer_index):
-        """
-        We need to make up an image ID for each layer.
-        We will digest:
-        * the compressed digest of the layer
-        * the uncompressed digest (if present; it will be missing for throw-away layers)
-        * the zero-padded integer of the layer number
-        The last one is added so we can get different image IDs for throw-away layers.
-        """
-        dig = hashlib.sha256(compressed_dig.encode("ascii"))
-        if uncompressed_dig:
-            dig.update(uncompressed_dig.encode("ascii"))
-        layer_count = "%06d" % layer_index
-        dig.update(layer_count.encode("ascii"))
-        layer_id = dig.hexdigest()
-        return layer_id
-
-
-def _jsonDumps(data):
-    return json.dumps(data, indent=3, sort_keys=True, separators=(",", ": "))
-
-
-def _jsonDumpsCompact(data):
-    return json.dumps(data, sort_keys=True, separators=(",", ":"))
-
-
-def sign(data, key):
-    """
-    Sign the JSON data with the passed key.
-    """
-    now = datetime.datetime.utcnow().replace(microsecond=0).isoformat() + "Z"
-    header = dict(alg="ES256", jwk=key.serialize())
-    protected = dict(formatLength=len(data) - 2, formatTail=jws.b64encode_item(data[-2:]), time=now)
-    _jws = jws.JWS(data, **header)
-    protectedHeader, payload, signature = _jws.sign_compact([key], protected=protected).split(".")
-    signatures = [dict(header=header, signature=signature, protected=protectedHeader)]
-    jsig = _jsonDumps(dict(signatures=signatures))[1:-2]
-    arr = [data[:-2], ",", jsig, data[-2:]]
-    # Add the signature block at the end of the json string, keeping the formatting
-    data_with_signature = "".join(arr)
-    return data_with_signature
-
-
-def compute_digest(manifest_data):
-    """
-    Compute the digest from the passed manifest data.
-    """
-
-    hexdigest = hashlib.sha256(manifest_data.encode("utf-8")).hexdigest()
-    digest = "sha256:{}".format(hexdigest)
-    return digest
-
-
-def _get_config_dict(manifest):
-    try:
-        config_artifact = manifest.config_blob._artifacts.get()
-    except ObjectDoesNotExist:
-        raise RuntimeError()
-    return _get_dict(config_artifact)
-
-
-def _get_manifest_dict(manifest):
-    try:
-        manifest_artifact = manifest._artifacts.get()
-    except ObjectDoesNotExist:
-        raise RuntimeError()
-    return _get_dict(manifest_artifact)
-
-
-def _get_dict(artifact):
-    try:
-        data = json.load(artifact.file)
-        artifact.file.close()
-        return data
-    except FileNotFoundError:
-        raise Exception(
-            "Expected manifest file 'sha256:{}' needed for schema conversion is not found".format(
                artifact.sha256
-            )
-        )
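The deleted converter digested the formatted manifest *before* the JWS signature block was attached, per the linked registry documentation, so the `Docker-Content-Digest` header never covered the signatures. A standalone sketch of that calculation, using the same serialization settings as the removed `_jsonDumps` helper (the sample manifest dict is illustrative only):

```python
import hashlib
import json

def compute_digest(manifest_data):
    # Mirrors the removed compute_digest(): SHA-256 over the exact
    # serialized bytes, prefixed with the algorithm name.
    hexdigest = hashlib.sha256(manifest_data.encode("utf-8")).hexdigest()
    return "sha256:{}".format(hexdigest)

# The converter serialized manifests with three-space indentation and
# sorted keys; the signature block was never part of the hashed payload.
manifest = {"schemaVersion": 1, "name": "test-repo", "tag": "latest"}
serialized = json.dumps(manifest, indent=3, sort_keys=True, separators=(",", ": "))
print(compute_digest(serialized))
```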
diff --git a/pulp_container/tests/functional/api/test_content_cache.py b/pulp_container/tests/functional/api/test_content_cache.py
index 7521581b9..ecd135964 100644
--- a/pulp_container/tests/functional/api/test_content_cache.py
+++ b/pulp_container/tests/functional/api/test_content_cache.py
@@ -26,6 +26,9 @@
     get_auth_for_url,
 )
 
+
+from pulp_container.constants import MEDIA_TYPE
+
 STANDARD_FILE_STORAGE_FRAMEWORKS = [
     "django.core.files.storage.FileSystemStorage",
     "pulpcore.app.models.storage.FileSystem",
@@ -112,7 +115,10 @@ def test_04_multiple_distributions(self):
     def test_05_different_headers(self):
         """Simulate a scenario where a user queries manifests with different Accept headers."""
         self.check_content(cache_status_found_func)
-        self.check_content(cache_status_first_func, headers={"Accept": "*/*"})
+        self.check_content(
+            cache_status_first_func,
+            headers={"Accept": f"{MEDIA_TYPE.INDEX_OCI},{MEDIA_TYPE.MANIFEST_LIST}"},
+        )
 
     def test_06_invalidate_multiple_distributions(self):
         """Test if updating the repository referenced by multiple distributions invalidates all."""
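The cache test above stops using a bare `*/*` Accept header and pins concrete media types, presumably because the new guard compares literal media-type strings. A toy illustration of the difference, assuming a simplified matcher that does not expand wildcards:

```python
MANIFEST_LIST = "application/vnd.docker.distribution.manifest.list.v2+json"
INDEX_OCI = "application/vnd.oci.image.index.v1+json"

def is_acceptable(stored_type, accept_header):
    # Simplified literal comparison; real negotiation may treat wildcards differently.
    accepted = [part.split(";")[0].strip() for part in accept_header.split(",")]
    return stored_type in accepted

print(is_acceptable(MANIFEST_LIST, "*/*"))                           # False
print(is_acceptable(MANIFEST_LIST, f"{INDEX_OCI},{MANIFEST_LIST}"))  # True
```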
diff --git a/pulp_container/tests/functional/api/test_pull_content.py b/pulp_container/tests/functional/api/test_pull_content.py
index 2fd8faff2..0272cbb9f 100644
--- a/pulp_container/tests/functional/api/test_pull_content.py
+++ b/pulp_container/tests/functional/api/test_pull_content.py
@@ -1,7 +1,6 @@
 """Tests that verify that images served by Pulp can be pulled."""
 import contextlib
 import hashlib
-import json
 import requests
 import unittest
 import uuid
@@ -169,22 +168,8 @@ def test_api_performes_schema_conversion(self):
         content_response = requests.get(
             latest_image_url, auth=auth, headers={"Accept": MEDIA_TYPE.MANIFEST_V1}
         )
-        content_response.raise_for_status()
-        base_content_type = content_response.headers["Content-Type"].split(";")[0]
-        self.assertIn(base_content_type, {MEDIA_TYPE.MANIFEST_V1, MEDIA_TYPE.MANIFEST_V1_SIGNED})
-
-        header_digest = content_response.headers["Docker-Content-Digest"]
-        converted_manifest = json.loads(content_response.content)
-        converted_manifest.pop("signatures")
-        manifest_string = json.dumps(
-            converted_manifest, indent=3, sort_keys=True, separators=(",", ": ")
-        ).encode("utf-8")
-        # the header digest should be equal to the SHA256 hash computed from
-        # a manifest without signatures
-        computed_digest = hashlib.sha256(manifest_string).hexdigest()
-        self.assertEqual(
-            computed_digest, header_digest.split(":")[1], "The manifest digests are not equal"
-        )
+        with self.assertRaises(requests.exceptions.HTTPError):
+            content_response.raise_for_status()
 
     def test_create_empty_blob_on_the_fly(self):
         """
diff --git a/pulp_container/tests/unit/test_convert.py b/pulp_container/tests/unit/test_convert.py
deleted file mode 100644
index fa4b4bdd3..000000000
--- a/pulp_container/tests/unit/test_convert.py
+++ /dev/null
@@ -1,175 +0,0 @@
-import base64
-import json
-
-from django.test import TestCase
-
-from jwkest import jws
-
-from pulp_container.app import schema_convert
-
-
-class Test(TestCase):
-    """Schema2toSchema1Converter test class"""
-
-    def test_convert(self):
-        """Test schema converter on a known manifest"""
-        cnv = schema_convert.Schema2toSchema1Converter(
-            MANIFEST, CONFIG_BLOB, "test-repo", "tes-tag"
-        )
-        converted_mf, signed_mf = cnv.convert()
-        compare_manifests(converted_mf, signed_mf)
-        validate_signature(signed_mf)
-
-        empty = dict(blobSum=cnv.EMPTY_BLOB)
-        assert [
-            dict(blobSum="sha256:layer1"),
-            empty,
-            empty,
-            empty,
-            empty,
-            dict(blobSum="sha256:base"),
-        ] == cnv.fs_layers
-
-    def test_manifest_with_foreign_layers_conversion(self):
-        """Test if the conversion of a manifest with foreign layers fails gracefully"""
-        try:
-            schema_convert.Schema2toSchema1Converter(
-                MANIFEST_WITH_FOREIGN_BLOBS, CONFIG_BLOB, "test-repo", "tes-tag"
-            )
-        except ValueError:
-            pass
-        else:
-            assert False
-
-    def test_compute_layers(self):
-        """Test that computing the layers produces the expected data"""
-        cnv = schema_convert.Schema2toSchema1Converter(
-            MANIFEST, CONFIG_BLOB, "test-repo", "tes-tag"
-        )
-        cnv.compute_layers()
-        empty = dict(blobSum=cnv.EMPTY_BLOB)
-        assert [
-            dict(blobSum="sha256:layer1"),
-            empty,
-            empty,
-            empty,
-            empty,
-            dict(blobSum="sha256:base"),
-        ] == cnv.fs_layers
-        assert [
-            {
-                "v1Compatibility": '{"architecture":"amd64","author":"Mihai Ibanescu ","config":{"Cmd":["/bin/bash"],"Hostname":"decafbad"},"container_config":{"Hostname":"decafbad","Tty":false},"created":"2019-09-05T21:28:52.173079282Z","docker_version":"1.13.1","id":"8e1cc996a0d319582f770dbded72777c5b8c5c46859c506db5acc674bc42ee51","parent":"3d0d31cc2270f872e56b7b7e35fb4fb7796797a8979ae180a2baee7107a5eb5b"}'  # noqa
-            },
-            {
-                "v1Compatibility": '{"container_config":{"Cmd":[""]},"created":"2019-09-05T21:28:46.173079282Z","id":"3d0d31cc2270f872e56b7b7e35fb4fb7796797a8979ae180a2baee7107a5eb5b","parent":"6474547c15d178825c70a42efdc59a88c6e30d764d184b415f32484562803446","throwaway":true}'  # noqa
-            },
-            {
-                "v1Compatibility": '{"container_config":{"Cmd":["/bin/sh -c #(nop) MAINTAINER Mihai Ibanescu "]},"created":"2019-09-05T21:28:43.305854958Z","id":"6474547c15d178825c70a42efdc59a88c6e30d764d184b415f32484562803446","parent":"5708420291e0a86d8dc08ec40b2c1b1799117c33fe85032b87227632f70c1018","throwaway":true}'  # noqa
-            },
-            {
-                "v1Compatibility": '{"container_config":{"Cmd":["/bin/sh -c #(nop) CMD [\\"/bin/bash\\"]"]},"created":"2018-03-06T00:48:12.679169547Z","id":"5708420291e0a86d8dc08ec40b2c1b1799117c33fe85032b87227632f70c1018","parent":"9e9220abceaf86f2ad7820ae8124d01223d8ec022b9a6cb8c99a8ae1747137ea","throwaway":true}'  # noqa
-            },
-            {
-                "v1Compatibility": '{"container_config":{"Cmd":["/bin/sh -c #(nop) LABEL name=CentOS Base Image vendor=CentOS license=GPLv2 build-date=20180302"]},"created":"2018-03-06T00:48:12.458578213Z","id":"9e9220abceaf86f2ad7820ae8124d01223d8ec022b9a6cb8c99a8ae1747137ea","parent":"cb48c1db9c0a1ede7c85c85351856fc3e40e750931295c8fac837c63b403586a","throwaway":true}'  # noqa
-            },
-            {
-                "v1Compatibility": '{"container_config":{"Cmd":["/bin/sh -c #(nop) ADD file:FILE_CHECKSUM in / "]},"created":"2018-03-06T00:48:12.077095981Z","id":"cb48c1db9c0a1ede7c85c85351856fc3e40e750931295c8fac837c63b403586a"}'  # noqa
-            },
-        ] == cnv.history
-
-
-def compare_manifests(converted_mf, signed_mf):
-    """
-    Compare the manifests without signatures.
-    """
-    converted_mf_json = json.loads(converted_mf)
-    signed_mf_json = json.loads(signed_mf)
-
-    signed_mf_json.pop("signatures")
-
-    assert converted_mf_json == signed_mf_json
-
-
-def validate_signature(signed_mf):
-    """
-    Validate the signature of a signed manifest
-
-    A signed manifest is a JSON document with a signature attribute
-    as the last element.
-    """
-    # In order to validate the signature, we need the exact original payload
-    # (the document without the signature). We cannot json.load the document
-    # and get rid of the signature, the payload would likely end up
-    # differently because of differences in field ordering and indentation.
-    # So we need to strip the signature using plain string manipulation, and
-    # add back a trailing }
-
-    # strip the signature block
-    payload, sep, signatures = signed_mf.partition(' "signatures"')
-    # get rid of the trailing ,\n, and add \n}
-    jw_payload = payload[:-2] + "\n}"
-    # base64-encode and remove any trailing =
-    jw_payload = base64.urlsafe_b64encode(jw_payload.encode("ascii")).decode("ascii").rstrip("=")
-    # add payload as a json attribute, and then add the signatures back
-    complete_msg = payload + ' "payload": "{}",\n'.format(jw_payload) + sep + signatures
-    _jws = jws.JWS()
-    _jws.verify_json(complete_msg.encode("ascii"))
-
-
-MANIFEST = dict(schemaVersion=2, layers=[dict(digest="sha256:base"), dict(digest="sha256:layer1")])
-
-MANIFEST_WITH_FOREIGN_BLOBS = dict(
-    schemaVersion=2,
-    layers=[
-        dict(
-            digest="sha256:base",
-            mediaType="application/vnd.docker.image.rootfs.foreign.diff.tar.gzip",
-        ),
-        dict(digest="sha256:layer1"),
-    ],
-)
-
-CONFIG_BLOB = dict(
-    architecture="amd64",
-    author="Mihai Ibanescu ",
-    config=dict(Hostname="decafbad", Cmd=["/bin/bash"]),
-    container_config=dict(Hostname="decafbad", Tty=False),
-    created="2019-09-05T21:28:52.173079282Z",
-    docker_version="1.13.1",
-    history=[
-        {
-            "created": "2018-03-06T00:48:12.077095981Z",
-            "created_by": "/bin/sh -c #(nop) ADD file:FILE_CHECKSUM in / ",
-        },
-        {
-            "created": "2018-03-06T00:48:12.458578213Z",
-            "created_by": "/bin/sh -c #(nop) LABEL name=CentOS Base Image vendor=CentOS "
-            "license=GPLv2 build-date=20180302",
-            "empty_layer": True,
-        },
-        {
-            "created": "2018-03-06T00:48:12.679169547Z",
-            "created_by": '/bin/sh -c #(nop) CMD ["/bin/bash"]',
-            "empty_layer": True,
-        },
-        {
-            "created": "2019-09-05T21:28:43.305854958Z",
-            "author": "Mihai Ibanescu ",
-            "created_by": "/bin/sh -c #(nop) MAINTAINER Mihai Ibanescu ",
-            "empty_layer": True,
-        },
-        {
-            "created": "2019-09-05T21:28:46.173079282Z",
-            "empty_layer": True,
-        },
-        {
-            "created": "2019-09-05T21:28:52.173079282Z",
-            "author": "Mihai Ibanescu ",
-            "created_by": "/bin/sh -c touch /usr/share/dummy.txt",
-        },
-    ],
-    rootfs={
-        "type": "layers",
-        "diff_ids": ["sha256:uncompressed_base", "sha256:uncompressed_layer1"],
-    },
-)
diff --git a/requirements.txt b/requirements.txt
index 4e74d379e..e13b435f3 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,4 +1,3 @@
 jsonschema>=4.4,<4.22
 pulpcore>=3.43.0,<3.55
-pyjwkest>=1.4,<=1.4.2
 pyjwt[crypto]>=2.4,<2.9
diff --git a/staging_docs/user/tutorials/01-sync-and-host.md b/staging_docs/user/tutorials/01-sync-and-host.md
index fdc8027c2..9339349c9 100644
--- a/staging_docs/user/tutorials/01-sync-and-host.md
+++ b/staging_docs/user/tutorials/01-sync-and-host.md
@@ -223,7 +223,5 @@ More info:
 
 !!! note
     When using a container client that cannot handle requested manifests in the new format
-    (schema 2), the manifests are rewritten into the old format (schema 1) on-the-fly by Pulp.
-    In general, the automatic conversion cannot be performed when the content is not available
-    in the storage. Therefore, it may be successful only if the content was previously synced
-    with the `immediate` policy.
+    (schema 2), the manifests will **not** be rewritten into the old format (schema 1); instead,
+    Pulp will raise a 404 (HTTP Not Found) error.