diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml index 269d36d2..da78b41e 100644 --- a/.github/workflows/docs.yml +++ b/.github/workflows/docs.yml @@ -49,5 +49,3 @@ jobs: with: branch: gh-pages folder: site - -# Made with Bob diff --git a/.secrets.baseline b/.secrets.baseline index 9324636a..e0564e09 100644 --- a/.secrets.baseline +++ b/.secrets.baseline @@ -3,7 +3,7 @@ "files": "^.secrets.baseline$", "lines": null }, - "generated_at": "2025-12-25T19:13:06Z", + "generated_at": "2026-01-14T11:35:47Z", "plugins_used": [ { "name": "AWSKeyDetector" @@ -143,6 +143,16 @@ "verified_result": null } ], + "test/src/test_backup.py": [ + { + "hashed_secret": "4dfd3a58b4820476afe7efa2e2c52b267eec876a", + "is_secret": false, + "is_verified": false, + "line_number": 753, + "type": "Secret Keyword", + "verified_result": null + } + ], "test/src/test_db2.py": [ { "hashed_secret": "a4b48a81cdab1e1a5dd37907d6c85ca1c61ddc7c", diff --git a/mkdocs.yml b/mkdocs.yml index adec8bab..cb5a0d82 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -112,5 +112,3 @@ extra: social: - icon: fontawesome/brands/github link: https://github.com/ibm-mas/python-devops - -# Made with Bob diff --git a/src/mas/devops/backup.py b/src/mas/devops/backup.py new file mode 100644 index 00000000..172f26e4 --- /dev/null +++ b/src/mas/devops/backup.py @@ -0,0 +1,413 @@ +# ***************************************************************************** +# Copyright (c) 2026 IBM Corporation and other Contributors. +# +# All rights reserved. 
This program and the accompanying materials +# are made available under the terms of the Eclipse Public License v1.0 +# which accompanies this distribution, and is available at +# http://www.eclipse.org/legal/epl-v10.html +# +# ***************************************************************************** +import logging +import os +import yaml +from openshift.dynamic import DynamicClient +from openshift.dynamic.exceptions import NotFoundError +import boto3 +from botocore.exceptions import ClientError, NoCredentialsError + +logger = logging.getLogger(name=__name__) + + +def createBackupDirectories(paths: list) -> bool: + """ + Create backup directories if they do not exist + """ + try: + for path in paths: + os.makedirs(path, exist_ok=True) + logger.info(msg=f"Created backup directory: {path}") + return True + except Exception as e: + logger.error(msg=f"Error creating backup directories: {e}") + return False + + +def copyContentsToYamlFile(file_path: str, content: dict) -> bool: + """ + Write dictionary content to a YAML file + """ + try: + # Create a custom dumper that uses literal style for multi-line strings + class LiteralDumper(yaml.SafeDumper): + pass + + def str_representer(dumper, data): + if '\n' in data: + return dumper.represent_scalar('tag:yaml.org,2002:str', data, style='|') + return dumper.represent_scalar('tag:yaml.org,2002:str', data) + + LiteralDumper.add_representer(str, str_representer) + + with open(file_path, 'w') as yaml_file: + yaml.dump(content, yaml_file, default_flow_style=False, Dumper=LiteralDumper) + return True + except Exception as e: + logger.error(f"Error writing to YAML file {file_path}: {e}") + return False + + +def filterResourceData(data: dict) -> dict: + """ + Filter metadata from Resource data and create minimal dict + """ + metadata_fields_to_remove = [ + 'annotations', + 'creationTimestamp', + 'generation', + 'resourceVersion', + 'selfLink', + 'ownerReferences', + 'uid', + 'managedFields' + ] + filteredCopy = data.copy() + if 
'metadata' in filteredCopy: + for field in metadata_fields_to_remove: + if field in filteredCopy['metadata']: + del filteredCopy['metadata'][field] + + if 'status' in filteredCopy: + del filteredCopy['status'] + + # Remove labels with uid + # this will cause problem when restoring the backup + if 'metadata' in filteredCopy and 'labels' in filteredCopy['metadata']: + for key in list(filteredCopy['metadata']['labels'].keys()): + if "uid" in key.lower(): + filteredCopy['metadata']['labels'].pop(key) + + return filteredCopy + + +def extract_secrets_from_dict(data, secret_names=None): + """ + Recursively extract secret names from a dictionary structure. + Looks for keys like 'secretName' and 'secretRef.name' and collects their values. + + Args: + data: Dictionary to search + secret_names: Set to collect secret names (created if None) + + Returns: + Set of secret names found + """ + if secret_names is None: + secret_names = set() + + if isinstance(data, dict): + for key, value in data.items(): + # Check if this key is 'secretName' and has a string value + if (key == 'secretName' or 'secretname' in key.lower()) and isinstance(value, str) and value: + secret_names.add(value) + # Check if this key contains 'secretRef' and contains a 'name' field + elif 'SecretRef' in key and isinstance(value, dict): + if 'name' in value and isinstance(value['name'], str) and value['name']: + secret_names.add(value['name']) + # Recursively search nested structures + elif isinstance(value, (dict, list)): + extract_secrets_from_dict(value, secret_names) + + elif isinstance(data, list): + for item in data: + if isinstance(item, (dict, list)): + extract_secrets_from_dict(item, secret_names) + + return secret_names + + +def backupResources(dynClient: DynamicClient, kind: str, api_version: str, backup_path: str, namespace=None, name=None, labels=None) -> tuple: + """ + Backup resources of a given kind. + If name is provided, backs up that specific resource. 
+ If name is None, backs up all resources of that kind. + If namespace is None, backs up cluster-level resources. + If labels is provided, filters resources by label selectors. + + Args: + dynClient: Kubernetes dynamic client + kind: Resource kind (e.g., 'MongoCfg', 'Secret', 'ClusterRole') + api_version: API version (e.g., 'config.mas.ibm.com/v1') + backup_path: Path to save backup files + namespace: Optional namespace to backup from (None for cluster-level resources) + name: Optional specific resource name + labels: Optional list of label selectors (e.g., ['app=myapp', 'env=prod']) + + Returns: + tuple: (backed_up_count: int, not_found_count: int, failed_count: int, discovered_secrets: set) + """ + discovered_secrets = set() + backed_up_count = 0 + not_found_count = 0 + failed_count = 0 + + # Build label selector string if labels provided + label_selector = None + if labels: + label_selector = ','.join(labels) + + # Determine scope description for logging + scope_desc = f"namespace '{namespace}'" if namespace else "cluster-level" + label_desc = f" with labels [{label_selector}]" if label_selector else "" + + try: + resourceAPI = dynClient.resources.get(api_version=api_version, kind=kind) + + if name: + # Backup specific named resource + logger.info(f"Backing up {kind} '{name}' from {scope_desc} (API version: {api_version}){label_desc}") + try: + if namespace: + resource = resourceAPI.get(name=name, namespace=namespace) + else: + resource = resourceAPI.get(name=name) + + if resource: + resources_to_process = [resource] + else: + logger.info(f"{kind} '{name}' not found in {scope_desc}, skipping backup") + not_found_count = 1 + return (backed_up_count, not_found_count, failed_count, discovered_secrets) + except NotFoundError: + logger.error(f"{kind} '{name}' not found in {scope_desc}, skipping backup") + not_found_count = 1 + return (backed_up_count, not_found_count, failed_count, discovered_secrets) + else: + # Backup all resources of this kind + 
logger.info(f"Backing up all {kind} resources from {scope_desc} (API version: {api_version}){label_desc}") + + # Build get parameters + get_params = {} + if namespace: + get_params['namespace'] = namespace + if label_selector: + get_params['label_selector'] = label_selector + + resources = resourceAPI.get(**get_params) + resources_to_process = resources.items + + # Process each resource + for resource in resources_to_process: + resource_name = resource["metadata"]["name"] + resource_dict = resource.to_dict() + + # Extract secrets from this resource if it's not a Secret itself + if kind != 'Secret': + secrets = extract_secrets_from_dict(resource_dict.get('spec', {})) + if secrets: + logger.info(f"Found {len(secrets)} secret reference(s) in {kind} '{resource_name}': {', '.join(sorted(secrets))}") + discovered_secrets.update(secrets) + + # Backup the resource + resource_backup_path = f"{backup_path}/resources/{kind.lower()}s" + createBackupDirectories([resource_backup_path]) + resource_file_path = f"{resource_backup_path}/{resource_name}.yaml" + filtered_resource = filterResourceData(resource_dict) + if copyContentsToYamlFile(resource_file_path, filtered_resource): + logger.info(f"Successfully backed up {kind} '{resource_name}' to '{resource_file_path}'") + backed_up_count += 1 + else: + logger.error(f"Failed to back up {kind} '{resource_name}' to '{resource_file_path}'") + failed_count += 1 + + if backed_up_count > 0: + logger.info(f"Successfully backed up {backed_up_count} {kind} resource(s)") + elif not name: + logger.info(f"No {kind} resources found in {scope_desc}{label_desc}") + + return (backed_up_count, not_found_count, failed_count, discovered_secrets) + + except NotFoundError: + if name: + logger.info(f"{kind} '{name}' not found in {scope_desc}") + not_found_count = 1 + else: + logger.info(f"No {kind} resources found in {scope_desc}{label_desc}") + return (backed_up_count, not_found_count, failed_count, discovered_secrets) + except Exception as e: + 
logger.error(f"Error backing up {kind} resources: {e}") + failed_count = 1 + return (backed_up_count, not_found_count, failed_count, discovered_secrets) + + +def uploadToS3( + file_path: str, + bucket_name: str, + object_name=None, + endpoint_url=None, + aws_access_key_id=None, + aws_secret_access_key=None, + region_name=None +) -> bool: + """ + Upload a tar.gz file to S3-compatible storage. + + Args: + file_path: Path to the tar.gz file to upload + bucket_name: Name of the S3 bucket + object_name: S3 object name. If not specified, file_path basename is used + endpoint_url: S3-compatible endpoint URL (e.g., for MinIO, Ceph) + aws_access_key_id: AWS access key ID (if not using environment variables) + aws_secret_access_key: AWS secret access key (if not using environment variables) + region_name: AWS region name (default: us-east-1) + + Returns: + bool: True if file was uploaded successfully, False otherwise + """ + # If S3 object_name was not specified, use file_path basename + if object_name is None: + object_name = os.path.basename(file_path) + + # Validate file exists and is a tar.gz file + if not os.path.exists(file_path): + logger.error(f"File not found: {file_path}") + return False + + if not file_path.endswith('.tar.gz'): + logger.warning(f"File does not have .tar.gz extension: {file_path}") + + # Configure S3 client + try: + s3_config = {} + + if endpoint_url: + s3_config['endpoint_url'] = endpoint_url + if aws_access_key_id and aws_secret_access_key: + s3_config['aws_access_key_id'] = aws_access_key_id + s3_config['aws_secret_access_key'] = aws_secret_access_key + if region_name: + s3_config['region_name'] = region_name + else: + s3_config['region_name'] = 'us-east-1' + + s3_client = boto3.client('s3', **s3_config) + + # Upload the file + logger.info(f"Uploading {file_path} to s3://{bucket_name}/{object_name}") + + file_size = os.path.getsize(file_path) + logger.info(f"File size: {file_size / (1024 * 1024):.2f} MB") + + s3_client.upload_file(file_path, 
bucket_name, object_name) + + logger.info(f"Successfully uploaded {file_path} to s3://{bucket_name}/{object_name}") + return True + + except FileNotFoundError: + logger.error(f"File not found: {file_path}") + return False + except NoCredentialsError: + logger.error("AWS credentials not found. Please provide credentials or configure environment variables.") + return False + except ClientError as e: + error_code = e.response.get('Error', {}).get('Code', 'Unknown') + error_message = e.response.get('Error', {}).get('Message', str(e)) + logger.error(f"S3 client error ({error_code}): {error_message}") + return False + except Exception as e: + logger.error(f"Unexpected error uploading to S3: {e}") + return False + + +def downloadFromS3( + bucket_name: str, + object_name: str, + local_dir: str, + endpoint_url=None, + aws_access_key_id=None, + aws_secret_access_key=None, + region_name=None +) -> bool: + """ + Download a tar.gz file from S3-compatible storage to a backup directory. + + Args: + bucket_name: Name of the S3 bucket + object_name: S3 object name to download + local_dir: Directory path where the file will be downloaded + endpoint_url: S3-compatible endpoint URL (e.g., for MinIO, Ceph) + aws_access_key_id: AWS access key ID (if not using environment variables) + aws_secret_access_key: AWS secret access key (if not using environment variables) + region_name: AWS region name (default: us-east-1) + + Returns: + bool: True if file was downloaded successfully, False otherwise + """ + # Validate backup directory + if not os.path.exists(local_dir): + logger.info(f"Backup directory does not exist, creating: {local_dir}") + try: + os.makedirs(local_dir, exist_ok=True) + except Exception as e: + logger.error(f"Failed to create backup directory {local_dir}: {e}") + return False + + # Construct the full file path + file_path = os.path.join(local_dir, object_name) + + # Warn if file doesn't have .tar.gz extension + if not object_name.endswith('.tar.gz'): + 
logger.warning(f"Object does not have .tar.gz extension: {object_name}") + + # Configure S3 client + try: + s3_config = {} + + if endpoint_url: + s3_config['endpoint_url'] = endpoint_url + if aws_access_key_id and aws_secret_access_key: + s3_config['aws_access_key_id'] = aws_access_key_id + s3_config['aws_secret_access_key'] = aws_secret_access_key + if region_name: + s3_config['region_name'] = region_name + else: + s3_config['region_name'] = 'us-east-1' + + s3_client = boto3.client('s3', **s3_config) + + # Check if object exists and get its size + logger.info(f"Downloading s3://{bucket_name}/{object_name} to {file_path}") + + try: + response = s3_client.head_object(Bucket=bucket_name, Key=object_name) + file_size = response.get('ContentLength', 0) + logger.info(f"Object size: {file_size / (1024 * 1024):.2f} MB") + except ClientError as e: + if e.response.get('Error', {}).get('Code') == '404': + logger.error(f"Object not found in S3: s3://{bucket_name}/{object_name}") + return False + raise + + # Download the file + s3_client.download_file(bucket_name, object_name, file_path) + + # Verify the downloaded file exists + if os.path.exists(file_path): + downloaded_size = os.path.getsize(file_path) + logger.info(f"Successfully downloaded {object_name} to {file_path}") + logger.info(f"Downloaded file size: {downloaded_size / (1024 * 1024):.2f} MB") + return True + else: + logger.error(f"Download completed but file not found at {file_path}") + return False + + except NoCredentialsError: + logger.error("AWS credentials not found. 
Please provide credentials or configure environment variables.") + return False + except ClientError as e: + error_code = e.response.get('Error', {}).get('Code', 'Unknown') + error_message = e.response.get('Error', {}).get('Message', str(e)) + logger.error(f"S3 client error ({error_code}): {error_message}") + return False + except Exception as e: + logger.error(f"Unexpected error downloading from S3: {e}") + return False diff --git a/src/mas/devops/data/catalogs/v9-260226-amd64.yaml b/src/mas/devops/data/catalogs/v9-260226-amd64.yaml index 1b6e5300..0934090d 100644 --- a/src/mas/devops/data/catalogs/v9-260226-amd64.yaml +++ b/src/mas/devops/data/catalogs/v9-260226-amd64.yaml @@ -170,4 +170,6 @@ editorial: - IBM Data Dictionary v1.1 known_issues: - title: Customers using **Maximo Assist v8.7 or v8.8** should not update and must instead contact IBM Support for guidance regarding the removal of IBM Watson Discovery and upgrading to Maximo Assist v9.0 - - title: A known issue exists in the January 29, 2026 release affecting HSE and Oil & Gas (9.0.23 / 9.1.64). Customers with HSE installed should avoid upgrading to the January release. Installation of HSE or Oil & Gas on Manage 9.0.x / 9.1.x should be deferred until the February 2026 patch. \ No newline at end of file + - title: A known issue exists in the January 29, 2026 release affecting HSE and Oil & Gas (9.0.23 / 9.1.64). Customers with HSE installed should avoid upgrading to the January release. Installation of HSE or Oil & Gas on Manage 9.0.x / 9.1.x should be deferred until the February 2026 patch. + - title: A known issue has been identified in Db2u warehouse operator in this catalog. Customers restoring/upgrading Db2, intermittently get db2 `SQL0290N Table space access is not allowed. SQLSTATE=55039` error causing connectivity issue between MAS and Db2. 
If you are facing this problem, please refer to the workaround provided in this [documentation](https://www.ibm.com/docs/en/cloud-paks/cp-data/5.3.x?topic=SSQNUZ_5.3.x/svc-db2/known-issues-dbs.htm#known-issues-dbs__db2-instance-fails-sql0290n__title__1). + - title: A known issue exists in the February 26, 2026 release affecting IBM Maximo Real Estate and Facilities. Customers with MREF installed should avoid upgrading to the February 9.1.8 release. Installation of MREF 9.1.x should be deferred until the March 2026 patch. \ No newline at end of file diff --git a/src/mas/devops/data/catalogs/v9-260305-amd64.yaml b/src/mas/devops/data/catalogs/v9-260305-amd64.yaml new file mode 100644 index 00000000..44d16db3 --- /dev/null +++ b/src/mas/devops/data/catalogs/v9-260305-amd64.yaml @@ -0,0 +1,161 @@ +--- +# Case bundle configuration for IBM Maximo Operator Catalog 260305 (AMD64) +# ----------------------------------------------------------------------------- +# In the future this won't be necessary as we'll be able to mirror from the +# catalog itself, but not everything in the catalog supports this yet (including MAS) +# so we need to use the CASE bundle mirror process still. 
+ +catalog_digest: sha256:192fdb38b135b828916fba8d77c6bee10e6c4f33b4cfd252576ca120e442784b + +ocp_compatibility: +- "4.16" +- "4.17" +- "4.18" +- "4.19" +- "4.20" + +# Dependencies +# ----------------------------------------------------------------------------- +ibm_licensing_version: 4.2.17 # Operator version 4.2.14 (https://github.com/IBM/cloud-pak/tree/master/repo/case/ibm-licensing) +common_svcs_version: 4.13.0 # Operator version 4.13.0 (https://github.com/IBM/cloud-pak/tree/master/repo/case/ibm-cp-common-services) +common_svcs_version_1: 4.11.0 # Additional version 4.11.0 + +cp4d_platform_version: 5.2.0+20250709.170324 # Operator version 5.2.0 (https://github.com/IBM/cloud-pak/tree/master/repo/case/ibm-cp-datacore/) +ibm_zen_version: 6.2.0+20250530.152516.232 # For CPD5 ibm-zen has to be explicitily mirrored + +db2u_version: 7.3.1+20250821.161005.16793 # Operator version 110509.0.6 to find the version 7.3.1+20250821.161005.16793, search db2u-operator digest on repo (https://github.com/IBM/cloud-pak/tree/master/repo/case/ibm-db2uoperator) +db2_channel_default: v110509.0 # Default Channel version for db2u-operator +events_version: 5.0.1 # Operator version 5.0.1 (https://github.com/IBM/cloud-pak/tree/master/repo/case/ibm-events-operator) +uds_version: 2.0.12 # Operator version 2.0.12 # sticking to 2.0.12 version # Please do Not Change +sls_version: 3.12.5 # No Update # Operator version 3.12.5 (https://github.ibm.com/maximoappsuite/ibm-sls/releases) +tsm_version: 1.7.4 # Updated # Operator version 1.7.4 (https://github.ibm.com/maximoappsuite/ibm-truststore-mgr/releases) +dd_version: 1.1.22 # Operator version 1.1.22 (https://github.ibm.com/maximoappsuite/ibm-data-dictionary/releases) +appconnect_version: 6.2.0 # Operator version 6.2.0 # sticking to 6.2.0 version # Please do Not Change +wsl_version: 11.0.0+20250521.202913.73 # used for wsl and wsl_runtimes unless wsl_runtimes_version also specified +wsl_runtimes_version: 11.0.0+20250515.090949.21 # cpd 5.1.3 uses 
version 10.3.0 of wsl runtimes but only 10.2.0 for wsl itself +wml_version: 11.0.0+20250530.193146.282 # Operator version 5.2.0 +postgress_version: 5.16.0+20250827.110911.2626 # ibm-cpd-cloud-native-postgresql-operator 5.2.0 cp4d + +ccs_build: 11.0.0+20250605.130237.468 # cpd 5.2.0 using ccs build +# datarefinery_build: +20240517.202103.146 + +spark_version: 11.0.0+20250604.163055.2097 # Operator version 5.2.0 +cognos_version: 28.0.0+20250515.175459.10054 # Operator version 25.0.0 +couchdb_version: 1.0.13 # Operator version 2.2.1 (1.0.13) sticking with 1.0.13 # (This is required for Assist 9.0, https://github.com/IBM/cloud-pak/blob/master/repo/case/ibm-couchdb/index.yaml) +elasticsearch_version: 1.1.2667 # Operator version 1.1.2667 # used in cpd 5.1.3 only +opensearch_version: 1.1.2494 # Operator version 1.1.2494 + +# Maximo Application Suite +# ----------------------------------------------------------------------------- +mas_core_version: + 9.2.x-feature: 9.2.0-pre.stable_12926 # No Update + 9.1.x: 9.1.10 # No Update + 9.0.x: 9.0.22 # Updated + 8.10.x: 8.10.35 # No Update + 8.11.x: 8.11.32 # No Update +mas_assist_version: + 9.1.x: 9.1.8 # No Update + 9.0.x: 9.0.14 # No Update + 8.10.x: 8.7.8 # No Update + 8.11.x: 8.8.7 # No Update +mas_hputilities_version: + 9.1.x: "" # Not Supported + 9.0.x: "" # Not Supported + 8.10.x: 8.6.7 # No Update + 8.11.x: "" # Not Supported +mas_iot_version: + 9.1.x: 9.1.8 # No Update + 9.0.x: 9.0.17 # No Update + 8.10.x: 8.7.31 # No Update + 8.11.x: 8.8.28 # No Update +mas_manage_version: + 9.2.x-feature: 9.2.0-pre.stable_13730 # No Update + 9.1.x: 9.1.11 # No Update + 9.0.x: 9.0.23 # No Update + 8.10.x: 8.6.36 # No Update + 8.11.x: 8.7.30 # No Update +mas_monitor_version: + 9.1.x: 9.1.8 # No Update + 9.0.x: 9.0.18 # No Update + 8.10.x: 8.10.28 # No Update + 8.11.x: 8.11.26 # No Update +mas_optimizer_version: + 9.2.x-feature: 9.2.0-pre.stable_12739 # No Update + 9.1.x: 9.1.9 # No Update + 9.0.x: 9.0.20 # No Update + 8.10.x: 8.4.26 # 
Need to update + 8.11.x: 8.5.26 # No Update +mas_predict_version: + 9.1.x: 9.1.5 # No Update + 9.0.x: 9.0.12 # No Update + 8.10.x: 8.8.13 # No Update + 8.11.x: 8.9.15 # No Update +mas_visualinspection_version: + 9.2.x-feature: 9.2.0-pre.stable_12598 # No Update + 9.1.x: 9.1.10 # No Update + 9.0.x: 9.0.17 # No Update + 8.10.x: 8.8.4 # No Update + 8.11.x: 8.9.20 # No Update +mas_facilities_version: + 9.1.x: 9.1.8 # No Update + 9.0.x: "" # Not Supported + 8.10.x: "" # Not Supported + 8.11.x: "" # Not Supported + + +# Maximo AI Service +# ------------------------------------------------------------------------------ +aiservice_version: + 9.2.x-feature: 9.2.0-pre.stable_12908 # No Update + 9.1.x: 9.1.12 # No Update + + +# Extra Images for UDS +# ------------------------------------------------------------------------------ +uds_extras_version: 1.5.0 + +# Extra Images for Mongo +# ------------------------------------------------------------------------------ +mongo_extras_version_default: 8.0.17 + +# Variables used to mirror additional mongo image versions +mongo_extras_version_4: 4.4.21 +mongo_extras_version_5: 5.0.23 +mongo_extras_version_6: 6.0.12 +mongo_extras_version_7: 7.0.23 +mongo_extras_version_8: 8.0.17 + +# Extra Images for Db2u +# ------------------------------------------------------------------------------ +db2u_extras_version: 1.0.6 # No Update +db2u_filter: db2 + +# Extra Images for CCS used for PCD 5.2.0 Hotfix +# ------------------------------------------------------------------------------ +ccs_extras_version: 11.0.0 + +# Extra Images for IBM Watson Discovery +# ------------------------------------------------------------------------------ +#wd_extras_version: 1.0.4 + +# Extra Images for Amlen +# ------------------------------------------------------------------------------ +amlen_extras_version: 1.1.3 + +# Default Cloud Pak for Data version +# ------------------------------------------------------------------------------ +cpd_product_version_default: 
5.2.0 + +manage_extras_913: 9.1.3 +minio_version: RELEASE.2025-06-13T11-33-47Z + +editorial: + whats_new: + - title: '**Security updates and bug fixes**' + details: + - IBM Maximo Application Suite Core Platform v9.0.22 + - IBM Truststore Manager v1.7 + known_issues: + - title: Customers using **Maximo Assist v8.7 or v8.8** should not update and must instead contact IBM Support for guidance regarding the removal of IBM Watson Discovery and upgrading to Maximo Assist v9.0 + - title: A known issue exists in the January 29, 2026 release affecting HSE and Oil & Gas (9.0.23 / 9.1.64). Customers with HSE installed should avoid upgrading to the January release. Installation of HSE or Oil & Gas on Manage 9.0.x / 9.1.x should be deferred until the February 2026 patch. + - title: A known issue has been identified in Db2u warehouse operator in this catalog. Customers restoring/upgrading Db2, intermittently get db2 `SQL0290N Table space access is not allowed. SQLSTATE=55039` error causing connectivity issue between MAS and Db2. If you are facing this problem, please refer to the workaround provided in this [documentation](https://www.ibm.com/docs/en/cloud-paks/cp-data/5.3.x?topic=SSQNUZ_5.3.x/svc-db2/known-issues-dbs.htm#known-issues-dbs__db2-instance-fails-sql0290n__title__1). \ No newline at end of file diff --git a/src/mas/devops/data/catalogs/v9-260305-ppc64le.yaml b/src/mas/devops/data/catalogs/v9-260305-ppc64le.yaml new file mode 100644 index 00000000..f5db6105 --- /dev/null +++ b/src/mas/devops/data/catalogs/v9-260305-ppc64le.yaml @@ -0,0 +1,59 @@ +--- +# Case bundle configuration for IBM Maximo Operator Catalog 260305 (PPC) +# ----------------------------------------------------------------------------- +# In the future this won't be necessary as we'll be able to mirror from the +# catalog itself, but not everything in the catalog supports this yet (including MAS) +# so we need to use the CASE bundle mirror process still. 
+ +catalog_digest: sha256:5defbc1701537d0f5dcb60de1cf43fe687dde1d0d10074b21e82fbc222aa81de + +ocp_compatibility: +- "4.16" +- "4.17" +- "4.18" +- "4.19" +- "4.20" + +uds_version: 2.0.12 # Operator version 2.0.12 # sticking to 2.0.12 version # Please do Not Change +sls_version: 3.12.5 # No Update # Operator version 3.12.5 (https://github.ibm.com/maximoappsuite/ibm-sls/releases) +tsm_version: 1.7.4 # Updated # Operator version 1.7.4 (https://github.ibm.com/maximoappsuite/ibm-truststore-mgr/releases) +db2u_version: 7.3.1+20250821.161005.16793 # Operator version 110509.0.6 to find the version 7.3.1+20250821.161005.16793, search db2u-operator digest on repo (https://github.com/IBM/cloud-pak/tree/master/repo/case/ibm-db2uoperator) + +# Maximo Application Suite +# ----------------------------------------------------------------------------- +mas_core_version: + 9.2.x-feature: 9.2.0-pre.stable_12926 # No Update + 9.1.x: 9.1.10 # No Update + 9.0.x: 9.0.22 # Updated + 8.10.x: "" # Not Supported + 8.11.x: "" # Not Supported +mas_manage_version: + 9.2.x-feature: 9.2.0-pre.stable_13730 # No Update + 9.1.x: 9.1.11 # No Update + 9.0.x: 9.0.23 # No Update + 8.10.x: "" # Not Supported + 8.11.x: "" # Not Supported + +# Extra Images for UDS +# ------------------------------------------------------------------------------ +uds_extras_version: 1.5.0 + +# Extra Images for Mongo +# ------------------------------------------------------------------------------ +mongo_extras_version_default: 8.0.17 + +# Variables used to mirror additional mongo image versions +mongo_extras_version_4: 4.4.21 +mongo_extras_version_5: 5.0.23 +mongo_extras_version_6: 6.0.12 +mongo_extras_version_7: 7.0.12 +mongo_extras_version_8: 8.0.17 + +editorial: + whats_new: + - title: '**Security updates and bug fixes**' + details: + - IBM Maximo Application Suite Core Platform v9.0.22 + - IBM Truststore Manager v1.7 + known_issues: + - title: A known issue exists in the January 29, 2026 release affecting HSE and Oil & 
Gas (9.0.23 / 9.1.64). Customers with HSE installed should avoid upgrading to the January release. Installation of HSE or Oil & Gas on Manage 9.0.x / 9.1.x should be deferred until the February 2026 patch. \ No newline at end of file diff --git a/src/mas/devops/data/catalogs/v9-260305-s390x.yaml b/src/mas/devops/data/catalogs/v9-260305-s390x.yaml new file mode 100644 index 00000000..f7cb12ee --- /dev/null +++ b/src/mas/devops/data/catalogs/v9-260305-s390x.yaml @@ -0,0 +1,59 @@ +--- +# Case bundle configuration for IBM Maximo Operator Catalog 260305 (Z) +# ----------------------------------------------------------------------------- +# In the future this won't be necessary as we'll be able to mirror from the +# catalog itself, but not everything in the catalog supports this yet (including MAS) +# so we need to use the CASE bundle mirror process still. + +catalog_digest: sha256:9d26c342e18eabc90b2c0a4e6cd0fde33b2570ba0a562b9d2e172dda5e4daa37 + +ocp_compatibility: +- "4.16" +- "4.17" +- "4.18" +- "4.19" +- "4.20" + +uds_version: 2.0.12 # Operator version 2.0.12 # sticking to 2.0.12 version # Please do Not Change +sls_version: 3.12.5 # No Update # Operator version 3.12.5 (https://github.ibm.com/maximoappsuite/ibm-sls/releases) +tsm_version: 1.7.4 # Updated # Operator version 1.7.4 (https://github.ibm.com/maximoappsuite/ibm-truststore-mgr/releases) +db2u_version: 7.3.1+20250821.161005.16793 # Operator version 110509.0.6 to find the version 7.3.1+20250821.161005.16793, search db2u-operator digest on repo (https://github.com/IBM/cloud-pak/tree/master/repo/case/ibm-db2uoperator) + +# Maximo Application Suite +# ----------------------------------------------------------------------------- +mas_core_version: + 9.2.x-feature: 9.2.0-pre.stable_12926 # No Update + 9.1.x: 9.1.10 # No Update + 9.0.x: 9.0.22 # Updated + 8.10.x: "" # Not Supported + 8.11.x: "" # Not Supported +mas_manage_version: + 9.2.x-feature: 9.2.0-pre.stable_13730 # No Update + 9.1.x: 9.1.11 # No Update + 
9.0.x: 9.0.23 # No Update + 8.10.x: "" # Not Supported + 8.11.x: "" # Not Supported + +# Extra Images for UDS +# ------------------------------------------------------------------------------ +uds_extras_version: 1.5.0 + +# Extra Images for Mongo +# ------------------------------------------------------------------------------ +mongo_extras_version_default: 8.0.17 + +# Variables used to mirror additional mongo image versions +mongo_extras_version_4: 4.4.21 +mongo_extras_version_5: 5.0.23 +mongo_extras_version_6: 6.0.12 +mongo_extras_version_7: 7.0.12 +mongo_extras_version_8: 8.0.17 + +editorial: + whats_new: + - title: '**Security updates and bug fixes**' + details: + - IBM Maximo Application Suite Core Platform v9.0.22 + - IBM Truststore Manager v1.7 + known_issues: + - title: A known issue exists in the January 29, 2026 release affecting HSE and Oil & Gas (9.0.23 / 9.1.64). Customers with HSE installed should avoid upgrading to the January release. Installation of HSE or Oil & Gas on Manage 9.0.x / 9.1.x should be deferred until the February 2026 patch. \ No newline at end of file diff --git a/src/mas/devops/data/catalogs/v9-260313-amd64.yaml b/src/mas/devops/data/catalogs/v9-260313-amd64.yaml new file mode 100644 index 00000000..f8dd0564 --- /dev/null +++ b/src/mas/devops/data/catalogs/v9-260313-amd64.yaml @@ -0,0 +1,165 @@ +--- +# Case bundle configuration for IBM Maximo Operator Catalog 260313 (AMD64) +# ----------------------------------------------------------------------------- +# In the future this won't be necessary as we'll be able to mirror from the +# catalog itself, but not everything in the catalog supports this yet (including MAS) +# so we need to use the CASE bundle mirror process still. 
+ +catalog_digest: sha256:66da7dea7160091ed39a96862cd0046df20c2eeaeb262778dcd5d37b11a3f26b + +ocp_compatibility: +- "4.16" +- "4.17" +- "4.18" +- "4.19" +- "4.20" + +# Dependencies +# ----------------------------------------------------------------------------- +ibm_licensing_version: 4.2.17 # Operator version 4.2.14 (https://github.com/IBM/cloud-pak/tree/master/repo/case/ibm-licensing) +common_svcs_version: 4.13.0 # Operator version 4.13.0 (https://github.com/IBM/cloud-pak/tree/master/repo/case/ibm-cp-common-services) +common_svcs_version_1: 4.11.0 # Additional version 4.11.0 + +cp4d_platform_version: 5.2.0+20250709.170324 # Operator version 5.2.0 (https://github.com/IBM/cloud-pak/tree/master/repo/case/ibm-cp-datacore/) +ibm_zen_version: 6.2.0+20250530.152516.232 # For CPD5 ibm-zen has to be explicitily mirrored + +db2u_version: 7.3.1+20250821.161005.16793 # Operator version 110509.0.6 to find the version 7.3.1+20250821.161005.16793, search db2u-operator digest on repo (https://github.com/IBM/cloud-pak/tree/master/repo/case/ibm-db2uoperator) +db2_channel_default: v110509.0 # Default Channel version for db2u-operator +events_version: 5.0.1 # Operator version 5.0.1 (https://github.com/IBM/cloud-pak/tree/master/repo/case/ibm-events-operator) +uds_version: 2.0.12 # Operator version 2.0.12 # sticking to 2.0.12 version # Please do Not Change +sls_version: 3.12.6 # Updated # Operator version 3.12.5 (https://github.ibm.com/maximoappsuite/ibm-sls/releases) +tsm_version: 1.7.5 # Updated # Operator version 1.7.4 (https://github.ibm.com/maximoappsuite/ibm-truststore-mgr/releases) +dd_version: 1.1.22 # Operator version 1.1.22 (https://github.ibm.com/maximoappsuite/ibm-data-dictionary/releases) +appconnect_version: 6.2.0 # Operator version 6.2.0 # sticking to 6.2.0 version # Please do Not Change +wsl_version: 11.0.0+20250521.202913.73 # used for wsl and wsl_runtimes unless wsl_runtimes_version also specified +wsl_runtimes_version: 11.0.0+20250515.090949.21 # cpd 5.1.3 uses 
version 10.3.0 of wsl runtimes but only 10.2.0 for wsl itself +wml_version: 11.0.0+20250530.193146.282 # Operator version 5.2.0 +postgress_version: 5.16.0+20250827.110911.2626 # ibm-cpd-cloud-native-postgresql-operator 5.2.0 cp4d + +ccs_build: 11.0.0+20250605.130237.468 # cpd 5.2.0 using ccs build +# datarefinery_build: +20240517.202103.146 + +spark_version: 11.0.0+20250604.163055.2097 # Operator version 5.2.0 +cognos_version: 28.0.0+20250515.175459.10054 # Operator version 25.0.0 +couchdb_version: 1.0.13 # Operator version 2.2.1 (1.0.13) sticking with 1.0.13 # (This is required for Assist 9.0, https://github.com/IBM/cloud-pak/blob/master/repo/case/ibm-couchdb/index.yaml) +elasticsearch_version: 1.1.2667 # Operator version 1.1.2667 # used in cpd 5.1.3 only +opensearch_version: 1.1.2494 # Operator version 1.1.2494 + +# Maximo Application Suite +# ----------------------------------------------------------------------------- +mas_core_version: + 9.2.x-feature: 9.2.0-pre.stable_12926 # No Update + 9.1.x: 9.1.11 # Updated + 9.0.x: 9.0.22 # No Update + 8.10.x: 8.10.35 # No Update + 8.11.x: 8.11.32 # No Update +mas_assist_version: + 9.1.x: 9.1.8 # No Update + 9.0.x: 9.0.14 # No Update + 8.10.x: 8.7.8 # No Update + 8.11.x: 8.8.7 # No Update +mas_hputilities_version: + 9.1.x: "" # Not Supported + 9.0.x: "" # Not Supported + 8.10.x: 8.6.7 # No Update + 8.11.x: "" # Not Supported +mas_iot_version: + 9.1.x: 9.1.8 # No Update + 9.0.x: 9.0.17 # No Update + 8.10.x: 8.7.31 # No Update + 8.11.x: 8.8.28 # No Update +mas_manage_version: + 9.2.x-feature: 9.2.0-pre.stable_13730 # No Update + 9.1.x: 9.1.12 # Updated + 9.0.x: 9.0.23 # No Update + 8.10.x: 8.6.36 # No Update + 8.11.x: 8.7.30 # No Update +mas_monitor_version: + 9.1.x: 9.1.8 # No Update + 9.0.x: 9.0.18 # No Update + 8.10.x: 8.10.28 # No Update + 8.11.x: 8.11.26 # No Update +mas_optimizer_version: + 9.2.x-feature: 9.2.0-pre.stable_12739 # No Update + 9.1.x: 9.1.9 # No Update + 9.0.x: 9.0.20 # No Update + 8.10.x: 8.4.26 # No 
Update + 8.11.x: 8.5.26 # No Update +mas_predict_version: + 9.1.x: 9.1.5 # No Update + 9.0.x: 9.0.12 # No Update + 8.10.x: 8.8.13 # No Update + 8.11.x: 8.9.15 # No Update +mas_visualinspection_version: + 9.2.x-feature: 9.2.0-pre.stable_12598 # No Update + 9.1.x: 9.1.10 # No Update + 9.0.x: 9.0.17 # No Update + 8.10.x: 8.8.4 # No Update + 8.11.x: 8.9.20 # No Update +mas_facilities_version: + 9.1.x: 9.1.8 # No Update + 9.0.x: "" # Not Supported + 8.10.x: "" # Not Supported + 8.11.x: "" # Not Supported + + +# Maximo AI Service +# ------------------------------------------------------------------------------ +aiservice_version: + 9.2.x-feature: 9.2.0-pre.stable_12908 # No Update + 9.1.x: 9.1.12 # No Update + + +# Extra Images for UDS +# ------------------------------------------------------------------------------ +uds_extras_version: 1.5.0 + +# Extra Images for Mongo +# ------------------------------------------------------------------------------ +mongo_extras_version_default: 8.0.17 + +# Variables used to mirror additional mongo image versions +mongo_extras_version_4: 4.4.21 +mongo_extras_version_5: 5.0.23 +mongo_extras_version_6: 6.0.12 +mongo_extras_version_7: 7.0.23 +mongo_extras_version_8: 8.0.17 + +# Extra Images for Db2u +# ------------------------------------------------------------------------------ +db2u_extras_version: 1.0.6 # No Update +db2u_filter: db2 + +# Extra Images for CCS used for PCD 5.2.0 Hotfix +# ------------------------------------------------------------------------------ +ccs_extras_version: 11.0.0 + +# Extra Images for IBM Watson Discovery +# ------------------------------------------------------------------------------ +#wd_extras_version: 1.0.4 + +# Extra Images for Amlen +# ------------------------------------------------------------------------------ +amlen_extras_version: 1.1.4 + +# Default Cloud Pak for Data version +# ------------------------------------------------------------------------------ +cpd_product_version_default: 5.2.0 + 
+manage_extras_913: 9.1.3 +minio_version: RELEASE.2025-06-13T11-33-47Z + +editorial: + whats_new: + - title: '**Security updates and bug fixes**' + details: + - IBM Maximo Application Suite Core Platform v9.1.11 + - IBM Maximo Manage 9.1.12 + - IBM Truststore Manager v1.7 + - IBM Suite License Service v3.12 + - Amlen v1.1 + known_issues: + - title: Customers using **Maximo Assist v8.7 or v8.8** should not update and must instead contact IBM Support for guidance regarding the removal of IBM Watson Discovery and upgrading to Maximo Assist v9.0 + - title: A known issue exists in the January 29, 2026 release affecting HSE and Oil & Gas (9.0.23 / 9.1.64). Customers with HSE installed should avoid upgrading to the January release. Installation of HSE or Oil & Gas on Manage 9.0.x / 9.1.x should be deferred until the February 2026 patch. + - title: A known issue has been identified in Db2u warehouse operator in this catalog. Customers restoring/upgrading Db2, intermittently get db2 `SQL0290N Table space access is not allowed. SQLSTATE=55039` error causing connectivity issue between MAS and Db2. If you are facing this problem, please refer to the workaround provided in this [documentation](https://www.ibm.com/docs/en/cloud-paks/cp-data/5.3.x?topic=SSQNUZ_5.3.x/svc-db2/known-issues-dbs.htm#known-issues-dbs__db2-instance-fails-sql0290n__title__1). + - title: A known issue exists in the February 26, 2026 release affecting IBM Maximo Real Estate and Facilities. Customers with MREF installed should avoid upgrading to the February 9.1.8 release. Installation of MREF 9.1.x should be deferred until the March 2026 patch. 
\ No newline at end of file diff --git a/src/mas/devops/data/catalogs/v9-260313-ppc64le.yaml b/src/mas/devops/data/catalogs/v9-260313-ppc64le.yaml new file mode 100644 index 00000000..6aca0f7c --- /dev/null +++ b/src/mas/devops/data/catalogs/v9-260313-ppc64le.yaml @@ -0,0 +1,61 @@ +--- +# Case bundle configuration for IBM Maximo Operator Catalog 260313 (PPC) +# ----------------------------------------------------------------------------- +# In the future this won't be necessary as we'll be able to mirror from the +# catalog itself, but not everything in the catalog supports this yet (including MAS) +# so we need to use the CASE bundle mirror process still. + +catalog_digest: sha256:e15e7288ac2e79cc5fef02ae0fdb2a528b5447b03cf1d7bdc50b4893ba7474f5 + +ocp_compatibility: +- "4.16" +- "4.17" +- "4.18" +- "4.19" +- "4.20" + +uds_version: 2.0.12 # Operator version 2.0.12 # sticking to 2.0.12 version # Please do Not Change +sls_version: 3.12.6 # Updated # Operator version 3.12.5 (https://github.ibm.com/maximoappsuite/ibm-sls/releases) +tsm_version: 1.7.5 # Updated # Operator version 1.7.4 (https://github.ibm.com/maximoappsuite/ibm-truststore-mgr/releases) +db2u_version: 7.3.1+20250821.161005.16793 # Operator version 110509.0.6 to find the version 7.3.1+20250821.161005.16793, search db2u-operator digest on repo (https://github.com/IBM/cloud-pak/tree/master/repo/case/ibm-db2uoperator) + +# Maximo Application Suite +# ----------------------------------------------------------------------------- +mas_core_version: + 9.2.x-feature: 9.2.0-pre.stable_12926 # No Update + 9.1.x: 9.1.11 # Updated + 9.0.x: 9.0.22 # No Update + 8.10.x: "" # Not Supported + 8.11.x: "" # Not Supported +mas_manage_version: + 9.2.x-feature: 9.2.0-pre.stable_13730 # No Update + 9.1.x: 9.1.12 # Updated + 9.0.x: 9.0.23 # No Update + 8.10.x: "" # Not Supported + 8.11.x: "" # Not Supported + +# Extra Images for UDS +# ------------------------------------------------------------------------------ 
+uds_extras_version: 1.5.0 + +# Extra Images for Mongo +# ------------------------------------------------------------------------------ +mongo_extras_version_default: 8.0.17 + +# Variables used to mirror additional mongo image versions +mongo_extras_version_4: 4.4.21 +mongo_extras_version_5: 5.0.23 +mongo_extras_version_6: 6.0.12 +mongo_extras_version_7: 7.0.12 +mongo_extras_version_8: 8.0.17 + +editorial: + whats_new: + - title: '**Security updates and bug fixes**' + details: + - IBM Maximo Application Suite Core Platform v9.1.11 + - IBM Maximo Manage 9.1.12 + - IBM Truststore Manager v1.7 + - IBM Suite License Service v3.12 + known_issues: + - title: A known issue exists in the January 29, 2026 release affecting HSE and Oil & Gas (9.0.23 / 9.1.64). Customers with HSE installed should avoid upgrading to the January release. Installation of HSE or Oil & Gas on Manage 9.0.x / 9.1.x should be deferred until the February 2026 patch. \ No newline at end of file diff --git a/src/mas/devops/data/catalogs/v9-260313-s390x.yaml b/src/mas/devops/data/catalogs/v9-260313-s390x.yaml new file mode 100644 index 00000000..20fd9c94 --- /dev/null +++ b/src/mas/devops/data/catalogs/v9-260313-s390x.yaml @@ -0,0 +1,61 @@ +--- +# Case bundle configuration for IBM Maximo Operator Catalog 260313 (Z) +# ----------------------------------------------------------------------------- +# In the future this won't be necessary as we'll be able to mirror from the +# catalog itself, but not everything in the catalog supports this yet (including MAS) +# so we need to use the CASE bundle mirror process still. 
+ +catalog_digest: sha256:6fbd933bf20acb2b93e7dcbf97a470cd61ece22a0560ba197e014255223f8c7d + +ocp_compatibility: +- "4.16" +- "4.17" +- "4.18" +- "4.19" +- "4.20" + +uds_version: 2.0.12 # Operator version 2.0.12 # sticking to 2.0.12 version # Please do Not Change +sls_version: 3.12.6 # Updated # Operator version 3.12.5 (https://github.ibm.com/maximoappsuite/ibm-sls/releases) +tsm_version: 1.7.5 # Updated # Operator version 1.7.4 (https://github.ibm.com/maximoappsuite/ibm-truststore-mgr/releases) +db2u_version: 7.3.1+20250821.161005.16793 # Operator version 110509.0.6 to find the version 7.3.1+20250821.161005.16793, search db2u-operator digest on repo (https://github.com/IBM/cloud-pak/tree/master/repo/case/ibm-db2uoperator) + +# Maximo Application Suite +# ----------------------------------------------------------------------------- +mas_core_version: + 9.2.x-feature: 9.2.0-pre.stable_12926 # No Update + 9.1.x: 9.1.11 # Updated + 9.0.x: 9.0.22 # No Update + 8.10.x: "" # Not Supported + 8.11.x: "" # Not Supported +mas_manage_version: + 9.2.x-feature: 9.2.0-pre.stable_13730 # No Update + 9.1.x: 9.1.12 # Updated + 9.0.x: 9.0.23 # No Update + 8.10.x: "" # Not Supported + 8.11.x: "" # Not Supported + +# Extra Images for UDS +# ------------------------------------------------------------------------------ +uds_extras_version: 1.5.0 + +# Extra Images for Mongo +# ------------------------------------------------------------------------------ +mongo_extras_version_default: 8.0.17 + +# Variables used to mirror additional mongo image versions +mongo_extras_version_4: 4.4.21 +mongo_extras_version_5: 5.0.23 +mongo_extras_version_6: 6.0.12 +mongo_extras_version_7: 7.0.12 +mongo_extras_version_8: 8.0.17 + +editorial: + whats_new: + - title: '**Security updates and bug fixes**' + details: + - IBM Maximo Application Suite Core Platform v9.1.11 + - IBM Maximo Manage 9.1.12 + - IBM Truststore Manager v1.7 + - IBM Suite License Service v3.12 + known_issues: + - title: A known issue 
exists in the January 29, 2026 release affecting HSE and Oil & Gas (9.0.23 / 9.1.64). Customers with HSE installed should avoid upgrading to the January release. Installation of HSE or Oil & Gas on Manage 9.0.x / 9.1.x should be deferred until the February 2026 patch. \ No newline at end of file diff --git a/src/mas/devops/data/catalogs/v9-260318-amd64.yaml b/src/mas/devops/data/catalogs/v9-260318-amd64.yaml new file mode 100644 index 00000000..4cc27a5a --- /dev/null +++ b/src/mas/devops/data/catalogs/v9-260318-amd64.yaml @@ -0,0 +1,162 @@ +--- +# Case bundle configuration for IBM Maximo Operator Catalog 260318 (AMD64) +# ----------------------------------------------------------------------------- +# In the future this won't be necessary as we'll be able to mirror from the +# catalog itself, but not everything in the catalog supports this yet (including MAS) +# so we need to use the CASE bundle mirror process still. + +catalog_digest: sha256:f7caddb7ec725d48a6476d81b3652de9e0411bea0966bbc5b9ebf1e424e8330e + +ocp_compatibility: +- "4.16" +- "4.17" +- "4.18" +- "4.19" +- "4.20" + +# Dependencies +# ----------------------------------------------------------------------------- +ibm_licensing_version: 4.2.17 # Operator version 4.2.14 (https://github.com/IBM/cloud-pak/tree/master/repo/case/ibm-licensing) +common_svcs_version: 4.13.0 # Operator version 4.13.0 (https://github.com/IBM/cloud-pak/tree/master/repo/case/ibm-cp-common-services) +common_svcs_version_1: 4.11.0 # Additional version 4.11.0 + +cp4d_platform_version: 5.2.0+20250709.170324 # Operator version 5.2.0 (https://github.com/IBM/cloud-pak/tree/master/repo/case/ibm-cp-datacore/) +ibm_zen_version: 6.2.0+20250530.152516.232 # For CPD5 ibm-zen has to be explicitly mirrored + +db2u_version: 7.3.1+20250821.161005.16793 # Operator version 110509.0.6 to find the version 7.3.1+20250821.161005.16793, search db2u-operator digest on repo (https://github.com/IBM/cloud-pak/tree/master/repo/case/ibm-db2uoperator)
+db2_channel_default: v110509.0 # Default Channel version for db2u-operator +events_version: 5.0.1 # Operator version 5.0.1 (https://github.com/IBM/cloud-pak/tree/master/repo/case/ibm-events-operator) +uds_version: 2.0.12 # Operator version 2.0.12 # sticking to 2.0.12 version # Please do Not Change +sls_version: 3.12.6 # No Update # Operator version 3.12.5 (https://github.ibm.com/maximoappsuite/ibm-sls/releases) +tsm_version: 1.7.5 # No Update # Operator version 1.7.4 (https://github.ibm.com/maximoappsuite/ibm-truststore-mgr/releases) +dd_version: 1.1.22 # Operator version 1.1.22 (https://github.ibm.com/maximoappsuite/ibm-data-dictionary/releases) +appconnect_version: 6.2.0 # Operator version 6.2.0 # sticking to 6.2.0 version # Please do Not Change +wsl_version: 11.0.0+20250521.202913.73 # used for wsl and wsl_runtimes unless wsl_runtimes_version also specified +wsl_runtimes_version: 11.0.0+20250515.090949.21 # cpd 5.1.3 uses version 10.3.0 of wsl runtimes but only 10.2.0 for wsl itself +wml_version: 11.0.0+20250530.193146.282 # Operator version 5.2.0 +postgress_version: 5.16.0+20250827.110911.2626 # ibm-cpd-cloud-native-postgresql-operator 5.2.0 cp4d + +ccs_build: 11.0.0+20250605.130237.468 # cpd 5.2.0 using ccs build +# datarefinery_build: +20240517.202103.146 + +spark_version: 11.0.0+20250604.163055.2097 # Operator version 5.2.0 +cognos_version: 28.0.0+20250515.175459.10054 # Operator version 25.0.0 +couchdb_version: 1.0.13 # Operator version 2.2.1 (1.0.13) sticking with 1.0.13 # (This is required for Assist 9.0, https://github.com/IBM/cloud-pak/blob/master/repo/case/ibm-couchdb/index.yaml) +elasticsearch_version: 1.1.2667 # Operator version 1.1.2667 # used in cpd 5.1.3 only +opensearch_version: 1.1.2494 # Operator version 1.1.2494 + +# Maximo Application Suite +# ----------------------------------------------------------------------------- +mas_core_version: + 9.2.x-feature: 9.2.0-pre.stable_12926 # No Update + 9.1.x: 9.1.13 # Updated + 9.0.x: 9.0.22 # No 
Update + 8.10.x: 8.10.35 # No Update + 8.11.x: 8.11.32 # No Update +mas_assist_version: + 9.1.x: 9.1.8 # No Update + 9.0.x: 9.0.14 # No Update + 8.10.x: 8.7.8 # No Update + 8.11.x: 8.8.7 # No Update +mas_hputilities_version: + 9.1.x: "" # Not Supported + 9.0.x: "" # Not Supported + 8.10.x: 8.6.7 # No Update + 8.11.x: "" # Not Supported +mas_iot_version: + 9.1.x: 9.1.8 # No Update + 9.0.x: 9.0.17 # No Update + 8.10.x: 8.7.31 # No Update + 8.11.x: 8.8.28 # No Update +mas_manage_version: + 9.2.x-feature: 9.2.0-pre.stable_13730 # No Update + 9.1.x: 9.1.14 # Updated + 9.0.x: 9.0.23 # No Update + 8.10.x: 8.6.36 # No Update + 8.11.x: 8.7.30 # No Update +mas_monitor_version: + 9.1.x: 9.1.8 # No Update + 9.0.x: 9.0.18 # No Update + 8.10.x: 8.10.28 # No Update + 8.11.x: 8.11.26 # No Update +mas_optimizer_version: + 9.2.x-feature: 9.2.0-pre.stable_12739 # No Update + 9.1.x: 9.1.9 # No Update + 9.0.x: 9.0.20 # No Update + 8.10.x: 8.4.26 # No Update + 8.11.x: 8.5.26 # No Update +mas_predict_version: + 9.1.x: 9.1.5 # No Update + 9.0.x: 9.0.12 # No Update + 8.10.x: 8.8.13 # No Update + 8.11.x: 8.9.15 # No Update +mas_visualinspection_version: + 9.2.x-feature: 9.2.0-pre.stable_12598 # No Update + 9.1.x: 9.1.10 # No Update + 9.0.x: 9.0.17 # No Update + 8.10.x: 8.8.4 # No Update + 8.11.x: 8.9.20 # No Update +mas_facilities_version: + 9.1.x: 9.1.8 # No Update + 9.0.x: "" # Not Supported + 8.10.x: "" # Not Supported + 8.11.x: "" # Not Supported + + +# Maximo AI Service +# ------------------------------------------------------------------------------ +aiservice_version: + 9.2.x-feature: 9.2.0-pre.stable_12908 # No Update + 9.1.x: 9.1.12 # No Update + + +# Extra Images for UDS +# ------------------------------------------------------------------------------ +uds_extras_version: 1.5.0 + +# Extra Images for Mongo +# ------------------------------------------------------------------------------ +mongo_extras_version_default: 8.0.17 + +# Variables used to mirror additional mongo image 
versions +mongo_extras_version_4: 4.4.21 +mongo_extras_version_5: 5.0.23 +mongo_extras_version_6: 6.0.12 +mongo_extras_version_7: 7.0.23 +mongo_extras_version_8: 8.0.17 + +# Extra Images for Db2u +# ------------------------------------------------------------------------------ +db2u_extras_version: 1.0.6 # No Update +db2u_filter: db2 + +# Extra Images for CCS used for PCD 5.2.0 Hotfix +# ------------------------------------------------------------------------------ +ccs_extras_version: 11.0.0 + +# Extra Images for IBM Watson Discovery +# ------------------------------------------------------------------------------ +#wd_extras_version: 1.0.4 + +# Extra Images for Amlen +# ------------------------------------------------------------------------------ +amlen_extras_version: 1.1.4 + +# Default Cloud Pak for Data version +# ------------------------------------------------------------------------------ +cpd_product_version_default: 5.2.0 + +manage_extras_913: 9.1.3 +minio_version: RELEASE.2025-06-13T11-33-47Z + +editorial: + whats_new: + - title: '**Security updates and bug fixes**' + details: + - IBM Maximo Application Suite Core Platform v9.1 + - IBM Maximo Manage v9.1 + known_issues: + - title: Customers using **Maximo Assist v8.7 or v8.8** should not update and must instead contact IBM Support for guidance regarding the removal of IBM Watson Discovery and upgrading to Maximo Assist v9.0 + - title: A known issue exists in the January 29, 2026 release affecting HSE and Oil & Gas (9.0.23 / 9.1.64). Customers with HSE installed should avoid upgrading to the January release. Installation of HSE or Oil & Gas on Manage 9.0.x / 9.1.x should be deferred until the February 2026 patch. + - title: A known issue has been identified in Db2u warehouse operator in this catalog. Customers restoring/upgrading Db2, intermittently get db2 `SQL0290N Table space access is not allowed. SQLSTATE=55039` error causing connectivity issue between MAS and Db2. 
If you are facing this problem, please refer to the workaround provided in this [documentation](https://www.ibm.com/docs/en/cloud-paks/cp-data/5.3.x?topic=SSQNUZ_5.3.x/svc-db2/known-issues-dbs.htm#known-issues-dbs__db2-instance-fails-sql0290n__title__1). + - title: A known issue exists in the February 26, 2026 release affecting IBM Maximo Real Estate and Facilities. Customers with MREF installed should avoid upgrading to the February 9.1.8 release. Installation of MREF 9.1.x should be deferred until the March 2026 patch. \ No newline at end of file diff --git a/src/mas/devops/data/catalogs/v9-260318-ppc64le.yaml b/src/mas/devops/data/catalogs/v9-260318-ppc64le.yaml new file mode 100644 index 00000000..be4c76b9 --- /dev/null +++ b/src/mas/devops/data/catalogs/v9-260318-ppc64le.yaml @@ -0,0 +1,59 @@ +--- +# Case bundle configuration for IBM Maximo Operator Catalog 260318 (PPC) +# ----------------------------------------------------------------------------- +# In the future this won't be necessary as we'll be able to mirror from the +# catalog itself, but not everything in the catalog supports this yet (including MAS) +# so we need to use the CASE bundle mirror process still.
+ +catalog_digest: sha256:6ebe5ae85ec02831413451ac7f1635ad538b7212642f12101f23444d58c553cf + +ocp_compatibility: +- "4.16" +- "4.17" +- "4.18" +- "4.19" +- "4.20" + +uds_version: 2.0.12 # Operator version 2.0.12 # sticking to 2.0.12 version # Please do Not Change +sls_version: 3.12.6 # No Update # Operator version 3.12.5 (https://github.ibm.com/maximoappsuite/ibm-sls/releases) +tsm_version: 1.7.5 # No Update # Operator version 1.7.4 (https://github.ibm.com/maximoappsuite/ibm-truststore-mgr/releases) +db2u_version: 7.3.1+20250821.161005.16793 # Operator version 110509.0.6 to find the version 7.3.1+20250821.161005.16793, search db2u-operator digest on repo (https://github.com/IBM/cloud-pak/tree/master/repo/case/ibm-db2uoperator) + +# Maximo Application Suite +# ----------------------------------------------------------------------------- +mas_core_version: + 9.2.x-feature: 9.2.0-pre.stable_12926 # No Update + 9.1.x: 9.1.13 # Updated + 9.0.x: 9.0.22 # No Update + 8.10.x: "" # Not Supported + 8.11.x: "" # Not Supported +mas_manage_version: + 9.2.x-feature: 9.2.0-pre.stable_13730 # No Update + 9.1.x: 9.1.14 # Updated + 9.0.x: 9.0.23 # No Update + 8.10.x: "" # Not Supported + 8.11.x: "" # Not Supported + +# Extra Images for UDS +# ------------------------------------------------------------------------------ +uds_extras_version: 1.5.0 + +# Extra Images for Mongo +# ------------------------------------------------------------------------------ +mongo_extras_version_default: 8.0.17 + +# Variables used to mirror additional mongo image versions +mongo_extras_version_4: 4.4.21 +mongo_extras_version_5: 5.0.23 +mongo_extras_version_6: 6.0.12 +mongo_extras_version_7: 7.0.12 +mongo_extras_version_8: 8.0.17 + +editorial: + whats_new: + - title: '**Security updates and bug fixes**' + details: + - IBM Maximo Application Suite Core Platform v9.1 + - IBM Maximo Manage v9.1 + known_issues: + - title: A known issue exists in the January 29, 2026 release affecting HSE and Oil & Gas 
(9.0.23 / 9.1.64). Customers with HSE installed should avoid upgrading to the January release. Installation of HSE or Oil & Gas on Manage 9.0.x / 9.1.x should be deferred until the February 2026 patch. \ No newline at end of file diff --git a/src/mas/devops/data/catalogs/v9-260318-s390x.yaml b/src/mas/devops/data/catalogs/v9-260318-s390x.yaml new file mode 100644 index 00000000..d450e833 --- /dev/null +++ b/src/mas/devops/data/catalogs/v9-260318-s390x.yaml @@ -0,0 +1,59 @@ +--- +# Case bundle configuration for IBM Maximo Operator Catalog 260318 (Z) +# ----------------------------------------------------------------------------- +# In the future this won't be necessary as we'll be able to mirror from the +# catalog itself, but not everything in the catalog supports this yet (including MAS) +# so we need to use the CASE bundle mirror process still. + +catalog_digest: sha256:aa22f763e4ba23e64d71bc58155187bba606138600f3d7f5b2264c8a6e92242c + +ocp_compatibility: +- "4.16" +- "4.17" +- "4.18" +- "4.19" +- "4.20" + +uds_version: 2.0.12 # Operator version 2.0.12 # sticking to 2.0.12 version # Please do Not Change +sls_version: 3.12.6 # No Update # Operator version 3.12.5 (https://github.ibm.com/maximoappsuite/ibm-sls/releases) +tsm_version: 1.7.5 # No Update # Operator version 1.7.4 (https://github.ibm.com/maximoappsuite/ibm-truststore-mgr/releases) +db2u_version: 7.3.1+20250821.161005.16793 # Operator version 110509.0.6 to find the version 7.3.1+20250821.161005.16793, search db2u-operator digest on repo (https://github.com/IBM/cloud-pak/tree/master/repo/case/ibm-db2uoperator) + +# Maximo Application Suite +# ----------------------------------------------------------------------------- +mas_core_version: + 9.2.x-feature: 9.2.0-pre.stable_12926 # No Update + 9.1.x: 9.1.13 # Updated + 9.0.x: 9.0.22 # No Update + 8.10.x: "" # Not Supported + 8.11.x: "" # Not Supported +mas_manage_version: + 9.2.x-feature: 9.2.0-pre.stable_13730 # No Update + 9.1.x: 9.1.14 # Updated + 9.0.x:
9.0.23 # No Update + 8.10.x: "" # Not Supported + 8.11.x: "" # Not Supported + +# Extra Images for UDS +# ------------------------------------------------------------------------------ +uds_extras_version: 1.5.0 + +# Extra Images for Mongo +# ------------------------------------------------------------------------------ +mongo_extras_version_default: 8.0.17 + +# Variables used to mirror additional mongo image versions +mongo_extras_version_4: 4.4.21 +mongo_extras_version_5: 5.0.23 +mongo_extras_version_6: 6.0.12 +mongo_extras_version_7: 7.0.12 +mongo_extras_version_8: 8.0.17 + +editorial: + whats_new: + - title: '**Security updates and bug fixes**' + details: + - IBM Maximo Application Suite Core Platform v9.1 + - IBM Maximo Manage v9.1 + known_issues: + - title: A known issue exists in the January 29, 2026 release affecting HSE and Oil & Gas (9.0.23 / 9.1.64). Customers with HSE installed should avoid upgrading to the January release. Installation of HSE or Oil & Gas on Manage 9.0.x / 9.1.x should be deferred until the February 2026 patch. \ No newline at end of file diff --git a/src/mas/devops/data/catalogs/v9-260326-amd64.yaml b/src/mas/devops/data/catalogs/v9-260326-amd64.yaml new file mode 100644 index 00000000..7b912d18 --- /dev/null +++ b/src/mas/devops/data/catalogs/v9-260326-amd64.yaml @@ -0,0 +1,176 @@ +--- +# Case bundle configuration for IBM Maximo Operator Catalog 260326 (AMD64) +# ----------------------------------------------------------------------------- +# In the future this won't be necessary as we'll be able to mirror from the +# catalog itself, but not everything in the catalog supports this yet (including MAS) +# so we need to use the CASE bundle mirror process still. 
+ +catalog_digest: sha256:c0cc7bfca67a47d12a3fce6bcf6ad25ee83d75234c17b0e32e736af5e30b971c + +ocp_compatibility: +- "4.16" +- "4.17" +- "4.18" +- "4.19" +- "4.20" + +# Dependencies +# ----------------------------------------------------------------------------- +ibm_licensing_version: 4.2.17 # Operator version 4.2.14 (https://github.com/IBM/cloud-pak/tree/master/repo/case/ibm-licensing) +common_svcs_version: 4.13.0 # Operator version 4.13.0 (https://github.com/IBM/cloud-pak/tree/master/repo/case/ibm-cp-common-services) +common_svcs_version_1: 4.11.0 # Additional version 4.11.0 + +cp4d_platform_version: 5.2.0+20250709.170324 # Operator version 5.2.0 (https://github.com/IBM/cloud-pak/tree/master/repo/case/ibm-cp-datacore/) +ibm_zen_version: 6.2.0+20250530.152516.232 # For CPD5 ibm-zen has to be explicitily mirrored + +db2u_version: 7.3.1+20250821.161005.16793 # Operator version 110509.0.6 to find the version 7.3.1+20250821.161005.16793, search db2u-operator digest on repo (https://github.com/IBM/cloud-pak/tree/master/repo/case/ibm-db2uoperator) +db2_channel_default: v110509.0 # Default Channel version for db2u-operator +events_version: 5.0.1 # Operator version 5.0.1 (https://github.com/IBM/cloud-pak/tree/master/repo/case/ibm-events-operator) +uds_version: 2.0.12 # Operator version 2.0.12 # sticking to 2.0.12 version # Please do Not Change +sls_version: 3.12.6 # Updated # Operator version 3.12.5 (https://github.ibm.com/maximoappsuite/ibm-sls/releases) +tsm_version: 1.7.5 # Updated # Operator version 1.7.4 (https://github.ibm.com/maximoappsuite/ibm-truststore-mgr/releases) +dd_version: 1.1.23 # Updated # Operator version 1.1.23 (https://github.ibm.com/maximoappsuite/ibm-data-dictionary/releases) +appconnect_version: 6.2.0 # Operator version 6.2.0 # sticking to 6.2.0 version # Please do Not Change +wsl_version: 11.0.0+20250521.202913.73 # used for wsl and wsl_runtimes unless wsl_runtimes_version also specified +wsl_runtimes_version: 11.0.0+20250515.090949.21 # cpd 5.1.3 
uses version 10.3.0 of wsl runtimes but only 10.2.0 for wsl itself +wml_version: 11.0.0+20250530.193146.282 # Operator version 5.2.0 +postgress_version: 5.16.0+20250827.110911.2626 # ibm-cpd-cloud-native-postgresql-operator 5.2.0 cp4d + +ccs_build: 11.0.0+20250605.130237.468 # cpd 5.2.0 using ccs build +# datarefinery_build: +20240517.202103.146 + +spark_version: 11.0.0+20250604.163055.2097 # Operator version 5.2.0 +cognos_version: 28.0.0+20250515.175459.10054 # Operator version 25.0.0 +couchdb_version: 1.0.13 # Operator version 2.2.1 (1.0.13) sticking with 1.0.13 # (This is required for Assist 9.0, https://github.com/IBM/cloud-pak/blob/master/repo/case/ibm-couchdb/index.yaml) +elasticsearch_version: 1.1.2667 # Operator version 1.1.2667 # used in cpd 5.1.3 only +opensearch_version: 1.1.2494 # Operator version 1.1.2494 + +# Maximo Application Suite +# ----------------------------------------------------------------------------- +mas_core_version: + 9.2.x-feature: 9.2.0-pre.stable_16717 # Updated + 9.1.x: 9.1.14 # Updated + 9.0.x: 9.0.23 # Updated + 8.10.x: 8.10.36 # Updated + 8.11.x: 8.11.33 # Updated +mas_assist_version: + 9.1.x: 9.1.9 # Updated + 9.0.x: 9.0.15 # Updated + 8.10.x: 8.7.8 # No Update + 8.11.x: 8.8.7 # No Update +mas_hputilities_version: + 9.1.x: "" # Not Supported + 9.0.x: "" # Not Supported + 8.10.x: 8.6.7 # No Update + 8.11.x: "" # Not Supported +mas_iot_version: + 9.1.x: 9.1.9 # Updated + 9.0.x: 9.0.18 # Updated + 8.10.x: 8.7.32 # Updated + 8.11.x: 8.8.29 # Updated +mas_manage_version: + 9.2.x-feature: 9.2.0-pre.stable_17064 # Updated + 9.1.x: 9.1.15 # Updated + 9.0.x: 9.0.24 # Updated + 8.10.x: 8.6.37 # Updated + 8.11.x: 8.7.31 # Updated +mas_monitor_version: + 9.1.x: 9.1.9 # Updated + 9.0.x: 9.0.19 # Updated + 8.10.x: 8.10.29 # Updated + 8.11.x: 8.11.27 # Updated +mas_optimizer_version: + 9.2.x-feature: 9.2.0-pre.stable_16613 # Updated + 9.1.x: 9.1.10 # Updated + 9.0.x: 9.0.21 # Updated + 8.10.x: 8.4.27 # Updated + 8.11.x: 8.5.27 # Updated 
+mas_predict_version: + 9.1.x: 9.1.6 # Updated + 9.0.x: 9.0.13 # Updated + 8.10.x: 8.8.14 # Updated + 8.11.x: 8.9.16 # Updated +mas_visualinspection_version: + 9.2.x-feature: 9.2.0-pre.stable_12598 # No Update + 9.1.x: 9.1.10 # No Update + 9.0.x: 9.0.18 # Updated + 8.10.x: 8.8.4 # No Update + 8.11.x: 8.9.20 # No Update +mas_facilities_version: + 9.2.x-feature: 9.2.0-pre.stable_16853 # Updated + 9.1.x: 9.1.9 # Updated + 9.0.x: "" # Not Supported + 8.10.x: "" # Not Supported + 8.11.x: "" # Not Supported + + +# Maximo AI Service +# ------------------------------------------------------------------------------ +aiservice_version: + 9.2.x-feature: 9.2.0-pre.stable_16576 # Updated + 9.1.x: 9.1.13 # Updated + + +# Extra Images for UDS +# ------------------------------------------------------------------------------ +uds_extras_version: 1.5.0 + +# Extra Images for Mongo +# ------------------------------------------------------------------------------ +mongo_extras_version_default: 8.0.17 + +# Variables used to mirror additional mongo image versions +mongo_extras_version_4: 4.4.21 +mongo_extras_version_5: 5.0.23 +mongo_extras_version_6: 6.0.12 +mongo_extras_version_7: 7.0.23 +mongo_extras_version_8: 8.0.17 + +# Extra Images for Db2u +# ------------------------------------------------------------------------------ +db2u_extras_version: 1.0.6 # No Update +db2u_filter: db2 + +# Extra Images for CCS used for PCD 5.2.0 Hotfix +# ------------------------------------------------------------------------------ +ccs_extras_version: 11.0.0 + +# Extra Images for IBM Watson Discovery +# ------------------------------------------------------------------------------ +#wd_extras_version: 1.0.4 + +# Extra Images for Amlen +# ------------------------------------------------------------------------------ +amlen_extras_version: 1.1.4 + +# Default Cloud Pak for Data version +# ------------------------------------------------------------------------------ +cpd_product_version_default: 5.2.0 + 
+manage_extras_913: 9.1.3 +minio_version: RELEASE.2025-06-13T11-33-47Z + +editorial: + whats_new: + - title: '**Security updates and bug fixes**' + details: + - IBM Maximo Application Suite Core Platform [v8.10.36](https://www.ibm.com/support/pages/node/7267462), [v8.11.36](https://www.ibm.com/support/pages/node/7267463), [v9.0.23](https://www.ibm.com/support/pages/node/7267464) and [v9.1.14](https://www.ibm.com/support/pages/node/7267465) + - IBM Maximo Manage [v8.6.37](https://www.ibm.com/support/pages/node/7266693), [v8.7.31](https://www.ibm.com/support/pages/node/7266692), [v9.0.24](https://www.ibm.com/support/pages/node/7266691) and [v9.1.15](https://www.ibm.com/support/pages/node/7266690) + - IBM Maximo IoT [v8.7.32](https://www.ibm.com/support/pages/node/7267121), [v8.8.29](https://www.ibm.com/support/pages/node/7267122), [v9.0.18](https://www.ibm.com/support/pages/node/7267123) and [v9.1.9](https://www.ibm.com/support/pages/node/7267124) + - IBM Maximo Monitor [v8.10.29](https://www.ibm.com/support/pages/node/7267303), [v8.11.27](https://www.ibm.com/support/pages/node/7267304), [v9.0.19](https://www.ibm.com/support/pages/node/7267305) and [v9.1.9](https://www.ibm.com/support/pages/node/7267306) + - IBM Maximo Optimizer [v8.4.27](https://www.ibm.com/support/pages/node/7266732),[v8.5.26](https://www.ibm.com/support/pages/node/7266734), [v9.0.21](https://www.ibm.com/support/pages/node/7266738) and [v9.1.10](https://www.ibm.com/support/pages/node/7266745) + - IBM Maximo Assist/Collaborate [v9.0.15](https://www.ibm.com/support/pages/node/7267220), [v9.1.9](https://www.ibm.com/support/pages/node/7267222) + - IBM Maximo Predict [v8.8.14](https://www.ibm.com/support/pages/node/7267302), [v8.9.16](https://www.ibm.com/support/pages/node/7267299), [v9.0.13](https://www.ibm.com/support/pages/node/7267298) and [v9.1.6](https://www.ibm.com/support/pages/node/7267181) + - IBM Maximo Visual Inspection [v9.0.18](https://www.ibm.com/support/pages/node/7267485) + - IBM Maximo 
Real Estate and Facilities [v9.1.9](https://www.ibm.com/support/pages/node/7267132)
+      - IBM Maximo AI Service [v9.1.13](https://www.ibm.com/support/pages/node/7266231)
+      - IBM Data Dictionary v1.1
+      - IBM Truststore Manager v1.7
+      - IBM Suite License Service v3.12
+
+  known_issues:
+    - title: Customers using **Maximo Assist v8.7 or v8.8** should not update and must instead contact IBM Support for guidance regarding the removal of IBM Watson Discovery and upgrading to Maximo Assist v9.0
+    - title: A known issue exists in the January 29, 2026 release affecting HSE and Oil & Gas (9.0.23 / 9.1.64). Customers with HSE installed should avoid upgrading to the January release. Installation of HSE or Oil & Gas on Manage 9.0.x / 9.1.x should be deferred until the February 2026 patch.
+    - title: A known issue has been identified in Db2u warehouse operator in this catalog. Customers restoring/upgrading Db2, intermittently get db2 `SQL0290N Table space access is not allowed. SQLSTATE=55039` error causing connectivity issue between MAS and Db2. If you are facing this problem, please refer to the workaround provided in this [documentation](https://www.ibm.com/docs/en/cloud-paks/cp-data/5.3.x?topic=SSQNUZ_5.3.x/svc-db2/known-issues-dbs.htm#known-issues-dbs__db2-instance-fails-sql0290n__title__1).
+    - title: A known issue exists in the February 26, 2026 release affecting IBM Maximo Real Estate and Facilities. Customers with MREF installed should avoid upgrading to the February 9.1.8 release. Installation of MREF 9.1.x should be deferred until the March 2026 patch.
+    - title: A known issue exists in the March 26, 2026 release affecting IBM Maximo Real Estate and Facilities. Users may encounter failures during upgrade scenarios (9.1.x → 9.2 FC) and experience slow installation in airgap environments.
\ No newline at end of file diff --git a/src/mas/devops/data/catalogs/v9-260326-ppc64le.yaml b/src/mas/devops/data/catalogs/v9-260326-ppc64le.yaml new file mode 100644 index 00000000..14e7b2c1 --- /dev/null +++ b/src/mas/devops/data/catalogs/v9-260326-ppc64le.yaml @@ -0,0 +1,61 @@ +--- +# Case bundle configuration for IBM Maximo Operator Catalog 260326 (PPC) +# ----------------------------------------------------------------------------- +# In the future this won't be necessary as we'll be able to mirror from the +# catalog itself, but not everything in the catalog supports this yet (including MAS) +# so we need to use the CASE bundle mirror process still. + +catalog_digest: sha256:1664484a0e10f936cb899841f8b8cd37ae6dcdcc53efc3ef32698d584013cdc7 + +ocp_compatibility: +- "4.16" +- "4.17" +- "4.18" +- "4.19" +- "4.20" + +uds_version: 2.0.12 # Operator version 2.0.12 # sticking to 2.0.12 version # Please do Not Change +sls_version: 3.12.6 # Updated # Operator version 3.12.5 (https://github.ibm.com/maximoappsuite/ibm-sls/releases) +tsm_version: 1.7.5 # Updated # Operator version 1.7.4 (https://github.ibm.com/maximoappsuite/ibm-truststore-mgr/releases) +db2u_version: 7.3.1+20250821.161005.16793 # Operator version 110509.0.6 to find the version 7.3.1+20250821.161005.16793, search db2u-operator digest on repo (https://github.com/IBM/cloud-pak/tree/master/repo/case/ibm-db2uoperator) + +# Maximo Application Suite +# ----------------------------------------------------------------------------- +mas_core_version: + 9.2.x-feature: 9.2.0-pre.stable_16717 # Updated + 9.1.x: 9.1.14 # Updated + 9.0.x: 9.0.23 # Updated + 8.10.x: "" # Not Supported + 8.11.x: "" # Not Supported +mas_manage_version: + 9.2.x-feature: 9.2.0-pre.stable_17064 # Updated + 9.1.x: 9.1.15 # Updated + 9.0.x: 9.0.24 # Updated + 8.10.x: "" # Not Supported + 8.11.x: "" # Not Supported + +# Extra Images for UDS +# ------------------------------------------------------------------------------ +uds_extras_version: 
1.5.0 + +# Extra Images for Mongo +# ------------------------------------------------------------------------------ +mongo_extras_version_default: 8.0.17 + +# Variables used to mirror additional mongo image versions +mongo_extras_version_4: 4.4.21 +mongo_extras_version_5: 5.0.23 +mongo_extras_version_6: 6.0.12 +mongo_extras_version_7: 7.0.12 +mongo_extras_version_8: 8.0.17 + +editorial: + whats_new: + - title: '**Security updates and bug fixes**' + details: + - IBM Maximo Application Suite Core Platform [v9.0.23](https://www.ibm.com/support/pages/node/7267464) and [v9.1.14](https://www.ibm.com/support/pages/node/7267465) + - IBM Maximo Manage [v9.0.24](https://www.ibm.com/support/pages/node/7266691) and [v9.1.15](https://www.ibm.com/support/pages/node/7266690) + - IBM Truststore Manager v1.7 + - IBM Suite License Service v3.12 + known_issues: + - title: A known issue exists in the January 29, 2026 release affecting HSE and Oil & Gas (9.0.23 / 9.1.64). Customers with HSE installed should avoid upgrading to the January release. Installation of HSE or Oil & Gas on Manage 9.0.x / 9.1.x should be deferred until the February 2026 patch. \ No newline at end of file diff --git a/src/mas/devops/data/catalogs/v9-260326-s390x.yaml b/src/mas/devops/data/catalogs/v9-260326-s390x.yaml new file mode 100644 index 00000000..6385fb58 --- /dev/null +++ b/src/mas/devops/data/catalogs/v9-260326-s390x.yaml @@ -0,0 +1,61 @@ +--- +# Case bundle configuration for IBM Maximo Operator Catalog 260326 (Z) +# ----------------------------------------------------------------------------- +# In the future this won't be necessary as we'll be able to mirror from the +# catalog itself, but not everything in the catalog supports this yet (including MAS) +# so we need to use the CASE bundle mirror process still. 
+ +catalog_digest: sha256:e8f185bdec56281ea3f431b43edbc36f2512615a45561bd44df274a2b4cca6df + +ocp_compatibility: +- "4.16" +- "4.17" +- "4.18" +- "4.19" +- "4.20" + +uds_version: 2.0.12 # Operator version 2.0.12 # sticking to 2.0.12 version # Please do Not Change +sls_version: 3.12.6 # Updated # Operator version 3.12.5 (https://github.ibm.com/maximoappsuite/ibm-sls/releases) +tsm_version: 1.7.5 # Updated # Operator version 1.7.4 (https://github.ibm.com/maximoappsuite/ibm-truststore-mgr/releases) +db2u_version: 7.3.1+20250821.161005.16793 # Operator version 110509.0.6 to find the version 7.3.1+20250821.161005.16793, search db2u-operator digest on repo (https://github.com/IBM/cloud-pak/tree/master/repo/case/ibm-db2uoperator) + +# Maximo Application Suite +# ----------------------------------------------------------------------------- +mas_core_version: + 9.2.x-feature: 9.2.0-pre.stable_16717 # Updated + 9.1.x: 9.1.14 # Updated + 9.0.x: 9.0.23 # Updated + 8.10.x: "" # Not Supported + 8.11.x: "" # Not Supported +mas_manage_version: + 9.2.x-feature: 9.2.0-pre.stable_17064 # Updated + 9.1.x: 9.1.15 # Updated + 9.0.x: 9.0.24 # Updated + 8.10.x: "" # Not Supported + 8.11.x: "" # Not Supported + +# Extra Images for UDS +# ------------------------------------------------------------------------------ +uds_extras_version: 1.5.0 + +# Extra Images for Mongo +# ------------------------------------------------------------------------------ +mongo_extras_version_default: 8.0.17 + +# Variables used to mirror additional mongo image versions +mongo_extras_version_4: 4.4.21 +mongo_extras_version_5: 5.0.23 +mongo_extras_version_6: 6.0.12 +mongo_extras_version_7: 7.0.12 +mongo_extras_version_8: 8.0.17 + +editorial: + whats_new: + - title: '**Security updates and bug fixes**' + details: + - IBM Maximo Application Suite Core Platform [v9.0.23](https://www.ibm.com/support/pages/node/7267464) and [v9.1.14](https://www.ibm.com/support/pages/node/7267465) + - IBM Maximo Manage 
[v9.0.24](https://www.ibm.com/support/pages/node/7266691) and [v9.1.15](https://www.ibm.com/support/pages/node/7266690)
+      - IBM Truststore Manager v1.7
+      - IBM Suite License Service v3.12
+  known_issues:
+    - title: A known issue exists in the January 29, 2026 release affecting HSE and Oil & Gas (9.0.23 / 9.1.64). Customers with HSE installed should avoid upgrading to the January release. Installation of HSE or Oil & Gas on Manage 9.0.x / 9.1.x should be deferred until the February 2026 patch.
\ No newline at end of file
diff --git a/src/mas/devops/data/catalogs/v9-260430-amd64.yaml b/src/mas/devops/data/catalogs/v9-260430-amd64.yaml
new file mode 100644
index 00000000..80bbb4a8
--- /dev/null
+++ b/src/mas/devops/data/catalogs/v9-260430-amd64.yaml
@@ -0,0 +1,176 @@
+---
+# Case bundle configuration for IBM Maximo Operator Catalog 260430 (AMD64)
+# -----------------------------------------------------------------------------
+# In the future this won't be necessary as we'll be able to mirror from the
+# catalog itself, but not everything in the catalog supports this yet (including MAS)
+# so we need to use the CASE bundle mirror process still.
+ +catalog_digest: sha256:c852ac109d2a17fab9c58b38389bdf87ed60b9d6273811eb380b8ba2a0c0184f + +ocp_compatibility: +- "4.16" +- "4.17" +- "4.18" +- "4.19" +- "4.20" + +# Dependencies +# ----------------------------------------------------------------------------- +ibm_licensing_version: 4.2.17 # Operator version 4.2.14 (https://github.com/IBM/cloud-pak/tree/master/repo/case/ibm-licensing) +common_svcs_version: 4.13.0 # Operator version 4.13.0 (https://github.com/IBM/cloud-pak/tree/master/repo/case/ibm-cp-common-services) +common_svcs_version_1: 4.11.0 # Additional version 4.11.0 + +cp4d_platform_version: 5.2.0+20250709.170324 # Operator version 5.2.0 (https://github.com/IBM/cloud-pak/tree/master/repo/case/ibm-cp-datacore/) +ibm_zen_version: 6.2.0+20250530.152516.232 # For CPD5 ibm-zen has to be explicitily mirrored + +db2u_version: 7.3.1+20250821.161005.16793 # Operator version 110509.0.6 to find the version 7.3.1+20250821.161005.16793, search db2u-operator digest on repo (https://github.com/IBM/cloud-pak/tree/master/repo/case/ibm-db2uoperator) +db2_channel_default: v110509.0 # Default Channel version for db2u-operator +events_version: 5.0.1 # Operator version 5.0.1 (https://github.com/IBM/cloud-pak/tree/master/repo/case/ibm-events-operator) +uds_version: 2.0.12 # Operator version 2.0.12 # sticking to 2.0.12 version # Please do Not Change +sls_version: 3.12.6 # Updated # Operator version 3.12.5 (https://github.ibm.com/maximoappsuite/ibm-sls/releases) +tsm_version: 1.7.5 # Updated # Operator version 1.7.4 (https://github.ibm.com/maximoappsuite/ibm-truststore-mgr/releases) +dd_version: 1.1.23 # Updated # Operator version 1.1.23 (https://github.ibm.com/maximoappsuite/ibm-data-dictionary/releases) +appconnect_version: 6.2.0 # Operator version 6.2.0 # sticking to 6.2.0 version # Please do Not Change +wsl_version: 11.0.0+20250521.202913.73 # used for wsl and wsl_runtimes unless wsl_runtimes_version also specified +wsl_runtimes_version: 11.0.0+20250515.090949.21 # cpd 5.1.3 
uses version 10.3.0 of wsl runtimes but only 10.2.0 for wsl itself +wml_version: 11.0.0+20250530.193146.282 # Operator version 5.2.0 +postgress_version: 5.16.0+20250827.110911.2626 # ibm-cpd-cloud-native-postgresql-operator 5.2.0 cp4d + +ccs_build: 11.0.0+20250605.130237.468 # cpd 5.2.0 using ccs build +# datarefinery_build: +20240517.202103.146 + +spark_version: 11.0.0+20250604.163055.2097 # Operator version 5.2.0 +cognos_version: 28.0.0+20250515.175459.10054 # Operator version 25.0.0 +couchdb_version: 1.0.13 # Operator version 2.2.1 (1.0.13) sticking with 1.0.13 # (This is required for Assist 9.0, https://github.com/IBM/cloud-pak/blob/master/repo/case/ibm-couchdb/index.yaml) +elasticsearch_version: 1.1.2667 # Operator version 1.1.2667 # used in cpd 5.1.3 only +opensearch_version: 1.1.2494 # Operator version 1.1.2494 + +# Maximo Application Suite +# ----------------------------------------------------------------------------- +mas_core_version: + 9.2.x-feature: 9.2.0-pre.stable_16717 # Updated + 9.1.x: 9.1.14 # Updated + 9.0.x: 9.0.23 # Updated + 8.10.x: 8.10.36 # Updated + 8.11.x: 8.11.33 # Updated +mas_assist_version: + 9.1.x: 9.1.9 # Updated + 9.0.x: 9.0.15 # Updated + 8.10.x: 8.7.8 # No Update + 8.11.x: 8.8.7 # No Update +mas_hputilities_version: + 9.1.x: "" # Not Supported + 9.0.x: "" # Not Supported + 8.10.x: 8.6.7 # No Update + 8.11.x: "" # Not Supported +mas_iot_version: + 9.1.x: 9.1.9 # Updated + 9.0.x: 9.0.18 # Updated + 8.10.x: 8.7.32 # Updated + 8.11.x: 8.8.29 # Updated +mas_manage_version: + 9.2.x-feature: 9.2.0-pre.stable_17064 # Updated + 9.1.x: 9.1.15 # Updated + 9.0.x: 9.0.24 # Updated + 8.10.x: 8.6.37 # Updated + 8.11.x: 8.7.31 # Updated +mas_monitor_version: + 9.1.x: 9.1.9 # Updated + 9.0.x: 9.0.19 # Updated + 8.10.x: 8.10.29 # Updated + 8.11.x: 8.11.27 # Updated +mas_optimizer_version: + 9.2.x-feature: 9.2.0-pre.stable_16613 # Updated + 9.1.x: 9.1.10 # Updated + 9.0.x: 9.0.21 # Updated + 8.10.x: 8.4.27 # Updated + 8.11.x: 8.5.27 # Updated 
+mas_predict_version: + 9.1.x: 9.1.6 # Updated + 9.0.x: 9.0.13 # Updated + 8.10.x: 8.8.14 # Updated + 8.11.x: 8.9.16 # Updated +mas_visualinspection_version: + 9.2.x-feature: 9.2.0-pre.stable_12598 # No Update + 9.1.x: 9.1.10 # No Update + 9.0.x: 9.0.18 # Updated + 8.10.x: 8.8.4 # No Update + 8.11.x: 8.9.20 # No Update +mas_facilities_version: + 9.2.x-feature: 9.2.0-pre.stable_16853 # Updated + 9.1.x: 9.1.9 # Updated + 9.0.x: "" # Not Supported + 8.10.x: "" # Not Supported + 8.11.x: "" # Not Supported + + +# Maximo AI Service +# ------------------------------------------------------------------------------ +aiservice_version: + 9.2.x-feature: 9.2.0-pre.stable_16576 # Updated + 9.1.x: 9.1.13 # Updated + + +# Extra Images for UDS +# ------------------------------------------------------------------------------ +uds_extras_version: 1.5.0 + +# Extra Images for Mongo +# ------------------------------------------------------------------------------ +mongo_extras_version_default: 8.0.17 + +# Variables used to mirror additional mongo image versions +mongo_extras_version_4: 4.4.21 +mongo_extras_version_5: 5.0.23 +mongo_extras_version_6: 6.0.12 +mongo_extras_version_7: 7.0.23 +mongo_extras_version_8: 8.0.17 + +# Extra Images for Db2u +# ------------------------------------------------------------------------------ +db2u_extras_version: 1.0.6 # No Update +db2u_filter: db2 + +# Extra Images for CCS used for PCD 5.2.0 Hotfix +# ------------------------------------------------------------------------------ +ccs_extras_version: 11.0.0 + +# Extra Images for IBM Watson Discovery +# ------------------------------------------------------------------------------ +#wd_extras_version: 1.0.4 + +# Extra Images for Amlen +# ------------------------------------------------------------------------------ +amlen_extras_version: 1.1.4 + +# Default Cloud Pak for Data version +# ------------------------------------------------------------------------------ +cpd_product_version_default: 5.2.0 + 
+manage_extras_913: 9.1.3 +minio_version: RELEASE.2025-06-13T11-33-47Z + +editorial: + whats_new: + - title: '**Security updates and bug fixes**' + details: + - IBM Maximo Application Suite Core Platform [v8.10.36](https://www.ibm.com/support/pages/node/7267462), [v8.11.36](https://www.ibm.com/support/pages/node/7267463), [v9.0.23](https://www.ibm.com/support/pages/node/7267464) and [v9.1.14](https://www.ibm.com/support/pages/node/7267465) + - IBM Maximo Manage [v8.6.37](https://www.ibm.com/support/pages/node/7266693), [v8.7.31](https://www.ibm.com/support/pages/node/7266692), [v9.0.24](https://www.ibm.com/support/pages/node/7266691) and [v9.1.15](https://www.ibm.com/support/pages/node/7266690) + - IBM Maximo IoT [v8.7.32](https://www.ibm.com/support/pages/node/7267121), [v8.8.29](https://www.ibm.com/support/pages/node/7267122), [v9.0.18](https://www.ibm.com/support/pages/node/7267123) and [v9.1.9](https://www.ibm.com/support/pages/node/7267124) + - IBM Maximo Monitor [v8.10.29](https://www.ibm.com/support/pages/node/7267303), [v8.11.27](https://www.ibm.com/support/pages/node/7267304), [v9.0.19](https://www.ibm.com/support/pages/node/7267305) and [v9.1.9](https://www.ibm.com/support/pages/node/7267306) + - IBM Maximo Optimizer [v8.4.27](https://www.ibm.com/support/pages/node/7266732),[v8.5.26](https://www.ibm.com/support/pages/node/7266734), [v9.0.21](https://www.ibm.com/support/pages/node/7266738) and [v9.1.10](https://www.ibm.com/support/pages/node/7266745) + - IBM Maximo Assist/Collaborate [v9.0.15](https://www.ibm.com/support/pages/node/7267220), [v9.1.9](https://www.ibm.com/support/pages/node/7267222) + - IBM Maximo Predict [v8.8.14](https://www.ibm.com/support/pages/node/7267302), [v8.9.16](https://www.ibm.com/support/pages/node/7267299), [v9.0.13](https://www.ibm.com/support/pages/node/7267298) and [v9.1.6](https://www.ibm.com/support/pages/node/7267181) + - IBM Maximo Visual Inspection [v9.0.18](https://www.ibm.com/support/pages/node/7267485) + - IBM Maximo 
Real Estate and Facilities [v9.1.9](https://www.ibm.com/support/pages/node/7267132)
+      - IBM Maximo AI Service [v9.1.13](https://www.ibm.com/support/pages/node/7266231)
+      - IBM Data Dictionary v1.1
+      - IBM Truststore Manager v1.7
+      - IBM Suite License Service v3.12
+
+  known_issues:
+    - title: Customers using **Maximo Assist v8.7 or v8.8** should not update and must instead contact IBM Support for guidance regarding the removal of IBM Watson Discovery and upgrading to Maximo Assist v9.0
+    - title: A known issue exists in the January 29, 2026 release affecting HSE and Oil & Gas (9.0.23 / 9.1.64). Customers with HSE installed should avoid upgrading to the January release. Installation of HSE or Oil & Gas on Manage 9.0.x / 9.1.x should be deferred until the February 2026 patch.
+    - title: A known issue has been identified in Db2u warehouse operator in this catalog. Customers restoring/upgrading Db2, intermittently get db2 `SQL0290N Table space access is not allowed. SQLSTATE=55039` error causing connectivity issue between MAS and Db2. If you are facing this problem, please refer to the workaround provided in this [documentation](https://www.ibm.com/docs/en/cloud-paks/cp-data/5.3.x?topic=SSQNUZ_5.3.x/svc-db2/known-issues-dbs.htm#known-issues-dbs__db2-instance-fails-sql0290n__title__1).
+    - title: A known issue exists in the February 26, 2026 release affecting IBM Maximo Real Estate and Facilities. Customers with MREF installed should avoid upgrading to the February 9.1.8 release. Installation of MREF 9.1.x should be deferred until the March 2026 patch.
+    - title: A known issue exists in the March 26, 2026 release affecting IBM Maximo Real Estate and Facilities. Users may encounter failures during upgrade scenarios (9.1.x → 9.2 FC) and experience slow installation in airgap environments.
\ No newline at end of file diff --git a/src/mas/devops/data/catalogs/v9-260430-ppc64le.yaml b/src/mas/devops/data/catalogs/v9-260430-ppc64le.yaml new file mode 100644 index 00000000..ec34b7af --- /dev/null +++ b/src/mas/devops/data/catalogs/v9-260430-ppc64le.yaml @@ -0,0 +1,61 @@ +--- +# Case bundle configuration for IBM Maximo Operator Catalog 260430 (PPC) +# ----------------------------------------------------------------------------- +# In the future this won't be necessary as we'll be able to mirror from the +# catalog itself, but not everything in the catalog supports this yet (including MAS) +# so we need to use the CASE bundle mirror process still. + +catalog_digest: sha256:989162d1f7b519163a7dad9747d78bec0ad05681db396b95f5c9c8fa97ea1d16 + +ocp_compatibility: +- "4.16" +- "4.17" +- "4.18" +- "4.19" +- "4.20" + +uds_version: 2.0.12 # Operator version 2.0.12 # sticking to 2.0.12 version # Please do Not Change +sls_version: 3.12.6 # Updated # Operator version 3.12.5 (https://github.ibm.com/maximoappsuite/ibm-sls/releases) +tsm_version: 1.7.5 # Updated # Operator version 1.7.4 (https://github.ibm.com/maximoappsuite/ibm-truststore-mgr/releases) +db2u_version: 7.3.1+20250821.161005.16793 # Operator version 110509.0.6 to find the version 7.3.1+20250821.161005.16793, search db2u-operator digest on repo (https://github.com/IBM/cloud-pak/tree/master/repo/case/ibm-db2uoperator) + +# Maximo Application Suite +# ----------------------------------------------------------------------------- +mas_core_version: + 9.2.x-feature: 9.2.0-pre.stable_16717 # Updated + 9.1.x: 9.1.14 # Updated + 9.0.x: 9.0.23 # Updated + 8.10.x: "" # Not Supported + 8.11.x: "" # Not Supported +mas_manage_version: + 9.2.x-feature: 9.2.0-pre.stable_17064 # Updated + 9.1.x: 9.1.15 # Updated + 9.0.x: 9.0.24 # Updated + 8.10.x: "" # Not Supported + 8.11.x: "" # Not Supported + +# Extra Images for UDS +# ------------------------------------------------------------------------------ +uds_extras_version: 
1.5.0 + +# Extra Images for Mongo +# ------------------------------------------------------------------------------ +mongo_extras_version_default: 8.0.17 + +# Variables used to mirror additional mongo image versions +mongo_extras_version_4: 4.4.21 +mongo_extras_version_5: 5.0.23 +mongo_extras_version_6: 6.0.12 +mongo_extras_version_7: 7.0.12 +mongo_extras_version_8: 8.0.17 + +editorial: + whats_new: + - title: '**Security updates and bug fixes**' + details: + - IBM Maximo Application Suite Core Platform [v9.0.23](https://www.ibm.com/support/pages/node/7267464) and [v9.1.14](https://www.ibm.com/support/pages/node/7267465) + - IBM Maximo Manage [v9.0.24](https://www.ibm.com/support/pages/node/7266691) and [v9.1.15](https://www.ibm.com/support/pages/node/7266690) + - IBM Truststore Manager v1.7 + - IBM Suite License Service v3.12 + known_issues: + - title: A known issue exists in the January 29, 2026 release affecting HSE and Oil & Gas (9.0.23 / 9.1.64). Customers with HSE installed should avoid upgrading to the January release. Installation of HSE or Oil & Gas on Manage 9.0.x / 9.1.x should be deferred until the February 2026 patch. \ No newline at end of file diff --git a/src/mas/devops/data/catalogs/v9-260430-s390x.yaml b/src/mas/devops/data/catalogs/v9-260430-s390x.yaml new file mode 100644 index 00000000..736acd47 --- /dev/null +++ b/src/mas/devops/data/catalogs/v9-260430-s390x.yaml @@ -0,0 +1,61 @@ +--- +# Case bundle configuration for IBM Maximo Operator Catalog 260430 (Z) +# ----------------------------------------------------------------------------- +# In the future this won't be necessary as we'll be able to mirror from the +# catalog itself, but not everything in the catalog supports this yet (including MAS) +# so we need to use the CASE bundle mirror process still. 
+ +catalog_digest: sha256:bed372211414253afe67ed78a90c3891e002ab8ebddb007050438c94305e940c + +ocp_compatibility: +- "4.16" +- "4.17" +- "4.18" +- "4.19" +- "4.20" + +uds_version: 2.0.12 # Operator version 2.0.12 # sticking to 2.0.12 version # Please do Not Change +sls_version: 3.12.6 # Updated # Operator version 3.12.5 (https://github.ibm.com/maximoappsuite/ibm-sls/releases) +tsm_version: 1.7.5 # Updated # Operator version 1.7.4 (https://github.ibm.com/maximoappsuite/ibm-truststore-mgr/releases) +db2u_version: 7.3.1+20250821.161005.16793 # Operator version 110509.0.6 to find the version 7.3.1+20250821.161005.16793, search db2u-operator digest on repo (https://github.com/IBM/cloud-pak/tree/master/repo/case/ibm-db2uoperator) + +# Maximo Application Suite +# ----------------------------------------------------------------------------- +mas_core_version: + 9.2.x-feature: 9.2.0-pre.stable_16717 # Updated + 9.1.x: 9.1.14 # Updated + 9.0.x: 9.0.23 # Updated + 8.10.x: "" # Not Supported + 8.11.x: "" # Not Supported +mas_manage_version: + 9.2.x-feature: 9.2.0-pre.stable_17064 # Updated + 9.1.x: 9.1.15 # Updated + 9.0.x: 9.0.24 # Updated + 8.10.x: "" # Not Supported + 8.11.x: "" # Not Supported + +# Extra Images for UDS +# ------------------------------------------------------------------------------ +uds_extras_version: 1.5.0 + +# Extra Images for Mongo +# ------------------------------------------------------------------------------ +mongo_extras_version_default: 8.0.17 + +# Variables used to mirror additional mongo image versions +mongo_extras_version_4: 4.4.21 +mongo_extras_version_5: 5.0.23 +mongo_extras_version_6: 6.0.12 +mongo_extras_version_7: 7.0.12 +mongo_extras_version_8: 8.0.17 + +editorial: + whats_new: + - title: '**Security updates and bug fixes**' + details: + - IBM Maximo Application Suite Core Platform [v9.0.23](https://www.ibm.com/support/pages/node/7267464) and [v9.1.14](https://www.ibm.com/support/pages/node/7267465) + - IBM Maximo Manage 
[v9.0.24](https://www.ibm.com/support/pages/node/7266691) and [v9.1.15](https://www.ibm.com/support/pages/node/7266690) + - IBM Truststore Manager v1.7 + - IBM Suite License Service v3.12 + known_issues: + - title: A known issue exists in the January 29, 2026 release affecting HSE and Oil & Gas (9.0.23 / 9.1.64). Customers with HSE installed should avoid upgrading to the January release. Installation of HSE or Oil & Gas on Manage 9.0.x / 9.1.x should be deferred until the February 2026 patch. \ No newline at end of file diff --git a/src/mas/devops/data/ocp.yaml b/src/mas/devops/data/ocp.yaml index 173ba916..275eacad 100644 --- a/src/mas/devops/data/ocp.yaml +++ b/src/mas/devops/data/ocp.yaml @@ -59,5 +59,3 @@ ocp_versions: # - Extended Support (EUS): Additional 6 months available for purchase # - EUS is included with Premium subscriptions # - Not all versions have EUS available - -# Made with Bob diff --git a/src/mas/devops/mas/__init__.py b/src/mas/devops/mas/__init__.py index dfbecf04..333f109f 100644 --- a/src/mas/devops/mas/__init__.py +++ b/src/mas/devops/mas/__init__.py @@ -13,4 +13,5 @@ verifyMasInstance, getMasChannel, updateIBMEntitlementKey, + getMasPublicClusterIssuer, ) diff --git a/src/mas/devops/mas/suite.py b/src/mas/devops/mas/suite.py index 744b4e0d..01020be8 100644 --- a/src/mas/devops/mas/suite.py +++ b/src/mas/devops/mas/suite.py @@ -313,3 +313,47 @@ def updateIBMEntitlementKey(dynClient: DynamicClient, namespace: str, icrUsernam secret = secretsAPI.apply(body=secret, namespace=namespace) return secret + + +def getMasPublicClusterIssuer(dynClient: DynamicClient, instanceId: str) -> str | None: + """ + Retrieve the Public Cluster Issuer for a MAS instance. + + This function queries the Suite custom resource and attempts to retrieve the + certificate issuer name from spec.certificateIssuer.name. If the keys don't exist, + it returns the default issuer name. 
+ + Args: + dynClient (DynamicClient): OpenShift dynamic client for cluster API interactions. + instanceId (str): The MAS instance identifier to use. + + Returns: + str: The name of the cluster issuer used for the passed in MAS Instance. + Returns the default "mas-{instanceId}-core-public-issuer" if the suite + doesn't specify a custom issuer, or None if the suite is not found. + """ + try: + suitesAPI = dynClient.resources.get(api_version="core.mas.ibm.com/v1", kind="Suite") + suite = suitesAPI.get(name=instanceId, namespace=f"mas-{instanceId}-core") + + # Check if spec.certificateIssuer.name exists + if hasattr(suite, 'spec') and hasattr(suite.spec, 'certificateIssuer') and hasattr(suite.spec.certificateIssuer, 'name'): + issuerName = suite.spec.certificateIssuer.name + logger.debug(f"Found custom certificate issuer: {issuerName}") + return issuerName + + # Keys don't exist, return default + defaultIssuer = f"mas-{instanceId}-core-public-issuer" + logger.debug(f"No custom certificate issuer found, using default: {defaultIssuer}") + return defaultIssuer + + except NotFoundError: + logger.warning(f"Suite instance '{instanceId}' not found") + return None + except ResourceNotFoundError: + # The MAS Suite CRD has not even been installed in the cluster + logger.warning("MAS Suite CRD not found in the cluster") + return None + except UnauthorizedError as e: + logger.error(f"Error: Unable to retrieve MAS instance due to failed authorization: {e}") + return None diff --git a/src/mas/devops/ocp.py b/src/mas/devops/ocp.py index 9e14261b..efc66fa6 100644 --- a/src/mas/devops/ocp.py +++ b/src/mas/devops/ocp.py @@ -437,6 +437,64 @@ def crdExists(dynClient: DynamicClient, crdName: str) -> bool: return False +def getCR(dynClient: DynamicClient, cr_api_version: str, cr_kind: str, cr_name: str, namespace: str = None) -> dict: + """ + Get a Custom Resource + """ + + try: + crAPI = dynClient.resources.get(api_version=cr_api_version, kind=cr_kind) + if namespace: + cr = 
crAPI.get(name=cr_name, namespace=namespace)
+        else:
+            cr = crAPI.get(name=cr_name)
+        return cr
+    except NotFoundError:
+        logger.debug(f"CR {cr_name} of kind {cr_kind} does not exist in namespace {namespace}")
+    except Exception as e:
+        logger.debug(f"Error retrieving CR {cr_name} of kind {cr_kind} in namespace {namespace}: {e}")
+
+    return {}
+
+
+def getSecret(dynClient: DynamicClient, namespace: str, secret_name: str) -> dict:
+    """
+    Get a Secret
+    """
+    try:
+        secretAPI = dynClient.resources.get(api_version="v1", kind="Secret")
+        secret = secretAPI.get(name=secret_name, namespace=namespace)
+        logger.debug(f"Secret {secret_name} exists in namespace {namespace}")
+        return secret.to_dict()
+    except NotFoundError:
+        logger.debug(f"Secret {secret_name} does not exist in namespace {namespace}")
+        return {}
+
+
+def apply_resource(dynClient: DynamicClient, resource_yaml: str, namespace: str):
+    """
+    Apply a Kubernetes resource from its YAML definition.
+    If the resource does not exist it is created; if it already
+    exists, creation is skipped (the resource is NOT updated).
+    """
+    resource_dict = yaml.safe_load(resource_yaml)
+    kind = resource_dict['kind']
+    api_version = resource_dict['apiVersion']
+    metadata = resource_dict['metadata']
+    name = metadata['name']
+
+    # Resolve the API client outside the try so NotFoundError here is not
+    # misread as "instance missing" (which would hit an unbound 'resource')
+    resource = dynClient.resources.get(api_version=api_version, kind=kind)
+    try:
+        # Try to get the existing resource
+        resource.get(name=name, namespace=namespace)
+        # If found, skip creation
+        logger.debug(f"{kind} '{name}' already exists in namespace '{namespace}', skipping creation.")
+    except NotFoundError:
+        # If not found, create it
+        logger.debug(f"Creating new {kind} '{name}' in namespace '{namespace}'")
+        resource.create(body=resource_dict, namespace=namespace)
+
+
 def listInstances(dynClient: DynamicClient, apiVersion: str, kind: str) -> list:
     """
     Get a list of instances of a particular custom resource on the cluster.
diff --git a/src/mas/devops/olm.py b/src/mas/devops/olm.py index d9351031..63f5f0d4 100644 --- a/src/mas/devops/olm.py +++ b/src/mas/devops/olm.py @@ -11,6 +11,7 @@ import logging from time import sleep from os import path +from typing import Optional from kubernetes.dynamic.exceptions import NotFoundError from openshift.dynamic import DynamicClient @@ -117,13 +118,17 @@ def getSubscription(dynClient: DynamicClient, namespace: str, packageName: str): return subscriptions.items[0] -def applySubscription(dynClient: DynamicClient, namespace: str, packageName: str, packageChannel: str = None, catalogSource: str = None, catalogSourceNamespace: str = "openshift-marketplace", config: dict = None, installMode: str = "OwnNamespace"): +def applySubscription(dynClient: DynamicClient, namespace: str, packageName: str, packageChannel: Optional[str] = None, catalogSource: Optional[str] = None, catalogSourceNamespace: str = "openshift-marketplace", config: Optional[dict] = None, installMode: str = "OwnNamespace", installPlanApproval: Optional[str] = None, startingCSV: Optional[str] = None): """ Create or update an operator subscription in a namespace. Automatically detects default channel and catalog source from PackageManifest if not provided. Ensures an OperatorGroup exists before creating the subscription. + When installPlanApproval is set to "Manual" and a startingCSV is specified, this function will + automatically approve the InstallPlan for the first-time installation to move to that startingCSV. + Subsequent upgrades will still require manual approval. + Parameters: dynClient (DynamicClient): OpenShift Dynamic Client namespace (str): The namespace to create the subscription in @@ -133,14 +138,20 @@ def applySubscription(dynClient: DynamicClient, namespace: str, packageName: str catalogSourceNamespace (str, optional): Namespace of the catalog source. Defaults to "openshift-marketplace". config (dict, optional): Additional subscription configuration. Defaults to None. 
installMode (str, optional): Install mode for the OperatorGroup. Defaults to "OwnNamespace". + installPlanApproval (str, optional): Install plan approval mode ("Automatic" or "Manual"). Defaults to None. + startingCSV (str, optional): The specific CSV version to install. When combined with Manual approval, + the first InstallPlan to this CSV will be automatically approved. Required when installPlanApproval is "Manual". Defaults to None. Returns: Subscription: The created or updated subscription resource Raises: - OLMException: If the package is not available in any catalog + OLMException: If the package is not available in any catalog, or if installPlanApproval is "Manual" without a startingCSV NotFoundError: If resources cannot be created """ + # Validate that startingCSV is provided when installPlanApproval is Manual + if installPlanApproval == "Manual" and startingCSV is None: + raise OLMException("When installPlanApproval is 'Manual', a startingCSV must be provided") if catalogSourceNamespace is None: catalogSourceNamespace = "openshift-marketplace" @@ -190,7 +201,9 @@ def applySubscription(dynClient: DynamicClient, namespace: str, packageName: str package_name=packageName, package_channel=packageChannel, catalog_name=catalogSource, - catalog_namespace=catalogSourceNamespace + catalog_namespace=catalogSourceNamespace, + install_plan_approval=installPlanApproval, + starting_csv=startingCSV ) subscription = yaml.safe_load(renderedTemplate) subscriptionsAPI.apply(body=subscription, namespace=namespace) @@ -199,6 +212,7 @@ def applySubscription(dynClient: DynamicClient, namespace: str, packageName: str logger.debug(f"Waiting for {packageName}.{namespace} InstallPlans") installPlanAPI = dynClient.resources.get(api_version="operators.coreos.com/v1alpha1", kind="InstallPlan") + # Use label selector to get InstallPlans (standard approach) installPlanResources = installPlanAPI.get(label_selector=labelSelector, namespace=namespace) while len(installPlanResources.items) 
== 0: installPlanResources = installPlanAPI.get(label_selector=labelSelector, namespace=namespace) @@ -207,17 +221,93 @@ def applySubscription(dynClient: DynamicClient, namespace: str, packageName: str if len(installPlanResources.items) == 0: raise OLMException(f"Found 0 InstallPlans for {packageName}") elif len(installPlanResources.items) > 1: - logger.warning(f"More than 1 InstallPlan found for {packageName}") + logger.warning(f"More than 1 InstallPlan found for {packageName} using label selector") + + # Select the InstallPlan to use + installPlanResource = None + + # Special handling for Manual approval with startingCSV + if installPlanApproval == "Manual" and startingCSV is not None: + logger.debug(f"Manual approval with startingCSV {startingCSV} - checking if label selector returned correct InstallPlan") + + # Check if any of the InstallPlans from label selector match the startingCSV + for plan in installPlanResources.items: + csvNames = getattr(plan.spec, "clusterServiceVersionNames", []) + logger.debug(f"InstallPlan {plan.metadata.name} (from label selector) contains CSVs: {csvNames}") + if csvNames and startingCSV in csvNames: + installPlanResource = plan + logger.info(f"Found InstallPlan {plan.metadata.name} matching startingCSV {startingCSV} via label selector") + break + + # If no match found via label selector, search all InstallPlans owned by this subscription + if installPlanResource is None: + logger.warning(f"Label selector did not return InstallPlan matching startingCSV {startingCSV}") + logger.debug(f"Searching all InstallPlans in {namespace} owned by subscription {name}") + + allInstallPlans = installPlanAPI.get(namespace=namespace) + for plan in allInstallPlans.items: + # Check if this InstallPlan is owned by our subscription + owner_refs = getattr(plan.metadata, 'ownerReferences', []) + is_owned_by_subscription = any( + ref.kind == "Subscription" and ref.name == name + for ref in owner_refs + ) + + if is_owned_by_subscription: + csvNames = 
getattr(plan.spec, "clusterServiceVersionNames", []) + logger.debug(f"InstallPlan {plan.metadata.name} (owned by subscription) contains CSVs: {csvNames}") + if csvNames and startingCSV in csvNames: + installPlanResource = plan + logger.info(f"Found InstallPlan {plan.metadata.name} matching startingCSV {startingCSV} via subscription ownership") + break + + if installPlanResource is None: + logger.warning(f"No InstallPlan found matching startingCSV {startingCSV}, using first from label selector") + installPlanResource = installPlanResources.items[0] else: - installPlanName = installPlanResources.items[0].metadata.name - - # Wait for InstallPlan to complete - logger.debug(f"Waiting for InstallPlan {installPlanName}") - installPlanPhase = installPlanResources.items[0].status.phase - while installPlanPhase != "Complete": - installPlanResource = installPlanAPI.get(name=installPlanName, namespace=namespace) - installPlanPhase = installPlanResource.status.phase - sleep(30) + # Standard case: use first InstallPlan from label selector + installPlanResource = installPlanResources.items[0] + + installPlanName = installPlanResource.metadata.name + installPlanPhase = installPlanResource.status.phase + + # If the InstallPlan for our startingCSV is already Complete, we're done + if installPlanPhase == "Complete": + logger.info(f"InstallPlan {installPlanName} for {startingCSV} is already Complete") + else: + # Wait for InstallPlan to complete + logger.debug(f"Waiting for InstallPlan {installPlanName}") + + # Track if we've already approved this install plan + approved_manual_install = False + + while installPlanPhase != "Complete": + installPlanResource = installPlanAPI.get(name=installPlanName, namespace=namespace) + installPlanPhase = installPlanResource.status.phase + + # If InstallPlan requires approval and this is the first installation to startingCSV + if installPlanPhase == "RequiresApproval" and not approved_manual_install: + # Check if this is the first installation by 
verifying the CSV matches startingCSV + if startingCSV is not None: + csvName = getattr(installPlanResource.spec, "clusterServiceVersionNames", []) + if csvName and startingCSV in csvName: + logger.info(f"Approving InstallPlan {installPlanName} for first-time installation to {startingCSV}") + # Patch the InstallPlan to approve it + installPlanResource.spec.approved = True + installPlanAPI.patch( + body=installPlanResource, + name=installPlanName, + namespace=namespace, + content_type="application/merge-patch+json" + ) + approved_manual_install = True + logger.info(f"InstallPlan {installPlanName} approved successfully") + else: + logger.debug(f"InstallPlan CSV {csvName} does not match startingCSV {startingCSV}, waiting for manual approval") + else: + logger.debug(f"No startingCSV specified, InstallPlan {installPlanName} requires manual approval") + + sleep(30) # Wait for Subscription to complete logger.debug(f"Waiting for Subscription {name} in {namespace}") @@ -225,9 +315,20 @@ def applySubscription(dynClient: DynamicClient, namespace: str, packageName: str subscriptionResource = subscriptionsAPI.get(name=name, namespace=namespace) state = getattr(subscriptionResource.status, "state", None) + # When manual approval is used with startingCSV, the state will be "UpgradePending" + # after the initial installation completes (indicating newer versions are available + # but require manual approval). For automatic approval, the state will be "AtLatestKnown". 
if state == "AtLatestKnown": logger.debug(f"Subscription {name} in {namespace} reached state: {state}") return subscriptionResource + elif state == "UpgradePending" and installPlanApproval == "Manual" and startingCSV is not None: + # Verify the installed CSV matches the startingCSV + installedCSV = getattr(subscriptionResource.status, "installedCSV", None) + if installedCSV == startingCSV: + logger.debug(f"Subscription {name} in {namespace} reached state: {state} with installedCSV: {installedCSV}") + return subscriptionResource + else: + logger.debug(f"Subscription {name} in {namespace} state is {state} but installedCSV ({installedCSV}) does not match startingCSV ({startingCSV}), retrying...") logger.debug(f"Subscription {name} in {namespace} not ready yet (state = {state}), retrying...") sleep(30) diff --git a/src/mas/devops/restore.py b/src/mas/devops/restore.py new file mode 100644 index 00000000..d427e628 --- /dev/null +++ b/src/mas/devops/restore.py @@ -0,0 +1,122 @@ +# ***************************************************************************** +# Copyright (c) 2026 IBM Corporation and other Contributors. +# +# All rights reserved. 
This program and the accompanying materials +# are made available under the terms of the Eclipse Public License v1.0 +# which accompanies this distribution, and is available at +# http://www.eclipse.org/legal/epl-v10.html +# +# ***************************************************************************** +import logging +import yaml +from openshift.dynamic import DynamicClient +from openshift.dynamic.exceptions import NotFoundError + +logger = logging.getLogger(name=__name__) + + +def loadYamlFile(file_path: str): + """ + Load YAML content from a file + + Args: + file_path: Path to the YAML file + + Returns: + dict: Parsed YAML content or None if error + """ + try: + with open(file_path, 'r') as yaml_file: + content = yaml.safe_load(yaml_file) + return content + except Exception as e: + logger.error(f"Error reading YAML file {file_path}: {e}") + return None + + +def restoreResource(dynClient: DynamicClient, resource_data: dict, namespace=None, replace_resource=True) -> tuple: + """ + Restore a single Kubernetes resource from its YAML representation. + If the resource exists and replace_resource is True, it will be updated (replaced). + If the resource exists and replace_resource is False, it will be skipped. + If the resource doesn't exist, it will be created. 
+ + Args: + dynClient: Kubernetes dynamic client + resource_data: Dictionary containing the resource definition + namespace: Optional namespace override (uses resource's namespace if not provided) + replace_resource: If True, replace existing resources; if False, skip them (default: True) + + Returns: + tuple: (success: bool, resource_name: str, status_message: str or None) + - success: True if created, updated, or skipped; False if failed + - resource_name: Name of the resource + - status_message: None if created, "updated" if replaced, "skipped" if exists and not replaced, error message if failed + """ + try: + # Extract resource metadata + kind = resource_data.get('kind') + api_version = resource_data.get('apiVersion') + metadata = resource_data.get('metadata', {}) + resource_name = metadata.get('name') + resource_namespace = namespace or metadata.get('namespace') + + if not kind or not api_version or not resource_name: + error_msg = "Resource missing required fields (kind, apiVersion, or name)" + logger.error(error_msg) + return (False, resource_name or 'unknown', error_msg) + + # Get the resource API + resourceAPI = dynClient.resources.get(api_version=api_version, kind=kind) + + # Determine scope description for logging + scope_desc = f"namespace '{resource_namespace}'" if resource_namespace else "cluster-level" + + # Check if resource already exists + resource_exists = False + existing_resource = None + try: + if resource_namespace: + existing_resource = resourceAPI.get(name=resource_name, namespace=resource_namespace) + else: + existing_resource = resourceAPI.get(name=resource_name) + resource_exists = existing_resource is not None + except NotFoundError: + resource_exists = False + + # Apply the resource (create, update, or skip) + try: + if resource_exists: + if replace_resource: + # Resource exists - update it using strategic merge patch + logger.info(f"Patching existing {kind} '{resource_name}' in {scope_desc}") + + if resource_namespace: + 
resourceAPI.patch(body=resource_data, name=resource_name, namespace=resource_namespace, content_type='application/merge-patch+json') + else: + resourceAPI.patch(body=resource_data, name=resource_name, content_type='application/merge-patch+json') + logger.info(f"Successfully patched {kind} '{resource_name}' in {scope_desc}") + return (True, resource_name, "updated") + else: + # Resource exists but replace_resource is False - skip it + logger.info(f"{kind} '{resource_name}' already exists in {scope_desc}, skipping (replace_resource=False)") + return (True, resource_name, "skipped") + else: + # Resource doesn't exist - create it + logger.info(f"Creating {kind} '{resource_name}' in {scope_desc}") + if resource_namespace: + resourceAPI.create(body=resource_data, namespace=resource_namespace) + else: + resourceAPI.create(body=resource_data) + logger.info(f"Successfully created {kind} '{resource_name}' in {scope_desc}") + return (True, resource_name, None) + except Exception as e: + action = "update" if resource_exists else "create" + error_msg = f"Failed to {action} {kind} '{resource_name}': {e}" + logger.error(error_msg) + return (False, resource_name, error_msg) + + except Exception as e: + error_msg = f"Error restoring resource: {e}" + logger.error(error_msg) + return (False, resource_data.get('metadata', {}).get('name', 'unknown'), error_msg) diff --git a/src/mas/devops/sls.py b/src/mas/devops/sls.py index 4927ed9a..e204bc8d 100644 --- a/src/mas/devops/sls.py +++ b/src/mas/devops/sls.py @@ -77,3 +77,33 @@ def findSLSByNamespace(namespace: str, instances: list = None, dynClient: Dynami if namespace in instance['metadata']['namespace']: return True return False + + +def getSLSRegistrationDetails(namespace: str, name: str, dynClient: DynamicClient): + """ + Retrieve registration details like licenseId and registrationKey from the LicenseService instance's CR status + + This function gets the LicenseService instance of a specified name in a specified namespace. 
+ It retrieves licenseId and registrationKey keys in CR status and returns. + + Args: + namespace (str): The OpenShift namespace to search for SLS instances. + name (str): Name of SLS(LicenseService) instance. + dynClient (DynamicClient): OpenShift dynamic client for querying instances. + Required if instances is None. Defaults to None. + + Returns: + dict: dict with 'licenseId' and 'registrationKey' when details are found. + Empty if not found. + """ + try: + slsAPI = dynClient.resources.get(api_version="sls.ibm.com/v1", kind="LicenseService") + slsInstance = slsAPI.get(name=name, namespace=namespace) + if hasattr(slsInstance, 'status') and hasattr(slsInstance.status, 'licenseId') and hasattr(slsInstance.status, 'registrationKey'): + return dict( + registrationKey=slsInstance.status.registrationKey, + licenseId=slsInstance.status.licenseId + ) + except NotFoundError: + logger.info(f"No SLS '{name}' found in namespace {namespace}.'") + return dict() diff --git a/src/mas/devops/tekton.py b/src/mas/devops/tekton.py index 7ba7c164..96054c99 100644 --- a/src/mas/devops/tekton.py +++ b/src/mas/devops/tekton.py @@ -262,7 +262,7 @@ def updateTektonDefinitions(namespace: str, yamlFile: str) -> None: logger.debug(line) -def preparePipelinesNamespace(dynClient: DynamicClient, instanceId: str = None, storageClass: str = None, accessMode: str = None, waitForBind: bool = True, configureRBAC: bool = True): +def preparePipelinesNamespace(dynClient: DynamicClient, instanceId: str = None, storageClass: str = None, accessMode: str = None, waitForBind: bool = True, configureRBAC: bool = True, createConfigPVC: bool = True, createBackupPVC: bool = False, backupStorageSize: str = "20Gi"): """ Prepare a namespace for MAS pipelines by creating RBAC and PVC resources. @@ -276,6 +276,9 @@ def preparePipelinesNamespace(dynClient: DynamicClient, instanceId: str = None, accessMode (str, optional): Access mode for the PVC. Defaults to None. 
waitForBind (bool, optional): Whether to wait for PVC to bind. Defaults to True. configureRBAC (bool, optional): Whether to configure RBAC. Defaults to True. + createConfigPVC (bool, optional): Whether to create config PVC. Defaults to True. + createBackupPVC (bool, optional): Whether to create backup PVC. Defaults to False. + backupStorageSize (str, optional): Size of the backup PVC storage. Defaults to "20Gi". Returns: None @@ -304,32 +307,66 @@ def preparePipelinesNamespace(dynClient: DynamicClient, instanceId: str = None, # Create PVC (instanceId namespace only) if instanceId is not None: - template = env.get_template("pipelines-pvc.yml.j2") - renderedTemplate = template.render( - mas_instance_id=instanceId, - pipeline_storage_class=storageClass, - pipeline_storage_accessmode=accessMode - ) - logger.debug(renderedTemplate) - pvc = yaml.safe_load(renderedTemplate) pvcAPI = dynClient.resources.get(api_version="v1", kind="PersistentVolumeClaim") - pvcAPI.apply(body=pvc, namespace=namespace) + # Automatically determine if we should wait for PVC binding based on storage class volumeBindingMode = getStorageClassVolumeBindingMode(dynClient, storageClass) waitForBind = (volumeBindingMode == "Immediate") - if waitForBind: - logger.info(f"Storage class {storageClass} uses volumeBindingMode={volumeBindingMode}, waiting for PVC to bind") - pvcIsBound = False - while not pvcIsBound: - configPVC = pvcAPI.get(name="config-pvc", namespace=namespace) - if configPVC.status.phase == "Bound": - pvcIsBound = True - else: - logger.debug("Waiting 15s before checking status of PVC again") - logger.debug(configPVC) - sleep(15) - else: - logger.info(f"Storage class {storageClass} uses volumeBindingMode={volumeBindingMode}, skipping PVC bind wait") + + # Create config PVC if requested + if createConfigPVC: + logger.info("Creating config PVC") + template = env.get_template("pipelines-pvc.yml.j2") + renderedTemplate = template.render( + mas_instance_id=instanceId, + 
pipeline_storage_class=storageClass, + pipeline_storage_accessmode=accessMode + ) + logger.debug(renderedTemplate) + pvc = yaml.safe_load(renderedTemplate) + pvcAPI.apply(body=pvc, namespace=namespace) + + if waitForBind: + logger.info(f"Storage class {storageClass} uses volumeBindingMode={volumeBindingMode}, waiting for config PVC to bind") + pvcIsBound = False + while not pvcIsBound: + configPVC = pvcAPI.get(name="config-pvc", namespace=namespace) + if configPVC.status.phase == "Bound": + pvcIsBound = True + else: + logger.debug("Waiting 15s before checking status of config PVC again") + logger.debug(configPVC) + sleep(15) + else: + logger.info(f"Storage class {storageClass} uses volumeBindingMode={volumeBindingMode}, skipping config PVC bind wait") + + # Create backup PVC if requested + if createBackupPVC: + logger.info("Creating backup PVC") + backupTemplate = env.get_template("pipelines-backup-pvc.yml.j2") + renderedBackupTemplate = backupTemplate.render( + mas_instance_id=instanceId, + pipeline_storage_class=storageClass, + pipeline_storage_accessmode=accessMode, + backup_storage_size=backupStorageSize + ) + logger.debug(renderedBackupTemplate) + backupPvc = yaml.safe_load(renderedBackupTemplate) + pvcAPI.apply(body=backupPvc, namespace=namespace) + + if waitForBind: + logger.info(f"Storage class {storageClass} uses volumeBindingMode={volumeBindingMode}, waiting for backup PVC to bind") + backupPvcIsBound = False + while not backupPvcIsBound: + backupPVC = pvcAPI.get(name="backup-pvc", namespace=namespace) + if backupPVC.status.phase == "Bound": + backupPvcIsBound = True + else: + logger.debug("Waiting 15s before checking status of backup PVC again") + logger.debug(backupPVC) + sleep(15) + else: + logger.info(f"Storage class {storageClass} uses volumeBindingMode={volumeBindingMode}, skipping backup PVC bind wait") def prepareAiServicePipelinesNamespace(dynClient: DynamicClient, instanceId: str = None, storageClass: str = None, accessMode: str = None, 
waitForBind: bool = True, configureRBAC: bool = True): @@ -398,6 +435,46 @@ def prepareAiServicePipelinesNamespace(dynClient: DynamicClient, instanceId: str logger.info(f"Storage class {storageClass} uses volumeBindingMode={volumeBindingMode}, skipping PVC bind wait") +def prepareRestoreSecrets(dynClient: DynamicClient, namespace: str, restoreConfigs: dict = None): + """ + Create or update secret required for MAS Restore pipeline. + + Creates secret in the specified namespace: + - pipeline-restore-configs + + Parameters: + dynClient (DynamicClient): OpenShift Dynamic Client + namespace (str): The namespace to create secrets in + restoreConfigs (dict, optional): configuration data for restore. Defaults to None (empty secret). + + Returns: + None + + Raises: + NotFoundError: If secrets cannot be created + """ + secretsAPI = dynClient.resources.get(api_version="v1", kind="Secret") + + # 1. Secret/pipeline-restore-configs + # ------------------------------------------------------------------------- + # Must exist, but can be empty + try: + secretsAPI.delete(name="pipeline-restore-configs", namespace=namespace) + except NotFoundError: + pass + + if restoreConfigs is None: + restoreConfigs = { + "apiVersion": "v1", + "kind": "Secret", + "type": "Opaque", + "metadata": { + "name": "pipeline-restore-configs" + } + } + secretsAPI.create(body=restoreConfigs, namespace=namespace) + + def prepareInstallSecrets(dynClient: DynamicClient, namespace: str, slsLicenseFile: dict | None = None, additionalConfigs: dict | None = None, certs: dict | None = None, podTemplates: dict | None = None) -> None: """ Create or update secrets required for MAS installation pipelines. @@ -697,6 +774,52 @@ def launchUpdatePipeline(dynClient: DynamicClient, params: dict) -> str: return pipelineURL +def launchBackupPipeline(dynClient: DynamicClient, params: dict) -> str: + """ + Create a PipelineRun to backup a MAS instance. 
+ + Parameters: + dynClient (DynamicClient): OpenShift Dynamic Client + params (dict): Backup parameters including instance ID and configuration + + Returns: + str: URL to the PipelineRun in the OpenShift console + + Raises: + NotFoundError: If resources cannot be created + """ + instanceId = params["mas_instance_id"] + backupVersion = params["backup_version"] + namespace = f"mas-{instanceId}-pipelines" + timestamp = launchPipelineRun(dynClient, namespace, "pipelinerun-backup", params) + + pipelineURL = f"{getConsoleURL(dynClient)}/k8s/ns/mas-{instanceId}-pipelines/tekton.dev~v1beta1~PipelineRun/{instanceId}-backup-{backupVersion}-{timestamp}" + return pipelineURL + + +def launchRestorePipeline(dynClient: DynamicClient, params: dict) -> str: + """ + Create a PipelineRun to restore a MAS instance. + + Parameters: + dynClient (DynamicClient): OpenShift Dynamic Client + params (dict): Backup/Restore parameters including instance ID and configuration + + Returns: + str: URL to the PipelineRun in the OpenShift console + + Raises: + NotFoundError: If resources cannot be created + """ + instanceId = params["mas_instance_id"] + restoreVersion = params["restore_version"] + namespace = f"mas-{instanceId}-pipelines" + timestamp = launchPipelineRun(dynClient, namespace, "pipelinerun-restore", params) + + pipelineURL = f"{getConsoleURL(dynClient)}/k8s/ns/mas-{instanceId}-pipelines/tekton.dev~v1beta1~PipelineRun/{instanceId}-restore-{restoreVersion}-{timestamp}" + return pipelineURL + + def launchAiServiceUpgradePipeline(dynClient: DynamicClient, aiserviceInstanceId: str, skipPreCheck: bool = False, diff --git a/src/mas/devops/templates/pipelinerun-backup.yml.j2 b/src/mas/devops/templates/pipelinerun-backup.yml.j2 new file mode 100644 index 00000000..2d82b49f --- /dev/null +++ b/src/mas/devops/templates/pipelinerun-backup.yml.j2 @@ -0,0 +1,166 @@ +--- +apiVersion: tekton.dev/v1beta1 +kind: PipelineRun +metadata: + name: "{{ mas_instance_id }}-backup-{{ backup_version }}-{{ 
timestamp }}" + labels: + tekton.dev/pipeline: mas-backup +spec: + pipelineRef: + name: mas-backup + serviceAccountName: "{{ service_account_name | default('pipeline', True) }}" + timeouts: + pipeline: "0" + workspaces: + - name: shared-backups + persistentVolumeClaim: + claimName: backup-pvc + params: + # Common Parameters + - name: image_pull_policy + value: IfNotPresent + - name: mas_instance_id + value: "{{ mas_instance_id }}" + + {% if skip_pre_check is defined and skip_pre_check != "" %} + # Pipeline config + - name: skip_pre_check + value: "{{ skip_pre_check }}" + {% endif %} + + # Backup Configuration + - name: backup_version + value: "{{ backup_version }}" + {% if clean_backup is defined and clean_backup != "" %} + - name: clean_backup + value: "{{ clean_backup }}" + {% endif %} + + # Component Flags + {% if include_sls is defined and include_sls != "" %} + - name: include_sls + value: "{{ include_sls }}" + {% endif %} + + {% if include_mongo is defined and include_mongo != "" %} + - name: include_mongo + value: "{{ include_mongo }}" + {% endif %} + + # MongoDB Configuration + {% if mongodb_namespace is defined and mongodb_namespace != "" %} + - name: mongodb_namespace + value: "{{ mongodb_namespace }}" + {% endif %} + {% if mongodb_instance_name is defined and mongodb_instance_name != "" %} + - name: mongodb_instance_name + value: "{{ mongodb_instance_name }}" + {% endif %} + {% if mongodb_provider is defined and mongodb_provider != "" %} + - name: mongodb_provider + value: "{{ mongodb_provider }}" + {% endif %} + + # SLS Configuration + {% if sls_namespace is defined and sls_namespace != "" %} + - name: sls_namespace + value: "{{ sls_namespace }}" + {% endif %} + + # Certificate Manager Configuration + {% if cert_manager_provider is defined and cert_manager_provider != "" %} + - name: cert_manager_provider + value: "{{ cert_manager_provider }}" + {% endif %} + + # Development Build Support + {% if artifactory_username is defined and artifactory_username 
!= "" %} + - name: artifactory_username + value: "{{ artifactory_username }}" + {% endif %} + {% if artifactory_token is defined and artifactory_token != "" %} + - name: artifactory_token + value: "{{ artifactory_token }}" + {% endif %} + + # Upload Configuration + {% if upload_backup is defined and upload_backup != "" %} + - name: upload_backup + value: "{{ upload_backup }}" + {% endif %} + {% if aws_access_key_id is defined and aws_access_key_id != "" %} + - name: aws_access_key_id + value: "{{ aws_access_key_id }}" + {% endif %} + {% if aws_secret_access_key is defined and aws_secret_access_key != "" %} + - name: aws_secret_access_key + value: "{{ aws_secret_access_key }}" + {% endif %} + {% if s3_bucket_name is defined and s3_bucket_name != "" %} + - name: s3_bucket_name + value: "{{ s3_bucket_name }}" + {% endif %} + {% if s3_region is defined and s3_region != "" %} + - name: s3_region + value: "{{ s3_region }}" + {% endif %} + {% if s3_endpoint_url is defined and s3_endpoint_url != "" %} + - name: s3_endpoint_url + value: "{{ s3_endpoint_url }}" + {% endif %} + {% if artifactory_url is defined and artifactory_url != "" %} + - name: artifactory_url + value: "{{ artifactory_url }}" + {% endif %} + {% if artifactory_repository is defined and artifactory_repository != "" %} + - name: artifactory_repository + value: "{{ artifactory_repository }}" + {% endif %} + + # Manage Application Backup Configuration + {% if backup_manage_app is defined and backup_manage_app != "" %} + - name: backup_manage_app + value: "{{ backup_manage_app }}" + {% endif %} + {% if backup_manage_db is defined and backup_manage_db != "" %} + - name: backup_manage_db + value: "{{ backup_manage_db }}" + {% endif %} + {% if manage_workspace_id is defined and manage_workspace_id != "" %} + - name: manage_workspace_id + value: "{{ manage_workspace_id }}" + {% endif %} + + # Manage Db2 Backup Configuration + {% if manage_db2_namespace is defined and manage_db2_namespace != "" %} + - name: 
manage_db2_namespace + value: "{{ manage_db2_namespace }}" + {% endif %} + {% if manage_db2_instance_name is defined and manage_db2_instance_name != "" %} + - name: manage_db2_instance_name + value: "{{ manage_db2_instance_name }}" + {% endif %} + {% if manage_db2_backup_type is defined and manage_db2_backup_type != "" %} + - name: manage_db2_backup_type + value: "{{ manage_db2_backup_type }}" + {% endif %} + {% if manage_db2_backup_vendor is defined and manage_db2_backup_vendor != "" %} + - name: manage_db2_backup_vendor + value: "{{ manage_db2_backup_vendor }}" + {% endif %} + {% if backup_s3_endpoint is defined and backup_s3_endpoint != "" %} + - name: backup_s3_endpoint + value: "{{ backup_s3_endpoint }}" + {% endif %} + {% if backup_s3_bucket is defined and backup_s3_bucket != "" %} + - name: backup_s3_bucket + value: "{{ backup_s3_bucket }}" + {% endif %} + {% if backup_s3_access_key is defined and backup_s3_access_key != "" %} + - name: backup_s3_access_key + value: "{{ backup_s3_access_key }}" + {% endif %} + {% if backup_s3_secret_key is defined and backup_s3_secret_key != "" %} + - name: backup_s3_secret_key + value: "{{ backup_s3_secret_key }}" + {% endif %} \ No newline at end of file diff --git a/src/mas/devops/templates/pipelinerun-install.yml.j2 b/src/mas/devops/templates/pipelinerun-install.yml.j2 index 6651f68b..eebbbd5a 100644 --- a/src/mas/devops/templates/pipelinerun-install.yml.j2 +++ b/src/mas/devops/templates/pipelinerun-install.yml.j2 @@ -707,6 +707,10 @@ spec: # ------------------------------------------------------------------------- - name: mas_app_channel_monitor value: "{{ mas_app_channel_monitor }}" + {%- if mas_monitor_install_order is defined and mas_monitor_install_order != "" %} + - name: mas_monitor_install_order + value: "{{ mas_monitor_install_order }}" + {%- endif %} {%- endif %} {%- if mas_app_channel_optimizer is defined and mas_app_channel_optimizer != "" %} @@ -760,6 +764,10 @@ spec: - name: mas_ws_facilities_size value: 
"{{ mas_ws_facilities_size }}" {%- endif %} +{%- if mas_ws_facilities_app_om_upgrade_mode is defined and mas_ws_facilities_app_om_upgrade_mode != "" %} + - name: mas_ws_facilities_app_om_upgrade_mode + value: "{{ mas_ws_facilities_app_om_upgrade_mode }}" +{%- endif %} {%- if mas_ws_facilities_routes_timeout is defined and mas_ws_facilities_routes_timeout != "" %} - name: mas_ws_facilities_routes_timeout value: "{{ mas_ws_facilities_routes_timeout }}" diff --git a/src/mas/devops/templates/pipelinerun-restore.yml.j2 b/src/mas/devops/templates/pipelinerun-restore.yml.j2 new file mode 100644 index 00000000..9daeb43d --- /dev/null +++ b/src/mas/devops/templates/pipelinerun-restore.yml.j2 @@ -0,0 +1,238 @@ +--- +apiVersion: tekton.dev/v1beta1 +kind: PipelineRun +metadata: + name: "{{ mas_instance_id }}-restore-{{ restore_version }}-{{ timestamp }}" + labels: + tekton.dev/pipeline: mas-restore +spec: + pipelineRef: + name: mas-restore + serviceAccountName: "{{ service_account_name | default('pipeline', True) }}" + timeouts: + pipeline: "0" + workspaces: + - name: shared-backups + persistentVolumeClaim: + claimName: backup-pvc + - name: restore-configurations + secret: + secretName: pipeline-restore-configs + params: + # Common Parameters + - name: image_pull_policy + value: IfNotPresent + - name: mas_instance_id + value: "{{ mas_instance_id }}" + + {% if skip_pre_check is defined and skip_pre_check != "" %} + # Pipeline config + - name: skip_pre_check + value: "{{ skip_pre_check }}" + {% endif %} + + # Restore Configuration + - name: restore_version + value: "{{ restore_version }}" + {% if clean_backup is defined and clean_backup != "" %} + - name: clean_backup + value: "{{ clean_backup }}" + {% endif %} + + # Component Flags + {% if include_mongo is defined and include_mongo != "" %} + - name: include_mongo + value: "{{ include_mongo }}" + {% endif %} + {% if include_sls is defined and include_sls != "" %} + - name: include_sls + value: "{{ include_sls }}" + {% endif %} 
+ {% if include_dro is defined and include_dro != "" %} + - name: include_dro + value: "{{ include_dro }}" + {% endif %} + {% if include_grafana is defined and include_grafana != "" %} + - name: include_grafana + value: "{{ include_grafana }}" + {% endif %} + + # MongoDB Configuration + {% if mongodb_namespace is defined and mongodb_namespace != "" %} + - name: mongodb_namespace + value: "{{ mongodb_namespace }}" + {% endif %} + {% if mongodb_instance_name is defined and mongodb_instance_name != "" %} + - name: mongodb_instance_name + value: "{{ mongodb_instance_name }}" + {% endif %} + {% if mongodb_provider is defined and mongodb_provider != "" %} + - name: mongodb_provider + value: "{{ mongodb_provider }}" + {% endif %} + {% if override_mongodb_storageclass is defined and override_mongodb_storageclass != "" %} + - name: override_mongodb_storageclass + value: "{{ override_mongodb_storageclass }}" + {% endif %} + + # SLS Configuration + {% if sls_namespace is defined and sls_namespace != "" %} + - name: sls_namespace + value: "{{ sls_namespace }}" + {% endif %} + {% if sls_domain is defined and sls_domain != "" %} + - name: sls_domain + value: "{{ sls_domain }}" + {% endif %} + + # DRO Configuration + {% if dro_contact_email is defined and dro_contact_email != "" %} + - name: dro_contact_email + value: "{{ dro_contact_email }}" + {% endif %} + {% if dro_contact_firstname is defined and dro_contact_firstname != "" %} + - name: dro_contact_firstname + value: "{{ dro_contact_firstname }}" + {% endif %} + {% if dro_contact_lastname is defined and dro_contact_lastname != "" %} + - name: dro_contact_lastname + value: "{{ dro_contact_lastname }}" + {% endif %} + {% if ibm_entitlement_key is defined and ibm_entitlement_key != "" %} + - name: ibm_entitlement_key + value: "{{ ibm_entitlement_key }}" + {% endif %} + {% if dro_namespace is defined and dro_namespace != "" %} + - name: dro_namespace + value: "{{ dro_namespace }}" + {% endif %} + {% if dro_storage_class is 
defined and dro_storage_class != "" %} + - name: dro_storage_class + value: "{{ dro_storage_class }}" + {% endif %} + + # Suite Restore Configuration + {% if include_slscfg_from_backup is defined and include_slscfg_from_backup != "" %} + - name: include_slscfg_from_backup + value: "{{ include_slscfg_from_backup }}" + {% endif %} + {% if sls_url_on_restore is defined and sls_url_on_restore != "" %} + - name: sls_url_on_restore + value: "{{ sls_url_on_restore }}" + {% endif %} + {% if sls_cfg_file is defined and sls_cfg_file != "" %} + - name: sls_cfg_file + value: "{{ sls_cfg_file }}" + {% endif %} + {% if include_drocfg_from_backup is defined and include_drocfg_from_backup != "" %} + - name: include_drocfg_from_backup + value: "{{ include_drocfg_from_backup }}" + {% endif %} + {% if dro_url_on_restore is defined and dro_url_on_restore != "" %} + - name: dro_url_on_restore + value: "{{ dro_url_on_restore }}" + {% endif %} + {% if dro_cfg_file is defined and dro_cfg_file != "" %} + - name: dro_cfg_file + value: "{{ dro_cfg_file }}" + {% endif %} + {% if mas_domain_on_restore is defined and mas_domain_on_restore != "" %} + - name: mas_domain_on_restore + value: "{{ mas_domain_on_restore }}" + {% endif %} + + # Certificate Manager Configuration + {% if cert_manager_provider is defined and cert_manager_provider != "" %} + - name: cert_manager_provider + value: "{{ cert_manager_provider }}" + {% endif %} + + # Development Build Support + {% if artifactory_username is defined and artifactory_username != "" %} + - name: artifactory_username + value: "{{ artifactory_username }}" + {% endif %} + {% if artifactory_token is defined and artifactory_token != "" %} + - name: artifactory_token + value: "{{ artifactory_token }}" + {% endif %} + + # Download Configuration + {% if backup_archive_name is defined and backup_archive_name != "" %} + - name: backup_archive_name + value: "{{ backup_archive_name }}" + {% endif %} + {% if download_backup is defined and download_backup != "" 
%} + - name: download_backup + value: "{{ download_backup }}" + {% endif %} + {% if aws_access_key_id is defined and aws_access_key_id != "" %} + - name: aws_access_key_id + value: "{{ aws_access_key_id }}" + {% endif %} + {% if aws_secret_access_key is defined and aws_secret_access_key != "" %} + - name: aws_secret_access_key + value: "{{ aws_secret_access_key }}" + {% endif %} + {% if s3_bucket_name is defined and s3_bucket_name != "" %} + - name: s3_bucket_name + value: "{{ s3_bucket_name }}" + {% endif %} + {% if s3_region is defined and s3_region != "" %} + - name: s3_region + value: "{{ s3_region }}" + {% endif %} + {% if s3_endpoint_url is defined and s3_endpoint_url != "" %} + - name: s3_endpoint_url + value: "{{ s3_endpoint_url }}" + {% endif %} + {% if artifactory_url is defined and artifactory_url != "" %} + - name: artifactory_url + value: "{{ artifactory_url }}" + {% endif %} + {% if artifactory_repository is defined and artifactory_repository != "" %} + - name: artifactory_repository + value: "{{ artifactory_repository }}" + {% endif %} + + # Manage Application Restore Configuration + {% if restore_manage_app is defined and restore_manage_app != "" %} + - name: restore_manage_app + value: "{{ restore_manage_app }}" + {% endif %} + {% if restore_manage_db is defined and restore_manage_db != "" %} + - name: restore_manage_db + value: "{{ restore_manage_db }}" + {% endif %} + # Manage App Storage class + {% if manage_app_override_storageclass is defined and manage_app_override_storageclass != "" %} + - name: manage_app_override_storageclass + value: "{{ manage_app_override_storageclass }}" + {% endif %} + {% if manage_app_storage_class_rwx is defined and manage_app_storage_class_rwx != "" %} + - name: manage_app_storage_class_rwx + value: "{{ manage_app_storage_class_rwx }}" + {% endif %} + {% if manage_app_storage_class_rwo is defined and manage_app_storage_class_rwo != "" %} + - name: manage_app_storage_class_rwo + value: "{{ 
manage_app_storage_class_rwo }}" + {% endif %} + + # Manage Db2 Restore Configuration + {% if manage_db2_restore_vendor is defined and manage_db2_restore_vendor != "" %} + - name: manage_db2_restore_vendor + value: "{{ manage_db2_restore_vendor }}" + {% endif %} + + {% if manage_db_override_storageclass is defined and manage_db_override_storageclass != "" %} + - name: manage_db_override_storageclass + value: "{{ manage_db_override_storageclass }}" + {% endif %} + {% if manage_db_storage_class_rwo is defined and manage_db_storage_class_rwo != "" %} + - name: manage_db_storage_class_rwo + value: "{{ manage_db_storage_class_rwo }}" + {% endif %} + {% if manage_db_storage_class_rwx is defined and manage_db_storage_class_rwx != "" %} + - name: manage_db_storage_class_rwx + value: "{{ manage_db_storage_class_rwx }}" + {% endif %} diff --git a/src/mas/devops/templates/pipelinerun-upgrade.yml.j2 b/src/mas/devops/templates/pipelinerun-upgrade.yml.j2 index 47741f39..ba5c71ea 100644 --- a/src/mas/devops/templates/pipelinerun-upgrade.yml.j2 +++ b/src/mas/devops/templates/pipelinerun-upgrade.yml.j2 @@ -119,6 +119,13 @@ spec: value: "{{ db2_data_storage_size }}" {%- endif %} {%- endif %} +{%- if mas_monitor_install_order is defined and mas_monitor_install_order != "" %} + + # Monitor Install Order + # ------------------------------------------------------------------------- + - name: mas_monitor_install_order + value: "{{ mas_monitor_install_order }}" +{%- endif %} {%- if mas_app_channel_manage is defined and mas_app_channel_manage != "" %} # Manage Application diff --git a/src/mas/devops/templates/pipelines-backup-pvc.yml.j2 b/src/mas/devops/templates/pipelines-backup-pvc.yml.j2 new file mode 100644 index 00000000..655b9b4d --- /dev/null +++ b/src/mas/devops/templates/pipelines-backup-pvc.yml.j2 @@ -0,0 +1,15 @@ +--- +# PVC for backup storage +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: backup-pvc + namespace: mas-{{ mas_instance_id }}-pipelines +spec: + 
accessModes: + - {{ pipeline_storage_accessmode }} + volumeMode: Filesystem + storageClassName: {{ pipeline_storage_class }} + resources: + requests: + storage: {{ backup_storage_size }} \ No newline at end of file diff --git a/src/mas/devops/templates/subscription.yml.j2 b/src/mas/devops/templates/subscription.yml.j2 index e9439ece..23bb3a66 100644 --- a/src/mas/devops/templates/subscription.yml.j2 +++ b/src/mas/devops/templates/subscription.yml.j2 @@ -9,6 +9,12 @@ spec: channel: {{ package_channel }} source: {{ catalog_name }} sourceNamespace: {{ catalog_namespace }} + {%- if install_plan_approval is not none %} + installPlanApproval: {{ install_plan_approval }} + {%- endif %} + {%- if starting_csv is not none %} + startingCSV: {{ starting_csv }} + {%- endif %} {%- if subscription_config is not none %} config: {{ subscription_config }} {%- endif %} diff --git a/test/src/test_backup.py b/test/src/test_backup.py new file mode 100644 index 00000000..5ae37452 --- /dev/null +++ b/test/src/test_backup.py @@ -0,0 +1,900 @@ +# ***************************************************************************** +# Copyright (c) 2026 IBM Corporation and other Contributors. +# +# All rights reserved. 
This program and the accompanying materials +# are made available under the terms of the Eclipse Public License v1.0 +# which accompanies this distribution, and is available at +# http://www.eclipse.org/legal/epl-v10.html +# +# ***************************************************************************** + +import yaml +from unittest.mock import MagicMock, Mock +from openshift.dynamic.exceptions import NotFoundError + +from mas.devops.backup import createBackupDirectories, copyContentsToYamlFile, filterResourceData, backupResources, extract_secrets_from_dict + + +class TestCreateBackupDirectories: + """Tests for createBackupDirectories function""" + + def test_create_single_directory(self, tmp_path): + """Test creating a single backup directory""" + test_dir = tmp_path / "backup1" + result = createBackupDirectories([str(test_dir)]) + + assert result is True + assert test_dir.exists() + assert test_dir.is_dir() + + def test_create_multiple_directories(self, tmp_path): + """Test creating multiple backup directories""" + test_dirs = [ + tmp_path / "backup1", + tmp_path / "backup2", + tmp_path / "backup3" + ] + paths = [str(d) for d in test_dirs] + result = createBackupDirectories(paths) + + assert result is True + for test_dir in test_dirs: + assert test_dir.exists() + assert test_dir.is_dir() + + def test_create_nested_directories(self, tmp_path): + """Test creating nested backup directories""" + nested_dir = tmp_path / "level1" / "level2" / "level3" + result = createBackupDirectories([str(nested_dir)]) + + assert result is True + assert nested_dir.exists() + assert nested_dir.is_dir() + + def test_create_existing_directory(self, tmp_path): + """Test creating a directory that already exists""" + test_dir = tmp_path / "existing" + test_dir.mkdir() + + result = createBackupDirectories([str(test_dir)]) + + assert result is True + assert test_dir.exists() + + def test_create_empty_list(self): + """Test with empty list of paths""" + result = createBackupDirectories([]) + 
assert result is True + + def test_create_directory_permission_error(self, mocker): + """Test handling of permission errors""" + mock_makedirs = mocker.patch('os.makedirs', side_effect=PermissionError("Permission denied")) + + result = createBackupDirectories(["/invalid/path"]) + + assert result is False + mock_makedirs.assert_called_once() + + def test_create_directory_os_error(self, mocker): + """Test handling of OS errors""" + mocker.patch('os.makedirs', side_effect=OSError("OS error")) + + result = createBackupDirectories(["/some/path"]) + + assert result is False + + +class TestCopyContentsToYamlFile: + """Tests for copyContentsToYamlFile function""" + + def test_write_simple_dict(self, tmp_path): + """Test writing a simple dictionary to YAML file""" + test_file = tmp_path / "test.yaml" + content = {"key1": "value1", "key2": "value2"} + + result = copyContentsToYamlFile(str(test_file), content) + + assert result is True + assert test_file.exists() + + with open(test_file, 'r') as f: + loaded_content = yaml.safe_load(f) + assert loaded_content == content + + def test_write_nested_dict(self, tmp_path): + """Test writing a nested dictionary to YAML file""" + test_file = tmp_path / "nested.yaml" + content = { + "level1": { + "level2": { + "level3": "value" + } + }, + "list": [1, 2, 3] + } + + result = copyContentsToYamlFile(str(test_file), content) + + assert result is True + with open(test_file, 'r') as f: + loaded_content = yaml.safe_load(f) + assert loaded_content == content + + def test_write_empty_dict(self, tmp_path): + """Test writing an empty dictionary""" + test_file = tmp_path / "empty.yaml" + content = {} + + result = copyContentsToYamlFile(str(test_file), content) + + assert result is True + with open(test_file, 'r') as f: + loaded_content = yaml.safe_load(f) + assert loaded_content == content + + def test_overwrite_existing_file(self, tmp_path): + """Test overwriting an existing YAML file""" + test_file = tmp_path / "overwrite.yaml" + old_content = 
{"old": "data"} + new_content = {"new": "data"} + + # Write initial content + with open(test_file, 'w') as f: + yaml.dump(old_content, f) + + # Overwrite with new content + result = copyContentsToYamlFile(str(test_file), new_content) + + assert result is True + with open(test_file, 'r') as f: + loaded_content = yaml.safe_load(f) + assert loaded_content == new_content + assert loaded_content != old_content + + def test_write_to_nonexistent_directory(self, tmp_path): + """Test writing to a file in a non-existent directory""" + test_file = tmp_path / "nonexistent" / "test.yaml" + content = {"key": "value"} + + result = copyContentsToYamlFile(str(test_file), content) + + assert result is False + + def test_write_permission_error(self, mocker): + """Test handling of permission errors during write""" + mocker.patch('builtins.open', side_effect=PermissionError("Permission denied")) + + result = copyContentsToYamlFile("/invalid/path.yaml", {"key": "value"}) + + assert result is False + + def test_write_with_special_characters(self, tmp_path): + """Test writing content with special characters""" + test_file = tmp_path / "special.yaml" + content = { + "special": "value with\nnewlines", + "unicode": "café ☕", + "quotes": "value with 'quotes' and \"double quotes\"" + } + + result = copyContentsToYamlFile(str(test_file), content) + + assert result is True + with open(test_file, 'r') as f: + loaded_content = yaml.safe_load(f) + assert loaded_content == content + + +class TestFilterResourceData: + """Tests for filterResourceData function""" + + def test_filter_all_metadata_fields(self): + """Test filtering all metadata fields that should be removed""" + data = { + "apiVersion": "v1", + "kind": "Resource", + "metadata": { + "name": "test-resource", + "namespace": "test-namespace", + "annotations": {"key": "value"}, + "creationTimestamp": "2026-01-01T00:00:00Z", + "generation": 1, + "resourceVersion": "12345", + "selfLink": "/api/v1/namespaces/test/resources/test-resource", + 
"uid": "abc-123-def", + "managedFields": [{"manager": "test"}] + }, + "spec": {"replicas": 3} + } + + result = filterResourceData(data) + + assert "name" in result["metadata"] + assert "namespace" in result["metadata"] + assert "annotations" not in result["metadata"] + assert "creationTimestamp" not in result["metadata"] + assert "generation" not in result["metadata"] + assert "resourceVersion" not in result["metadata"] + assert "selfLink" not in result["metadata"] + assert "uid" not in result["metadata"] + assert "managedFields" not in result["metadata"] + assert "spec" in result + + def test_filter_status_field(self): + """Test that status field is removed""" + data = { + "metadata": {"name": "test"}, + "spec": {"replicas": 3}, + "status": { + "phase": "Running", + "conditions": [] + } + } + + result = filterResourceData(data) + + assert "status" not in result + assert "spec" in result + assert "metadata" in result + + def test_filter_partial_metadata(self): + """Test filtering when only some metadata fields are present""" + data = { + "metadata": { + "name": "test-resource", + "uid": "abc-123", + "labels": {"app": "test"} + } + } + + result = filterResourceData(data) + + assert "name" in result["metadata"] + assert "labels" in result["metadata"] + assert "uid" not in result["metadata"] + + def test_filter_no_metadata(self): + """Test filtering when metadata field is not present""" + data = { + "apiVersion": "v1", + "kind": "Resource", + "spec": {"replicas": 3} + } + + result = filterResourceData(data) + + assert "metadata" not in result + assert "spec" in result + assert "apiVersion" in result + + def test_filter_empty_metadata(self): + """Test filtering with empty metadata""" + data = { + "metadata": {}, + "spec": {"replicas": 3} + } + + result = filterResourceData(data) + + assert "metadata" in result + assert result["metadata"] == {} + + def test_filter_preserves_other_fields(self): + """Test that other fields are preserved""" + data = { + "apiVersion": "v1", 
+ "kind": "ConfigMap", + "metadata": { + "name": "test-config", + "uid": "should-be-removed" + }, + "data": { + "key1": "value1", + "key2": "value2" + } + } + + result = filterResourceData(data) + + assert result["apiVersion"] == "v1" + assert result["kind"] == "ConfigMap" + assert result["data"] == {"key1": "value1", "key2": "value2"} + assert "uid" not in result["metadata"] + + def test_filter_shallow_copy_behavior(self): + """Test that filterResourceData uses shallow copy (modifies nested dicts)""" + data = { + "metadata": { + "name": "test", + "uid": "abc-123" + }, + "status": {"phase": "Running"} + } + + result = filterResourceData(data) + + # Due to shallow copy, nested metadata dict is modified in original + # but top-level status is not (it's deleted from copy only) + assert "uid" not in data["metadata"] # Modified due to shallow copy + assert "status" in data # Not modified (top-level key) + + # Result should not have uid and status + assert "uid" not in result["metadata"] + assert "status" not in result + + def test_filter_complex_resource(self): + """Test filtering a complex Kubernetes resource""" + data = { + "apiVersion": "apps/v1", + "kind": "Deployment", + "metadata": { + "name": "my-deployment", + "namespace": "default", + "labels": {"app": "myapp"}, + "annotations": {"deployment.kubernetes.io/revision": "1"}, + "creationTimestamp": "2026-01-01T00:00:00Z", + "generation": 5, + "resourceVersion": "98765", + "uid": "xyz-789", + "managedFields": [{"manager": "kubectl"}] + }, + "spec": { + "replicas": 3, + "selector": {"matchLabels": {"app": "myapp"}} + }, + "status": { + "availableReplicas": 3, + "readyReplicas": 3 + } + } + + result = filterResourceData(data) + + # Check preserved fields + assert result["apiVersion"] == "apps/v1" + assert result["kind"] == "Deployment" + assert result["metadata"]["name"] == "my-deployment" + assert result["metadata"]["namespace"] == "default" + assert result["metadata"]["labels"] == {"app": "myapp"} + assert 
result["spec"]["replicas"] == 3 + + # Check removed fields + assert "annotations" not in result["metadata"] + assert "creationTimestamp" not in result["metadata"] + assert "generation" not in result["metadata"] + assert "resourceVersion" not in result["metadata"] + assert "uid" not in result["metadata"] + assert "managedFields" not in result["metadata"] + assert "status" not in result + + def test_filter_empty_dict(self): + """Test filtering an empty dictionary""" + data = {} + result = filterResourceData(data) + assert result == {} + + +class TestExtractSecretsFromDict: + """Tests for extract_secrets_from_dict function""" + + def test_extract_single_secret(self): + """Test extracting a single secret name""" + data = { + "spec": { + "secretName": "my-secret" + } + } + result = extract_secrets_from_dict(data) + assert result == {"my-secret"} + + def test_extract_multiple_secrets(self): + """Test extracting multiple secret names""" + data = { + "spec": { + "database": { + "secretName": "db-secret" + }, + "auth": { + "secretName": "auth-secret" + } + } + } + result = extract_secrets_from_dict(data) + assert result == {"db-secret", "auth-secret"} + + def test_extract_secrets_from_list(self): + """Test extracting secrets from list structures""" + data = { + "spec": { + "volumes": [ + {"secretName": "secret1"}, + {"secretName": "secret2"}, + {"configMap": "not-a-secret"} + ] + } + } + result = extract_secrets_from_dict(data) + assert result == {"secret1", "secret2"} + + def test_extract_nested_secrets(self): + """Test extracting deeply nested secrets""" + data = { + "level1": { + "level2": { + "level3": { + "secretName": "deep-secret" + } + } + } + } + result = extract_secrets_from_dict(data) + assert result == {"deep-secret"} + + def test_no_secrets_found(self): + """Test when no secrets are present""" + data = { + "spec": { + "replicas": 3, + "image": "myapp:latest" + } + } + result = extract_secrets_from_dict(data) + assert result == set() + + def 
test_empty_dict(self): + """Test with empty dictionary""" + result = extract_secrets_from_dict({}) + assert result == set() + + def test_ignore_empty_secret_name(self): + """Test that empty string secret names are ignored""" + data = { + "spec": { + "secretName": "", + "other": { + "secretName": "valid-secret" + } + } + } + result = extract_secrets_from_dict(data) + assert result == {"valid-secret"} + + def test_ignore_non_string_secret_name(self): + """Test that non-string secret names are ignored""" + data = { + "spec": { + "secretName": 123, + "other": { + "secretName": "valid-secret" + } + } + } + result = extract_secrets_from_dict(data) + assert result == {"valid-secret"} + + def test_duplicate_secrets(self): + """Test that duplicate secret names are deduplicated""" + data = { + "spec": { + "volume1": {"secretName": "shared-secret"}, + "volume2": {"secretName": "shared-secret"}, + "volume3": {"secretName": "unique-secret"} + } + } + result = extract_secrets_from_dict(data) + assert result == {"shared-secret", "unique-secret"} + + +class TestBackupResources: + """Tests for backupResources function""" + + def test_backup_single_namespaced_resource(self, tmp_path, mocker): + """Test backing up a single namespaced resource by name""" + backup_path = str(tmp_path / "backup") + + # Mock resource data + mock_resource = { + "metadata": { + "name": "test-resource", + "namespace": "test-ns", + "uid": "abc-123" + }, + "spec": {"replicas": 3} + } + + # Create mock resource object with to_dict method + mock_resource_obj = MagicMock() + mock_resource_obj.__getitem__ = lambda self, key: mock_resource[key] + mock_resource_obj.to_dict.return_value = mock_resource + + # Mock the dynamic client + mock_client = MagicMock() + mock_api = MagicMock() + mock_api.get.return_value = mock_resource_obj + mock_client.resources.get.return_value = mock_api + + # Mock the helper functions + mocker.patch('mas.devops.backup.copyContentsToYamlFile', return_value=True) + + result = 
backupResources( + mock_client, + kind="ConfigMap", + api_version="v1", + backup_path=backup_path, + namespace="test-ns", + name="test-resource" + ) + + backed_up, not_found, failed, secrets = result + assert backed_up == 1 + assert not_found == 0 + assert failed == 0 + assert secrets == set() + + def test_backup_multiple_namespaced_resources(self, tmp_path, mocker): + """Test backing up all resources of a kind in a namespace""" + backup_path = str(tmp_path / "backup") + + # Mock multiple resources + mock_resources = [ + { + "metadata": {"name": "resource1", "namespace": "test-ns"}, + "spec": {"data": "value1"} + }, + { + "metadata": {"name": "resource2", "namespace": "test-ns"}, + "spec": {"data": "value2"} + } + ] + + # Create mock resource objects + mock_resource_objs = [] + for res in mock_resources: + mock_obj = MagicMock() + mock_obj.__getitem__ = lambda self, key, r=res: r[key] + mock_obj.to_dict.return_value = res + mock_resource_objs.append(mock_obj) + + # Mock the response with items + mock_response = MagicMock() + mock_response.items = mock_resource_objs + + # Mock the dynamic client + mock_client = MagicMock() + mock_api = MagicMock() + mock_api.get.return_value = mock_response + mock_client.resources.get.return_value = mock_api + + mocker.patch('mas.devops.backup.copyContentsToYamlFile', return_value=True) + + result = backupResources( + mock_client, + kind="ConfigMap", + api_version="v1", + backup_path=backup_path, + namespace="test-ns" + ) + + backed_up, not_found, failed, secrets = result + assert backed_up == 2 + assert not_found == 0 + assert failed == 0 + + def test_backup_cluster_level_resource(self, tmp_path, mocker): + """Test backing up cluster-level resources (no namespace)""" + backup_path = str(tmp_path / "backup") + + mock_resource = { + "metadata": {"name": "cluster-role"}, + "rules": [{"apiGroups": ["*"], "resources": ["*"], "verbs": ["*"]}] + } + + mock_resource_obj = MagicMock() + mock_resource_obj.__getitem__ = lambda self, key: 
mock_resource[key] + mock_resource_obj.to_dict.return_value = mock_resource + + mock_client = MagicMock() + mock_api = MagicMock() + mock_api.get.return_value = mock_resource_obj + mock_client.resources.get.return_value = mock_api + + mocker.patch('mas.devops.backup.copyContentsToYamlFile', return_value=True) + + result = backupResources( + mock_client, + kind="ClusterRole", + api_version="rbac.authorization.k8s.io/v1", + backup_path=backup_path, + name="cluster-role" + ) + + backed_up, not_found, failed, secrets = result + assert backed_up == 1 + assert not_found == 0 + assert failed == 0 + + def test_backup_with_label_selector(self, tmp_path, mocker): + """Test backing up resources with label selectors""" + backup_path = str(tmp_path / "backup") + + mock_resource = { + "metadata": { + "name": "labeled-resource", + "namespace": "test-ns", + "labels": {"app": "myapp", "env": "prod"} + }, + "spec": {} + } + + mock_resource_obj = MagicMock() + mock_resource_obj.__getitem__ = lambda self, key: mock_resource[key] + mock_resource_obj.to_dict.return_value = mock_resource + + mock_response = MagicMock() + mock_response.items = [mock_resource_obj] + + mock_client = MagicMock() + mock_api = MagicMock() + mock_api.get.return_value = mock_response + mock_client.resources.get.return_value = mock_api + + mocker.patch('mas.devops.backup.copyContentsToYamlFile', return_value=True) + + result = backupResources( + mock_client, + kind="ConfigMap", + api_version="v1", + backup_path=backup_path, + namespace="test-ns", + labels=["app=myapp", "env=prod"] + ) + + backed_up, not_found, failed, secrets = result + assert backed_up == 1 + assert not_found == 0 + assert failed == 0 + + # Verify label selector was passed correctly + mock_api.get.assert_called_once_with(namespace="test-ns", label_selector="app=myapp,env=prod") + + def test_backup_resource_not_found_by_name(self, mocker): + """Test handling when a specific named resource is not found""" + mock_client = MagicMock() + mock_api = 
MagicMock() + mock_api.get.side_effect = NotFoundError(Mock()) + mock_client.resources.get.return_value = mock_api + + result = backupResources( + mock_client, + kind="ConfigMap", + api_version="v1", + backup_path="/tmp/backup", + namespace="test-ns", + name="nonexistent" + ) + + backed_up, not_found, failed, secrets = result + assert backed_up == 0 + assert not_found == 1 + assert failed == 0 + assert secrets == set() + + def test_backup_no_resources_found(self, mocker): + """Test when no resources of the kind exist""" + mock_response = MagicMock() + mock_response.items = [] + + mock_client = MagicMock() + mock_api = MagicMock() + mock_api.get.return_value = mock_response + mock_client.resources.get.return_value = mock_api + + result = backupResources( + mock_client, + kind="ConfigMap", + api_version="v1", + backup_path="/tmp/backup", + namespace="test-ns" + ) + + backed_up, not_found, failed, secrets = result + assert backed_up == 0 + assert not_found == 0 + assert failed == 0 + + def test_backup_discovers_secrets(self, tmp_path, mocker): + """Test that secrets are discovered from resource specs""" + backup_path = str(tmp_path / "backup") + + mock_resource = { + "metadata": {"name": "app-deployment", "namespace": "test-ns"}, + "spec": { + "template": { + "spec": { + "volumes": [ + {"secretName": "db-credentials"}, + {"secretName": "api-key"} + ] + } + } + } + } + + mock_resource_obj = MagicMock() + mock_resource_obj.__getitem__ = lambda self, key: mock_resource[key] + mock_resource_obj.to_dict.return_value = mock_resource + + mock_client = MagicMock() + mock_api = MagicMock() + mock_api.get.return_value = mock_resource_obj + mock_client.resources.get.return_value = mock_api + + mocker.patch('mas.devops.backup.copyContentsToYamlFile', return_value=True) + + result = backupResources( + mock_client, + kind="Deployment", + api_version="apps/v1", + backup_path=backup_path, + namespace="test-ns", + name="app-deployment" + ) + + backed_up, not_found, failed, secrets = 
result + assert backed_up == 1 + assert secrets == {"db-credentials", "api-key"} + + def test_backup_secret_does_not_discover_itself(self, tmp_path, mocker): + """Test that backing up Secrets doesn't try to discover secrets""" + backup_path = str(tmp_path / "backup") + + mock_resource = { + "metadata": {"name": "my-secret", "namespace": "test-ns"}, + "data": {"password": "encoded-value"} + } + + mock_resource_obj = MagicMock() + mock_resource_obj.__getitem__ = lambda self, key: mock_resource[key] + mock_resource_obj.to_dict.return_value = mock_resource + + mock_client = MagicMock() + mock_api = MagicMock() + mock_api.get.return_value = mock_resource_obj + mock_client.resources.get.return_value = mock_api + + mocker.patch('mas.devops.backup.copyContentsToYamlFile', return_value=True) + + result = backupResources( + mock_client, + kind="Secret", + api_version="v1", + backup_path=backup_path, + namespace="test-ns", + name="my-secret" + ) + + backed_up, not_found, failed, secrets = result + assert backed_up == 1 + assert secrets == set() # Should not discover secrets from Secret resources + + def test_backup_write_failure(self, tmp_path, mocker): + """Test handling when writing backup file fails""" + backup_path = str(tmp_path / "backup") + + mock_resource = { + "metadata": {"name": "test-resource", "namespace": "test-ns"}, + "spec": {} + } + + mock_resource_obj = MagicMock() + mock_resource_obj.__getitem__ = lambda self, key: mock_resource[key] + mock_resource_obj.to_dict.return_value = mock_resource + + mock_client = MagicMock() + mock_api = MagicMock() + mock_api.get.return_value = mock_resource_obj + mock_client.resources.get.return_value = mock_api + + # Mock copyContentsToYamlFile to fail + mocker.patch('mas.devops.backup.copyContentsToYamlFile', return_value=False) + + result = backupResources( + mock_client, + kind="ConfigMap", + api_version="v1", + backup_path=backup_path, + namespace="test-ns", + name="test-resource" + ) + + backed_up, not_found, failed, 
secrets = result + assert backed_up == 0 + assert not_found == 0 + assert failed == 1 + + def test_backup_api_exception(self, mocker): + """Test handling of general API exceptions""" + mock_client = MagicMock() + mock_client.resources.get.side_effect = Exception("API error") + + result = backupResources( + mock_client, + kind="ConfigMap", + api_version="v1", + backup_path="/tmp/backup", + namespace="test-ns" + ) + + backed_up, not_found, failed, secrets = result + assert backed_up == 0 + assert not_found == 0 + assert failed == 1 + + def test_backup_mixed_success_and_failure(self, tmp_path, mocker): + """Test backing up multiple resources with mixed success/failure""" + backup_path = str(tmp_path / "backup") + + mock_resources = [ + { + "metadata": {"name": "resource1", "namespace": "test-ns"}, + "spec": {} + }, + { + "metadata": {"name": "resource2", "namespace": "test-ns"}, + "spec": {} + }, + { + "metadata": {"name": "resource3", "namespace": "test-ns"}, + "spec": {} + } + ] + + mock_resource_objs = [] + for res in mock_resources: + mock_obj = MagicMock() + mock_obj.__getitem__ = lambda self, key, r=res: r[key] + mock_obj.to_dict.return_value = res + mock_resource_objs.append(mock_obj) + + mock_response = MagicMock() + mock_response.items = mock_resource_objs + + mock_client = MagicMock() + mock_api = MagicMock() + mock_api.get.return_value = mock_response + mock_client.resources.get.return_value = mock_api + + # Mock copyContentsToYamlFile to succeed for first two, fail for third + mock_copy = mocker.patch('mas.devops.backup.copyContentsToYamlFile') + mock_copy.side_effect = [True, True, False] + + result = backupResources( + mock_client, + kind="ConfigMap", + api_version="v1", + backup_path=backup_path, + namespace="test-ns" + ) + + backed_up, not_found, failed, secrets = result + assert backed_up == 2 + assert not_found == 0 + assert failed == 1 + + def test_backup_resource_kind_not_found(self, mocker): + """Test when the resource kind itself is not found in 
the API""" + mock_client = MagicMock() + mock_client.resources.get.side_effect = NotFoundError(Mock()) + + result = backupResources( + mock_client, + kind="NonExistentKind", + api_version="v1", + backup_path="/tmp/backup", + namespace="test-ns" + ) + + backed_up, not_found, failed, secrets = result + assert backed_up == 0 + assert not_found == 0 + assert failed == 0 diff --git a/test/src/test_data.py b/test/src/test_data.py index cbf0f152..62402b89 100644 --- a/test/src/test_data.py +++ b/test/src/test_data.py @@ -32,7 +32,7 @@ def test_list_catalogs(): def test_get_newest_catalog_tag(): catalogTag = getNewestCatalogTag("amd64") # Reminder: update this test when adding a new catalog each month! - assert True or catalogTag == "v9-260129-amd64" + assert catalogTag == "v9-260430-amd64" def test_get_newest_catalog_tag_fail(): diff --git a/test/src/test_mas.py b/test/src/test_mas.py index 10583926..3902a1e7 100644 --- a/test/src/test_mas.py +++ b/test/src/test_mas.py @@ -64,6 +64,12 @@ def test_is_airgap_install(): assert mas.isAirgapInstall(dynClient, checkICSP=False) is False +def test_get_mas_public_cluster_issuer(): + # Test with non-existent instance - should return None + issuer = mas.getMasPublicClusterIssuer(dynClient, "doesnotexist") + assert issuer is None + + # def test_is_app_ready(): # mas.waitForAppReady(dynClient, "fvtcpd", "iot") # mas.waitForAppReady(dynClient, "fvtcpd", "iot", "masdev") diff --git a/test/src/test_olm.py b/test/src/test_olm.py index 0e148388..354a9e2a 100644 --- a/test/src/test_olm.py +++ b/test/src/test_olm.py @@ -79,3 +79,89 @@ def test_crud_with_config(): olm.deleteSubscription(dynClient, namespace, "ibm-sls") olm.deleteSubscription(dynClient, namespace, "ibm-truststore-mgr") ocp.deleteNamespace(dynClient, namespace) + + +def test_crud_with_manual_approval(): + """ + Test that when installPlanApproval is Manual without a startingCSV, + an OLMException is raised. 
+ """ + namespace = "cli-fvt-3" + + # This should raise an OLMException because Manual approval requires a startingCSV + try: + olm.applySubscription( + dynClient, + namespace, + "ibm-sls", + packageChannel="3.x", + installPlanApproval="Manual" + ) + # If we get here, the test should fail + assert False, "Expected OLMException to be raised when installPlanApproval is Manual without startingCSV" + except olm.OLMException as e: + # Verify the error message is correct + assert "When installPlanApproval is 'Manual', a startingCSV must be provided" in str(e) + # Test passed - exception was raised as expected + + +def test_crud_with_starting_csv(): + namespace = "cli-fvt-4" + # Note: This test assumes a specific CSV version exists in the catalog + # You may need to adjust the version based on what's available + subscription = olm.applySubscription( + dynClient, + namespace, + "ibm-sls", + packageChannel="3.x", + startingCSV="ibm-sls.v3.8.0" + ) + assert subscription.metadata.name == "ibm-sls" + assert subscription.metadata.namespace == namespace + assert subscription.spec.startingCSV == "ibm-sls.v3.8.0" + + # When we install the ibm-sls subscription OLM will automatically create the ibm-truststore-mgr + # subscription, but when we delete the subscription, OLM will not automatically remove the latter + olm.deleteSubscription(dynClient, namespace, "ibm-sls") + olm.deleteSubscription(dynClient, namespace, "ibm-truststore-mgr") + ocp.deleteNamespace(dynClient, namespace) + + +def test_crud_with_manual_approval_and_starting_csv(): + """ + Test that when installPlanApproval is Manual and startingCSV is specified, + the first InstallPlan is automatically approved to reach the startingCSV. + This allows the initial installation to proceed without manual intervention. + + Note: With Manual approval and startingCSV, the subscription state will be + "UpgradePending" after installation (indicating newer versions are available + but require manual approval), not "AtLatestKnown". 
+ """ + namespace = "cli-fvt-5" + subscription = olm.applySubscription( + dynClient, + namespace, + "ibm-sls", + packageChannel="3.x", + installPlanApproval="Manual", + startingCSV="ibm-sls.v3.8.0" + ) + assert subscription.metadata.name == "ibm-sls" + assert subscription.metadata.namespace == namespace + assert subscription.spec.installPlanApproval == "Manual" + assert subscription.spec.startingCSV == "ibm-sls.v3.8.0" + + # Verify that the subscription reached UpgradePending state + # This confirms the InstallPlan was automatically approved and installed + # UpgradePending indicates newer versions are available but require manual approval + assert subscription.status.state == "UpgradePending" + + # Verify the installed CSV matches the startingCSV + installedCSV = subscription.status.installedCSV + assert installedCSV == "ibm-sls.v3.8.0" + + # When we install the ibm-sls subscription OLM will automatically create the ibm-truststore-mgr + # subscription, but when we delete the subscription, OLM will not automatically remove the latter + olm.deleteSubscription(dynClient, namespace, "ibm-sls") + olm.deleteSubscription(dynClient, namespace, "ibm-truststore-mgr") + ocp.deleteNamespace(dynClient, namespace) diff --git a/test/src/test_olm_installplan_selection.py b/test/src/test_olm_installplan_selection.py new file mode 100644 index 00000000..1101742c --- /dev/null +++ b/test/src/test_olm_installplan_selection.py @@ -0,0 +1,535 @@ +# ***************************************************************************** +# Copyright (c) 2024 IBM Corporation and other Contributors. +# +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Eclipse Public License v1.0 +# which accompanies this distribution, and is available at +# http://www.eclipse.org/legal/epl-v10.html +# +# ***************************************************************************** + +""" +Unit tests for InstallPlan selection logic in applySubscription. 
+ +These tests mock up the resources rather than use real resources as the +OLM processing on a cluster is quite slow and it would take time to +exercise all these scenerios +""" + +import pytest +from unittest.mock import Mock, patch +from mas.devops import olm + + +class MockResource: + """Mock Kubernetes resource object""" + + def __init__(self, name, labels=None, owner_refs=None, csv_names=None, phase="Complete"): + self.metadata = Mock() + self.metadata.name = name + self.metadata.labels = labels or {} + self.metadata.ownerReferences = owner_refs or [] + + self.spec = Mock() + self.spec.clusterServiceVersionNames = csv_names or [] + + self.status = Mock() + self.status.phase = phase + + +class MockResourceList: + """Mock Kubernetes resource list""" + + def __init__(self, items): + self.items = items + # Add status attribute to match real ResourceList behavior + self.status = Mock() + + +@pytest.fixture +def mock_dyn_client(): + """Create a mock DynamicClient""" + client = Mock() + return client + + +@pytest.fixture +def mock_env(): + """Create a mock Jinja2 Environment""" + env = Mock() + template = Mock() + template.render.return_value = """ +apiVersion: operators.coreos.com/v1alpha1 +kind: Subscription +metadata: + name: test-operator + namespace: test-namespace +spec: + channel: stable + name: test-operator + source: test-catalog + sourceNamespace: openshift-marketplace +""" + env.get_template.return_value = template + return env + + +def create_owner_ref(kind, name): + """Helper to create an owner reference""" + ref = Mock() + ref.kind = kind + ref.name = name + return ref + + +@patch('mas.devops.olm.createNamespace') +@patch('mas.devops.olm.ensureOperatorGroupExists') +@patch('mas.devops.olm.getPackageManifest') +@patch('mas.devops.olm.sleep') +def test_automatic_approval_uses_label_selector_only( + mock_sleep, mock_get_manifest, mock_ensure_og, mock_create_ns, mock_dyn_client, mock_env +): + """ + Test that automatic approval uses only the label selector 
(standard behavior). + Should NOT query all InstallPlans. + """ + # Setup mocks + mock_get_manifest.return_value = Mock( + status=Mock(defaultChannel="stable", catalogSource="test-catalog") + ) + + # Mock subscription API + sub_api = Mock() + + # Mock subscription resource with proper status + mock_subscription = Mock() + mock_subscription.metadata.name = "test-operator" + mock_subscription.status = Mock() + mock_subscription.status.state = "AtLatestKnown" + mock_subscription.status.installedCSV = "test-operator.v1.0.0" + + # First call returns empty list (no existing subscription), subsequent calls return the subscription + sub_api.get.side_effect = [ + MockResourceList([]), # Initial check for existing subscription + mock_subscription # Subsequent calls when waiting for subscription to complete + ] + sub_api.apply.return_value = Mock() + + # Mock InstallPlan API - label selector returns one InstallPlan + install_plan_api = Mock() + install_plan = MockResource( + name="install-plan-1", + labels={"operators.coreos.com/test-operator.test-namespace": ""}, + csv_names=["test-operator.v1.0.0"], + phase="Complete" + ) + install_plan_api.get.return_value = MockResourceList([install_plan]) + + # Setup resource API + mock_dyn_client.resources.get.side_effect = lambda **kwargs: { + ("operators.coreos.com/v1alpha1", "Subscription"): sub_api, + ("operators.coreos.com/v1alpha1", "InstallPlan"): install_plan_api, + }.get((kwargs.get("api_version"), kwargs.get("kind"))) + + with patch('mas.devops.olm.Environment', return_value=mock_env): + # Call applySubscription with Automatic approval (default) + olm.applySubscription( + mock_dyn_client, + "test-namespace", + "test-operator", + packageChannel="stable", + installPlanApproval="Automatic" + ) + + # Verify InstallPlan API was called with label selector only + install_plan_calls = [c for c in install_plan_api.get.call_args_list] + + # Should only use label selector, never query all InstallPlans + for call_args in 
install_plan_calls: + args, kwargs = call_args + assert 'label_selector' in kwargs, "Should use label selector" + assert kwargs.get('namespace') == "test-namespace" + + +@patch('mas.devops.olm.createNamespace') +@patch('mas.devops.olm.ensureOperatorGroupExists') +@patch('mas.devops.olm.getPackageManifest') +@patch('mas.devops.olm.sleep') +def test_manual_approval_without_starting_csv_uses_label_selector_only( + mock_sleep, mock_get_manifest, mock_ensure_og, mock_create_ns, mock_dyn_client, mock_env +): + """ + Test that Manual approval with startingCSV uses only label selector when it finds a match. + Should NOT query all InstallPlans. + """ + # Setup mocks + mock_get_manifest.return_value = Mock( + status=Mock(defaultChannel="stable", catalogSource="test-catalog") + ) + + # Mock subscription API + sub_api = Mock() + + # Mock subscription resource with proper status + mock_subscription = Mock() + mock_subscription.metadata.name = "test-operator" + mock_subscription.status = Mock() + mock_subscription.status.state = "UpgradePending" + mock_subscription.status.installedCSV = "test-operator.v1.0.0" + + # First call returns empty list (no existing subscription), subsequent calls return the subscription + sub_api.get.side_effect = [ + MockResourceList([]), # Initial check for existing subscription + mock_subscription # Subsequent calls when waiting for subscription to complete + ] + sub_api.apply.return_value = Mock() + + # Mock InstallPlan API + install_plan_api = Mock() + install_plan_requires_approval = MockResource( + name="install-plan-1", + labels={"operators.coreos.com/test-operator.test-namespace": ""}, + csv_names=["test-operator.v1.0.0"], + phase="RequiresApproval" + ) + install_plan_complete = MockResource( + name="install-plan-1", + labels={"operators.coreos.com/test-operator.test-namespace": ""}, + csv_names=["test-operator.v1.0.0"], + phase="Complete" + ) + + # Simulate phase transition: first returns RequiresApproval, then Complete after patch + def 
get_install_plan_side_effect(*args, **kwargs): + if 'name' in kwargs: + # After patch is called, return Complete phase + return install_plan_complete + else: + # Initial query with label selector + return MockResourceList([install_plan_requires_approval]) + + install_plan_api.get.side_effect = get_install_plan_side_effect + install_plan_api.patch.return_value = Mock() + + mock_dyn_client.resources.get.side_effect = lambda **kwargs: { + ("operators.coreos.com/v1alpha1", "Subscription"): sub_api, + ("operators.coreos.com/v1alpha1", "InstallPlan"): install_plan_api, + }.get((kwargs.get("api_version"), kwargs.get("kind"))) + + with patch('mas.devops.olm.Environment', return_value=mock_env): + # Call with Manual approval with startingCSV + olm.applySubscription( + mock_dyn_client, + "test-namespace", + "test-operator", + packageChannel="stable", + installPlanApproval="Manual", + startingCSV="test-operator.v1.0.0" + ) + + # Verify only label selector was used + install_plan_calls = [c for c in install_plan_api.get.call_args_list] + for call_args in install_plan_calls: + args, kwargs = call_args + # Should only use label selector or get by name, never query all + assert 'label_selector' in kwargs or 'name' in kwargs + + +@patch('mas.devops.olm.createNamespace') +@patch('mas.devops.olm.ensureOperatorGroupExists') +@patch('mas.devops.olm.getPackageManifest') +@patch('mas.devops.olm.sleep') +def test_manual_approval_with_starting_csv_label_selector_finds_match( + mock_sleep, mock_get_manifest, mock_ensure_og, mock_create_ns, mock_dyn_client, mock_env +): + """ + Test Manual approval with startingCSV when label selector returns the correct InstallPlan. + Should use label selector result, NOT query all InstallPlans. 
+ """ + # Setup mocks + mock_get_manifest.return_value = Mock( + status=Mock(defaultChannel="stable", catalogSource="test-catalog") + ) + + # Mock subscription API + sub_api = Mock() + + # Mock subscription resource with proper status + mock_subscription = Mock() + mock_subscription.metadata.name = "test-operator" + mock_subscription.status = Mock() + mock_subscription.status.state = "UpgradePending" + mock_subscription.status.installedCSV = "test-operator.v1.0.0" + + # First call returns empty list (no existing subscription), subsequent calls return the subscription + sub_api.get.side_effect = [ + MockResourceList([]), # Initial check for existing subscription + mock_subscription # Subsequent calls when waiting for subscription to complete + ] + sub_api.apply.return_value = Mock() + + # Mock InstallPlan API - label selector returns matching InstallPlan + install_plan_api = Mock() + install_plan_requires_approval = MockResource( + name="install-plan-1", + labels={"operators.coreos.com/test-operator.test-namespace": ""}, + csv_names=["test-operator.v1.0.0"], # Matches startingCSV + phase="RequiresApproval" + ) + install_plan_complete = MockResource( + name="install-plan-1", + labels={"operators.coreos.com/test-operator.test-namespace": ""}, + csv_names=["test-operator.v1.0.0"], + phase="Complete" + ) + + # Simulate phase transition + def get_install_plan_side_effect(*args, **kwargs): + if 'name' in kwargs: + return install_plan_complete + else: + return MockResourceList([install_plan_requires_approval]) + + install_plan_api.get.side_effect = get_install_plan_side_effect + install_plan_api.patch.return_value = Mock() + + mock_dyn_client.resources.get.side_effect = lambda **kwargs: { + ("operators.coreos.com/v1alpha1", "Subscription"): sub_api, + ("operators.coreos.com/v1alpha1", "InstallPlan"): install_plan_api, + }.get((kwargs.get("api_version"), kwargs.get("kind"))) + + with patch('mas.devops.olm.Environment', return_value=mock_env): + olm.applySubscription( + 
mock_dyn_client, + "test-namespace", + "test-operator", + packageChannel="stable", + installPlanApproval="Manual", + startingCSV="test-operator.v1.0.0" + ) + + # Verify we found the InstallPlan via label selector + # Should NOT have queried all InstallPlans (no call without label_selector) + install_plan_calls = [c for c in install_plan_api.get.call_args_list] + + # Check that we never queried without a label_selector or name + for call_args in install_plan_calls: + args, kwargs = call_args + assert 'label_selector' in kwargs or 'name' in kwargs, \ + "Should only use label selector or get by name, not query all" + + +@patch('mas.devops.olm.createNamespace') +@patch('mas.devops.olm.ensureOperatorGroupExists') +@patch('mas.devops.olm.getPackageManifest') +@patch('mas.devops.olm.sleep') +def test_manual_approval_with_starting_csv_fallback_to_ownership_search( + mock_sleep, mock_get_manifest, mock_ensure_og, mock_create_ns, mock_dyn_client, mock_env +): + """ + Test Manual approval with startingCSV when label selector misses the completed InstallPlan. + Should fall back to querying all InstallPlans and filter by subscription ownership. + This is the key scenario the bug fix addresses. 
+ """ + # Setup mocks + mock_get_manifest.return_value = Mock( + status=Mock(defaultChannel="stable", catalogSource="test-catalog") + ) + + # Mock subscription API + sub_api = Mock() + + # Mock subscription resource with proper status + mock_subscription = Mock() + mock_subscription.metadata.name = "test-operator" + mock_subscription.status = Mock() + mock_subscription.status.state = "UpgradePending" + mock_subscription.status.installedCSV = "test-operator.v1.0.0" + + # First call returns empty list (no existing subscription), subsequent calls return the subscription + sub_api.get.side_effect = [ + MockResourceList([]), # Initial check for existing subscription + mock_subscription # Subsequent calls when waiting for subscription to complete + ] + sub_api.apply.return_value = Mock() + + # Mock InstallPlan API + install_plan_api = Mock() + + # Label selector returns only the in-progress InstallPlan (wrong one) + wrong_install_plan = MockResource( + name="install-plan-2", + labels={"operators.coreos.com/test-operator.test-namespace": ""}, + csv_names=["test-operator.v2.0.0"], # Does NOT match startingCSV + phase="Installing" + ) + + # All InstallPlans query returns both (including the completed one) + correct_install_plan_requires_approval = MockResource( + name="install-plan-1", + labels={}, # Label might be removed from completed plan + owner_refs=[create_owner_ref("Subscription", "test-operator")], + csv_names=["test-operator.v1.0.0"], # Matches startingCSV + phase="RequiresApproval" + ) + + correct_install_plan_complete = MockResource( + name="install-plan-1", + labels={}, + owner_refs=[create_owner_ref("Subscription", "test-operator")], + csv_names=["test-operator.v1.0.0"], + phase="Complete" + ) + + # Setup the mock to return different results based on parameters + def get_side_effect(*args, **kwargs): + if 'label_selector' in kwargs: + # Label selector query - returns only wrong InstallPlan + return MockResourceList([wrong_install_plan]) + elif 'name' in 
kwargs: + # Get by name - return the correct one (Complete after patch) + return correct_install_plan_complete + else: + # Query all InstallPlans - returns both + return MockResourceList([correct_install_plan_requires_approval, wrong_install_plan]) + + install_plan_api.get.side_effect = get_side_effect + install_plan_api.patch.return_value = Mock() + + mock_dyn_client.resources.get.side_effect = lambda **kwargs: { + ("operators.coreos.com/v1alpha1", "Subscription"): sub_api, + ("operators.coreos.com/v1alpha1", "InstallPlan"): install_plan_api, + }.get((kwargs.get("api_version"), kwargs.get("kind"))) + + with patch('mas.devops.olm.Environment', return_value=mock_env): + olm.applySubscription( + mock_dyn_client, + "test-namespace", + "test-operator", + packageChannel="stable", + installPlanApproval="Manual", + startingCSV="test-operator.v1.0.0" + ) + + # Verify the fallback behavior occurred + install_plan_calls = [c for c in install_plan_api.get.call_args_list] + + # Should have: + # 1. Called with label_selector (initial query) + # 2. Called without label_selector (fallback to query all) + has_label_selector_call = any( + 'label_selector' in call_args[1] + for call_args in install_plan_calls + ) + has_all_query_call = any( + 'label_selector' not in call_args[1] and 'name' not in call_args[1] + for call_args in install_plan_calls + ) + + assert has_label_selector_call, "Should have tried label selector first" + assert has_all_query_call, "Should have fallen back to querying all InstallPlans" + + +@patch('mas.devops.olm.createNamespace') +@patch('mas.devops.olm.ensureOperatorGroupExists') +@patch('mas.devops.olm.getPackageManifest') +@patch('mas.devops.olm.sleep') +def test_manual_approval_filters_by_subscription_ownership( + mock_sleep, mock_get_manifest, mock_ensure_og, mock_create_ns, mock_dyn_client, mock_env +): + """ + Test that when querying all InstallPlans, we correctly filter by subscription ownership. 
+ This ensures we don't accidentally use InstallPlans from other subscriptions. + """ + # Setup mocks + mock_get_manifest.return_value = Mock( + status=Mock(defaultChannel="stable", catalogSource="test-catalog") + ) + + # Mock subscription API + sub_api = Mock() + + # Mock subscription resource with proper status + mock_subscription = Mock() + mock_subscription.metadata.name = "test-operator" + mock_subscription.status = Mock() + mock_subscription.status.state = "UpgradePending" + mock_subscription.status.installedCSV = "test-operator.v1.0.0" + + # First call returns empty list (no existing subscription), subsequent calls return the subscription + sub_api.get.side_effect = [ + MockResourceList([]), # Initial check for existing subscription + mock_subscription # Subsequent calls when waiting for subscription to complete + ] + sub_api.apply.return_value = Mock() + + # Mock InstallPlan API + install_plan_api = Mock() + + # Label selector returns wrong InstallPlan + wrong_install_plan = MockResource( + name="install-plan-wrong", + labels={"operators.coreos.com/test-operator.test-namespace": ""}, + csv_names=["test-operator.v2.0.0"], + phase="Installing" + ) + + # All InstallPlans includes: + # 1. Correct one owned by our subscription + correct_install_plan_requires_approval = MockResource( + name="install-plan-correct", + labels={}, + owner_refs=[create_owner_ref("Subscription", "test-operator")], + csv_names=["test-operator.v1.0.0"], + phase="RequiresApproval" + ) + + correct_install_plan_complete = MockResource( + name="install-plan-correct", + labels={}, + owner_refs=[create_owner_ref("Subscription", "test-operator")], + csv_names=["test-operator.v1.0.0"], + phase="Complete" + ) + + # 2. 
One owned by a different subscription (should be ignored) + other_subscription_plan = MockResource( + name="install-plan-other", + labels={}, + owner_refs=[create_owner_ref("Subscription", "other-operator")], + csv_names=["test-operator.v1.0.0"], # Same CSV but wrong subscription + phase="Complete" + ) + + def get_side_effect(*args, **kwargs): + if 'label_selector' in kwargs: + return MockResourceList([wrong_install_plan]) + elif 'name' in kwargs: + return correct_install_plan_complete + else: + # Return all three InstallPlans + return MockResourceList([correct_install_plan_requires_approval, other_subscription_plan, wrong_install_plan]) + + install_plan_api.get.side_effect = get_side_effect + install_plan_api.patch.return_value = Mock() + + mock_dyn_client.resources.get.side_effect = lambda **kwargs: { + ("operators.coreos.com/v1alpha1", "Subscription"): sub_api, + ("operators.coreos.com/v1alpha1", "InstallPlan"): install_plan_api, + }.get((kwargs.get("api_version"), kwargs.get("kind"))) + + with patch('mas.devops.olm.Environment', return_value=mock_env): + olm.applySubscription( + mock_dyn_client, + "test-namespace", + "test-operator", + packageChannel="stable", + installPlanApproval="Manual", + startingCSV="test-operator.v1.0.0" + ) + + # The test passes if it completes without error + # The code should have found the correct InstallPlan by checking ownership + # and ignored the one from the other subscription + +# Made with Bob diff --git a/test/src/test_restore.py b/test/src/test_restore.py new file mode 100644 index 00000000..3666b6b4 --- /dev/null +++ b/test/src/test_restore.py @@ -0,0 +1,397 @@ +# ***************************************************************************** +# Copyright (c) 2026 IBM Corporation and other Contributors. +# +# All rights reserved. 
This program and the accompanying materials +# are made available under the terms of the Eclipse Public License v1.0 +# which accompanies this distribution, and is available at +# http://www.eclipse.org/legal/epl-v10.html +# +# ***************************************************************************** + +import yaml +from unittest.mock import MagicMock, Mock +from openshift.dynamic.exceptions import NotFoundError + +from mas.devops.restore import loadYamlFile, restoreResource + + +class TestLoadYamlFile: + """Tests for loadYamlFile function""" + + def test_load_valid_yaml_file(self, tmp_path): + """Test loading a valid YAML file""" + yaml_content = { + 'apiVersion': 'v1', + 'kind': 'ConfigMap', + 'metadata': { + 'name': 'test-config', + 'namespace': 'test-ns' + } + } + + yaml_file = tmp_path / "test.yaml" + with open(yaml_file, 'w') as f: + yaml.dump(yaml_content, f) + + result = loadYamlFile(str(yaml_file)) + + assert result is not None + assert result['kind'] == 'ConfigMap' + assert result['metadata']['name'] == 'test-config' + + def test_load_empty_yaml_file(self, tmp_path): + """Test loading an empty YAML file""" + yaml_file = tmp_path / "empty.yaml" + yaml_file.write_text("") + + result = loadYamlFile(str(yaml_file)) + + assert result is None + + def test_load_nonexistent_file(self): + """Test loading a non-existent file""" + result = loadYamlFile("/nonexistent/path/file.yaml") + + assert result is None + + def test_load_invalid_yaml_file(self, tmp_path): + """Test loading an invalid YAML file""" + yaml_file = tmp_path / "invalid.yaml" + yaml_file.write_text("invalid: yaml: content: [") + + result = loadYamlFile(str(yaml_file)) + + assert result is None + + def test_load_yaml_with_multiple_documents(self, tmp_path): + """Test loading YAML file with multiple documents returns None (not supported)""" + yaml_file = tmp_path / "multi.yaml" + yaml_file.write_text("---\nkey1: value1\n---\nkey2: value2") + + result = loadYamlFile(str(yaml_file)) + + # 
yaml.safe_load() doesn't support multiple documents, so it should return None + assert result is None + + +class TestRestoreResource: + """Tests for restoreResource function""" + + def setup_method(self): + """Set up test fixtures""" + self.mock_client = MagicMock() + self.mock_resource_api = MagicMock() + self.mock_client.resources.get.return_value = self.mock_resource_api + + def test_create_new_namespaced_resource(self): + """Test creating a new namespaced resource""" + resource_data = { + 'apiVersion': 'v1', + 'kind': 'ConfigMap', + 'metadata': { + 'name': 'test-config', + 'namespace': 'test-ns' + }, + 'data': { + 'key': 'value' + } + } + + # Resource doesn't exist + self.mock_resource_api.get.side_effect = NotFoundError(Mock()) + + success, name, status = restoreResource(self.mock_client, resource_data) + + assert success is True + assert name == 'test-config' + assert status is None + self.mock_resource_api.create.assert_called_once_with( + body=resource_data, + namespace='test-ns' + ) + + def test_create_new_cluster_resource(self): + """Test creating a new cluster-scoped resource""" + resource_data = { + 'apiVersion': 'v1', + 'kind': 'Namespace', + 'metadata': { + 'name': 'test-namespace' + } + } + + # Resource doesn't exist + self.mock_resource_api.get.side_effect = NotFoundError(Mock()) + + success, name, status = restoreResource(self.mock_client, resource_data) + + assert success is True + assert name == 'test-namespace' + assert status is None + self.mock_resource_api.create.assert_called_once_with( + body=resource_data + ) + + def test_update_existing_resource_with_replace_true(self): + """Test updating an existing resource when replace_resource is True""" + resource_data = { + 'apiVersion': 'v1', + 'kind': 'ConfigMap', + 'metadata': { + 'name': 'test-config', + 'namespace': 'test-ns' + }, + 'data': { + 'key': 'new-value' + } + } + + # Resource exists + existing_resource = { + 'metadata': { + 'name': 'test-config', + 'resourceVersion': '12345' + } + } + 
        self.mock_resource_api.get.return_value = existing_resource

        success, name, status = restoreResource(self.mock_client, resource_data, replace_resource=True)

        # (tail of a test whose setup begins above) An existing resource with
        # replace_resource=True is updated in place via a JSON merge-patch,
        # addressed by name and namespace, rather than re-created.
        assert success is True
        assert name == 'test-config'
        assert status == 'updated'
        self.mock_resource_api.patch.assert_called_once_with(
            body=resource_data,
            name='test-config',
            namespace='test-ns',
            content_type='application/merge-patch+json'
        )

    def test_skip_existing_resource_with_replace_false(self):
        """Test skipping an existing resource when replace_resource is False"""
        resource_data = {
            'apiVersion': 'v1',
            'kind': 'ConfigMap',
            'metadata': {
                'name': 'test-config',
                'namespace': 'test-ns'
            }
        }

        # Resource exists
        existing_resource = {'metadata': {'name': 'test-config'}}
        self.mock_resource_api.get.return_value = existing_resource

        success, name, status = restoreResource(self.mock_client, resource_data, replace_resource=False)

        # Skipping still counts as success, but the cluster must be left
        # untouched: neither patch nor create may have been issued.
        assert success is True
        assert name == 'test-config'
        assert status == 'skipped'
        self.mock_resource_api.patch.assert_not_called()
        self.mock_resource_api.create.assert_not_called()

    def test_namespace_override(self):
        """Test that namespace parameter overrides resource namespace"""
        resource_data = {
            'apiVersion': 'v1',
            'kind': 'ConfigMap',
            'metadata': {
                'name': 'test-config',
                'namespace': 'original-ns'
            }
        }

        # Resource doesn't exist
        self.mock_resource_api.get.side_effect = NotFoundError(Mock())

        success, name, status = restoreResource(
            self.mock_client,
            resource_data,
            namespace='override-ns'
        )

        # The explicit namespace argument wins over metadata.namespace
        # ('original-ns') when the resource is created.
        assert success is True
        self.mock_resource_api.create.assert_called_once_with(
            body=resource_data,
            namespace='override-ns'
        )

    def test_missing_kind_field(self):
        """Test handling resource missing kind field"""
        resource_data = {
            'apiVersion': 'v1',
            'metadata': {
                'name': 'test-resource'
            }
        }

        success, name, status = restoreResource(self.mock_client, resource_data)

        # Validation failure: the name is still reported, and the status
        # message identifies the missing required field(s).
        assert success is False
        assert name == 'test-resource'
        assert 'missing required fields' in status.lower()

    def test_missing_api_version_field(self):
        """Test handling resource missing apiVersion field"""
        resource_data = {
            'kind': 'ConfigMap',
            'metadata': {
                'name': 'test-resource'
            }
        }

        success, name, status = restoreResource(self.mock_client, resource_data)

        assert success is False
        assert name == 'test-resource'
        assert 'missing required fields' in status.lower()

    def test_missing_name_field(self):
        """Test handling resource missing name field"""
        resource_data = {
            'apiVersion': 'v1',
            'kind': 'ConfigMap',
            'metadata': {}
        }

        success, name, status = restoreResource(self.mock_client, resource_data)

        # With no metadata.name available, the reported name falls back
        # to the literal 'unknown'.
        assert success is False
        assert name == 'unknown'
        assert 'missing required fields' in status.lower()

    def test_create_failure(self):
        """Test handling create operation failure"""
        resource_data = {
            'apiVersion': 'v1',
            'kind': 'ConfigMap',
            'metadata': {
                'name': 'test-config',
                'namespace': 'test-ns'
            }
        }

        # Resource doesn't exist
        self.mock_resource_api.get.side_effect = NotFoundError(Mock())
        # Create fails
        self.mock_resource_api.create.side_effect = Exception("Create failed")

        success, name, status = restoreResource(self.mock_client, resource_data)

        # The create exception is caught and surfaced in the status text
        # rather than propagated to the caller.
        assert success is False
        assert name == 'test-config'
        assert 'Failed to create' in status
        assert 'Create failed' in status

    def test_patch_failure(self):
        """Test handling patch operation failure"""
        resource_data = {
            'apiVersion': 'v1',
            'kind': 'ConfigMap',
            'metadata': {
                'name': 'test-config',
                'namespace': 'test-ns'
            }
        }

        # Resource exists
        existing_resource = {'metadata': {'name': 'test-config'}}
        self.mock_resource_api.get.return_value = existing_resource
        # Patch fails
        self.mock_resource_api.patch.side_effect = Exception("Patch failed")

        success, name, status = restoreResource(self.mock_client, resource_data, replace_resource=True)

        # As with create, the patch exception is reported through the
        # returned status message.
        assert success is False
        assert name == 'test-config'
        assert 'Failed to update' in status
        assert 'Patch failed' in status

    def test_resource_api_get_failure(self):
        """Test handling failure to get resource API"""
        resource_data = {
            'apiVersion': 'v1',
            'kind': 'ConfigMap',
            'metadata': {
                'name': 'test-config'
            }
        }

        # Getting resource API fails
        self.mock_client.resources.get.side_effect = Exception("API not found")

        success, name, status = restoreResource(self.mock_client, resource_data)

        # Failure to resolve the dynamic-client resource API is reported
        # with the generic 'Error restoring resource' prefix.
        assert success is False
        assert name == 'test-config'
        assert 'Error restoring resource' in status

    def test_update_cluster_scoped_resource(self):
        """Test updating a cluster-scoped resource"""
        resource_data = {
            'apiVersion': 'v1',
            'kind': 'Namespace',
            'metadata': {
                'name': 'test-namespace'
            }
        }

        # Resource exists
        existing_resource = {'metadata': {'name': 'test-namespace'}}
        self.mock_resource_api.get.return_value = existing_resource

        success, name, status = restoreResource(self.mock_client, resource_data, replace_resource=True)

        # Cluster-scoped resources (no metadata.namespace) are patched
        # without a namespace keyword argument.
        assert success is True
        assert name == 'test-namespace'
        assert status == 'updated'
        self.mock_resource_api.patch.assert_called_once_with(
            body=resource_data,
            name='test-namespace',
            content_type='application/merge-patch+json'
        )

    def test_malformed_resource_data(self):
        """Test handling malformed resource data"""
        resource_data = {
            'apiVersion': 'v1',
            'kind': 'ConfigMap'
            # Missing metadata entirely
        }

        success, name, status = restoreResource(self.mock_client, resource_data)

        # A wholly absent metadata block behaves like a missing name:
        # reported name is 'unknown' and validation fails.
        assert success is False
        assert name == 'unknown'
        assert 'missing required fields' in status.lower()

    def test_resource_with_complex_metadata(self):
        """Test resource with complex metadata structure"""
        resource_data = {
            'apiVersion': 'apps/v1',
            'kind': 'Deployment',
            'metadata': {
                'name': 'test-deployment',
                'namespace': 'test-ns',
                'labels': {
                    'app': 'test',
                    'version': 'v1'
                },
                'annotations': {
                    'description': 'Test deployment'
                }
            },
            'spec': {
                'replicas': 3
            }
        }

        # Resource doesn't exist
        self.mock_resource_api.get.side_effect = NotFoundError(Mock())

        success, name, status = restoreResource(self.mock_client, resource_data)

        # NOTE(review): a successful create reports status None here —
        # confirm this matches restoreResource's contract for the create path.
        assert success is True
        assert name == 'test-deployment'
        assert status is None
        self.mock_resource_api.create.assert_called_once()