Coverage for ocp_resources/resource.py: 32%

826 statements  

« prev     ^ index     » next       coverage.py v7.10.1, created at 2025-07-29 12:31 +0300

1import base64 

2import contextlib 

3import copy 

4import json 

5import os 

6import re 

7import sys 

8import threading 

9from abc import ABC, abstractmethod 

10from collections.abc import Callable, Generator 

11from io import StringIO 

12from signal import SIGINT, signal 

13from types import TracebackType 

14from typing import Any, Self, Type 

15from urllib.parse import parse_qs, urlencode, urlparse 

16 

17import jsonschema 

18import kubernetes 

19import requests 

20import yaml 

21from benedict import benedict 

22from kubernetes.dynamic import DynamicClient, ResourceInstance 

23from kubernetes.dynamic.exceptions import ( 

24 ConflictError, 

25 ForbiddenError, 

26 MethodNotAllowedError, 

27 NotFoundError, 

28 ResourceNotFoundError, 

29) 

30from kubernetes.dynamic.resource import ResourceField 

31from packaging.version import Version 

32from simple_logger.logger import get_logger, logging 

33from timeout_sampler import ( 

34 TimeoutExpiredError, 

35 TimeoutSampler, 

36 TimeoutWatch, 

37) 

38from urllib3.exceptions import MaxRetryError 

39 

40from fake_kubernetes_client.dynamic_client import FakeDynamicClient 

41from ocp_resources.event import Event 

42from ocp_resources.exceptions import ( 

43 ClientWithBasicAuthError, 

44 MissingRequiredArgumentError, 

45 MissingResourceResError, 

46 ResourceTeardownError, 

47 ValidationError, 

48) 

49from ocp_resources.utils.constants import ( 

50 DEFAULT_CLUSTER_RETRY_EXCEPTIONS, 

51 NOT_FOUND_ERROR_EXCEPTION_DICT, 

52 PROTOCOL_ERROR_EXCEPTION_DICT, 

53 TIMEOUT_1MINUTE, 

54 TIMEOUT_1SEC, 

55 TIMEOUT_4MINUTES, 

56 TIMEOUT_5SEC, 

57 TIMEOUT_10SEC, 

58 TIMEOUT_30SEC, 

59) 

60from ocp_resources.utils.resource_constants import ResourceConstants 

61from ocp_resources.utils.schema_validator import SchemaValidator 

62from ocp_resources.utils.utils import skip_existing_resource_creation_teardown 

63 

LOGGER = get_logger(name=__name__)
# Highest Kubernetes API major version this wrapper will select when resolving
# a resource's API version (see _find_supported_resource).
MAX_SUPPORTED_API_VERSION = "v2"

66 

67 

def _find_supported_resource(dyn_client: DynamicClient, api_group: str, kind: str) -> ResourceField | None:
    """
    Return the newest matching API resource whose version does not exceed
    MAX_SUPPORTED_API_VERSION, or None when no candidate qualifies.
    """
    ceiling = KubeAPIVersion(MAX_SUPPORTED_API_VERSION)
    candidates = dyn_client.resources.search(group=api_group, kind=kind)
    # Walk candidates newest-first and stop at the first one within the ceiling.
    newest_first = sorted(candidates, key=lambda candidate: KubeAPIVersion(candidate.api_version), reverse=True)
    return next(
        (candidate for candidate in newest_first if KubeAPIVersion(candidate.api_version) <= ceiling),
        None,
    )

75 

76 

def _get_api_version(dyn_client: DynamicClient, api_group: str, kind: str) -> str:
    """
    Resolve the "group/version" string for `kind` within `api_group`.

    Raises:
        NotImplementedError: when no supported resource is found, or the found
            resource has no usable string group_version.
    """
    failure_msg = f"Couldn't find {kind} in {api_group} api group"

    resource = _find_supported_resource(dyn_client=dyn_client, api_group=api_group, kind=kind)
    if not resource:
        LOGGER.warning(failure_msg)
        raise NotImplementedError(failure_msg)

    group_version = resource.group_version
    if isinstance(group_version, str):
        LOGGER.info(f"kind: {kind} api version: {group_version}")
        return group_version

    raise NotImplementedError(failure_msg)

91 

92 

def client_configuration_with_basic_auth(
    username: str,
    password: str,
    host: str,
    configuration: kubernetes.client.Configuration,
) -> kubernetes.client.ApiClient:
    """
    Build an authenticated ApiClient by running the OpenShift OAuth
    authorization-code flow with HTTP basic-auth credentials.

    Flow: discover the OAuth endpoints from the cluster's well-known URL,
    obtain an authorization code using basic auth, then exchange the code
    for a bearer token which is stored on `configuration`.

    Args:
        username (str): username for basic auth.
        password (str): password for basic auth.
        host (str): cluster API host (e.g. "https://api.<cluster>:6443").
        configuration (kubernetes.client.Configuration): configuration object;
            mutated in place with the resulting host and bearer token.

    Returns:
        kubernetes.client.ApiClient: client authenticated with the obtained token.

    Raises:
        ClientWithBasicAuthError: if any step of the OAuth flow fails.
    """
    verify_ssl = configuration.verify_ssl

    def _fetch_oauth_config(_host: str, _verify_ssl: bool) -> Any:
        # OAuth endpoints are advertised by the cluster at this well-known URL.
        well_known_url = f"{_host}/.well-known/oauth-authorization-server"

        config_response = requests.get(well_known_url, verify=_verify_ssl)
        if config_response.status_code != 200:
            raise ClientWithBasicAuthError("No well-known file found at endpoint")

        return config_response.json()

    def _get_authorization_code(_auth_endpoint: str, _username: str, _password: str, _verify_ssl: bool) -> str:
        _code = None
        auth_params = {
            "client_id": "openshift-challenging-client",
            "response_type": "code",
            "state": "USER",
            "code_challenge_method": "S256",
        }

        auth_url = f"{_auth_endpoint}?{urlencode(auth_params)}"

        credentials = f"{_username}:{_password}"
        auth_header = base64.b64encode(credentials.encode()).decode()

        # A successful basic-auth challenge answers with a 302 whose Location
        # query string carries the authorization code, so redirects must not
        # be followed automatically.
        auth_response = requests.get(
            auth_url,
            headers={"Authorization": f"Basic {auth_header}", "X-CSRF-Token": "USER", "Accept": "application/json"},
            verify=_verify_ssl,
            allow_redirects=False,
        )

        if auth_response.status_code == 302:
            location = auth_response.headers.get("Location", "")

            parsed_url = urlparse(location)
            query_params = parse_qs(parsed_url.query)
            _code = query_params.get("code", [None])[0]
            if _code:
                return _code

        raise ClientWithBasicAuthError("No authorization code found")

    def _exchange_code_for_token(
        _token_endpoint: str, _auth_code: str, _verify_ssl: bool
    ) -> kubernetes.client.ApiClient:
        _client = None

        token_data = {
            "grant_type": "authorization_code",
            "code": _auth_code,
            "client_id": "openshift-challenging-client",
        }

        token_response = requests.post(
            _token_endpoint,
            data=token_data,
            headers={
                "Content-Type": "application/x-www-form-urlencoded",
                "Accept": "application/json",
                "Authorization": "Basic b3BlbnNoaWZ0LWNoYWxsZW5naW5nLWNsaWVudDo=",  # openshift-challenging-client:
            },
            verify=_verify_ssl,
        )

        if token_response.status_code == 200:
            token_json = token_response.json()
            access_token = token_json.get("access_token")

            # Mutates the caller-supplied configuration before wrapping it.
            configuration.host = host
            configuration.api_key = {"authorization": f"Bearer {access_token}"}
            _client = kubernetes.client.ApiClient(configuration=configuration)

        if _client:
            return _client

        raise ClientWithBasicAuthError("Failed to authenticate with basic auth")

    oauth_config = _fetch_oauth_config(_host=host, _verify_ssl=verify_ssl)

    auth_endpoint = oauth_config.get("authorization_endpoint")
    if not auth_endpoint:
        raise ClientWithBasicAuthError("No authorization_endpoint found in well-known file")

    _code = _get_authorization_code(
        _auth_endpoint=auth_endpoint, _username=username, _password=password, _verify_ssl=verify_ssl
    )

    return _exchange_code_for_token(
        _token_endpoint=oauth_config.get("token_endpoint"), _auth_code=_code, _verify_ssl=verify_ssl
    )

190 

191 

def get_client(
    config_file: str | None = None,
    config_dict: dict[str, Any] | None = None,
    context: str | None = None,
    client_configuration: kubernetes.client.Configuration | None = None,
    persist_config: bool = True,
    temp_file_path: str | None = None,
    try_refresh_token: bool = True,
    username: str | None = None,
    password: str | None = None,
    host: str | None = None,
    verify_ssl: bool | None = None,
    token: str | None = None,
    fake: bool = False,
) -> DynamicClient | FakeDynamicClient:
    """
    Get a kubernetes client.


    This function is a replica of `ocp_utilities.infra.get_client` which cannot be imported as ocp_utilities imports
    from ocp_resources.

    Pass either config_file or config_dict.
    If none of them are passed, client will be created from default OS kubeconfig
    (environment variable or .kube folder).

    Credential precedence: basic auth (username+password+host) -> host+token ->
    config_dict -> kubeconfig file -> in-cluster config (fallback).

    Args:
        config_file (str): path to a kubeconfig file.
        config_dict (dict): dict with kubeconfig configuration.
        context (str): name of the context to use.
        client_configuration (kubernetes.client.Configuration): base configuration to use; mutated in place.
        persist_config (bool): whether to persist config file.
        temp_file_path (str): path to a temporary kubeconfig file.
        try_refresh_token (bool): try to refresh token
        username (str): username for basic auth
        password (str): password for basic auth
        host (str): host for the cluster
        verify_ssl (bool): whether to verify ssl
        token (str): Use token to login
        fake (bool): return an in-memory FakeDynamicClient instead of contacting a cluster.

    Returns:
        DynamicClient: a kubernetes client.
    """
    if fake:
        return FakeDynamicClient()

    proxy = os.environ.get("HTTPS_PROXY") or os.environ.get("HTTP_PROXY")

    client_configuration = client_configuration or kubernetes.client.Configuration()

    if verify_ssl is not None:
        client_configuration.verify_ssl = verify_ssl

    # An explicitly configured proxy wins over the environment variables.
    if not client_configuration.proxy and proxy:
        LOGGER.info(f"Setting proxy from environment variable: {proxy}")
        client_configuration.proxy = proxy

    if username and password and host:
        _client = client_configuration_with_basic_auth(
            username=username, password=password, host=host, configuration=client_configuration
        )

    elif host and token:
        client_configuration.host = host
        client_configuration.api_key = {"authorization": f"Bearer {token}"}
        _client = kubernetes.client.ApiClient(client_configuration)

    # Ref: https://github.com/kubernetes-client/python/blob/v26.1.0/kubernetes/base/config/kube_config.py
    elif config_dict:
        _client = kubernetes.config.new_client_from_config_dict(
            config_dict=config_dict,
            context=context,
            client_configuration=client_configuration,
            persist_config=persist_config,
            temp_file_path=temp_file_path,
        )
    else:
        # Ref: https://github.com/kubernetes-client/python/blob/v26.1.0/kubernetes/base/config/__init__.py
        LOGGER.info("Trying to get client via new_client_from_config")

        # kubernetes.config.kube_config.load_kube_config sets KUBE_CONFIG_DEFAULT_LOCATION during module import.
        # If `KUBECONFIG` environment variable is set via code, the `KUBE_CONFIG_DEFAULT_LOCATION` will be None since
        # is populated during import which comes before setting the variable in code.
        config_file = config_file or os.environ.get("KUBECONFIG", "~/.kube/config")

        _client = kubernetes.config.new_client_from_config(
            config_file=config_file,
            context=context,
            client_configuration=client_configuration,
            persist_config=persist_config,
        )

    # Make the resolved configuration the process-wide default.
    kubernetes.client.Configuration.set_default(default=client_configuration)

    try:
        return kubernetes.dynamic.DynamicClient(client=_client)
    except MaxRetryError:
        # Ref: https://github.com/kubernetes-client/python/blob/v26.1.0/kubernetes/base/config/incluster_config.py
        LOGGER.info("Trying to get client via incluster_config")
        return kubernetes.dynamic.DynamicClient(
            client=kubernetes.config.incluster_config.load_incluster_config(
                client_configuration=client_configuration, try_refresh_token=try_refresh_token
            ),
        )

295 

296 

297def sub_resource_level(current_class: Any, owner_class: Any, parent_class: Any) -> str | None: 

298 # return the name of the last class in MRO list that is not one of base 

299 # classes; otherwise return None 

300 for class_iterator in reversed([ 

301 class_iterator 

302 for class_iterator in current_class.mro() 

303 if class_iterator not in owner_class.mro() and issubclass(class_iterator, parent_class) 

304 ]): 

305 return class_iterator.__name__ 

306 

307 return None 

308 

309 

def replace_key_with_hashed_value(resource_dict: dict[Any, Any], key_name: str) -> dict[Any, Any]:
    """
    Recursively search a nested dictionary for a given key and changes its value to "******" if found.

    Two key-path formats are supported (">" separates nesting levels):
    1. Plain path, e.g. "a>b>c": hashes the value at resource_dict["a"]["b"]["c"].
    2. List path, e.g. "a>b[]>c": "b" holds a list of dicts; hashes key "c"
       inside every dict element of that list.

    Args:
        resource_dict: The nested dictionary to search.
        key_name: The key path to find.

    Returns:
        dict[Any, Any]: A copy of the input dictionary with the specified key's value replaced with "*******".
    """
    masked = benedict(copy.deepcopy(resource_dict), keypath_separator=">")

    # Simple case: no list traversal anywhere in the path.
    if "[]" not in key_name:
        if masked.get(key_name):
            masked[key_name] = "*******"
        return dict(masked)

    prefix, suffix = key_name.split("[]>", 1)
    if not masked.get(prefix):
        return dict(masked)

    entries = masked[prefix]
    if not isinstance(entries, list):
        return dict(masked)

    # Recurse into each dict element, rewriting the list in place.
    for position, entry in enumerate(entries):
        if isinstance(entry, dict):
            entries[position] = replace_key_with_hashed_value(resource_dict=entry, key_name=suffix)

    return dict(masked)

382 

383 

class KubeAPIVersion(Version):
    """
    Implement the Kubernetes API versioning scheme from
    https://kubernetes.io/docs/concepts/overview/kubernetes-api/#api-versioning

    Ordering example: v1alpha1 < v1beta1 < v1 < v2alpha1.
    """

    # Splits e.g. "v1beta2" into alternating letter/digit runs: ["v", "1", "beta", "2"].
    component_re = re.compile(r"(\d+ | [a-z]+)", re.VERBOSE)

    def __init__(self, vstring: str):
        self.vstring = vstring
        self.version: list[str | Any] = []
        # NOTE(review): packaging.version.Version does its own parsing here and
        # does not call `parse` below — confirm whether `parse`/`_cmp` are invoked
        # explicitly elsewhere, since they mirror the old distutils Version API.
        super().__init__(version=vstring)

    def parse(self, vstring: str) -> None:
        """Parse `vstring` into components, validating the k8s versioning shape."""
        components = [comp for comp in self.component_re.split(vstring) if comp]
        for idx, obj in enumerate(components):
            # Numeric runs become ints so comparisons are numeric, not lexical.
            with contextlib.suppress(ValueError):
                components[idx] = int(obj)

        errmsg = f"version '{vstring}' does not conform to kubernetes api versioning guidelines"

        # Valid shapes are exactly ["v", N] or ["v", N, "alpha"|"beta", M].
        if len(components) not in (2, 4) or components[0] != "v" or not isinstance(components[1], int):
            raise ValueError(errmsg)

        if len(components) == 4 and (components[2] not in ("alpha", "beta") or not isinstance(components[3], int)):
            raise ValueError(errmsg)

        self.version = components

    def __str__(self):
        return self.vstring

    def __repr__(self):
        return "KubeAPIVersion ('{0}')".format(str(self))

    def _cmp(self, other):
        # Three-way comparison: -1 / 0 / 1 (distutils-style protocol).
        if isinstance(other, str):
            other = KubeAPIVersion(vstring=other)

        myver = self.version
        otherver = other.version

        # Pad GA versions ("v1" -> ["v", 1, "zeta", 9999]) so a stable release
        # sorts after any alpha/beta pre-release of the same major.
        for ver in myver, otherver:
            if len(ver) == 2:
                ver.extend(["zeta", 9999])

        if myver == otherver:
            return 0
        if myver < otherver:
            return -1
        if myver > otherver:
            return 1

436 

437 

class ClassProperty:
    """Descriptor that exposes a callable as a read-only, class-level property."""

    def __init__(self, func: Callable) -> None:
        self.func = func

    def __get__(self, obj: Any, owner: Any) -> Any:
        # The instance (obj) is deliberately ignored: the wrapped callable is
        # always evaluated against the owning class.
        getter = self.func
        return getter(owner)

444 

445 

class Resource(ResourceConstants):
    """
    Base class for API resources

    Provides common functionality for all Kubernetes/OpenShift resources including
    CRUD operations, resource management, and schema validation.

    Attributes:
        api_group (str): API group for the resource (e.g., "apps", "batch")
        api_version (str): API version (e.g., "v1", "v1beta1")
        singular_name (str): Singular resource name for API calls
        timeout_seconds (int): Default timeout for API operations
        schema_validation_enabled (bool): Enable automatic validation on create/update
    """

    # Class-level defaults; subclasses override them. `api_version` may also be
    # resolved dynamically from the cluster (see _set_client_and_api_version).
    api_group: str = ""
    api_version: str = ""
    singular_name: str = ""
    timeout_seconds: int = TIMEOUT_1MINUTE

465 

    class ApiGroup:
        """Well-known Kubernetes/OpenShift API group name constants."""

        AAQ_KUBEVIRT_IO: str = "aaq.kubevirt.io"
        ADMISSIONREGISTRATION_K8S_IO: str = "admissionregistration.k8s.io"
        APIEXTENSIONS_K8S_IO: str = "apiextensions.k8s.io"
        APIREGISTRATION_K8S_IO: str = "apiregistration.k8s.io"
        APP_KUBERNETES_IO: str = "app.kubernetes.io"
        APPS: str = "apps"
        APPSTUDIO_REDHAT_COM: str = "appstudio.redhat.com"
        AUTHENTICATION_K8S_IO: str = "authentication.k8s.io"
        BATCH: str = "batch"
        BITNAMI_COM: str = "bitnami.com"
        CACHING_INTERNAL_KNATIVE_DEV: str = "caching.internal.knative.dev"
        CDI_KUBEVIRT_IO: str = "cdi.kubevirt.io"
        CLONE_KUBEVIRT_IO: str = "clone.kubevirt.io"
        CLUSTER_OPEN_CLUSTER_MANAGEMENT_IO: str = "cluster.open-cluster-management.io"
        COMPONENTS_PLATFORM_OPENDATAHUB_IO: str = "components.platform.opendatahub.io"
        CONFIG_OPENSHIFT_IO: str = "config.openshift.io"
        CONSOLE_OPENSHIFT_IO: str = "console.openshift.io"
        COORDINATION_K8S_IO: str = "coordination.k8s.io"
        CSIADDONS_OPENSHIFT_IO: str = "csiaddons.openshift.io"
        DATA_IMPORT_CRON_TEMPLATE_KUBEVIRT_IO: str = "dataimportcrontemplate.kubevirt.io"
        DATASCIENCECLUSTER_OPENDATAHUB_IO: str = "datasciencecluster.opendatahub.io"
        DISCOVERY_K8S_IO: str = "discovery.k8s.io"
        DSCINITIALIZATION_OPENDATAHUB_IO: str = "dscinitialization.opendatahub.io"
        EVENTS_K8S_IO: str = "events.k8s.io"
        EXPORT_KUBEVIRT_IO: str = "export.kubevirt.io"
        FENCE_AGENTS_REMEDIATION_MEDIK8S_IO: str = "fence-agents-remediation.medik8s.io"
        FORKLIFT_KONVEYOR_IO: str = "forklift.konveyor.io"
        HCO_KUBEVIRT_IO: str = "hco.kubevirt.io"
        HELM_MARIADB_MMONTES_IO: str = "helm.mariadb.mmontes.io"
        HIVE_OPENSHIFT_IO: str = "hive.openshift.io"
        HOSTPATHPROVISIONER_KUBEVIRT_IO: str = "hostpathprovisioner.kubevirt.io"
        IMAGE_OPENSHIFT_IO: str = "image.openshift.io"
        IMAGE_REGISTRY: str = "registry.redhat.io"
        INSTANCETYPE_KUBEVIRT_IO: str = "instancetype.kubevirt.io"
        INTEGREATLY_ORG: str = "integreatly.org"
        K8S_CNI_CNCF_IO: str = "k8s.cni.cncf.io"
        K8S_MARIADB_COM: str = "k8s.mariadb.com"
        K8S_OVN_ORG: str = "k8s.ovn.org"
        K8S_V1_CNI_CNCF_IO: str = "k8s.v1.cni.cncf.io"
        KUBEFLOW_ORG: str = "kubeflow.org"
        KUBERNETES_IO: str = "kubernetes.io"
        KUBEVIRT_IO: str = "kubevirt.io"
        KUBEVIRT_KUBEVIRT_IO: str = "kubevirt.kubevirt.io"
        LITMUS_IO: str = "litmuschaos.io"
        LLAMASTACK_IO: str = "llamastack.io"
        MACHINE_OPENSHIFT_IO: str = "machine.openshift.io"
        MACHINECONFIGURATION_OPENSHIFT_IO: str = "machineconfiguration.openshift.io"
        MAISTRA_IO: str = "maistra.io"
        METALLB_IO: str = "metallb.io"
        METRICS_K8S_IO: str = "metrics.k8s.io"
        MIGRATION_OPENSHIFT_IO: str = "migration.openshift.io"
        MIGRATIONS_KUBEVIRT_IO: str = "migrations.kubevirt.io"
        MODELREGISTRY_OPENDATAHUB_IO: str = "modelregistry.opendatahub.io"
        MONITORING_COREOS_COM: str = "monitoring.coreos.com"
        MTQ_KUBEVIRT_IO: str = "mtq.kubevirt.io"
        NETWORKADDONSOPERATOR_NETWORK_KUBEVIRT_IO: str = "networkaddonsoperator.network.kubevirt.io"
        NETWORKING_ISTIO_IO: str = "networking.istio.io"
        NETWORKING_K8S_IO: str = "networking.k8s.io"
        NMSTATE_IO: str = "nmstate.io"
        NODE_LABELLER_KUBEVIRT_IO: str = "node-labeller.kubevirt.io"
        NODEMAINTENANCE_KUBEVIRT_IO: str = "nodemaintenance.kubevirt.io"
        OBSERVABILITY_OPEN_CLUSTER_MANAGEMENT_IO: str = "observability.open-cluster-management.io"
        OCS_OPENSHIFT_IO: str = "ocs.openshift.io"
        OPERATOR_AUTHORINO_KUADRANT_IO: str = "operator.authorino.kuadrant.io"
        OPERATOR_OPEN_CLUSTER_MANAGEMENT_IO: str = "operator.open-cluster-management.io"
        OPERATOR_OPENSHIFT_IO: str = "operator.openshift.io"
        OPERATORS_COREOS_COM: str = "operators.coreos.com"
        OPERATORS_OPENSHIFT_IO: str = "operators.openshift.io"
        OS_TEMPLATE_KUBEVIRT_IO: str = "os.template.kubevirt.io"
        PACKAGES_OPERATORS_COREOS_COM: str = "packages.operators.coreos.com"
        PERFORMANCE_OPENSHIFT_IO: str = "performance.openshift.io"
        POLICY: str = "policy"
        POOL_KUBEVIRT_IO: str = "pool.kubevirt.io"
        PROJECT_OPENSHIFT_IO: str = "project.openshift.io"
        QUOTA_OPENSHIFT_IO: str = "quota.openshift.io"
        RBAC_AUTHORIZATION_K8S_IO: str = "rbac.authorization.k8s.io"
        REMEDIATION_MEDIK8S_IO: str = "remediation.medik8s.io"
        RIPSAW_CLOUDBULLDOZER_IO: str = "ripsaw.cloudbulldozer.io"
        ROUTE_OPENSHIFT_IO: str = "route.openshift.io"
        SCHEDULING_K8S_IO: str = "scheduling.k8s.io"
        SECURITY_ISTIO_IO: str = "security.istio.io"
        SECURITY_OPENSHIFT_IO: str = "security.openshift.io"
        SELF_NODE_REMEDIATION_MEDIK8S_IO: str = "self-node-remediation.medik8s.io"
        SERVING_KNATIVE_DEV: str = "serving.knative.dev"
        SERVING_KSERVE_IO: str = "serving.kserve.io"
        SNAPSHOT_KUBEVIRT_IO: str = "snapshot.kubevirt.io"
        SNAPSHOT_STORAGE_K8S_IO: str = "snapshot.storage.k8s.io"
        SRIOVNETWORK_OPENSHIFT_IO: str = "sriovnetwork.openshift.io"
        SSP_KUBEVIRT_IO: str = "ssp.kubevirt.io"
        STORAGE_K8S_IO: str = "storage.k8s.io"
        STORAGECLASS_KUBERNETES_IO: str = "storageclass.kubernetes.io"
        STORAGECLASS_KUBEVIRT_IO: str = "storageclass.kubevirt.io"
        SUBRESOURCES_KUBEVIRT_IO: str = "subresources.kubevirt.io"
        TEKTON_DEV: str = "tekton.dev"
        TEKTONTASKS_KUBEVIRT_IO: str = "tektontasks.kubevirt.io"
        TEMPLATE_KUBEVIRT_IO: str = "template.kubevirt.io"
        TEMPLATE_OPENSHIFT_IO: str = "template.openshift.io"
        TRUSTYAI_OPENDATAHUB_IO: str = "trustyai.opendatahub.io"
        UPLOAD_CDI_KUBEVIRT_IO: str = "upload.cdi.kubevirt.io"
        USER_OPENSHIFT_IO: str = "user.openshift.io"
        V2V_KUBEVIRT_IO: str = "v2v.kubevirt.io"
        VELERO_IO: str = "velero.io"
        VM_KUBEVIRT_IO: str = "vm.kubevirt.io"

570 

    class ApiVersion:
        """Common API version string constants."""

        V1: str = "v1"
        V1BETA1: str = "v1beta1"
        V1ALPHA1: str = "v1alpha1"
        V1ALPHA3: str = "v1alpha3"

576 

577 def __init__( 

578 self, 

579 name: str | None = None, 

580 client: DynamicClient | None = None, 

581 teardown: bool = True, 

582 yaml_file: str | None = None, 

583 delete_timeout: int = TIMEOUT_4MINUTES, 

584 dry_run: bool = False, 

585 node_selector: dict[str, Any] | None = None, 

586 node_selector_labels: dict[str, str] | None = None, 

587 config_file: str | None = None, 

588 config_dict: dict[str, Any] | None = None, 

589 context: str | None = None, 

590 label: dict[str, str] | None = None, 

591 annotations: dict[str, str] | None = None, 

592 api_group: str = "", 

593 hash_log_data: bool = True, 

594 ensure_exists: bool = False, 

595 kind_dict: dict[Any, Any] | None = None, 

596 wait_for_resource: bool = False, 

597 schema_validation_enabled: bool = False, 

598 ): 

599 """ 

600 Create an API resource 

601 

602 If `yaml_file` or `kind_dict` are passed, logic in `to_dict` is bypassed. 

603 

604 Args: 

605 name (str): Resource name 

606 client (DynamicClient): Dynamic client for connecting to a remote cluster 

607 teardown (bool): Indicates if this resource would need to be deleted 

608 yaml_file (str): yaml file for the resource 

609 delete_timeout (int): timeout associated with delete action 

610 dry_run (bool): dry run 

611 node_selector (dict): node selector 

612 node_selector_labels (str): node selector labels 

613 config_file (str): Path to config file for connecting to remote cluster. 

614 context (str): Context name for connecting to remote cluster. 

615 label (dict): Resource labels 

616 annotations (dict[str, str] | None): Resource annotations 

617 api_group (str): Resource API group; will overwrite API group definition in resource class 

618 hash_log_data (bool): Hash resource content based on resource keys_to_hash property 

619 (example: Secret resource) 

620 ensure_exists (bool): Whether to check if the resource exists before when initializing the resource, raise if not. 

621 kind_dict (dict): dict which represents the resource object 

622 wait_for_resource (bool): Waits for the resource to be created 

623 schema_validation_enabled (bool): Enable automatic schema validation for this instance. 

624 Defaults to False. Set to True to validate on create/update operations. 

625 """ 

626 if yaml_file and kind_dict: 

627 raise ValueError("yaml_file and resource_dict are mutually exclusive") 

628 

629 self.name = name 

630 self.teardown = teardown 

631 self.yaml_file = yaml_file 

632 self.kind_dict = kind_dict 

633 self.delete_timeout = delete_timeout 

634 self.dry_run = dry_run 

635 self.node_selector = node_selector 

636 self.node_selector_labels = node_selector_labels 

637 self.config_file = config_file 

638 self.config_dict = config_dict or {} 

639 self.context = context 

640 self.label = label 

641 self.annotations = annotations 

642 self.client: DynamicClient = client or get_client(config_file=self.config_file, context=self.context) 

643 self.api_group: str = api_group or self.api_group 

644 self.hash_log_data = hash_log_data 

645 

646 if not self.api_group and not self.api_version: 

647 raise NotImplementedError("Subclasses of Resource require self.api_group or self.api_version to be defined") 

648 

649 if not (self.name or self.yaml_file or self.kind_dict): 

650 raise MissingRequiredArgumentError(argument="name") 

651 

652 self.namespace: str | None = None 

653 self.node_selector_spec = self._prepare_node_selector_spec() 

654 self.res: dict[Any, Any] = self.kind_dict or {} 

655 self.yaml_file_contents: str = "" 

656 self.initial_resource_version: str = "" 

657 self.logger = self._set_logger() 

658 self.wait_for_resource = wait_for_resource 

659 

660 if ensure_exists: 

661 self._ensure_exists() 

662 

663 # Set instance-level validation flag 

664 self.schema_validation_enabled = schema_validation_enabled 

665 

666 # self._set_client_and_api_version() must be last init line 

667 self._set_client_and_api_version() 

668 

669 def _ensure_exists(self) -> None: 

670 if not self.exists: 

671 _name_for_raise = self.name if not self.namespace else f"{self.namespace}/{self.name}" 

672 raise ResourceNotFoundError(f"Resource `{self.kind}` `{_name_for_raise}` does not exist") 

673 

674 def _set_logger(self) -> logging.Logger: 

675 log_level = os.environ.get("OPENSHIFT_PYTHON_WRAPPER_LOG_LEVEL", "INFO") 

676 log_file = os.environ.get("OPENSHIFT_PYTHON_WRAPPER_LOG_FILE", "") 

677 return get_logger( 

678 name=f"{__name__.rsplit('.')[0]} {self.kind}", 

679 level=log_level, 

680 filename=log_file, 

681 ) 

682 

683 def _prepare_node_selector_spec(self) -> dict[str, str]: 

684 return self.node_selector or self.node_selector_labels or {} 

685 

    @ClassProperty
    def kind(cls) -> str | None:
        """Resource kind name derived from the class hierarchy; accessible on the class itself."""
        return sub_resource_level(cls, NamespacedResource, Resource)

689 

    def _base_body(self) -> None:
        """
        Populate `self.res` (and `self.name`) in place from `kind_dict`,
        `yaml_file`, or the base apiVersion/kind/metadata fields.

        Returns nothing; the result is stored on `self.res`.

        Raises:
            MissingResourceResError: if no resource dict could be produced.
        """
        if self.kind_dict:
            # If `kind_dict` is provided, no additional logic should be applied
            self.name = self.kind_dict["metadata"]["name"]

        elif self.yaml_file:
            # Contents are cached so repeated calls do not re-read the file/stream.
            if not self.yaml_file_contents:
                if isinstance(self.yaml_file, StringIO):
                    self.yaml_file_contents = self.yaml_file.read()

                else:
                    with open(self.yaml_file) as stream:
                        self.yaml_file_contents = stream.read()

            self.res = yaml.safe_load(stream=self.yaml_file_contents)
            # Drop any stale resourceVersion carried over from an exported manifest.
            self.res.get("metadata", {}).pop("resourceVersion", None)
            self.name = self.res["metadata"]["name"]

        else:
            self.res = {
                "apiVersion": self.api_version,
                "kind": self.kind,
                "metadata": {"name": self.name},
            }

        # Labels/annotations are merged regardless of which branch built `res`.
        if self.label:
            self.res.setdefault("metadata", {}).setdefault("labels", {}).update(self.label)

        if self.annotations:
            self.res.setdefault("metadata", {}).setdefault("annotations", {}).update(self.annotations)

        if not self.res:
            raise MissingResourceResError(name=self.name or "")

729 

    def to_dict(self) -> None:
        """
        Generate intended dict representation of the resource.

        Populates `self.res` in place via `_base_body()`.
        """
        self._base_body()

735 

    def __enter__(self) -> Any:
        # Install a SIGINT handler so Ctrl-C still tears the resource down.
        # Signal handlers may only be registered from the main thread.
        if threading.current_thread().native_id == threading.main_thread().native_id:
            signal(SIGINT, self._sigint_handler)
        return self.deploy(wait=self.wait_for_resource)

740 

741 def __exit__( 

742 self, 

743 exc_type: type[BaseException] | None = None, 

744 exc_val: BaseException | None = None, 

745 exc_tb: TracebackType | None = None, 

746 ) -> None: 

747 if self.teardown: 

748 if not self.clean_up(): 

749 raise ResourceTeardownError(resource=self) 

750 

    def _sigint_handler(self, signal_received: int, frame: Any) -> None:
        # Run the normal context-manager teardown, then exit with the signal number.
        self.__exit__()
        sys.exit(signal_received)

754 

755 def deploy(self, wait: bool = False) -> Self: 

756 """ 

757 For debug, export REUSE_IF_RESOURCE_EXISTS to skip resource create. 

758 Spaces are important in the export dict 

759 

760 Examples: 

761 To skip creation of all resources by kind: 

762 export REUSE_IF_RESOURCE_EXISTS="{Pod: {}}" 

763 

764 To skip creation of resource by name (on all namespaces or non-namespaced resources): 

765 export REUSE_IF_RESOURCE_EXISTS="{Pod: {<pod-name>:}}" 

766 

767 To skip creation of resource by name and namespace: 

768 export REUSE_IF_RESOURCE_EXISTS="{Pod: {<pod-name>: <pod-namespace>}}" 

769 

770 To skip creation of multiple resources: 

771 export REUSE_IF_RESOURCE_EXISTS="{Namespace: {<namespace-name>:}, Pod: {<pod-name>: <pod-namespace>}}" 

772 """ 

773 _resource = None 

774 _export_str = "REUSE_IF_RESOURCE_EXISTS" 

775 skip_resource_kind_create_if_exists = os.environ.get(_export_str) 

776 if skip_resource_kind_create_if_exists: 

777 _resource = skip_existing_resource_creation_teardown( 

778 resource=self, 

779 export_str=_export_str, 

780 user_exported_args=skip_resource_kind_create_if_exists, 

781 ) 

782 

783 if _resource: 

784 return _resource 

785 

786 self.create(wait=wait) 

787 return self 

788 

789 def clean_up(self, wait: bool = True, timeout: int | None = None) -> bool: 

790 """ 

791 For debug, export SKIP_RESOURCE_TEARDOWN to skip resource teardown. 

792 Spaces are important in the export dict 

793 

794 Args: 

795 wait (bool, optional): Wait for resource deletion. Defaults to True. 

796 timeout (int, optional): Timeout in seconds to wait for resource to be deleted. Defaults to 240. 

797 

798 Returns: 

799 bool: True if resource was deleted else False. 

800 

801 Examples: 

802 To skip teardown of all resources by kind: 

803 export SKIP_RESOURCE_TEARDOWN="{Pod: {}}" 

804 

805 To skip teardown of resource by name (on all namespaces): 

806 export SKIP_RESOURCE_TEARDOWN="{Pod: {<pod-name>:}}" 

807 

808 To skip teardown of resource by name and namespace: 

809 export SKIP_RESOURCE_TEARDOWN="{Pod: {<pod-name>: <pod-namespace>}}" 

810 

811 To skip teardown of multiple resources: 

812 export SKIP_RESOURCE_TEARDOWN="{Namespace: {<namespace-name>:}, Pod: {<pod-name>: <pod-namespace>}}" 

813 """ 

814 _export_str = "SKIP_RESOURCE_TEARDOWN" 

815 skip_resource_teardown = os.environ.get(_export_str) 

816 if skip_resource_teardown and skip_existing_resource_creation_teardown( 

817 resource=self, 

818 export_str=_export_str, 

819 user_exported_args=skip_resource_teardown, 

820 check_exists=False, 

821 ): 

822 self.logger.warning( 

823 f"Skip resource {self.kind} {self.name} teardown. Got {_export_str}={skip_resource_teardown}" 

824 ) 

825 return True 

826 

827 return self.delete(wait=wait, timeout=timeout or self.delete_timeout) 

828 

829 @classmethod 

830 def _prepare_resources( 

831 cls, dyn_client: DynamicClient, singular_name: str, *args: Any, **kwargs: Any 

832 ) -> ResourceInstance: 

833 if not cls.api_version: 

834 cls.api_version = _get_api_version(dyn_client=dyn_client, api_group=cls.api_group, kind=cls.kind) 

835 

836 get_kwargs = {"singular_name": singular_name} if singular_name else {} 

837 return dyn_client.resources.get( 

838 kind=cls.kind, 

839 api_version=cls.api_version, 

840 **get_kwargs, 

841 ).get(*args, **kwargs, timeout_seconds=cls.timeout_seconds) 

842 

843 def _prepare_singular_name_kwargs(self, **kwargs: Any) -> dict[str, Any]: 

844 kwargs = kwargs if kwargs else {} 

845 if self.singular_name: 

846 kwargs["singular_name"] = self.singular_name 

847 

848 return kwargs 

849 

850 def _set_client_and_api_version(self) -> None: 

851 if not self.client: 

852 self.client = get_client(config_file=self.config_file, context=self.context) 

853 

854 if not self.api_version: 

855 self.api_version = _get_api_version(dyn_client=self.client, api_group=self.api_group, kind=self.kind) 

856 

857 def full_api(self, **kwargs: Any) -> ResourceInstance: 

858 """ 

859 Get resource API 

860 

861 Keyword Args: 

862 pretty 

863 _continue 

864 include_uninitialized 

865 field_selector 

866 label_selector 

867 limit 

868 resource_version 

869 timeout_seconds 

870 watch 

871 async_req 

872 

873 Returns: 

874 Resource: Resource object. 

875 """ 

876 self._set_client_and_api_version() 

877 

878 kwargs = self._prepare_singular_name_kwargs(**kwargs) 

879 

880 return self.client.resources.get(api_version=self.api_version, kind=self.kind, **kwargs) 

881 

    @property
    def api(self) -> ResourceInstance:
        # Convenience accessor: the resource API with default arguments (see full_api()).
        return self.full_api()

885 

    def wait(self, timeout: int = TIMEOUT_4MINUTES, sleep: int = 1) -> None:
        """
        Wait for resource

        Args:
            timeout (int): Time to wait for the resource.
            sleep (int): Time to wait between retries

        Raises:
            TimeoutExpiredError: If the resource does not exist within the timeout.
        """
        self.logger.info(f"Wait until {self.kind} {self.name} is created")
        # Retry through transient protocol/cluster errors and NotFound until the
        # resource shows up; the sampler raises TimeoutExpiredError on timeout.
        samples = TimeoutSampler(
            wait_timeout=timeout,
            sleep=sleep,
            exceptions_dict={
                **PROTOCOL_ERROR_EXCEPTION_DICT,
                **NOT_FOUND_ERROR_EXCEPTION_DICT,
                **DEFAULT_CLUSTER_RETRY_EXCEPTIONS,
            },
            func=lambda: self.exists,
        )
        for sample in samples:
            if sample:
                return

911 

    def wait_deleted(self, timeout: int = TIMEOUT_4MINUTES) -> bool:
        """
        Wait until resource is deleted

        Args:
            timeout (int): Time to wait for the resource.

        Returns:
            bool: True if the resource was deleted within the timeout, False otherwise.
                (TimeoutExpiredError is caught internally and reported as False.)
        """
        self.logger.info(f"Wait until {self.kind} {self.name} is deleted")
        try:
            for sample in TimeoutSampler(wait_timeout=timeout, sleep=1, func=lambda: self.exists):
                if not sample:
                    return True
        except TimeoutExpiredError:
            self.logger.warning(f"Timeout expired while waiting for {self.kind} {self.name} to be deleted")
            return False

        # Unreachable in practice (the sampler raises on timeout); kept for type completeness.
        return False

932 

933 @property 

934 def exists(self) -> ResourceInstance | None: 

935 """ 

936 Whether self exists on the server 

937 """ 

938 try: 

939 return self.instance 

940 except NotFoundError: 

941 return None 

942 

    @property
    def _kube_v1_api(self) -> kubernetes.client.CoreV1Api:
        # Raw core/v1 API client built on this resource's dynamic-client connection.
        return kubernetes.client.CoreV1Api(api_client=self.client.client)

946 

    def wait_for_status(
        self, status: str, timeout: int = TIMEOUT_4MINUTES, stop_status: str | None = None, sleep: int = 1
    ) -> None:
        """
        Wait for resource to be in status

        Args:
            status (str): Expected status (compared against status.phase).
            timeout (int): Time to wait for the resource.
            stop_status (str): Status which should stop the wait and fail.
            sleep (int): Seconds to wait between retries.

        Raises:
            TimeoutExpiredError: If the resource did not reach the desired status,
                or reached stop_status first.
        """
        # Default stop status: give up early if the resource reports FAILED.
        stop_status = stop_status if stop_status else self.Status.FAILED
        self.logger.info(f"Wait for {self.kind} {self.name} status to be {status}")
        samples = TimeoutSampler(
            wait_timeout=timeout,
            sleep=sleep,
            exceptions_dict={
                **PROTOCOL_ERROR_EXCEPTION_DICT,
                **DEFAULT_CLUSTER_RETRY_EXCEPTIONS,
            },
            func=self.api.get,
            field_selector=f"metadata.name=={self.name}",
            namespace=self.namespace,
        )
        current_status = None
        last_logged_status = None
        try:
            for sample in samples:
                if sample.items:
                    sample_status = sample.items[0].status
                    if sample_status:
                        current_status = sample_status.phase
                        # Log only on status transitions to avoid flooding the log.
                        if current_status != last_logged_status:
                            last_logged_status = current_status
                            self.logger.info(f"Status of {self.kind} {self.name} is {current_status}")

                        if current_status == status:
                            return

                        if current_status == stop_status:
                            raise TimeoutExpiredError(f"Status of {self.kind} {self.name} is {current_status}")

        except TimeoutExpiredError:
            # Surface the last observed status before re-raising.
            if current_status:
                self.logger.error(f"Status of {self.kind} {self.name} is {current_status}")
            raise

996 

    def create(self, wait: bool = False) -> ResourceInstance | None:
        """
        Create resource.

        Args:
            wait (bool) : True to wait for resource status.

        Returns:
            ResourceInstance | None: The created resource object as returned by the API.
        """
        # Build self.res from the configured attributes / yaml_file / kind_dict.
        self.to_dict()

        # Validate the resource if auto-validation is enabled
        if self.schema_validation_enabled:
            self.validate()

        hashed_res = self.hash_resource_dict(resource_dict=self.res)
        self.logger.info(f"Create {self.kind} {self.name}")
        self.logger.info(f"Posting {hashed_res}")
        self.logger.debug(f"\n{yaml.dump(hashed_res)}")
        resource_kwargs = {"body": self.res, "namespace": self.namespace}
        if self.dry_run:
            resource_kwargs["dry_run"] = "All"
        resource_ = self.api.create(**resource_kwargs)
        with contextlib.suppress(ForbiddenError, AttributeError, NotFoundError):
            # some resources do not support get() (no instance) or the client do not have permissions
            self.initial_resource_version = self.instance.metadata.resourceVersion

        if wait and resource_:
            self.wait()
        return resource_

1028 

    def delete(self, wait: bool = False, timeout: int = TIMEOUT_4MINUTES, body: dict[str, Any] | None = None) -> bool:
        """
        Delete the resource.

        Args:
            wait (bool): Wait for the resource to be removed from the cluster.
            timeout (int): Seconds to wait for deletion (used only when wait=True).
            body (dict, optional): Delete options body to send with the API call.

        Returns:
            bool: False only when wait=True and the resource was not deleted in time;
                True otherwise (including when the resource did not exist).
        """
        self.logger.info(f"Delete {self.kind} {self.name}")

        if self.exists:
            _instance_dict = self.instance.to_dict()
            if isinstance(_instance_dict, dict):
                # Log a masked snapshot of the object being deleted.
                hashed_data = self.hash_resource_dict(resource_dict=_instance_dict)
                self.logger.info(f"Deleting {hashed_data}")
                self.logger.debug(f"\n{yaml.dump(hashed_data)}")

            else:
                self.logger.warning(f"{self.kind}: {self.name} instance.to_dict() return was not a dict")

            self.api.delete(name=self.name, namespace=self.namespace, body=body)

            if wait:
                return self.wait_deleted(timeout=timeout)

            return True

        self.logger.warning(f"Resource {self.kind} {self.name} was not found, and wasn't deleted")
        return True

1051 

    @property
    def status(self) -> str:
        """
        Get resource status

        Status: Running, Scheduling, Pending, Unknown, CrashLoopBackOff

        Returns:
            str: Status (the live object's status.phase field).
        """
        self.logger.info(f"Get {self.kind} {self.name} status")
        return self.instance.status.phase

1064 

1065 def update(self, resource_dict: dict[str, Any]) -> None: 

1066 """ 

1067 Update resource with resource dict 

1068 

1069 Args: 

1070 resource_dict: Resource dictionary 

1071 """ 

1072 # Note: We don't validate on update() because this method sends a patch, 

1073 # not a complete resource. Patches are partial updates that would fail 

1074 # full schema validation. 

1075 

1076 hashed_resource_dict = self.hash_resource_dict(resource_dict=resource_dict) 

1077 self.logger.info(f"Update {self.kind} {self.name}:\n{hashed_resource_dict}") 

1078 self.logger.debug(f"\n{yaml.dump(hashed_resource_dict)}") 

1079 self.api.patch( 

1080 body=resource_dict, 

1081 namespace=self.namespace, 

1082 content_type="application/merge-patch+json", 

1083 ) 

1084 

1085 def update_replace(self, resource_dict: dict[str, Any]) -> None: 

1086 """ 

1087 Replace resource metadata. 

1088 Use this to remove existing field. (update() will only update existing fields) 

1089 """ 

1090 # Validate the resource if auto-validation is enabled 

1091 # For replace operations, we validate the full resource_dict 

1092 if self.schema_validation_enabled: 

1093 # Use validate_dict to validate the replacement resource 

1094 self.__class__.validate_dict(resource_dict) 

1095 

1096 hashed_resource_dict = self.hash_resource_dict(resource_dict=resource_dict) 

1097 self.logger.info(f"Replace {self.kind} {self.name}: \n{hashed_resource_dict}") 

1098 self.logger.debug(f"\n{yaml.dump(hashed_resource_dict)}") 

1099 self.api.replace(body=resource_dict, name=self.name, namespace=self.namespace) 

1100 

    @staticmethod
    def retry_cluster_exceptions(
        func: Callable,
        exceptions_dict: dict[type[Exception], list[str]] = DEFAULT_CLUSTER_RETRY_EXCEPTIONS,
        timeout: int = TIMEOUT_10SEC,
        sleep_time: int = 1,
        **kwargs: Any,
    ) -> Any:
        """
        Call func repeatedly until it returns without raising a retryable exception.

        Args:
            func (Callable): Function to call.
            exceptions_dict (dict): Exceptions (with message substrings) to retry on.
                NOTE: shared default dict; treated as read-only here.
            timeout (int): Total time in seconds to keep retrying.
            sleep_time (int): Seconds between attempts.
            **kwargs: Extra keyword arguments passed to func.

        Returns:
            Any: The first value returned by func.

        Raises:
            Exception: The last exception recorded by the sampler when the timeout
                expires; TimeoutExpiredError itself when none was recorded.
        """
        try:
            sampler = TimeoutSampler(
                wait_timeout=timeout,
                sleep=sleep_time,
                func=func,
                print_log=False,
                exceptions_dict=exceptions_dict,
                **kwargs,
            )
            # Return on the first successful sample.
            for sample in sampler:
                return sample

        except TimeoutExpiredError as exp:
            # Prefer re-raising the underlying failure over the timeout wrapper.
            if exp.last_exp:
                raise exp.last_exp

            raise

1126 

1127 @classmethod 

1128 def get( 

1129 cls, 

1130 config_file: str = "", 

1131 singular_name: str = "", 

1132 exceptions_dict: dict[type[Exception], list[str]] = DEFAULT_CLUSTER_RETRY_EXCEPTIONS, 

1133 raw: bool = False, 

1134 context: str | None = None, 

1135 dyn_client: DynamicClient | None = None, 

1136 *args: Any, 

1137 **kwargs: Any, 

1138 ) -> Generator[Any, None, None]: 

1139 """ 

1140 Get resources 

1141 

1142 Args: 

1143 dyn_client (DynamicClient): Open connection to remote cluster. 

1144 config_file (str): Path to config file for connecting to remote cluster. 

1145 context (str): Context name for connecting to remote cluster. 

1146 singular_name (str): Resource kind (in lowercase), in use where we have multiple matches for resource. 

1147 raw (bool): If True return raw object. 

1148 exceptions_dict (dict): Exceptions dict for TimeoutSampler 

1149 

1150 Returns: 

1151 generator: Generator of Resources of cls.kind. 

1152 """ 

1153 if not dyn_client: 

1154 dyn_client = get_client(config_file=config_file, context=context) 

1155 

1156 def _get() -> Generator["Resource|ResourceInstance", None, None]: 

1157 _resources = cls._prepare_resources(dyn_client=dyn_client, singular_name=singular_name, *args, **kwargs) # type: ignore[misc] 

1158 try: 

1159 for resource_field in _resources.items: 

1160 if raw: 

1161 yield _resources 

1162 else: 

1163 yield cls(client=dyn_client, name=resource_field.metadata.name) 

1164 

1165 except TypeError: 

1166 if raw: 

1167 yield _resources 

1168 else: 

1169 yield cls(client=dyn_client, name=_resources.metadata.name) 

1170 

1171 return Resource.retry_cluster_exceptions(func=_get, exceptions_dict=exceptions_dict) 

1172 

1173 @property 

1174 def instance(self) -> ResourceInstance: 

1175 """ 

1176 Get resource instance 

1177 

1178 Returns: 

1179 openshift.dynamic.client.ResourceInstance 

1180 """ 

1181 

1182 def _instance() -> ResourceInstance | None: 

1183 return self.api.get(name=self.name) 

1184 

1185 return self.retry_cluster_exceptions(func=_instance) 

1186 

    @property
    def labels(self) -> ResourceField:
        """
        Method to get labels for this resource

        Returns:
            openshift.dynamic.resource.ResourceField: Representation of labels

        Note:
            Uses direct ["labels"] access, not .get() — presumably raises KeyError
            when metadata has no labels; confirm ResourceField [] semantics.
        """
        return self.instance.get("metadata", {})["labels"]

1196 

    def watcher(self, timeout: int, resource_version: str = "") -> Generator[dict[str, Any], None, None]:
        """
        Get resource for a given timeout.

        Args:
            timeout (int): Time to get conditions.
            resource_version (str): The version with which to filter results. Only events with
                a resource_version greater than this value will be returned

        Yield:
            Event object with these keys:
                'type': The type of event such as "ADDED", "DELETED", etc.
                'raw_object': a dict representing the watched object.
                'object': A ResourceInstance wrapping raw_object.
        """
        # Falls back to the resourceVersion captured at create() time when none is given.
        yield from self.api.watch(
            timeout=timeout,
            namespace=self.namespace,
            field_selector=f"metadata.name=={self.name}",
            resource_version=resource_version or self.initial_resource_version,
        )

1218 

    def wait_for_condition(self, condition: str, status: str, timeout: int = 300, sleep_time: int = 1) -> None:
        """
        Wait for Resource condition to be in desire status.

        Args:
            condition (str): Condition to query.
            status (str): Expected condition status.
            timeout (int): Time to wait for the resource.
            sleep_time(int): Interval between each retry when checking the resource's condition.

        Raises:
            TimeoutExpiredError: If Resource condition in not in desire status.
        """
        self.logger.info(f"Wait for {self.kind}/{self.name}'s '{condition}' condition to be '{status}'")

        # Single time budget shared by both phases below.
        timeout_watcher = TimeoutWatch(timeout=timeout)
        # Phase 1: wait until the resource exists at all.
        for sample in TimeoutSampler(
            wait_timeout=timeout,
            sleep=sleep_time,
            func=lambda: self.exists,
        ):
            if sample:
                break

        # Phase 2: within the remaining time, wait for a matching condition/status pair.
        for sample in TimeoutSampler(
            wait_timeout=timeout_watcher.remaining_time(),
            sleep=sleep_time,
            func=lambda: self.instance,
        ):
            if sample:
                for cond in sample.get("status", {}).get("conditions", []):
                    if cond["type"] == condition and cond["status"] == status:
                        return

1252 

    def api_request(
        self, method: str, action: str, url: str, retry_params: dict[str, int] | None = None, **params: Any
    ) -> dict[str, Any]:
        """
        Handle API requests to resource.

        Args:
            method (str): Request method (GET/PUT etc.).
            action (str): Action to perform (stop/start/guestosinfo etc.).
            url (str): URL of resource.
            retry_params (dict): dict of timeout and sleep_time values for retrying the api request call

        Returns:
            data(dict): response data parsed as JSON. NOTE: when the body is not
                valid JSON, the raw response data is returned as-is, despite the
                dict annotation.

        """
        client: DynamicClient = self.client
        api_request_params = {
            "url": f"{url}/{action}",
            "method": method,
            # Reuse the dynamic client's configured auth headers for the raw request.
            "headers": client.client.configuration.api_key,
        }
        if retry_params:
            response = self.retry_cluster_exceptions(
                func=client.client.request,
                timeout=retry_params.get("timeout", TIMEOUT_10SEC),
                sleep_time=retry_params.get("sleep_time", TIMEOUT_1SEC),
                **api_request_params,
                **params,
            )
        else:
            response = client.client.request(
                **api_request_params,
                **params,
            )
        try:
            return json.loads(response.data)
        except json.decoder.JSONDecodeError:
            # Body was not JSON - return the raw payload.
            return response.data

1292 

1293 def wait_for_conditions(self) -> None: 

1294 timeout_watcher = TimeoutWatch(timeout=30) 

1295 for sample in TimeoutSampler( 

1296 wait_timeout=TIMEOUT_30SEC, 

1297 sleep=1, 

1298 func=lambda: self.exists, 

1299 ): 

1300 if sample: 

1301 break 

1302 

1303 samples = TimeoutSampler( 

1304 wait_timeout=timeout_watcher.remaining_time(), 

1305 sleep=1, 

1306 func=lambda: self.instance.status.conditions, 

1307 ) 

1308 for sample in samples: 

1309 if sample: 

1310 return 

1311 

    def events(
        self,
        name: str = "",
        label_selector: str = "",
        field_selector: str = "",
        resource_version: str = "",
        timeout: int = TIMEOUT_4MINUTES,
    ) -> Generator[Any, Any, None]:
        """
        get - retrieves K8s events.

        Args:
            name (str): event name
            label_selector (str): filter events by labels; comma separated string of key=value
            field_selector (str): filter events by event fields; comma separated string of key=value
            resource_version (str): filter events by their resource's version
            timeout (int): timeout in seconds

        Yields:
            event objects (this is a generator, not a list)

        example: reading all CSV Warning events in namespace "my-namespace", with reason of "AnEventReason"
            pod = Pod(client=client, name="pod", namespace="my-namespace")
            for event in pod.events(
                namespace="my-namespace",
                field_selector="involvedObject.kind==ClusterServiceVersion,type==Warning,reason=AnEventReason",
                timeout=10,
            ):
                print(event.object)
        """
        # Always scope the query to events about this resource by name.
        _field_selector = f"involvedObject.name=={self.name}"
        if field_selector:
            field_selector = f"{_field_selector},{field_selector}"
        yield from Event.get(
            dyn_client=self.client,
            namespace=self.namespace,
            name=name,
            label_selector=label_selector,
            field_selector=field_selector or _field_selector,
            resource_version=resource_version,
            timeout=timeout,
        )

1355 

1356 @staticmethod 

1357 def get_all_cluster_resources( 

1358 client: DynamicClient | None = None, 

1359 config_file: str = "", 

1360 context: str | None = None, 

1361 config_dict: dict[str, Any] | None = None, 

1362 *args: Any, 

1363 **kwargs: Any, 

1364 ) -> Generator[ResourceField, None, None]: 

1365 """ 

1366 Get all cluster resources 

1367 

1368 Args: 

1369 client (DynamicClient): k8s client 

1370 config_file (str): path to a kubeconfig file. 

1371 config_dict (dict): dict with kubeconfig configuration. 

1372 context (str): name of the context to use. 

1373 *args (tuple): args to pass to client.get() 

1374 **kwargs (dict): kwargs to pass to client.get() 

1375 

1376 Yields: 

1377 kubernetes.dynamic.resource.ResourceField: Cluster resource. 

1378 

1379 Example: 

1380 for resource in get_all_cluster_resources(label_selector="my-label=value"): 

1381 print(f"Resource: {resource}") 

1382 """ 

1383 if not client: 

1384 client = get_client(config_file=config_file, config_dict=config_dict, context=context) 

1385 

1386 for _resource in client.resources.search(): 

1387 try: 

1388 _resources = client.get(_resource, *args, **kwargs) 

1389 yield from _resources.items 

1390 

1391 except (NotFoundError, TypeError, MethodNotAllowedError): 

1392 continue 

1393 

1394 def to_yaml(self) -> str: 

1395 """ 

1396 Get resource as YAML representation. 

1397 

1398 Returns: 

1399 str: Resource YAML representation. 

1400 """ 

1401 if not self.res: 

1402 self.to_dict() 

1403 resource_yaml = yaml.dump(self.res) 

1404 self.logger.info(f"\n{resource_yaml}") 

1405 return resource_yaml 

1406 

    @property
    def keys_to_hash(self) -> list[str]:
        """
        Resource attributes list to hash in the logs.

        The list should hold absolute key paths in resource dict.
        The base implementation hashes nothing; override in subclasses to mask
        sensitive fields (consumed by hash_resource_dict()).

        Example:
            given a dict: {"spec": {"data": <value_to_hash>}}
            To hash spec['data'] key pass: ["spec>data"]
        """
        return []

1419 

1420 def hash_resource_dict(self, resource_dict: dict[Any, Any]) -> dict[Any, Any]: 

1421 if not isinstance(resource_dict, dict): 

1422 raise ValueError("Expected a dictionary as the first argument") 

1423 

1424 if os.environ.get("OPENSHIFT_PYTHON_WRAPPER_HASH_LOG_DATA", "true") == "false": 

1425 return resource_dict 

1426 

1427 if self.keys_to_hash and self.hash_log_data: 

1428 resource_dict = copy.deepcopy(resource_dict) 

1429 for key_name in self.keys_to_hash: 

1430 resource_dict = replace_key_with_hashed_value(resource_dict=resource_dict, key_name=key_name) 

1431 

1432 return resource_dict 

1433 

1434 def get_condition_message(self, condition_type: str, condition_status: str = "") -> str: 

1435 """ 

1436 Get condition message by condition type and condition status 

1437 

1438 Args: 

1439 condition_type (str): condition type name 

1440 condition_status (str, optional): condition status to match 

1441 

1442 Returns: 

1443 str: condition message or empty string if condition status doesn't match 

1444 """ 

1445 if _conditions := self.instance.status.conditions: 

1446 for condition in _conditions: 

1447 if condition_type == condition.type: 

1448 if not condition_status: 

1449 return condition.message 

1450 

1451 if condition_status == condition.status: 

1452 return condition.message 

1453 

1454 self.logger.error( 

1455 f"Condition `{condition_type}` status is not `{condition_status}`, got `{condition.status}`" 

1456 ) 

1457 break 

1458 

1459 return "" 

1460 

1461 def validate(self) -> None: 

1462 """ 

1463 Validate the resource against its OpenAPI schema. 

1464 

1465 This method validates the resource dictionary (self.res) against the 

1466 appropriate OpenAPI schema for this resource type. If validation fails, 

1467 a ValidationError is raised with details about what is invalid. 

1468 

1469 Note: This method is called automatically during create() and update() 

1470 operations if schema_validation_enabled was set to True when creating 

1471 the resource instance. 

1472 

1473 Raises: 

1474 ValidationError: If the resource is invalid according to the schema 

1475 """ 

1476 

1477 # Get resource dict - if self.res is already populated, use it directly 

1478 # Otherwise, try to build it with to_dict() 

1479 if not self.res: 

1480 try: 

1481 self.to_dict() # This populates self.res 

1482 except Exception: 

1483 # If to_dict fails (e.g., missing required fields), 

1484 # we can't validate - let the original error propagate 

1485 raise 

1486 

1487 resource_dict = self.res 

1488 

1489 # Validate using shared validator 

1490 try: 

1491 SchemaValidator.validate(resource_dict=resource_dict, kind=self.kind, api_group=self.api_group) 

1492 except jsonschema.ValidationError as e: 

1493 error_msg = SchemaValidator.format_validation_error( 

1494 error=e, kind=self.kind, name=self.name or "unnamed", api_group=self.api_group 

1495 ) 

1496 raise ValidationError(error_msg) 

1497 except Exception as e: 

1498 LOGGER.error(f"Unexpected error during validation: {e}") 

1499 raise 

1500 

1501 @classmethod 

1502 def validate_dict(cls, resource_dict: dict[str, Any]) -> None: 

1503 """ 

1504 Validate a resource dictionary against the schema. 

1505 

1506 Args: 

1507 resource_dict: Dictionary representation of the resource 

1508 

1509 Raises: 

1510 ValidationError: If the resource dict is invalid 

1511 """ 

1512 

1513 # Get name for error messages 

1514 name = resource_dict.get("metadata", {}).get("name", "unnamed") 

1515 

1516 # Validate using shared validator 

1517 try: 

1518 SchemaValidator.validate(resource_dict=resource_dict, kind=cls.kind, api_group=cls.api_group) 

1519 except jsonschema.ValidationError as e: 

1520 error_msg = SchemaValidator.format_validation_error( 

1521 error=e, kind=cls.kind, name=name, api_group=cls.api_group 

1522 ) 

1523 raise ValidationError(error_msg) 

1524 except Exception as e: 

1525 LOGGER.error(f"Unexpected error during validation: {e}") 

1526 raise 

1527 

1528 

class NamespacedResource(Resource):
    """
    Namespaced object, inherited from Resource.
    """

    def __init__(
        self,
        name: str | None = None,
        namespace: str | None = None,
        teardown: bool = True,
        yaml_file: str | None = None,
        delete_timeout: int = TIMEOUT_4MINUTES,
        client: DynamicClient | None = None,
        ensure_exists: bool = False,
        **kwargs: Any,
    ):
        """
        Args:
            name (str, optional): Resource name.
            namespace (str, optional): Resource namespace.
            teardown / yaml_file / delete_timeout / client / **kwargs:
                forwarded unchanged to Resource.__init__.
            ensure_exists (bool): When True, call self._ensure_exists() right away.

        Raises:
            MissingRequiredArgumentError: If name and namespace are not both set
                and no yaml_file or kind_dict was provided.
        """
        super().__init__(
            name=name,
            client=client,
            teardown=teardown,
            yaml_file=yaml_file,
            delete_timeout=delete_timeout,
            **kwargs,
        )
        self.namespace = namespace
        # name+namespace may be omitted only when the resource is fully described
        # by a yaml file or a kind dict.
        if not (self.name and self.namespace) and not self.yaml_file and not self.kind_dict:
            raise MissingRequiredArgumentError(argument="'name' and 'namespace'")

        if ensure_exists:
            self._ensure_exists()

    @classmethod
    def get(
        cls,
        config_file: str = "",
        singular_name: str = "",
        exceptions_dict: dict[type[Exception], list[str]] = DEFAULT_CLUSTER_RETRY_EXCEPTIONS,
        raw: bool = False,
        context: str | None = None,
        dyn_client: DynamicClient | None = None,
        *args: Any,
        **kwargs: Any,
    ) -> Generator[Any, None, None]:
        """
        Get resources

        Args:
            dyn_client (DynamicClient): Open connection to remote cluster
            config_file (str): Path to config file for connecting to remote cluster.
            context (str): Context name for connecting to remote cluster.
            singular_name (str): Resource kind (in lowercase), in use where we have multiple matches for resource.
            raw (bool): If True return raw object.
            exceptions_dict (dict): Exceptions dict for TimeoutSampler

        Returns:
            generator: Generator of Resources of cls.kind
        """
        if not dyn_client:
            dyn_client = get_client(config_file=config_file, context=context)

        def _get() -> Generator["NamespacedResource|ResourceInstance", None, None]:
            _resources = cls._prepare_resources(dyn_client=dyn_client, singular_name=singular_name, *args, **kwargs)  # type: ignore[misc]
            try:
                for resource_field in _resources.items:
                    if raw:
                        yield resource_field
                    else:
                        yield cls(
                            client=dyn_client,
                            name=resource_field.metadata.name,
                            namespace=resource_field.metadata.namespace,
                        )
            except TypeError:
                # _resources is a single object (its .items is not iterable).
                if raw:
                    yield _resources
                else:
                    yield cls(
                        client=dyn_client,
                        name=_resources.metadata.name,
                        namespace=_resources.metadata.namespace,
                    )

        return Resource.retry_cluster_exceptions(func=_get, exceptions_dict=exceptions_dict)

    @property
    def instance(self) -> ResourceInstance:
        """
        Get resource instance

        Returns:
            openshift.dynamic.client.ResourceInstance
        """

        def _instance() -> ResourceInstance:
            return self.api.get(name=self.name, namespace=self.namespace)

        return self.retry_cluster_exceptions(func=_instance)

    def _base_body(self) -> None:
        # From yaml/kind_dict the namespace may come from the manifest itself;
        # otherwise the configured namespace is written into the metadata.
        if self.yaml_file or self.kind_dict:
            self.namespace = self.res["metadata"].get("namespace", self.namespace)

        else:
            self.res["metadata"]["namespace"] = self.namespace

        if not self.namespace:
            raise MissingRequiredArgumentError(argument="namespace")

    def to_dict(self) -> None:
        # Build the generic body first, then fill in the namespace-specific part.
        super(NamespacedResource, self)._base_body()
        self._base_body()

1640 

1641 

1642class ResourceEditor: 

    def __init__(
        self, patches: dict[Any, Any], action: str = "update", user_backups: dict[Any, Any] | None = None
    ) -> None:
        """
        Args:
            patches (dict): {<Resource object>: <yaml patch as dict>}
                e.g. {<Resource object>:
                        {'metadata': {'labels': {'label1': 'true'}}}
            action (str): Patch action to apply - "update" (default) or "replace".
            user_backups (dict, optional): Pre-computed backups to use instead of
                auto-generated ones during update(backup_resources=True).

        Allows for temporary edits to cluster resources for tests. During
        __enter__ user-specified patches (see args) are applied and old values
        are backed up, and during __exit__ these backups are used to reverse
        all changes made.

        Flow:
            1) apply patches
            2) automation runs
            3) edits made to resources are reversed

        May also be used without being treated as a context manager by
        calling the methods update() and restore() after instantiation.

        *** the DynamicClient object used to get the resources must not be
        using an unprivileged_user; use default_client or similar instead.***
        """

        # Normalize any ResourceField values in the patches into plain dicts.
        self._patches = self._dictify_resourcefield(res=patches)
        self.action = action
        self.user_backups = user_backups
        self._backups: dict[Any, Any] = {}

1673 

    @property
    def backups(self) -> dict[Any, Any]:
        """Returns a dict {<Resource object>: <backup_as_dict>}
        The backup dict kept for each resource edited.
        Populated by update(backup_resources=True), or taken from user_backups."""
        return self._backups

1679 

    @property
    def patches(self) -> dict[Any, Any]:
        """Returns the patches dict provided in the constructor
        (after ResourceField values were converted to plain dicts)."""
        return self._patches

1684 

    def update(self, backup_resources: bool = False) -> None:
        """
        Prepares backup dicts (where necessary) and applies patches.

        Args:
            backup_resources (bool): When True, compute (or take user-supplied)
                backups before patching so restore() can undo the changes.
        """
        # prepare update dicts and backups
        resource_to_patch = []
        if backup_resources:
            LOGGER.info("ResourceEdit: Backing up old data")
            if self.user_backups:
                resource_to_patch = self._patches
                self._backups = self.user_backups

            else:
                for resource, update in self._patches.items():
                    namespace = None
                    # prepare backup
                    try:
                        original_resource_dict = resource.instance.to_dict()
                    except NotFoundError:
                        # Some resource cannot be found by name.
                        # happens in 'ServiceMonitor' resource.
                        original_resource_dict = list(
                            resource.get(
                                dyn_client=resource.client,
                                field_selector=f"metadata.name={resource.name}",
                            )
                        )[0].to_dict()
                        namespace = update.get("metadata", {}).get("namespace")

                    backup = self._create_backup(original=original_resource_dict, patch=update)
                    if namespace:
                        # Add namespace to metadata for restore.
                        backup["metadata"]["namespace"] = namespace

                    # no need to back up if no changes have been made
                    # if action is 'replace' we need to update even if no backup (replace update can be empty )
                    if backup or self.action == "replace":
                        resource_to_patch.append(resource)
                        self._backups[resource] = backup
                    else:
                        LOGGER.warning(f"ResourceEdit: no diff found in patch for {resource.name} -- skipping")
                if not resource_to_patch:
                    return
        else:
            resource_to_patch = self._patches

        patches_to_apply = {resource: self._patches[resource] for resource in resource_to_patch}

        # apply changes
        self._apply_patches_sampler(patches=patches_to_apply, action_text="Updating", action=self.action)

1733 

    def restore(self) -> None:
        """Apply the stored backups to revert all changes made by update()."""
        self._apply_patches_sampler(patches=self._backups, action_text="Restoring", action=self.action)

1736 

    def __enter__(self) -> Self:
        """Apply the patches (backing up original values) and return self."""
        self.update(backup_resources=True)
        return self

1740 

    def __exit__(
        self, exc_type: type[BaseException] | None, exc_val: BaseException | None, exc_tb: TracebackType | None
    ) -> None:
        """Revert all edits on context exit (runs even when an exception occurred)."""
        # restore backups
        self.restore()

1746 

1747 @staticmethod 

1748 def _dictify_resourcefield(res: Any) -> Any: 

1749 """Recursively turns any ResourceField objects into dicts to avoid issues caused by appending lists, etc.""" 

1750 if isinstance(res, ResourceField): 

1751 return ResourceEditor._dictify_resourcefield(res=dict(res.items())) 

1752 

1753 elif isinstance(res, dict): 

1754 return { 

1755 ResourceEditor._dictify_resourcefield(res=key): ResourceEditor._dictify_resourcefield(res=value) 

1756 for key, value in res.items() 

1757 } 

1758 

1759 elif isinstance(res, list): 

1760 return [ResourceEditor._dictify_resourcefield(res=x) for x in res] 

1761 

1762 return res 

1763 

1764 @staticmethod 

1765 def _create_backup(original: dict[Any, Any], patch: dict[Any, Any]) -> dict[Any, Any]: 

1766 """ 

1767 Args: 

1768 original (dict*): source of values to back up if necessary 

1769 patch (dict*): 'new' values; keys needn't necessarily all be 

1770 contained in original 

1771 

1772 Returns a dict containing the fields in original that are different 

1773 from update. Performs the 

1774 

1775 Places None for fields in update that don't appear in 

1776 original (because that's how the API knows to remove those fields from 

1777 the yaml). 

1778 

1779 * the first call will be with both of these arguments as dicts but 

1780 this will not necessarily be the case during recursion""" 

1781 

1782 # when both are dicts, get the diff (recursively if need be) 

1783 if isinstance(original, dict) and isinstance(patch, dict): 

1784 diff_dict: dict[Any, Any] = {} 

1785 for key, value in patch.items(): 

1786 if key not in original: 

1787 diff_dict[key] = None 

1788 continue 

1789 

1790 # recursive call 

1791 key_diff = ResourceEditor._create_backup(original=original[key], patch=value) 

1792 

1793 if key_diff is not None: 

1794 diff_dict[key] = key_diff 

1795 

1796 return diff_dict 

1797 

1798 # for one or more non-dict values, just compare them 

1799 if patch != original: 

1800 return original 

1801 else: 

1802 # this return value will be received by key_diff above 

1803 return None 

1804 

1805 @staticmethod 

1806 def _apply_patches(patches: dict[Any, Any], action_text: str, action: str) -> None: 

1807 """ 

1808 Updates provided Resource objects with provided yaml patches 

1809 

1810 Args: 

1811 patches (dict): {<Resource object>: <yaml patch as dict>} 

1812 action_text (str): 

1813 "ResourceEdit <action_text> for resource <resource name>" 

1814 will be printed for each resource; see below 

1815 """ 

1816 

1817 for resource, patch in patches.items(): 

1818 LOGGER.info(f"ResourceEdits: {action_text} data for resource {resource.kind} {resource.name}") 

1819 

1820 # add name to patch 

1821 if "metadata" not in patch: 

1822 patch["metadata"] = {} 

1823 

1824 # the api requires this field to be present in a yaml patch for 

1825 # some resource kinds even if it is not changed 

1826 if "name" not in patch["metadata"]: 

1827 patch["metadata"]["name"] = resource.name 

1828 

1829 if action == "update": 

1830 resource.update(resource_dict=patch) # update the resource 

1831 

1832 if action == "replace": 

1833 if "metadata" not in patch: 

1834 patch["metadata"] = {} 

1835 

1836 patch["metadata"]["name"] = resource.name 

1837 patch["metadata"]["namespace"] = resource.namespace 

1838 patch["metadata"]["resourceVersion"] = resource.instance.metadata.resourceVersion 

1839 patch["kind"] = resource.kind 

1840 patch["apiVersion"] = resource.api_version 

1841 

1842 resource.update_replace(resource_dict=patch) # replace the resource metadata 

1843 

1844 def _apply_patches_sampler(self, patches: dict[Any, Any], action_text: str, action: str) -> ResourceInstance: 

1845 exceptions_dict: dict[type[Exception], list[str]] = {ConflictError: []} 

1846 exceptions_dict.update(DEFAULT_CLUSTER_RETRY_EXCEPTIONS) 

1847 return Resource.retry_cluster_exceptions( 

1848 func=self._apply_patches, 

1849 exceptions_dict=exceptions_dict, 

1850 patches=patches, 

1851 action_text=action_text, 

1852 action=action, 

1853 timeout=TIMEOUT_30SEC, 

1854 sleep_time=TIMEOUT_5SEC, 

1855 ) 

1856 

1857 

class BaseResourceList(ABC):
    """
    Abstract base class for managing collections of resources.

    Provides common functionality for resource lists including context management,
    iteration, indexing, deployment, and cleanup operations.
    """

    def __init__(self, client: DynamicClient) -> None:
        """
        Args:
            client (DynamicClient): The dynamic client shared by all resources in the list.
        """
        self.resources: list[Resource] = []
        self.client = client

    def __enter__(self) -> Self:
        """Enters the runtime context and deploys all resources."""
        self.deploy()
        return self

    def __exit__(
        self,
        exc_type: type[BaseException] | None,
        exc_val: BaseException | None,
        exc_tb: TracebackType | None,
    ) -> None:
        """Exits the runtime context and cleans up all resources."""
        self.clean_up()

    def __iter__(self) -> Generator[Resource | NamespacedResource, None, None]:
        """Allows iteration over the resources in the list."""
        yield from self.resources

    def __getitem__(self, index: int) -> Resource | NamespacedResource:
        """Retrieves a resource from the list by its index."""
        return self.resources[index]

    def __len__(self) -> int:
        """Returns the number of resources in the list."""
        return len(self.resources)

    def deploy(self, wait: bool = False) -> list[Resource | NamespacedResource]:
        """
        Deploys all resources in the list.

        Args:
            wait (bool): If True, wait for each resource to be ready.

        Returns:
            List[Any]: A list of the results from each resource's deploy() call.
        """
        return [resource.deploy(wait=wait) for resource in self.resources]

    def clean_up(self, wait: bool = True) -> bool:
        """
        Deletes all resources in the list.

        Args:
            wait (bool): If True, wait for each resource to be deleted.

        Returns:
            bool: Returns True if all resources are cleaned up correctly.
        """
        # Deleting in reverse order to resolve dependencies correctly.
        return all(resource.clean_up(wait=wait) for resource in reversed(self.resources))

    @abstractmethod
    def _create_resources(self, resource_class: type, **kwargs: Any) -> None:
        """Abstract method to create resources based on specific logic."""

1925 

1926 

class ResourceList(BaseResourceList):
    """
    Manages N copies of a single resource type.

    Each copy gets a unique name derived from the base name passed via kwargs
    ("<base_name>-1" .. "<base_name>-N").
    """

    def __init__(
        self,
        resource_class: Type[Resource],
        num_resources: int,
        client: DynamicClient,
        **kwargs: Any,
    ) -> None:
        """
        Build a list of N resource objects.

        Args:
            resource_class (Type[Resource]): The resource class to instantiate (e.g., Namespace).
            num_resources (int): How many copies of the resource to create.
            client (DynamicClient): The dynamic client to use.
            **kwargs (Any): Passed through to the resource_class constructor.
                Must include a 'name' key, used as the base name for all copies.
        """
        super().__init__(client)

        self.num_resources = num_resources
        self._create_resources(resource_class, **kwargs)

    def _create_resources(self, resource_class: Type[Resource], **kwargs: Any) -> None:
        """Instantiate N resources, suffixing the base name with 1..N."""
        base_name = kwargs["name"]

        for index in range(1, self.num_resources + 1):
            # override only the name; all other kwargs are shared across copies
            per_resource_kwargs = {**kwargs, "name": f"{base_name}-{index}"}
            self.resources.append(resource_class(client=self.client, **per_resource_kwargs))

1968 

1969 

class NamespacedResourceList(BaseResourceList):
    """
    Manages a collection of a specific namespaced resource (e.g., Pod, Service, etc),
    creating one instance per provided namespace.

    One copy of the given namespaced resource is created in each namespace of
    the supplied list.
    """

    def __init__(
        self,
        resource_class: Type[NamespacedResource],
        namespaces: ResourceList,
        client: DynamicClient,
        **kwargs: Any,
    ) -> None:
        """
        Build a list of resource objects, one per namespace.

        Args:
            resource_class (Type[NamespacedResource]): The namespaced resource class to instantiate (e.g., Pod).
            namespaces (ResourceList): A ResourceList of namespaces in which to create the resources.
            client (DynamicClient): The dynamic client to use for cluster communication.
            **kwargs (Any): Passed through to the resource_class constructor.
                Must include a 'name' key, used as the base name for the resources.

        Raises:
            TypeError: if any entry in namespaces is not a Namespace resource.
        """
        # reject the whole list up front if any entry is not a Namespace
        if any(ns.kind != "Namespace" for ns in namespaces):
            raise TypeError("All the resources in namespaces should be namespaces.")

        super().__init__(client)

        self.namespaces = namespaces
        self._create_resources(resource_class, **kwargs)

    def _create_resources(self, resource_class: Type[NamespacedResource], **kwargs: Any) -> None:
        """Instantiate one resource in each namespace."""
        self.resources.extend(
            resource_class(namespace=namespace.name, client=self.client, **kwargs) for namespace in self.namespaces
        )