Coverage for ocp_resources/resource.py: 34%

686 statements  

« prev     ^ index     » next       coverage.py v7.6.10, created at 2025-02-12 18:11 +0200

1from __future__ import annotations 

2 

3import contextlib 

4import copy 

5import json 

6import os 

7import re 

8import sys 

9from collections.abc import Callable, Generator 

10from io import StringIO 

11from signal import SIGINT, signal 

12from types import TracebackType 

13from typing import Any 

14from warnings import warn 

15 

16import kubernetes 

17import yaml 

18from benedict import benedict 

19from kubernetes.dynamic import DynamicClient, ResourceInstance 

20from kubernetes.dynamic.exceptions import ( 

21 ConflictError, 

22 ForbiddenError, 

23 MethodNotAllowedError, 

24 NotFoundError, 

25 ResourceNotFoundError, 

26) 

27from kubernetes.dynamic.resource import ResourceField 

28from packaging.version import Version 

29from simple_logger.logger import get_logger, logging 

30from timeout_sampler import ( 

31 TimeoutExpiredError, 

32 TimeoutSampler, 

33 TimeoutWatch, 

34) 

35from urllib3.exceptions import MaxRetryError 

36 

37from ocp_resources.event import Event 

38from ocp_resources.exceptions import MissingRequiredArgumentError, MissingResourceResError, ResourceTeardownError 

39from ocp_resources.utils.constants import ( 

40 DEFAULT_CLUSTER_RETRY_EXCEPTIONS, 

41 NOT_FOUND_ERROR_EXCEPTION_DICT, 

42 PROTOCOL_ERROR_EXCEPTION_DICT, 

43 TIMEOUT_1MINUTE, 

44 TIMEOUT_1SEC, 

45 TIMEOUT_4MINUTES, 

46 TIMEOUT_5SEC, 

47 TIMEOUT_10SEC, 

48 TIMEOUT_30SEC, 

49) 

50from ocp_resources.utils.resource_constants import ResourceConstants 

51from ocp_resources.utils.utils import skip_existing_resource_creation_teardown 

52 

# Module-level logger used by the free functions in this module;
# Resource instances build their own per-kind logger via _set_logger().
LOGGER = get_logger(name=__name__)
# Highest Kubernetes API version this wrapper will select when resolving a kind
# (enforced in _find_supported_resource).
MAX_SUPPORTED_API_VERSION = "v2"

55 

56 

def _find_supported_resource(dyn_client: DynamicClient, api_group: str, kind: str) -> ResourceField | None:
    """
    Return the newest API resource matching *kind* in *api_group* whose version
    does not exceed MAX_SUPPORTED_API_VERSION, or None when no candidate qualifies.
    """
    version_ceiling = KubeAPIVersion(MAX_SUPPORTED_API_VERSION)
    candidates = sorted(
        dyn_client.resources.search(group=api_group, kind=kind),
        key=lambda candidate: KubeAPIVersion(candidate.api_version),
        reverse=True,
    )
    return next(
        (candidate for candidate in candidates if KubeAPIVersion(candidate.api_version) <= version_ceiling),
        None,
    )

64 

65 

def _get_api_version(dyn_client: DynamicClient, api_group: str, kind: str) -> str:
    """
    Resolve the `group/version` string for *kind* within *api_group*.

    Raises:
        NotImplementedError: when no supported resource version is found.
    """
    resource = _find_supported_resource(dyn_client=dyn_client, api_group=api_group, kind=kind)
    if not resource:
        message = f"Couldn't find {kind} in {api_group} api group"
        LOGGER.warning(message)
        raise NotImplementedError(message)

    LOGGER.info(f"kind: {kind} api version: {resource.group_version}")
    return resource.group_version

76 

77 

def get_client(
    config_file: str = "",
    config_dict: dict[str, Any] | None = None,
    context: str = "",
    **kwargs: Any,
) -> DynamicClient:
    """
    Get a kubernetes client.

    This function is a replica of `ocp_utilities.infra.get_client` which cannot be imported as ocp_utilities imports
    from ocp_resources.

    Pass either config_file or config_dict.
    If none of them are passed, client will be created from default OS kubeconfig
    (environment variable or .kube folder).

    Args:
        config_file (str): path to a kubeconfig file.
        config_dict (dict): dict with kubeconfig configuration.
        context (str): name of the context to use.

    Keyword Args:
        Forwarded to the underlying kubernetes config loaders
        (e.g. client_configuration, persist_config, try_refresh_token).

    Returns:
        DynamicClient: a kubernetes client.

    Raises:
        ValueError: when OPENSHIFT_PYTHON_WRAPPER_CLIENT_USE_PROXY is set but no
            proxy env var exists, or the env proxy conflicts with an explicit one.
    """
    # Ref: https://github.com/kubernetes-client/python/blob/v26.1.0/kubernetes/base/config/kube_config.py
    if config_dict:
        return kubernetes.dynamic.DynamicClient(
            client=kubernetes.config.new_client_from_config_dict(
                config_dict=config_dict, context=context or None, **kwargs
            )
        )

    # Bugfix: use pop() (not get()) so `client_configuration` is removed from kwargs.
    # With get(), forwarding both `client_configuration=...` and `**kwargs` below raised
    # `TypeError: got multiple values for keyword argument` whenever a caller passed one.
    client_configuration = kwargs.pop("client_configuration", kubernetes.client.Configuration())
    try:
        # Ref: https://github.com/kubernetes-client/python/blob/v26.1.0/kubernetes/base/config/__init__.py
        LOGGER.info("Trying to get client via new_client_from_config")

        # kubernetes.config.kube_config.load_kube_config sets KUBE_CONFIG_DEFAULT_LOCATION during module import.
        # If `KUBECONFIG` environment variable is set via code, the `KUBE_CONFIG_DEFAULT_LOCATION` will be None since
        # is populated during import which comes before setting the variable in code.
        config_file = config_file or os.environ.get("KUBECONFIG", "~/.kube/config")

        if os.environ.get("OPENSHIFT_PYTHON_WRAPPER_CLIENT_USE_PROXY"):
            proxy = os.environ.get("HTTPS_PROXY") or os.environ.get("HTTP_PROXY")
            if not proxy:
                raise ValueError(
                    "Proxy configuration is enabled but neither HTTPS_PROXY nor HTTP_PROXY environment variables are set."
                )
            if client_configuration.proxy and client_configuration.proxy != proxy:
                raise ValueError(
                    f"Conflicting proxy settings: client_configuration.proxy={client_configuration.proxy}, "
                    f"but the environment variable 'HTTPS_PROXY/HTTP_PROXY' defines proxy as {proxy}."
                )
            client_configuration.proxy = proxy

        return kubernetes.dynamic.DynamicClient(
            client=kubernetes.config.new_client_from_config(
                config_file=config_file,
                client_configuration=client_configuration,
                context=context or None,
                **kwargs,
            )
        )
    except MaxRetryError:
        # Fall back to in-cluster configuration (service account token mounted in the pod).
        # Ref: https://github.com/kubernetes-client/python/blob/v26.1.0/kubernetes/base/config/incluster_config.py
        LOGGER.info("Trying to get client via incluster_config")
        return kubernetes.dynamic.DynamicClient(
            client=kubernetes.config.incluster_config.load_incluster_config(
                client_configuration=client_configuration,
                try_refresh_token=kwargs.get("try_refresh_token", True),
            )
        )

150 

151 

152def sub_resource_level(current_class: Any, owner_class: Any, parent_class: Any) -> str | None: 

153 # return the name of the last class in MRO list that is not one of base 

154 # classes; otherwise return None 

155 for class_iterator in reversed([ 

156 class_iterator 

157 for class_iterator in current_class.mro() 

158 if class_iterator not in owner_class.mro() and issubclass(class_iterator, parent_class) 

159 ]): 

160 return class_iterator.__name__ 

161 

162 return None 

163 

164 

def replace_key_with_hashed_value(resource_dict: dict[Any, Any], key_name: str) -> dict[Any, Any]:
    """
    Recursively search a nested dictionary for a given key and changes its value to "*******" if found.

    The function supports two key formats:
    1. Regular dictionary path:
        A key to be hashed can be found directly in a dictionary, e.g. "a>b>c", would hash the value associated with
        key "c", where dictionary format is:
        input = {
            "a": {
                "b": {
                    "c": "sensitive data"
                }
            }
        }
        output = {
            "a": {
                "b": {
                    "c": "*******"
                }
            }
        }
    2. list path:
        A key to be hashed can be found in a dictionary that is in list somewhere in a dictionary, e.g. "a>b[]>c",
        would hash the value associated with key "c", where dictionary format is:
        input = {
            "a": {
                "b": [
                    {"d": "not sensitive data"},
                    {"c": "sensitive data"}
                ]
            }
        }
        output = {
            "a": {
                "b": [
                    {"d": "not sensitive data"},
                    {"c": "*******"}
                ]
            }
        }

    Args:
        resource_dict: The nested dictionary to search.
        key_name: The key path to find, with ">" as the path separator and "[]"
            marking a list to descend into.

    Returns:
        dict[Any, Any]: A copy of the input dictionary with the specified key's value replaced with "*******".

    """
    # Deep copy so the caller's dictionary is never mutated.
    result = copy.deepcopy(resource_dict)

    # benedict allows keypath addressing ("a>b>c") over the nested dict.
    benedict_resource_dict = benedict(result, keypath_separator=">")

    if "[]" not in key_name:
        # Simple keypath: hash in place if present (falsy existing values are left untouched).
        if benedict_resource_dict.get(key_name):
            benedict_resource_dict[key_name] = "*******"
        return dict(benedict_resource_dict)

    # List path: split at the first "[]>" and recurse into each dict element of the list.
    key_prefix, remaining_key = key_name.split("[]>", 1)
    if not benedict_resource_dict.get(key_prefix):
        return dict(benedict_resource_dict)

    resource_data = benedict_resource_dict[key_prefix]
    if not isinstance(resource_data, list):
        # Path says list but the data is not one — return unchanged rather than fail.
        return dict(benedict_resource_dict)

    for index, element in enumerate(resource_data):
        if isinstance(element, dict):
            resource_data[index] = replace_key_with_hashed_value(resource_dict=element, key_name=remaining_key)

    return dict(benedict_resource_dict)

237 

238 

class KubeAPIVersion(Version):
    """
    Implement the Kubernetes API versioning scheme from
    https://kubernetes.io/docs/concepts/overview/kubernetes-api/#api-versioning

    Versions look like "v1", "v2", "v1alpha1", "v1beta2"; stable versions sort
    after their alpha/beta pre-releases of the same major number.
    """

    # Splits e.g. "v1beta2" into ["v", "1", "beta", "2"].
    component_re = re.compile(r"(\d+ | [a-z]+)", re.VERBOSE)

    def __init__(self, vstring: str):
        self.vstring = vstring
        # Parsed components, e.g. ["v", 1, "beta", 2]; filled by parse().
        # NOTE(review): parse() is not called here and packaging's Version.__init__
        # is not known to call it — confirm self.version is populated before _cmp runs.
        self.version: list[str | Any] = []
        super().__init__(version=vstring)

    def parse(self, vstring: str):
        """Validate *vstring* against the k8s scheme and store its components in self.version."""
        components = [comp for comp in self.component_re.split(vstring) if comp]
        for idx, obj in enumerate(components):
            # Numeric fragments become ints so they compare numerically.
            with contextlib.suppress(ValueError):
                components[idx] = int(obj)

        errmsg = f"version '{vstring}' does not conform to kubernetes api versioning guidelines"

        # Valid shapes: ["v", N] or ["v", N, "alpha"|"beta", M].
        if len(components) not in (2, 4) or components[0] != "v" or not isinstance(components[1], int):
            raise ValueError(errmsg)

        if len(components) == 4 and (components[2] not in ("alpha", "beta") or not isinstance(components[3], int)):
            raise ValueError(errmsg)

        self.version = components

    def __str__(self):
        return self.vstring

    def __repr__(self):
        return "KubeAPIVersion ('{0}')".format(str(self))

    def _cmp(self, other):
        # NOTE(review): packaging's Version implements ordering via an internal key,
        # not via a _cmp hook — confirm this method is actually reached by <=, < etc.
        if isinstance(other, str):
            other = KubeAPIVersion(vstring=other)

        myver = self.version
        otherver = other.version

        # Pad stable versions ("v1" -> ["v", 1, "zeta", 9999]) so they sort
        # after any alpha/beta pre-release of the same major number.
        for ver in myver, otherver:
            if len(ver) == 2:
                ver.extend(["zeta", 9999])

        if myver == otherver:
            return 0
        if myver < otherver:
            return -1
        if myver > otherver:
            return 1

291 

292 

class ClassProperty:
    """
    Descriptor that evaluates a callable against the owning class.

    Unlike the builtin `property`, the wrapped callable always receives the
    *class* (never the instance), so the attribute is readable from both the
    class itself and any of its instances.
    """

    def __init__(self, func: Callable) -> None:
        self.func = func

    def __get__(self, obj: Any, owner: Any) -> Any:
        # `obj` (the instance, or None for class access) is deliberately ignored.
        return self.func(owner)

299 

300 

class Resource(ResourceConstants):
    """
    Base class for API resources

    Subclasses set api_group and/or api_version; kind is derived from the
    class name (see the `kind` ClassProperty below).
    """

    # Defaults overridden by concrete resource subclasses.
    api_group: str = ""
    api_version: str = ""
    singular_name: str = ""
    # Server-side timeout (seconds) passed to dynamic-client get() calls.
    timeout_seconds: int = TIMEOUT_1MINUTE

310 

    class ApiGroup:
        """Well-known Kubernetes/OpenShift API group name constants."""

        AAQ_KUBEVIRT_IO: str = "aaq.kubevirt.io"
        ADMISSIONREGISTRATION_K8S_IO: str = "admissionregistration.k8s.io"
        APIEXTENSIONS_K8S_IO: str = "apiextensions.k8s.io"
        APIREGISTRATION_K8S_IO: str = "apiregistration.k8s.io"
        APP_KUBERNETES_IO: str = "app.kubernetes.io"
        APPS: str = "apps"
        BATCH: str = "batch"
        BITNAMI_COM: str = "bitnami.com"
        CACHING_INTERNAL_KNATIVE_DEV: str = "caching.internal.knative.dev"
        CDI_KUBEVIRT_IO: str = "cdi.kubevirt.io"
        CLONE_KUBEVIRT_IO: str = "clone.kubevirt.io"
        CLUSTER_OPEN_CLUSTER_MANAGEMENT_IO: str = "cluster.open-cluster-management.io"
        CONFIG_OPENSHIFT_IO: str = "config.openshift.io"
        CONSOLE_OPENSHIFT_IO: str = "console.openshift.io"
        COORDINATION_K8S_IO: str = "coordination.k8s.io"
        CSIADDONS_OPENSHIFT_IO: str = "csiaddons.openshift.io"
        DATA_IMPORT_CRON_TEMPLATE_KUBEVIRT_IO: str = "dataimportcrontemplate.kubevirt.io"
        DATASCIENCECLUSTER_OPENDATAHUB_IO: str = "datasciencecluster.opendatahub.io"
        DISCOVERY_K8S_IO: str = "discovery.k8s.io"
        DSCINITIALIZATION_OPENDATAHUB_IO: str = "dscinitialization.opendatahub.io"
        EVENTS_K8S_IO: str = "events.k8s.io"
        EXPORT_KUBEVIRT_IO: str = "export.kubevirt.io"
        FENCE_AGENTS_REMEDIATION_MEDIK8S_IO: str = "fence-agents-remediation.medik8s.io"
        FORKLIFT_KONVEYOR_IO: str = "forklift.konveyor.io"
        HCO_KUBEVIRT_IO: str = "hco.kubevirt.io"
        HELM_MARIADB_MMONTES_IO: str = "helm.mariadb.mmontes.io"
        HIVE_OPENSHIFT_IO: str = "hive.openshift.io"
        HOSTPATHPROVISIONER_KUBEVIRT_IO: str = "hostpathprovisioner.kubevirt.io"
        IMAGE_OPENSHIFT_IO: str = "image.openshift.io"
        IMAGE_REGISTRY: str = "registry.redhat.io"
        INSTANCETYPE_KUBEVIRT_IO: str = "instancetype.kubevirt.io"
        INTEGREATLY_ORG: str = "integreatly.org"
        K8S_CNI_CNCF_IO: str = "k8s.cni.cncf.io"
        K8S_MARIADB_COM: str = "k8s.mariadb.com"
        K8S_OVN_ORG: str = "k8s.ovn.org"
        K8S_V1_CNI_CNCF_IO: str = "k8s.v1.cni.cncf.io"
        KUBERNETES_IO: str = "kubernetes.io"
        KUBEVIRT_IO: str = "kubevirt.io"
        KUBEVIRT_KUBEVIRT_IO: str = "kubevirt.kubevirt.io"
        LITMUS_IO: str = "litmuschaos.io"
        MACHINE_OPENSHIFT_IO: str = "machine.openshift.io"
        MACHINECONFIGURATION_OPENSHIFT_IO: str = "machineconfiguration.openshift.io"
        MAISTRA_IO: str = "maistra.io"
        METALLB_IO: str = "metallb.io"
        METRICS_K8S_IO: str = "metrics.k8s.io"
        MIGRATIONS_KUBEVIRT_IO: str = "migrations.kubevirt.io"
        MODELREGISTRY_OPENDATAHUB_IO: str = "modelregistry.opendatahub.io"
        MONITORING_COREOS_COM: str = "monitoring.coreos.com"
        MTQ_KUBEVIRT_IO: str = "mtq.kubevirt.io"
        NETWORKADDONSOPERATOR_NETWORK_KUBEVIRT_IO: str = "networkaddonsoperator.network.kubevirt.io"
        NETWORKING_ISTIO_IO: str = "networking.istio.io"
        NETWORKING_K8S_IO: str = "networking.k8s.io"
        NMSTATE_IO: str = "nmstate.io"
        NODE_LABELLER_KUBEVIRT_IO: str = "node-labeller.kubevirt.io"
        NODEMAINTENANCE_KUBEVIRT_IO: str = "nodemaintenance.kubevirt.io"
        OBSERVABILITY_OPEN_CLUSTER_MANAGEMENT_IO: str = "observability.open-cluster-management.io"
        OCS_OPENSHIFT_IO: str = "ocs.openshift.io"
        OPERATOR_AUTHORINO_KUADRANT_IO: str = "operator.authorino.kuadrant.io"
        OPERATOR_OPEN_CLUSTER_MANAGEMENT_IO: str = "operator.open-cluster-management.io"
        OPERATOR_OPENSHIFT_IO: str = "operator.openshift.io"
        OPERATORS_COREOS_COM: str = "operators.coreos.com"
        OPERATORS_OPENSHIFT_IO: str = "operators.openshift.io"
        OS_TEMPLATE_KUBEVIRT_IO: str = "os.template.kubevirt.io"
        PACKAGES_OPERATORS_COREOS_COM: str = "packages.operators.coreos.com"
        PERFORMANCE_OPENSHIFT_IO: str = "performance.openshift.io"
        POLICY: str = "policy"
        POOL_KUBEVIRT_IO: str = "pool.kubevirt.io"
        PROJECT_OPENSHIFT_IO: str = "project.openshift.io"
        RBAC_AUTHORIZATION_K8S_IO: str = "rbac.authorization.k8s.io"
        REMEDIATION_MEDIK8S_IO: str = "remediation.medik8s.io"
        RIPSAW_CLOUDBULLDOZER_IO: str = "ripsaw.cloudbulldozer.io"
        ROUTE_OPENSHIFT_IO: str = "route.openshift.io"
        SCHEDULING_K8S_IO: str = "scheduling.k8s.io"
        SECURITY_ISTIO_IO: str = "security.istio.io"
        SECURITY_OPENSHIFT_IO: str = "security.openshift.io"
        SELF_NODE_REMEDIATION_MEDIK8S_IO: str = "self-node-remediation.medik8s.io"
        SERVING_KNATIVE_DEV: str = "serving.knative.dev"
        SERVING_KSERVE_IO: str = "serving.kserve.io"
        SNAPSHOT_KUBEVIRT_IO: str = "snapshot.kubevirt.io"
        SNAPSHOT_STORAGE_K8S_IO: str = "snapshot.storage.k8s.io"
        SRIOVNETWORK_OPENSHIFT_IO: str = "sriovnetwork.openshift.io"
        SSP_KUBEVIRT_IO: str = "ssp.kubevirt.io"
        STORAGE_K8S_IO: str = "storage.k8s.io"
        STORAGECLASS_KUBERNETES_IO: str = "storageclass.kubernetes.io"
        STORAGECLASS_KUBEVIRT_IO: str = "storageclass.kubevirt.io"
        SUBRESOURCES_KUBEVIRT_IO: str = "subresources.kubevirt.io"
        TEKTON_DEV: str = "tekton.dev"
        TEKTONTASKS_KUBEVIRT_IO: str = "tektontasks.kubevirt.io"
        TEMPLATE_KUBEVIRT_IO: str = "template.kubevirt.io"
        TEMPLATE_OPENSHIFT_IO: str = "template.openshift.io"
        TRUSTYAI_OPENDATAHUB_IO: str = "trustyai.opendatahub.io"
        UPLOAD_CDI_KUBEVIRT_IO: str = "upload.cdi.kubevirt.io"
        USER_OPENSHIFT_IO: str = "user.openshift.io"
        V2V_KUBEVIRT_IO: str = "v2v.kubevirt.io"
        VELERO_IO: str = "velero.io"
        VM_KUBEVIRT_IO: str = "vm.kubevirt.io"

    class ApiVersion:
        """Common Kubernetes API version string constants."""

        V1: str = "v1"
        V1BETA1: str = "v1beta1"
        V1ALPHA1: str = "v1alpha1"
        V1ALPHA3: str = "v1alpha3"

414 

    def __init__(
        self,
        name: str = "",
        client: DynamicClient | None = None,
        teardown: bool = True,
        privileged_client: DynamicClient | None = None,
        yaml_file: str = "",
        delete_timeout: int = TIMEOUT_4MINUTES,
        dry_run: bool = False,
        node_selector: dict[str, Any] | None = None,
        node_selector_labels: dict[str, str] | None = None,
        config_file: str = "",
        config_dict: dict[str, Any] | None = None,
        context: str = "",
        label: dict[str, str] | None = None,
        annotations: dict[str, str] | None = None,
        api_group: str = "",
        hash_log_data: bool = True,
        ensure_exists: bool = False,
        kind_dict: dict[Any, Any] | None = None,
        wait_for_resource: bool = False,
    ):
        """
        Create an API resource

        If `yaml_file` or `kind_dict` are passed, logic in `to_dict` is bypassed.

        Args:
            name (str): Resource name
            client (DynamicClient): Dynamic client for connecting to a remote cluster
            teardown (bool): Indicates if this resource would need to be deleted
            privileged_client (DynamicClient): Instance of Dynamic client (deprecated; use client)
            yaml_file (str): yaml file for the resource
            delete_timeout (int): timeout associated with delete action
            dry_run (bool): dry run
            node_selector (dict): node selector
            node_selector_labels (str): node selector labels
            config_file (str): Path to config file for connecting to remote cluster.
            config_dict (dict): dict with kubeconfig configuration.
            context (str): Context name for connecting to remote cluster.
            label (dict): Resource labels
            annotations (dict[str, str] | None): Resource annotations
            api_group (str): Resource API group; will overwrite API group definition in resource class
            hash_log_data (bool): Hash resource content based on resource keys_to_hash property
                (example: Secret resource)
            ensure_exists (bool): Whether to check if the resource exists before when initializing the resource, raise if not.
            kind_dict (dict): dict which represents the resource object
            wait_for_resource (bool): Waits for the resource to be created

        Raises:
            ValueError: when yaml_file and kind_dict are both passed, or config_file is not a string.
            NotImplementedError: when the subclass defines neither api_group nor api_version.
            MissingRequiredArgumentError: when no name, yaml_file or kind_dict is given.
        """
        if privileged_client:
            warn(
                "privileged_client is deprecated and will be removed in the future. Use client instead.",
                DeprecationWarning,
                stacklevel=2,
            )

        if yaml_file and kind_dict:
            raise ValueError("yaml_file and resource_dict are mutually exclusive")

        self.name = name
        self.teardown = teardown
        # NOTE(review): stores `client`, not the deprecated `privileged_client` argument,
        # which is otherwise discarded after the warning — confirm this is intentional.
        self.privileged_client = client
        self.yaml_file = yaml_file
        self.kind_dict = kind_dict
        self.delete_timeout = delete_timeout
        self.dry_run = dry_run
        self.node_selector = node_selector
        self.node_selector_labels = node_selector_labels
        self.config_file = config_file
        if not isinstance(self.config_file, str):
            # If we pass config_file which isn't a string, get_client will fail and it will be very hard to know why.
            # Better fail here and let the user know.
            raise ValueError("config_file must be a string")

        self.config_dict = config_dict or {}
        self.context = context
        self.label = label
        self.annotations = annotations
        # NOTE(review): self.config_dict is stored but not forwarded here even though
        # get_client supports config_dict — verify against callers.
        self.client: DynamicClient = client or get_client(config_file=self.config_file, context=self.context)
        self.api_group: str = api_group or self.api_group
        self.hash_log_data = hash_log_data

        if not self.api_group and not self.api_version:
            raise NotImplementedError("Subclasses of Resource require self.api_group or self.api_version to be defined")

        if not (self.name or self.yaml_file or self.kind_dict):
            raise MissingRequiredArgumentError(argument="name")

        self.namespace: str = ""
        self.node_selector_spec = self._prepare_node_selector_spec()
        self.res: dict[Any, Any] = self.kind_dict or {}
        self.yaml_file_contents: str = ""
        self.initial_resource_version: str = ""
        self.logger = self._set_logger()
        self.wait_for_resource = wait_for_resource

        if ensure_exists:
            self._ensure_exists()

        # self._set_client_and_api_version() must be last init line
        self._set_client_and_api_version()

515 

516 def _ensure_exists(self) -> None: 

517 if not self.exists: 

518 _name_for_raise = self.name if not self.namespace else f"{self.namespace}/{self.name}" 

519 raise ResourceNotFoundError(f"Resource `{self.kind}` `{_name_for_raise}` does not exist") 

520 

521 def _set_logger(self) -> logging.Logger: 

522 log_level = os.environ.get("OPENSHIFT_PYTHON_WRAPPER_LOG_LEVEL", "INFO") 

523 log_file = os.environ.get("OPENSHIFT_PYTHON_WRAPPER_LOG_FILE", "") 

524 return get_logger( 

525 name=f"{__name__.rsplit('.')[0]} {self.kind}", 

526 level=log_level, 

527 filename=log_file, 

528 ) 

529 

530 def _prepare_node_selector_spec(self) -> dict[str, str]: 

531 return self.node_selector or self.node_selector_labels or {} 

532 

    @ClassProperty
    def kind(cls) -> str | None:
        """Resource kind derived from the class hierarchy; None for the wrapper base classes themselves."""
        return sub_resource_level(cls, NamespacedResource, Resource)

536 

    def _base_body(self) -> None:
        """
        Populate self.res (and self.name) in place from kind_dict, yaml_file, or a base skeleton.

        Sources, in priority order:
            - kind_dict: used verbatim; name taken from its metadata.
            - yaml_file: read once (path or StringIO) into yaml_file_contents and parsed;
              the server-assigned resourceVersion is stripped so the body can be re-posted.
            - otherwise: a minimal apiVersion/kind/metadata skeleton.
        Labels and annotations passed at init time are merged into metadata.

        Raises:
            MissingResourceResError: if no resource body could be constructed.
        """
        if self.kind_dict:
            # If `kind_dict` is provided, no additional logic should be applied
            self.name = self.kind_dict["metadata"]["name"]

        elif self.yaml_file:
            if not self.yaml_file_contents:
                if isinstance(self.yaml_file, StringIO):
                    self.yaml_file_contents = self.yaml_file.read()

                else:
                    with open(self.yaml_file) as stream:
                        self.yaml_file_contents = stream.read()

            self.res = yaml.safe_load(stream=self.yaml_file_contents)
            # Drop resourceVersion so the loaded manifest can be created cleanly.
            self.res.get("metadata", {}).pop("resourceVersion", None)
            self.name = self.res["metadata"]["name"]

        else:
            self.res = {
                "apiVersion": self.api_version,
                "kind": self.kind,
                "metadata": {"name": self.name},
            }

        if self.label:
            self.res.setdefault("metadata", {}).setdefault("labels", {}).update(self.label)

        if self.annotations:
            self.res.setdefault("metadata", {}).setdefault("annotations", {}).update(self.annotations)

        if not self.res:
            raise MissingResourceResError(name=self.name)

576 

    def to_dict(self) -> None:
        """
        Generate intended dict representation of the resource.

        Builds self.res in place via _base_body(); subclasses extend this to add
        their spec fields.
        """
        self._base_body()

582 

    def __enter__(self) -> Any:
        """Deploy the resource on context entry; installs a SIGINT handler so Ctrl-C still tears down."""
        signal(SIGINT, self._sigint_handler)
        return self.deploy(wait=self.wait_for_resource)

586 

587 def __exit__( 

588 self, 

589 exc_type: type[BaseException] | None = None, 

590 exc_val: BaseException | None = None, 

591 exc_tb: TracebackType | None = None, 

592 ) -> None: 

593 if self.teardown: 

594 if not self.clean_up(): 

595 raise ResourceTeardownError(resource=self) 

596 

    def _sigint_handler(self, signal_received: int, frame: Any) -> None:
        """SIGINT handler installed by __enter__: run teardown via __exit__, then exit with the signal number."""
        self.__exit__()
        sys.exit(signal_received)

600 

601 def deploy(self, wait: bool = False) -> Any: 

602 """ 

603 For debug, export REUSE_IF_RESOURCE_EXISTS to skip resource create. 

604 Spaces are important in the export dict 

605 

606 Examples: 

607 To skip creation of all resources by kind: 

608 export REUSE_IF_RESOURCE_EXISTS="{Pod: {}}" 

609 

610 To skip creation of resource by name (on all namespaces or non-namespaced resources): 

611 export REUSE_IF_RESOURCE_EXISTS="{Pod: {<pod-name>:}}" 

612 

613 To skip creation of resource by name and namespace: 

614 export REUSE_IF_RESOURCE_EXISTS="{Pod: {<pod-name>: <pod-namespace>}}" 

615 

616 To skip creation of multiple resources: 

617 export REUSE_IF_RESOURCE_EXISTS="{Namespace: {<namespace-name>:}, Pod: {<pod-name>: <pod-namespace>}}" 

618 """ 

619 _resource = None 

620 _export_str = "REUSE_IF_RESOURCE_EXISTS" 

621 skip_resource_kind_create_if_exists = os.environ.get(_export_str) 

622 if skip_resource_kind_create_if_exists: 

623 _resource = skip_existing_resource_creation_teardown( 

624 resource=self, 

625 export_str=_export_str, 

626 user_exported_args=skip_resource_kind_create_if_exists, 

627 ) 

628 

629 if _resource: 

630 return _resource 

631 

632 self.create(wait=wait) 

633 return self 

634 

    def clean_up(self, wait: bool = True, timeout: int | None = None) -> bool:
        """
        For debug, export SKIP_RESOURCE_TEARDOWN to skip resource teardown.
        Spaces are important in the export dict

        Args:
            wait (bool, optional): Wait for resource deletion. Defaults to True.
            timeout (int, optional): Timeout in seconds to wait for resource to be deleted.
                Defaults to self.delete_timeout.

        Returns:
            bool: True if resource was deleted (or teardown was skipped) else False.

        Examples:
            To skip teardown of all resources by kind:
                export SKIP_RESOURCE_TEARDOWN="{Pod: {}}"

            To skip teardown of resource by name (on all namespaces):
                export SKIP_RESOURCE_TEARDOWN="{Pod: {<pod-name>:}}"

            To skip teardown of resource by name and namespace:
                export SKIP_RESOURCE_TEARDOWN="{Pod: {<pod-name>: <pod-namespace>}}"

            To skip teardown of multiple resources:
                export SKIP_RESOURCE_TEARDOWN="{Namespace: {<namespace-name>:}, Pod: {<pod-name>: <pod-namespace>}}"
        """
        _export_str = "SKIP_RESOURCE_TEARDOWN"
        skip_resource_teardown = os.environ.get(_export_str)
        # check_exists=False: the skip decision is based on the exported spec only,
        # not on whether the resource currently exists.
        if skip_resource_teardown and skip_existing_resource_creation_teardown(
            resource=self,
            export_str=_export_str,
            user_exported_args=skip_resource_teardown,
            check_exists=False,
        ):
            self.logger.warning(
                f"Skip resource {self.kind} {self.name} teardown. Got {_export_str}={skip_resource_teardown}"
            )
            return True

        return self.delete(wait=wait, timeout=timeout or self.delete_timeout)

674 

    @classmethod
    def _prepare_resources(
        cls, dyn_client: DynamicClient, singular_name: str, *args: Any, **kwargs: Any
    ) -> ResourceInstance:
        """
        Resolve this kind's API endpoint (lazily filling cls.api_version) and perform a get().

        Args:
            dyn_client (DynamicClient): client to query.
            singular_name (str): override for the resource's singular name; empty to omit.

        Returns:
            ResourceInstance: the list/get response from the dynamic client.
        """
        if not cls.api_version:
            cls.api_version = _get_api_version(dyn_client=dyn_client, api_group=cls.api_group, kind=cls.kind)

        # Only pass singular_name when explicitly provided.
        get_kwargs = {"singular_name": singular_name} if singular_name else {}
        return dyn_client.resources.get(
            kind=cls.kind,
            api_version=cls.api_version,
            **get_kwargs,
        ).get(*args, **kwargs, timeout_seconds=cls.timeout_seconds)

688 

689 def _prepare_singular_name_kwargs(self, **kwargs: Any) -> dict[str, Any]: 

690 kwargs = kwargs if kwargs else {} 

691 if self.singular_name: 

692 kwargs["singular_name"] = self.singular_name 

693 

694 return kwargs 

695 

    def _set_client_and_api_version(self) -> None:
        """Lazily fill self.client (from kubeconfig/context) and self.api_version (from the cluster)."""
        if not self.client:
            self.client = get_client(config_file=self.config_file, context=self.context)

        if not self.api_version:
            self.api_version = _get_api_version(dyn_client=self.client, api_group=self.api_group, kind=self.kind)

702 

    def full_api(self, **kwargs: Any) -> ResourceInstance:
        """
        Get resource API

        Keyword Args:
            pretty
            _continue
            include_uninitialized
            field_selector
            label_selector
            limit
            resource_version
            timeout_seconds
            watch
            async_req

        Returns:
            Resource: Resource object.
        """
        # Make sure client and api_version are resolved before hitting the API.
        self._set_client_and_api_version()

        kwargs = self._prepare_singular_name_kwargs(**kwargs)

        return self.client.resources.get(api_version=self.api_version, kind=self.kind, **kwargs)

727 

    @property
    def api(self) -> ResourceInstance:
        """The dynamic-client API endpoint for this resource kind (see full_api)."""
        return self.full_api()

731 

    def wait(self, timeout: int = TIMEOUT_4MINUTES, sleep: int = 1) -> None:
        """
        Wait for resource

        Args:
            timeout (int): Time to wait for the resource.
            sleep (int): Time to wait between retries

        Raises:
            TimeoutExpiredError: If resource not exists.
        """
        self.logger.info(f"Wait until {self.kind} {self.name} is created")
        samples = TimeoutSampler(
            wait_timeout=timeout,
            sleep=sleep,
            # Transient protocol/not-found/cluster errors are retried rather than raised.
            exceptions_dict={
                **PROTOCOL_ERROR_EXCEPTION_DICT,
                **NOT_FOUND_ERROR_EXCEPTION_DICT,
                **DEFAULT_CLUSTER_RETRY_EXCEPTIONS,
            },
            func=lambda: self.exists,
        )
        for sample in samples:
            if sample:
                return

757 

758 def wait_deleted(self, timeout: int = TIMEOUT_4MINUTES) -> bool: 

759 """ 

760 Wait until resource is deleted 

761 

762 Args: 

763 timeout (int): Time to wait for the resource. 

764 

765 Raises: 

766 TimeoutExpiredError: If resource still exists. 

767 """ 

768 self.logger.info(f"Wait until {self.kind} {self.name} is deleted") 

769 try: 

770 for sample in TimeoutSampler(wait_timeout=timeout, sleep=1, func=lambda: self.exists): 

771 if not sample: 

772 return True 

773 except TimeoutExpiredError: 

774 self.logger.warning(f"Timeout expired while waiting for {self.kind} {self.name} to be deleted") 

775 return False 

776 

777 return False 

778 

779 @property 

780 def exists(self) -> ResourceInstance | None: 

781 """ 

782 Whether self exists on the server 

783 """ 

784 try: 

785 return self.instance 

786 except NotFoundError: 

787 return None 

788 

    @property
    def _kube_v1_api(self) -> kubernetes.client.CoreV1Api:
        """Core V1 API bound to this resource's underlying kubernetes client."""
        return kubernetes.client.CoreV1Api(api_client=self.client.client)

792 

    def wait_for_status(
        self, status: str, timeout: int = TIMEOUT_4MINUTES, stop_status: str | None = None, sleep: int = 1
    ) -> None:
        """
        Wait for resource to be in status

        Args:
            status (str): Expected status.
            timeout (int): Time to wait for the resource.
            stop_status (str): Status which should stop the wait and failed.
                Defaults to self.Status.FAILED.
            sleep (int): Time to wait between retries.

        Raises:
            TimeoutExpiredError: If resource in not in desire status.
        """
        stop_status = stop_status if stop_status else self.Status.FAILED
        self.logger.info(f"Wait for {self.kind} {self.name} status to be {status}")
        samples = TimeoutSampler(
            wait_timeout=timeout,
            sleep=sleep,
            exceptions_dict={
                **PROTOCOL_ERROR_EXCEPTION_DICT,
                **DEFAULT_CLUSTER_RETRY_EXCEPTIONS,
            },
            # Poll via a field-selector list so a missing resource yields empty items
            # instead of raising NotFoundError.
            func=self.api.get,
            field_selector=f"metadata.name=={self.name}",
            namespace=self.namespace,
        )
        current_status = None
        # Track the last status we logged so repeated polls don't spam the log.
        last_logged_status = None
        try:
            for sample in samples:
                if sample.items:
                    sample_status = sample.items[0].status
                    if sample_status:
                        current_status = sample_status.phase
                        if current_status != last_logged_status:
                            last_logged_status = current_status
                            self.logger.info(f"Status of {self.kind} {self.name} is {current_status}")

                        if current_status == status:
                            return

                        if current_status == stop_status:
                            raise TimeoutExpiredError(f"Status of {self.kind} {self.name} is {current_status}")

        except TimeoutExpiredError:
            if current_status:
                self.logger.error(f"Status of {self.kind} {self.name} is {current_status}")
            raise

842 

    def create(self, wait: bool = False) -> ResourceInstance | None:
        """
        Create resource.

        Args:
            wait (bool) : True to wait for resource status.

        Returns:
            ResourceInstance | None: the server response for the created resource.
        """
        self.to_dict()

        # Log a hashed copy so sensitive fields (per keys_to_hash) are not leaked.
        hashed_res = self.hash_resource_dict(resource_dict=self.res)
        self.logger.info(f"Create {self.kind} {self.name}")
        self.logger.info(f"Posting {hashed_res}")
        self.logger.debug(f"\n{yaml.dump(hashed_res)}")
        resource_kwargs = {"body": self.res, "namespace": self.namespace}
        if self.dry_run:
            resource_kwargs["dry_run"] = "All"
        resource_ = self.api.create(**resource_kwargs)
        with contextlib.suppress(ForbiddenError, AttributeError, NotFoundError):
            # some resources do not support get() (no instance) or the client do not have permissions
            self.initial_resource_version = self.instance.metadata.resourceVersion

        if wait and resource_:
            self.wait()
        return resource_

870 

871 def delete(self, wait: bool = False, timeout: int = TIMEOUT_4MINUTES, body: dict[str, Any] | None = None) -> bool: 

872 self.logger.info(f"Delete {self.kind} {self.name}") 

873 

874 if self.exists: 

875 _instance_dict = self.instance.to_dict() 

876 if isinstance(_instance_dict, dict): 

877 hashed_data = self.hash_resource_dict(resource_dict=_instance_dict) 

878 self.logger.info(f"Deleting {hashed_data}") 

879 self.logger.debug(f"\n{yaml.dump(hashed_data)}") 

880 

881 else: 

882 self.logger.warning(f"{self.kind}: {self.name} instance.to_dict() return was not a dict") 

883 

884 self.api.delete(name=self.name, namespace=self.namespace, body=body) 

885 

886 if wait: 

887 return self.wait_deleted(timeout=timeout) 

888 

889 return True 

890 

891 self.logger.warning(f"Resource {self.kind} {self.name} was not found, and wasn't deleted") 

892 return True 

893 

894 @property 

895 def status(self) -> str: 

896 """ 

897 Get resource status 

898 

899 Status: Running, Scheduling, Pending, Unknown, CrashLoopBackOff 

900 

901 Returns: 

902 str: Status 

903 """ 

904 self.logger.info(f"Get {self.kind} {self.name} status") 

905 return self.instance.status.phase 

906 

907 def update(self, resource_dict: dict[str, Any]) -> None: 

908 """ 

909 Update resource with resource dict 

910 

911 Args: 

912 resource_dict: Resource dictionary 

913 """ 

914 hashed_resource_dict = self.hash_resource_dict(resource_dict=resource_dict) 

915 self.logger.info(f"Update {self.kind} {self.name}:\n{hashed_resource_dict}") 

916 self.logger.debug(f"\n{yaml.dump(hashed_resource_dict)}") 

917 self.api.patch( 

918 body=resource_dict, 

919 namespace=self.namespace, 

920 content_type="application/merge-patch+json", 

921 ) 

922 

923 def update_replace(self, resource_dict: dict[str, Any]) -> None: 

924 """ 

925 Replace resource metadata. 

926 Use this to remove existing field. (update() will only update existing fields) 

927 """ 

928 hashed_resource_dict = self.hash_resource_dict(resource_dict=resource_dict) 

929 self.logger.info(f"Replace {self.kind} {self.name}: \n{hashed_resource_dict}") 

930 self.logger.debug(f"\n{yaml.dump(hashed_resource_dict)}") 

931 self.api.replace(body=resource_dict, name=self.name, namespace=self.namespace) 

932 

933 @staticmethod 

934 def retry_cluster_exceptions( 

935 func, 

936 exceptions_dict: dict[type[Exception], list[str]] = DEFAULT_CLUSTER_RETRY_EXCEPTIONS, 

937 timeout: int = TIMEOUT_10SEC, 

938 sleep_time: int = 1, 

939 **kwargs: Any, 

940 ) -> Any: 

941 try: 

942 sampler = TimeoutSampler( 

943 wait_timeout=timeout, 

944 sleep=sleep_time, 

945 func=func, 

946 print_log=False, 

947 exceptions_dict=exceptions_dict, 

948 **kwargs, 

949 ) 

950 for sample in sampler: 

951 return sample 

952 

953 except TimeoutExpiredError as exp: 

954 if exp.last_exp: 

955 raise exp.last_exp 

956 

957 raise 

958 

959 @classmethod 

960 def get( 

961 cls, 

962 config_file: str = "", 

963 context: str = "", 

964 singular_name: str = "", 

965 exceptions_dict: dict[type[Exception], list[str]] = DEFAULT_CLUSTER_RETRY_EXCEPTIONS, 

966 raw: bool = False, 

967 dyn_client: DynamicClient | None = None, 

968 *args: Any, 

969 **kwargs: Any, 

970 ) -> Generator[Any, None, None]: 

971 """ 

972 Get resources 

973 

974 Args: 

975 dyn_client (DynamicClient): Open connection to remote cluster. 

976 config_file (str): Path to config file for connecting to remote cluster. 

977 context (str): Context name for connecting to remote cluster. 

978 singular_name (str): Resource kind (in lowercase), in use where we have multiple matches for resource. 

979 raw (bool): If True return raw object. 

980 exceptions_dict (dict): Exceptions dict for TimeoutSampler 

981 

982 Returns: 

983 generator: Generator of Resources of cls.kind. 

984 """ 

985 if not dyn_client: 

986 dyn_client = get_client(config_file=config_file, context=context) 

987 

988 def _get() -> Generator["Resource|ResourceInstance", None, None]: 

989 _resources = cls._prepare_resources(dyn_client=dyn_client, singular_name=singular_name, *args, **kwargs) # type: ignore[misc] 

990 try: 

991 for resource_field in _resources.items: 

992 if raw: 

993 yield _resources 

994 else: 

995 yield cls(client=dyn_client, name=resource_field.metadata.name) 

996 

997 except TypeError: 

998 if raw: 

999 yield _resources 

1000 else: 

1001 yield cls(client=dyn_client, name=_resources.metadata.name) 

1002 

1003 return Resource.retry_cluster_exceptions(func=_get, exceptions_dict=exceptions_dict) 

1004 

1005 @property 

1006 def instance(self) -> ResourceInstance: 

1007 """ 

1008 Get resource instance 

1009 

1010 Returns: 

1011 openshift.dynamic.client.ResourceInstance 

1012 """ 

1013 

1014 def _instance() -> ResourceInstance | None: 

1015 return self.api.get(name=self.name) 

1016 

1017 return self.retry_cluster_exceptions(func=_instance) 

1018 

1019 @property 

1020 def labels(self) -> ResourceField: 

1021 """ 

1022 Method to get labels for this resource 

1023 

1024 Returns: 

1025 openshift.dynamic.resource.ResourceField: Representation of labels 

1026 """ 

1027 return self.instance.get("metadata", {})["labels"] 

1028 

1029 def watcher(self, timeout: int, resource_version: str = "") -> Generator[dict[str, Any], None, None]: 

1030 """ 

1031 Get resource for a given timeout. 

1032 

1033 Args: 

1034 timeout (int): Time to get conditions. 

1035 resource_version (str): The version with which to filter results. Only events with 

1036 a resource_version greater than this value will be returned 

1037 

1038 Yield: 

1039 Event object with these keys: 

1040 'type': The type of event such as "ADDED", "DELETED", etc. 

1041 'raw_object': a dict representing the watched object. 

1042 'object': A ResourceInstance wrapping raw_object. 

1043 """ 

1044 yield from self.api.watch( 

1045 timeout=timeout, 

1046 namespace=self.namespace, 

1047 field_selector=f"metadata.name=={self.name}", 

1048 resource_version=resource_version or self.initial_resource_version, 

1049 ) 

1050 

1051 def wait_for_condition(self, condition: str, status: str, timeout: int = 300, sleep_time: int = 1) -> None: 

1052 """ 

1053 Wait for Resource condition to be in desire status. 

1054 

1055 Args: 

1056 condition (str): Condition to query. 

1057 status (str): Expected condition status. 

1058 timeout (int): Time to wait for the resource. 

1059 sleep_time(int): Interval between each retry when checking the resource's condition. 

1060 

1061 Raises: 

1062 TimeoutExpiredError: If Resource condition in not in desire status. 

1063 """ 

1064 self.logger.info(f"Wait for {self.kind}/{self.name}'s '{condition}' condition to be '{status}'") 

1065 

1066 timeout_watcher = TimeoutWatch(timeout=timeout) 

1067 for sample in TimeoutSampler( 

1068 wait_timeout=timeout, 

1069 sleep=sleep_time, 

1070 func=lambda: self.exists, 

1071 ): 

1072 if sample: 

1073 break 

1074 

1075 for sample in TimeoutSampler( 

1076 wait_timeout=timeout_watcher.remaining_time(), 

1077 sleep=sleep_time, 

1078 func=lambda: self.instance, 

1079 ): 

1080 if sample: 

1081 for cond in sample.get("status", {}).get("conditions", []): 

1082 if cond["type"] == condition and cond["status"] == status: 

1083 return 

1084 

    def api_request(
        self, method: str, action: str, url: str, retry_params: dict[str, int] | None = None, **params: Any
    ) -> Any:
        """
        Handle API requests to resource.

        Args:
            method (str): Request method (GET/PUT etc.).
            action (str): Action to perform (stop/start/guestosinfo etc.).
            url (str): URL of resource.
            retry_params (dict): dict of timeout and sleep_time values for retrying the api request call

        Returns:
            dict: Parsed JSON response body; when the body is not valid JSON
            the raw response data is returned as-is.

        """
        client: DynamicClient = self.client
        # authenticate the raw request using the client's configured api-key headers
        api_request_params = {
            "url": f"{url}/{action}",
            "method": method,
            "headers": client.client.configuration.api_key,
        }
        if retry_params:
            # retry transient cluster errors using the caller-provided timing
            response = self.retry_cluster_exceptions(
                func=client.client.request,
                timeout=retry_params.get("timeout", TIMEOUT_10SEC),
                sleep_time=retry_params.get("sleep_time", TIMEOUT_1SEC),
                **api_request_params,
                **params,
            )
        else:
            response = client.client.request(
                **api_request_params,
                **params,
            )
        try:
            return json.loads(response.data)
        except json.decoder.JSONDecodeError:
            # response body is not JSON (e.g. plain text) -- hand it back untouched
            return response.data

1124 

1125 def wait_for_conditions(self) -> None: 

1126 timeout_watcher = TimeoutWatch(timeout=30) 

1127 for sample in TimeoutSampler( 

1128 wait_timeout=TIMEOUT_30SEC, 

1129 sleep=1, 

1130 func=lambda: self.exists, 

1131 ): 

1132 if sample: 

1133 break 

1134 

1135 samples = TimeoutSampler( 

1136 wait_timeout=timeout_watcher.remaining_time(), 

1137 sleep=1, 

1138 func=lambda: self.instance.status.conditions, 

1139 ) 

1140 for sample in samples: 

1141 if sample: 

1142 return 

1143 

1144 def events( 

1145 self, 

1146 name: str = "", 

1147 label_selector: str = "", 

1148 field_selector: str = "", 

1149 resource_version: str = "", 

1150 timeout: int = TIMEOUT_4MINUTES, 

1151 ): 

1152 """ 

1153 get - retrieves K8s events. 

1154 

1155 Args: 

1156 name (str): event name 

1157 label_selector (str): filter events by labels; comma separated string of key=value 

1158 field_selector (str): filter events by fields; comma separated string of key=valueevent fields; 

1159 comma separated string of key=value 

1160 resource_version (str): filter events by their resource's version 

1161 timeout (int): timeout in seconds 

1162 

1163 Returns 

1164 list: event objects 

1165 

1166 example: reading all CSV Warning events in namespace "my-namespace", with reason of "AnEventReason" 

1167 pod = Pod(client=client, name="pod", namespace="my-namespace") 

1168 for event in pod.events( 

1169 namespace="my-namespace", 

1170 field_selector="involvedObject.kind==ClusterServiceVersion,type==Warning,reason=AnEventReason", 

1171 timeout=10, 

1172 ): 

1173 print(event.object) 

1174 """ 

1175 _field_selector = f"involvedObject.name=={self.name}" 

1176 if field_selector: 

1177 field_selector = f"{_field_selector},{field_selector}" 

1178 yield from Event.get( 

1179 dyn_client=self.client, 

1180 namespace=self.namespace, 

1181 name=name, 

1182 label_selector=label_selector, 

1183 field_selector=field_selector or _field_selector, 

1184 resource_version=resource_version, 

1185 timeout=timeout, 

1186 ) 

1187 

1188 @staticmethod 

1189 def get_all_cluster_resources( 

1190 client: DynamicClient | None = None, 

1191 config_file: str = "", 

1192 context: str = "", 

1193 config_dict: dict[str, Any] | None = None, 

1194 *args: Any, 

1195 **kwargs: Any, 

1196 ) -> Generator[ResourceField, None, None]: 

1197 """ 

1198 Get all cluster resources 

1199 

1200 Args: 

1201 client (DynamicClient): k8s client 

1202 config_file (str): path to a kubeconfig file. 

1203 config_dict (dict): dict with kubeconfig configuration. 

1204 context (str): name of the context to use. 

1205 *args (tuple): args to pass to client.get() 

1206 **kwargs (dict): kwargs to pass to client.get() 

1207 

1208 Yields: 

1209 kubernetes.dynamic.resource.ResourceField: Cluster resource. 

1210 

1211 Example: 

1212 for resource in get_all_cluster_resources(label_selector="my-label=value"): 

1213 print(f"Resource: {resource}") 

1214 """ 

1215 if not client: 

1216 client = get_client(config_file=config_file, config_dict=config_dict, context=context) 

1217 

1218 for _resource in client.resources.search(): 

1219 try: 

1220 _resources = client.get(_resource, *args, **kwargs) 

1221 yield from _resources.items 

1222 

1223 except (NotFoundError, TypeError, MethodNotAllowedError): 

1224 continue 

1225 

1226 def to_yaml(self) -> str: 

1227 """ 

1228 Get resource as YAML representation. 

1229 

1230 Returns: 

1231 str: Resource YAML representation. 

1232 """ 

1233 if not self.res: 

1234 self.to_dict() 

1235 resource_yaml = yaml.dump(self.res) 

1236 self.logger.info(f"\n{resource_yaml}") 

1237 return resource_yaml 

1238 

1239 @property 

1240 def keys_to_hash(self) -> list[str]: 

1241 """ 

1242 Resource attributes list to hash in the logs. 

1243 

1244 The list should hold absolute key paths in resource dict. 

1245 

1246 Example: 

1247 given a dict: {"spec": {"data": <value_to_hash>}} 

1248 To hash spec['data'] key pass: ["spec>data"] 

1249 """ 

1250 return [] 

1251 

1252 def hash_resource_dict(self, resource_dict: dict[Any, Any]) -> dict[Any, Any]: 

1253 if not isinstance(resource_dict, dict): 

1254 raise ValueError("Expected a dictionary as the first argument") 

1255 

1256 if os.environ.get("OPENSHIFT_PYTHON_WRAPPER_HASH_LOG_DATA", "true") == "false": 

1257 return resource_dict 

1258 

1259 if self.keys_to_hash and self.hash_log_data: 

1260 resource_dict = copy.deepcopy(resource_dict) 

1261 for key_name in self.keys_to_hash: 

1262 resource_dict = replace_key_with_hashed_value(resource_dict=resource_dict, key_name=key_name) 

1263 

1264 return resource_dict 

1265 

1266 def get_condition_message(self, condition_type: str, condition_status: str = "") -> str: 

1267 """ 

1268 Get condition message by condition type and condition status 

1269 

1270 Args: 

1271 condition_type (str): condition type name 

1272 condition_status (str, optional): condition status to match 

1273 

1274 Returns: 

1275 str: condition message or empty string if condition status doesn't match 

1276 """ 

1277 if _conditions := self.instance.status.conditions: 

1278 for condition in _conditions: 

1279 if condition_type == condition.type: 

1280 if not condition_status: 

1281 return condition.message 

1282 

1283 if condition_status == condition.status: 

1284 return condition.message 

1285 

1286 self.logger.error( 

1287 f"Condition `{condition_type}` status is not `{condition_status}`, got `{condition.status}`" 

1288 ) 

1289 break 

1290 

1291 return "" 

1292 

1293 

class NamespacedResource(Resource):
    """
    Namespaced object, inherited from Resource.

    Adds a required `namespace` and namespaced variants of get()/instance.
    """

    def __init__(
        self,
        name: str = "",
        namespace: str = "",
        teardown: bool = True,
        yaml_file: str = "",
        delete_timeout: int = TIMEOUT_4MINUTES,
        client: DynamicClient | None = None,
        ensure_exists: bool = False,
        **kwargs: Any,
    ):
        """
        Args:
            name (str): Resource name; required unless yaml_file/kind_dict is given.
            namespace (str): Resource namespace; required unless yaml_file/kind_dict is given.
            teardown (bool): Delete the resource on context-manager exit.
            yaml_file (str): Path to a yaml file describing the resource.
            delete_timeout (int): Seconds to wait for deletion on teardown.
            client (DynamicClient): Open connection to the cluster.
            ensure_exists (bool): If True, fail immediately when the resource is absent.

        Raises:
            MissingRequiredArgumentError: When neither (name and namespace) nor
                a yaml_file/kind_dict source is provided.
        """
        super().__init__(
            name=name,
            client=client,
            teardown=teardown,
            yaml_file=yaml_file,
            delete_timeout=delete_timeout,
            **kwargs,
        )
        self.namespace = namespace
        # name+namespace may instead come from a yaml file or kind dict
        if not (self.name and self.namespace) and not self.yaml_file and not self.kind_dict:
            raise MissingRequiredArgumentError(argument="'name' and 'namespace'")

        if ensure_exists:
            self._ensure_exists()

    @classmethod
    def get(
        cls,
        config_file: str = "",
        context: str = "",
        singular_name: str = "",
        exceptions_dict: dict[type[Exception], list[str]] = DEFAULT_CLUSTER_RETRY_EXCEPTIONS,
        raw: bool = False,
        dyn_client: DynamicClient | None = None,
        *args: Any,
        **kwargs: Any,
    ) -> Generator[Any, None, None]:
        """
        Get resources

        Args:
            dyn_client (DynamicClient): Open connection to remote cluster
            config_file (str): Path to config file for connecting to remote cluster.
            context (str): Context name for connecting to remote cluster.
            singular_name (str): Resource kind (in lowercase), in use where we have multiple matches for resource.
            raw (bool): If True return raw object.
            exceptions_dict (dict): Exceptions dict for TimeoutSampler

        Returns:
            generator: Generator of Resources of cls.kind
        """
        if not dyn_client:
            dyn_client = get_client(config_file=config_file, context=context)

        def _get() -> Generator["NamespacedResource|ResourceInstance", None, None]:
            _resources = cls._prepare_resources(dyn_client=dyn_client, singular_name=singular_name, *args, **kwargs)  # type: ignore[misc]
            try:
                for resource_field in _resources.items:
                    if raw:
                        yield resource_field
                    else:
                        yield cls(
                            client=dyn_client,
                            name=resource_field.metadata.name,
                            namespace=resource_field.metadata.namespace,
                        )
            except TypeError:
                # _resources is a single instance (no iterable .items); yield it directly
                if raw:
                    yield _resources
                else:
                    yield cls(
                        client=dyn_client,
                        name=_resources.metadata.name,
                        namespace=_resources.metadata.namespace,
                    )

        return Resource.retry_cluster_exceptions(func=_get, exceptions_dict=exceptions_dict)

    @property
    def instance(self) -> ResourceInstance:
        """
        Get resource instance (namespaced lookup), retrying transient cluster errors.

        Returns:
            openshift.dynamic.client.ResourceInstance
        """

        def _instance() -> ResourceInstance:
            return self.api.get(name=self.name, namespace=self.namespace)

        return self.retry_cluster_exceptions(func=_instance)

    def _base_body(self) -> None:
        # When built from a yaml file / kind dict, prefer the namespace embedded
        # in the manifest; otherwise stamp our namespace into the metadata.
        if self.yaml_file or self.kind_dict:
            self.namespace = self.res["metadata"].get("namespace", self.namespace)

        else:
            self.res["metadata"]["namespace"] = self.namespace

        if not self.namespace:
            raise MissingRequiredArgumentError(argument="namespace")

    def to_dict(self) -> None:
        # build the common body first, then apply the namespaced additions
        super(NamespacedResource, self)._base_body()
        self._base_body()

1405 

1406 

1407class ResourceEditor: 

1408 def __init__( 

1409 self, patches: dict[Any, Any], action: str = "update", user_backups: dict[Any, Any] | None = None 

1410 ) -> None: 

1411 """ 

1412 Args: 

1413 patches (dict): {<Resource object>: <yaml patch as dict>} 

1414 e.g. {<Resource object>: 

1415 {'metadata': {'labels': {'label1': 'true'}}} 

1416 

1417 Allows for temporary edits to cluster resources for tests. During 

1418 __enter__ user-specified patches (see args) are applied and old values 

1419 are backed up, and during __exit__ these backups are used to reverse 

1420 all changes made. 

1421 

1422 Flow: 

1423 1) apply patches 

1424 2) automation runs 

1425 3) edits made to resources are reversed 

1426 

1427 May also be used without being treated as a context manager by 

1428 calling the methods update() and restore() after instantiation. 

1429 

1430 *** the DynamicClient object used to get the resources must not be 

1431 using an unprivileged_user; use default_client or similar instead.*** 

1432 """ 

1433 

1434 self._patches = self._dictify_resourcefield(res=patches) 

1435 self.action = action 

1436 self.user_backups = user_backups 

1437 self._backups: dict[Any, Any] = {} 

1438 

1439 @property 

1440 def backups(self) -> dict[Any, Any]: 

1441 """Returns a dict {<Resource object>: <backup_as_dict>} 

1442 The backup dict kept for each resource edited""" 

1443 return self._backups 

1444 

1445 @property 

1446 def patches(self) -> dict[Any, Any]: 

1447 """Returns the patches dict provided in the constructor""" 

1448 return self._patches 

1449 

    def update(self, backup_resources: bool = False) -> None:
        """
        Prepares backup dicts (where necessary) and applies patches.

        Args:
            backup_resources (bool): When True, capture (or take from
                user_backups) the pre-patch values so restore() can reverse
                the changes; resources whose patch produces no diff are
                skipped (unless action is 'replace').
        """
        # prepare update dicts and backups
        resource_to_patch = []
        if backup_resources:
            LOGGER.info("ResourceEdit: Backing up old data")
            if self.user_backups:
                # caller supplied the backups -- patch everything as-is
                resource_to_patch = self._patches
                self._backups = self.user_backups

            else:
                for resource, update in self._patches.items():
                    namespace = None
                    # prepare backup
                    try:
                        original_resource_dict = resource.instance.to_dict()
                    except NotFoundError:
                        # Some resource cannot be found by name.
                        # happens in 'ServiceMonitor' resource.
                        original_resource_dict = list(
                            resource.get(
                                dyn_client=resource.client,
                                field_selector=f"metadata.name={resource.name}",
                            )
                        )[0].to_dict()
                        namespace = update.get("metadata", {}).get("namespace")

                    backup = self._create_backup(original=original_resource_dict, patch=update)
                    if namespace:
                        # Add namespace to metadata for restore.
                        backup["metadata"]["namespace"] = namespace

                    # no need to back up if no changes have been made
                    # if action is 'replace' we need to update even if no backup (replace update can be empty )
                    if backup or self.action == "replace":
                        resource_to_patch.append(resource)
                        self._backups[resource] = backup
                    else:
                        LOGGER.warning(f"ResourceEdit: no diff found in patch for {resource.name} -- skipping")
                if not resource_to_patch:
                    # every patch was a no-op; nothing to apply
                    return
        else:
            resource_to_patch = self._patches

        patches_to_apply = {resource: self._patches[resource] for resource in resource_to_patch}

        # apply changes
        self._apply_patches_sampler(patches=patches_to_apply, action_text="Updating", action=self.action)

1498 

1499 def restore(self) -> None: 

1500 self._apply_patches_sampler(patches=self._backups, action_text="Restoring", action=self.action) 

1501 

1502 def __enter__(self) -> "ResourceEditor": 

1503 self.update(backup_resources=True) 

1504 return self 

1505 

1506 def __exit__( 

1507 self, exc_type: type[BaseException] | None, exc_val: BaseException | None, exc_tb: TracebackType | None 

1508 ) -> None: 

1509 # restore backups 

1510 self.restore() 

1511 

1512 @staticmethod 

1513 def _dictify_resourcefield(res: Any) -> Any: 

1514 """Recursively turns any ResourceField objects into dicts to avoid issues caused by appending lists, etc.""" 

1515 if isinstance(res, ResourceField): 

1516 return ResourceEditor._dictify_resourcefield(res=dict(res.items())) 

1517 

1518 elif isinstance(res, dict): 

1519 return { 

1520 ResourceEditor._dictify_resourcefield(res=key): ResourceEditor._dictify_resourcefield(res=value) 

1521 for key, value in res.items() 

1522 } 

1523 

1524 elif isinstance(res, list): 

1525 return [ResourceEditor._dictify_resourcefield(res=x) for x in res] 

1526 

1527 return res 

1528 

1529 @staticmethod 

1530 def _create_backup(original: dict[Any, Any], patch: dict[Any, Any]) -> dict[Any, Any]: 

1531 """ 

1532 Args: 

1533 original (dict*): source of values to back up if necessary 

1534 patch (dict*): 'new' values; keys needn't necessarily all be 

1535 contained in original 

1536 

1537 Returns a dict containing the fields in original that are different 

1538 from update. Performs the 

1539 

1540 Places None for fields in update that don't appear in 

1541 original (because that's how the API knows to remove those fields from 

1542 the yaml). 

1543 

1544 * the first call will be with both of these arguments as dicts but 

1545 this will not necessarily be the case during recursion""" 

1546 

1547 # when both are dicts, get the diff (recursively if need be) 

1548 if isinstance(original, dict) and isinstance(patch, dict): 

1549 diff_dict: dict[Any, Any] = {} 

1550 for key, value in patch.items(): 

1551 if key not in original: 

1552 diff_dict[key] = None 

1553 continue 

1554 

1555 # recursive call 

1556 key_diff = ResourceEditor._create_backup(original=original[key], patch=value) 

1557 

1558 if key_diff is not None: 

1559 diff_dict[key] = key_diff 

1560 

1561 return diff_dict 

1562 

1563 # for one or more non-dict values, just compare them 

1564 if patch != original: 

1565 return original 

1566 else: 

1567 # this return value will be received by key_diff above 

1568 return None 

1569 

1570 @staticmethod 

1571 def _apply_patches(patches: dict[Any, Any], action_text: str, action: str) -> None: 

1572 """ 

1573 Updates provided Resource objects with provided yaml patches 

1574 

1575 Args: 

1576 patches (dict): {<Resource object>: <yaml patch as dict>} 

1577 action_text (str): 

1578 "ResourceEdit <action_text> for resource <resource name>" 

1579 will be printed for each resource; see below 

1580 """ 

1581 

1582 for resource, patch in patches.items(): 

1583 LOGGER.info(f"ResourceEdits: {action_text} data for resource {resource.kind} {resource.name}") 

1584 

1585 # add name to patch 

1586 if "metadata" not in patch: 

1587 patch["metadata"] = {} 

1588 

1589 # the api requires this field to be present in a yaml patch for 

1590 # some resource kinds even if it is not changed 

1591 if "name" not in patch["metadata"]: 

1592 patch["metadata"]["name"] = resource.name 

1593 

1594 if action == "update": 

1595 resource.update(resource_dict=patch) # update the resource 

1596 

1597 if action == "replace": 

1598 if "metadata" not in patch: 

1599 patch["metadata"] = {} 

1600 

1601 patch["metadata"]["name"] = resource.name 

1602 patch["metadata"]["namespace"] = resource.namespace 

1603 patch["metadata"]["resourceVersion"] = resource.instance.metadata.resourceVersion 

1604 patch["kind"] = resource.kind 

1605 patch["apiVersion"] = resource.api_version 

1606 

1607 resource.update_replace(resource_dict=patch) # replace the resource metadata 

1608 

1609 def _apply_patches_sampler(self, patches: dict[Any, Any], action_text: str, action: str) -> ResourceInstance: 

1610 exceptions_dict: dict[type[Exception], list[str]] = {ConflictError: []} 

1611 exceptions_dict.update(DEFAULT_CLUSTER_RETRY_EXCEPTIONS) 

1612 return Resource.retry_cluster_exceptions( 

1613 func=self._apply_patches, 

1614 exceptions_dict=exceptions_dict, 

1615 patches=patches, 

1616 action_text=action_text, 

1617 action=action, 

1618 timeout=TIMEOUT_30SEC, 

1619 sleep_time=TIMEOUT_5SEC, 

1620 )