Coverage for class_generator/class_generator.py: 0%
397 statements
« prev ^ index » next coverage.py v7.6.10, created at 2025-02-12 18:08 +0200
« prev ^ index » next coverage.py v7.6.10, created at 2025-02-12 18:08 +0200
1from __future__ import annotations
3import filecmp
4import json
5import shlex
6import os
7import sys
8import requests
9from pathlib import Path
10from packaging.version import Version
11import shutil
12from tempfile import gettempdir
14import textwrap
15from typing import Any
16import click
17import re
18from concurrent.futures import Future, ThreadPoolExecutor, as_completed
19import cloup
20from cloup.constraints import If, IsSet, accept_none, require_one
21from pyhelper_utils.shell import run_command
22import pytest
23from rich.console import Console
24from rich.syntax import Syntax
26from ocp_resources.resource import Resource
28from jinja2 import DebugUndefined, Environment, FileSystemLoader, meta
29from simple_logger.logger import get_logger
# Section-name markers used by the generated-class template.
SPEC_STR: str = "SPEC"
FIELDS_STR: str = "FIELDS"
LOGGER = get_logger(name="class_generator")
# Directory holding per-kind test manifests used by the generated tests.
TESTS_MANIFESTS_DIR: str = "class_generator/tests/manifests"
# Directory where the cluster's JSON schema files are stored.
SCHEMA_DIR: str = "class_generator/schema"
# JSON mapping of lowercase kind -> list of schema dicts (one entry per group/version).
RESOURCES_MAPPING_FILE: str = os.path.join(SCHEMA_DIR, "__resources-mappings.json")
# Placeholder used when the API schema provides no field description.
MISSING_DESCRIPTION_STR: str = "No field description from API; please add description"
def _is_kind_and_namespaced(
    client: str, _key: str, _data: dict[str, Any], kind: str, group: str, version: str
) -> dict[str, Any]:
    """
    Check whether a schema definition entry is a real cluster resource and,
    if so, whether it is namespaced.

    Args:
        client (str): Cluster client binary (`oc` or `kubectl`).
        _key (str): Definition key (`group.version.kind`) from the openapi schema.
        _data (dict[str, Any]): Schema definition for the key; mutated with a
            `namespaced` flag when the kind is a valid resource.
        kind (str): The resource kind.
        group (str): The resource API group (may be empty).
        version (str): The resource API version.

    Returns:
        dict[str, Any]: `{"is_kind": True, "kind": _key, "data": _data}` for valid
        resources, otherwise `{"is_kind": False, "kind": _key}`.
    """
    group_and_version = f"{group}/{version}" if group else version
    non_resource = {"is_kind": False, "kind": _key}

    # `explain` fails for schema entries that are not real cluster resources.
    if not run_command(command=shlex.split(f"{client} explain {kind}"), check=False, log_errors=False)[0]:
        return non_resource

    base_cmd = f"bash -c '{client} api-resources"

    # Verify this group/version is actually served for the kind.
    version_served = run_command(
        command=shlex.split(f"{base_cmd} | grep -w {kind} | grep {group_and_version}'"),
        check=False,
        log_errors=False,
    )[0]
    if not version_served:
        return non_resource

    # A single matching line under `--namespaced` means the resource is namespaced.
    namespaced_out = run_command(
        command=shlex.split(
            f"{base_cmd} --namespaced | grep -w {kind} | grep {group_and_version} | wc -l'"
        ),
        check=False,
        log_errors=False,
    )[1]
    _data["namespaced"] = namespaced_out.strip() == "1"
    return {"is_kind": True, "kind": _key, "data": _data}
def map_kind_to_namespaced(client: str, newer_cluster_version: bool, schema_definition_file: Path) -> None:
    """
    Build/update the kind -> schema-data mapping file from an openapi schema dump.

    Every definition that carries `x-kubernetes-group-version-kind` is checked against
    the live cluster (in parallel) to determine whether it is a real resource and
    whether it is namespaced. Results are written to RESOURCES_MAPPING_FILE; keys that
    turned out not to be resources are cached in `__not-kind.txt` so later runs skip them.

    Args:
        client (str): Cluster client binary (`oc` or `kubectl`).
        newer_cluster_version (bool): When True, re-process kinds already present in the
            mapping (the cluster is newer than the last generated version).
        schema_definition_file (Path): Path to the openapi JSON dump of the cluster.
    """
    not_kind_file: str = os.path.join(SCHEMA_DIR, "__not-kind.txt")

    resources_mapping = read_resources_mapping_file()

    # Load the cached list of keys known not to be resources, if present.
    if os.path.isfile(not_kind_file):
        with open(not_kind_file) as fd:
            not_kind_list = fd.read().split("\n")
    else:
        not_kind_list = []

    with open(schema_definition_file) as fd:
        _definitions_json_data = json.load(fd)

    # Fan out the per-kind cluster checks; each future resolves to an
    # `_is_kind_and_namespaced` result dict.
    _kind_data_futures: list[Future] = []
    with ThreadPoolExecutor() as executor:
        for _key, _data in _definitions_json_data["definitions"].items():
            if not _data.get("x-kubernetes-group-version-kind"):
                continue

            if _key in not_kind_list:
                continue

            x_kubernetes_group_version_kind = extract_group_kind_version(_kind_schema=_data)
            _kind = x_kubernetes_group_version_kind["kind"]
            _group = x_kubernetes_group_version_kind.get("group", "")
            _version = x_kubernetes_group_version_kind.get("version", "")

            # Do not add the resource if it is already in the mapping and the cluster version is not newer than the last
            if resources_mapping.get(_kind.lower()) and not newer_cluster_version:
                continue

            _kind_data_futures.append(
                executor.submit(
                    _is_kind_and_namespaced,
                    client=client,
                    _key=_key,
                    _data=_data,
                    kind=_kind,
                    group=_group,
                    version=_version,
                )
            )

    # Collect results into a fresh dict so existing mapping lists are replaced, not extended.
    _temp_resources_mappings: dict[Any, Any] = {}
    for res in as_completed(_kind_data_futures):
        _res = res.result()
        # _res["kind"] is group.version.kind, set only kind as key in the final dict
        kind_key = _res["kind"].rsplit(".", 1)[-1].lower()

        if _res["is_kind"]:
            _temp_resources_mappings.setdefault(kind_key, []).append(_res["data"])
        else:
            not_kind_list.append(_res["kind"])

    # Update the resources mapping dict with the one that we filled to avoid duplication in the lists
    resources_mapping.update(_temp_resources_mappings)

    with open(RESOURCES_MAPPING_FILE, "w") as fd:
        json.dump(resources_mapping, fd, indent=4)

    with open(not_kind_file, "w") as fd:
        fd.writelines("\n".join(not_kind_list))
def read_resources_mapping_file() -> dict[Any, Any]:
    """
    Load the kind -> schema-data mapping from RESOURCES_MAPPING_FILE.

    Returns:
        dict[Any, Any]: The parsed mapping, or an empty dict when the file is
        missing or contains invalid JSON.
    """
    try:
        return json.loads(Path(RESOURCES_MAPPING_FILE).read_text())
    except (FileNotFoundError, json.JSONDecodeError):
        return {}
def get_server_version(client: str) -> str:
    """
    Return the cluster server's git version string.

    Exits the process when the client call fails.

    Args:
        client (str): Cluster client binary (`oc` or `kubectl`).

    Returns:
        str: The `serverVersion.gitVersion` value reported by the cluster.
    """
    rc, out, _ = run_command(command=shlex.split(f"{client} version -o json"), check=False)
    if not rc:
        LOGGER.error("Failed to get server version")
        sys.exit(1)

    json_out = json.loads(out)
    return json_out["serverVersion"]["gitVersion"]
def get_client_binary() -> str:
    """
    Return the available cluster client binary, preferring `oc` over `kubectl`.

    Exits the process when neither binary is found in PATH.

    Returns:
        str: "oc" or "kubectl".
    """
    # shutil.which performs the PATH lookup without spawning a shell and without
    # leaking the `which` output to stdout the way `os.system("which ...")` did.
    if shutil.which("oc"):
        return "oc"

    if shutil.which("kubectl"):
        return "kubectl"

    LOGGER.error("Failed to find oc or kubectl")
    sys.exit(1)
def update_kind_schema() -> None:
    """
    Regenerate the JSON schema files and the kind mapping from the live cluster.

    Fetches the cluster's openapi/v2 document, converts it to per-kind JSON schema
    files with `openapi2jsonschema`, copies them into SCHEMA_DIR (all files when the
    cluster is newer than the last generated version, only new files otherwise) and
    finally rebuilds the kind -> namespaced mapping.

    Exits the process on any unrecoverable step (missing tooling, no login token,
    failed schema download/generation).
    """
    openapi2jsonschema_str: str = "openapi2jsonschema"
    client = get_client_binary()

    if not run_command(command=shlex.split("which openapi2jsonschema"), check=False, log_errors=False)[0]:
        LOGGER.error(
            f"{openapi2jsonschema_str} not found. Install it using `pipx install --python python3.9 openapi2jsonschema`"
        )
        sys.exit(1)

    # Token-based auth is required to fetch the openapi document below.
    rc, token, _ = run_command(command=shlex.split(f"{client} whoami -t"), check=False, log_errors=False)
    if not rc:
        LOGGER.error(
            f"Failed to get token.\nMake sure you are logged in to the cluster using user and password using `{client} login`"
        )
        sys.exit(1)

    api_url = run_command(command=shlex.split(f"{client} whoami --show-server"), check=False, log_errors=False)[
        1
    ].strip()
    # NOTE(review): TLS verification is disabled here (`verify=False`) — presumably for
    # clusters with self-signed certs; confirm this is intentional.
    data = requests.get(f"{api_url}/openapi/v2", headers={"Authorization": f"Bearer {token.strip()}"}, verify=False)

    if not data.ok:
        LOGGER.error("Failed to get openapi schema.")
        sys.exit(1)

    # The last cluster version we generated schemas for; used to decide whether to
    # overwrite existing schema files or only add new ones.
    cluster_version_file = Path("class_generator/__cluster_version__.txt")
    last_cluster_version_generated: str = ""
    try:
        with open(cluster_version_file, "r") as fd:
            last_cluster_version_generated = fd.read().strip()
    except (FileNotFoundError, IOError) as exp:
        LOGGER.error(f"Failed to read cluster version file: {exp}")
        sys.exit(1)

    cluster_version = get_server_version(client=client)
    # Drop the build-metadata suffix (e.g. `v4.16.0+abcd` -> `v4.16.0`).
    cluster_version = cluster_version.split("+")[0]
    ocp_openapi_json_file = Path(gettempdir()) / f"__k8s-openapi-{cluster_version}__.json"

    newer_version: bool = Version(cluster_version) > Version(last_cluster_version_generated)

    if newer_version:
        with open(cluster_version_file, "w") as fd:
            fd.write(cluster_version)

    with open(ocp_openapi_json_file, "w") as fd:
        fd.write(data.text)

    # Generate schemas into a temp dir first, then copy into SCHEMA_DIR below.
    tmp_schema_dir = Path(gettempdir()) / f"{SCHEMA_DIR}-{cluster_version}"

    if not run_command(command=shlex.split(f"{openapi2jsonschema_str} {ocp_openapi_json_file} -o {tmp_schema_dir}"))[0]:
        LOGGER.error("Failed to generate schema.")
        sys.exit(1)

    if newer_version:
        # copy all files from tmp_schema_dir to schema dir
        shutil.copytree(src=tmp_schema_dir, dst=SCHEMA_DIR, dirs_exist_ok=True)

    else:
        # Copy only new files from tmp_schema_dir to schema dir
        for root, _, files in os.walk(tmp_schema_dir):
            for file_ in files:
                dst_file = Path(SCHEMA_DIR) / file_
                try:
                    if not os.path.isfile(dst_file):
                        shutil.copy(src=Path(root) / file_, dst=dst_file)
                except (OSError, IOError) as exp:
                    LOGGER.error(f"Failed to copy file {file_}: {exp}")
                    sys.exit(1)

    map_kind_to_namespaced(
        client=client, newer_cluster_version=newer_version, schema_definition_file=ocp_openapi_json_file
    )
def convert_camel_case_to_snake_case(string_: str) -> str:
    """
    Converts a camel case string to snake case.

    Args:
        string_ (str): The camel case string to convert.

    Returns:
        str: The snake case representation of the input string.

    Examples:
        >>> convert_camel_case_to_snake_case(string_="allocateLoadBalancerNodePorts")
        'allocate_load_balancer_node_ports'

        >>> convert_camel_case_to_snake_case(string_="clusterIPs")
        'cluster_ips'

        >>> convert_camel_case_to_snake_case(string_="additionalCORSAllowedOS")
        'additional_cors_allowed_os'

    Notes:
        - This function assumes that the input string adheres to camel case conventions.
        - If the input string contains acronyms (e.g., "XMLHttpRequest"), they will be treated as separate words
          (e.g., "xml_http_request").
        - The function handles both single-word camel case strings (e.g., "Service") and multi-word camel case strings
          (e.g., "myCamelCaseString").
    """
    do_not_proccess_list = ["OAuth", "KubeVirt"]

    # Strings in the exclusion list are returned lowercased as-is (no underscores added).
    if string_.lower() in [_str.lower() for _str in do_not_proccess_list]:
        return string_.lower()

    formatted_str: str = ""

    # Already snake/lower case — nothing to do.
    if string_.islower():
        return string_

    # For single words, e.g "Service" or "SERVICE"
    if string_.istitle() or string_.isupper():
        return string_.lower()

    # To decide if underscore is needed before a char, keep the last char format.
    # If previous char is uppercase, underscore should not be added. Also applied for the first char in the string.
    last_capital_char: bool | None = None

    # To decide if there are additional words ahead; if found, there is at least one more word ahead, else this is the
    # last word. Underscore should be added before it and all chars from here should be lowercase.
    following_capital_chars: re.Match | None = None

    str_len_for_idx_check = len(string_) - 1

    for idx, char in enumerate(string_):
        # If lower case, append to formatted string
        if char.islower():
            formatted_str += char
            last_capital_char = False

        # If first char is uppercase
        elif idx == 0:
            formatted_str += char.lower()
            last_capital_char = True

        else:
            # Look ahead for more uppercase chars; determines whether this is the last word.
            if idx < str_len_for_idx_check:
                following_capital_chars = re.search(r"[A-Z]", "".join(string_[idx + 1 :]))
            if last_capital_char:
                if idx < str_len_for_idx_check and string_[idx + 1].islower():
                    if following_capital_chars:
                        formatted_str += f"_{char.lower()}"
                        last_capital_char = True
                        continue

                    remaining_str = "".join(string_[idx:])
                    # The 2 letters in the string; uppercase char followed by lowercase char.
                    # Example: `clusterIPs`, handle `Ps` at this point
                    if idx + 1 == str_len_for_idx_check:
                        formatted_str += remaining_str.lower()
                        break

                    # The last word in the string; uppercase followed by multiple lowercase chars
                    # Example: `dataVolumeTTLSeconds`, handle `Seconds` at this point
                    elif remaining_str.istitle():
                        formatted_str += f"_{remaining_str.lower()}"
                        break

                    else:
                        formatted_str += char.lower()
                        last_capital_char = True

                else:
                    formatted_str += char.lower()
                    last_capital_char = True

            else:
                # Previous char was lowercase — this uppercase char starts a new word.
                formatted_str += f"_{char.lower()}"
                last_capital_char = True

    return formatted_str
def render_jinja_template(template_dict: dict[Any, Any], template_dir: str, template_name: str) -> str:
    """
    Render a jinja template and fail hard on undefined variables.

    Args:
        template_dict (dict[Any, Any]): Values passed to the template.
        template_dir (str): Directory containing the template file.
        template_name (str): Template file name.

    Returns:
        str: The rendered text. Exits the process when any template variable was
        left undefined after rendering.
    """
    environment = Environment(
        loader=FileSystemLoader(template_dir),
        trim_blocks=True,
        lstrip_blocks=True,
        undefined=DebugUndefined,
    )

    rendered = environment.get_template(name=template_name).render(template_dict)

    # DebugUndefined leaves `{{ var }}` placeholders in the output; re-parsing the
    # rendered text exposes any variables that were never provided.
    leftover_variables = meta.find_undeclared_variables(environment.parse(rendered))
    if leftover_variables:
        LOGGER.error(f"The following variables are undefined: {leftover_variables}")
        sys.exit(1)

    return rendered
def parse_user_code_from_file(file_path: str) -> tuple[str, str]:
    """
    Split a previously generated file into user-written code and import lines.

    Everything after the end-of-generated-code marker is treated as user code; all
    `import`/`from` lines in the whole file are collected so they can be re-emitted
    ahead of the regenerated body.

    Args:
        file_path (str): Path of the python file to parse.

    Returns:
        tuple[str, str]: (user code after the marker, or ""; newline-terminated
        import lines concatenated into one string).
    """
    content = Path(file_path).read_text()

    marker = "    # End of generated code"
    user_code = ""
    if marker in content:
        # Keep everything that follows the first marker occurrence, verbatim.
        user_code = content.split(marker, 1)[-1]

    import_lines = [_line for _line in content.splitlines() if _line.startswith(("import", "from"))]
    user_imports = "".join(f"{_line}\n" for _line in import_lines)

    return user_code, user_imports
def generate_resource_file_from_dict(
    resource_dict: dict[str, Any],
    overwrite: bool = False,
    dry_run: bool = False,
    output_file: str = "",
    add_tests: bool = False,
    output_file_suffix: str = "",
    output_dir: str = "",
) -> tuple[str, str]:
    """
    Render a resource template dict into a python class file.

    Args:
        resource_dict (dict[str, Any]): Template dict (as built by `parse_explain`).
        overwrite (bool): Overwrite an existing file instead of writing a `_TEMP` copy.
        dry_run (bool): Print the generated code to the console instead of writing it.
        output_file (str): Explicit destination path; kind-derived name used when empty.
        add_tests (bool): Write under TESTS_MANIFESTS_DIR (implies overwrite).
        output_file_suffix (str): Suffix appended to the kind-derived filename
            (used when a kind exists in multiple API groups).
        output_dir (str): Target directory; defaults to `ocp_resources` when empty.

    Returns:
        tuple[str, str]: (originally intended file path, path actually written —
        these differ when a `_TEMP` copy was used to avoid overwriting).
    """
    base_dir = output_dir or "ocp_resources"
    if not os.path.exists(base_dir):
        os.makedirs(base_dir)

    rendered = render_jinja_template(
        template_dict=resource_dict,
        template_dir="class_generator/manifests",
        template_name="class_generator_template.j2",
    )

    output = "# Generated using https://github.com/RedHatQE/openshift-python-wrapper/blob/main/scripts/resource/README.md\n\nfrom __future__ import annotations\n"
    formatted_kind_str = convert_camel_case_to_snake_case(string_=resource_dict["kind"])
    _file_suffix: str = f"{'_' + output_file_suffix if output_file_suffix else ''}"

    # Decide where the generated file goes: tests manifests dir, an explicit path,
    # or the default kind-derived path under base_dir.
    if add_tests:
        overwrite = True
        tests_path = os.path.join(TESTS_MANIFESTS_DIR, resource_dict["kind"])
        if not os.path.exists(tests_path):
            os.makedirs(tests_path)

        _output_file = os.path.join(tests_path, f"{formatted_kind_str}{_file_suffix}.py")

    elif output_file:
        _output_file = output_file

    else:
        _output_file = os.path.join(base_dir, f"{formatted_kind_str}{_file_suffix}.py")

    _output_file_exists: bool = os.path.exists(_output_file)
    _user_code: str = ""
    _user_imports: str = ""

    # Preserve any user-written code that follows the generated section of an
    # existing file so regeneration does not lose it.
    if _output_file_exists and not add_tests:
        _user_code, _user_imports = parse_user_code_from_file(file_path=_output_file)

    orig_filename = _output_file
    if _output_file_exists:
        if overwrite:
            LOGGER.warning(f"Overwriting {_output_file}")

        else:
            # Without overwrite, write next to the existing file as `<name>_TEMP.py`.
            temp_output_file = _output_file.replace(".py", "_TEMP.py")
            LOGGER.warning(f"{_output_file} already exists, using {temp_output_file}")
            _output_file = temp_output_file

    if _user_code or _user_imports:
        output += f"{_user_imports}{rendered}{_user_code}"
    else:
        output += rendered

    if dry_run:
        _code = Syntax(code=output, lexer="python", line_numbers=True)
        Console().print(_code)

    else:
        write_and_format_rendered(filepath=_output_file, output=output)

    return orig_filename, _output_file
def types_generator(key_dict: dict[str, Any]) -> dict[str, str]:
    """
    Map a schema property type to python type strings for the generated class.

    Args:
        key_dict (dict[str, Any]): A single property schema. `type` may be absent
            when the field is declared with `x-kubernetes-preserve-unknown-fields`;
            in that case `Any` is used.

    Returns:
        dict[str, str]: `type-for-doc` (docstring type) and `type-for-init`
        (init-argument annotation with a `None` default).
    """
    python_types: dict[str, str] = {
        "array": "list[Any]",
        "string": "str",
        "boolean": "bool",
        "integer": "int",
        "object": "dict[str, Any]",
    }
    type_for_docstring = python_types.get(key_dict.get("type"), "Any")

    # Every generated argument is optional, since a resource can also be built
    # entirely from `yaml_file`.
    return {
        "type-for-init": f"{type_for_docstring} | None = None",
        "type-for-doc": type_for_docstring,
    }
def get_property_schema(property_: dict[str, Any]) -> dict[str, Any]:
    """
    Resolve a schema property, following its `$ref` to an on-disk schema file if set.

    Args:
        property_ (dict[str, Any]): A single property schema.

    Returns:
        dict[str, Any]: The referenced schema loaded from SCHEMA_DIR, or the
        property itself when it carries no `$ref`.
    """
    reference = property_.get("$ref")
    if not reference:
        return property_

    # The schema file is named after the last dotted component of the ref, lowercased.
    schema_file = f"{SCHEMA_DIR}/{reference.rsplit('.')[-1].lower()}.json"
    with open(schema_file) as fd:
        return json.load(fd)
def format_description(description: str) -> str:
    """
    Wrap a schema description for inclusion in a generated docstring.

    Args:
        description (str): Raw description text from the schema.

    Returns:
        str: The text wrapped into newline-terminated lines with continuation
        lines indented. Empty input yields an empty string.
    """
    wrapped_lines = textwrap.wrap(text=description, subsequent_indent="    ")
    return "".join(f"{line}\n" for line in wrapped_lines)
def prepare_property_dict(
    schema: dict[str, Any],
    required: list[str],
    resource_dict: dict[str, Any],
    dict_key: str,
) -> dict[str, Any]:
    """
    Append template-ready argument entries for each schema property to `resource_dict`.

    Args:
        schema (dict[str, Any]): Properties of the resource (or of its spec).
        required (list[str]): Property names that are mandatory.
        resource_dict (dict[str, Any]): Accumulating template dict; the list under
            `dict_key` is extended in place.
        dict_key (str): Target list key, either "spec" or "fields".

    Returns:
        dict[str, Any]: The same `resource_dict`, with entries appended.
    """
    skip_keys: list[str] = ["kind", "apiVersion", "status", SPEC_STR.lower()]
    # `annotations`/`labels` would clash with metadata arguments, so they get a
    # `<dict_key>_` prefix before snake-casing.
    prefixed_keys: set[str] = {"annotations", "labels"}
    if dict_key != SPEC_STR.lower():
        skip_keys.append("metadata")

    for prop_name, prop_value in schema.items():
        if prop_name in skip_keys:
            continue

        prop_schema = get_property_schema(property_=prop_value)
        type_dict = types_generator(key_dict=prop_schema)
        arg_name = convert_camel_case_to_snake_case(
            string_=f"{dict_key}_{prop_name}" if prop_name in prefixed_keys else prop_name
        )
        resource_dict[dict_key].append({
            "name-for-class-arg": arg_name,
            "property-name": prop_name,
            "required": prop_name in required,
            "description": format_description(description=prop_schema.get("description", MISSING_DESCRIPTION_STR)),
            "type-for-docstring": type_dict["type-for-doc"],
            "type-for-class-arg": f"{arg_name}: {type_dict['type-for-init']}",
        })

    return resource_dict
def parse_explain(
    kind: str,
) -> list[dict[str, Any]]:
    """
    Build jinja template dicts for a kind from the resources mapping file.

    A kind may exist in multiple API groups; one dict is returned per matching schema.
    Missing API group/version attributes on the `Resource` class are reported as
    warnings so they can be added manually.

    Args:
        kind (str): The resource kind (case-insensitive).

    Returns:
        list[dict[str, Any]]: Template dicts with base class, description,
        group/version info and `fields`/`spec` argument lists.

    Raises:
        KeyError: If the kind is not present in the resources mapping file.
    """
    _schema_definition = read_resources_mapping_file()
    _resources: list[dict[str, Any]] = []

    _kinds_schema = _schema_definition[kind.lower()]
    for _kind_schema in _kinds_schema:
        namespaced = _kind_schema["namespaced"]
        resource_dict: dict[str, Any] = {
            "base_class": "NamespacedResource" if namespaced else "Resource",
            "description": _kind_schema.get("description", MISSING_DESCRIPTION_STR),
            "fields": [],
            "spec": [],
        }

        schema_properties: dict[str, Any] = _kind_schema.get("properties", {})
        fields_required = _kind_schema.get("required", [])

        resource_dict.update(extract_group_kind_version(_kind_schema=_kind_schema))

        # Fill the `spec` argument list from the spec sub-schema, when present.
        if spec_schema := schema_properties.get("spec", {}):
            spec_schema = get_property_schema(property_=spec_schema)
            spec_required = spec_schema.get("required", [])
            resource_dict = prepare_property_dict(
                schema=spec_schema.get("properties", {}),
                required=spec_required,
                resource_dict=resource_dict,
                dict_key="spec",
            )

        # Fill the top-level `fields` argument list.
        resource_dict = prepare_property_dict(
            schema=schema_properties,
            required=fields_required,
            resource_dict=resource_dict,
            dict_key="fields",
        )

        api_group_real_name = resource_dict.get("group")
        # If API Group is not present in resource, try to get it from VERSION
        if not api_group_real_name:
            version_splited = resource_dict["version"].split("/")
            if len(version_splited) == 2:
                api_group_real_name = version_splited[0]

        if api_group_real_name:
            # Normalize to the attribute-name form used on Resource.ApiGroup.
            api_group_for_resource_api_group = api_group_real_name.upper().replace(".", "_").replace("-", "_")
            resource_dict["group"] = api_group_for_resource_api_group
            missing_api_group_in_resource: bool = not hasattr(Resource.ApiGroup, api_group_for_resource_api_group)

            if missing_api_group_in_resource:
                LOGGER.warning(
                    f"Missing API Group in Resource\n"
                    f"Please add `Resource.ApiGroup.{api_group_for_resource_api_group} = {api_group_real_name}` "
                    "manually into ocp_resources/resource.py under Resource class > ApiGroup class."
                )

        else:
            api_version_for_resource_api_version = resource_dict["version"].upper()
            missing_api_version_in_resource: bool = not hasattr(
                Resource.ApiVersion, api_version_for_resource_api_version
            )

            if missing_api_version_in_resource:
                LOGGER.warning(
                    f"Missing API Version in Resource\n"
                    f"Please add `Resource.ApiVersion.{api_version_for_resource_api_version} = {resource_dict['version']}` "
                    "manually into ocp_resources/resource.py under Resource class > ApiGroup class."
                )

        _resources.append(resource_dict)

    return _resources
def extract_group_kind_version(_kind_schema: dict[str, Any]) -> dict[str, str]:
    """
    Pick the group/version/kind entry to use from a schema definition.

    Prefers the first entry that declares a `group`; when none does, the last
    entry in the list is returned.

    Args:
        _kind_schema (dict[str, Any]): Schema definition containing
            `x-kubernetes-group-version-kind`.

    Returns:
        dict[str, str]: The selected group/version/kind mapping.
    """
    candidates: list[dict[str, str]] = _kind_schema["x-kubernetes-group-version-kind"]
    return next(
        (candidate for candidate in candidates if candidate.get("group")),
        candidates[-1],
    )
def class_generator(
    kind: str,
    overwrite: bool = False,
    dry_run: bool = False,
    output_file: str = "",
    output_dir: str = "",
    add_tests: bool = False,
    called_from_cli: bool = True,
) -> list[str]:
    """
    Generates a class for a given Kind.

    Args:
        kind (str): Resource kind to generate (case-insensitive).
        overwrite (bool): Overwrite an existing generated file instead of writing a `_TEMP` copy.
        dry_run (bool): Print the generated code instead of writing it.
        output_file (str): Explicit output file path; kind-derived name used when empty.
        output_dir (str): Target directory; defaults to `ocp_resources` when empty.
        add_tests (bool): Write output under the tests manifests dir (implies overwrite).
        called_from_cli (bool): Exit the process on unknown kind instead of returning [].

    Returns:
        list[str]: Paths of the generated python files (one per API group of the kind).
    """
    LOGGER.info(f"Generating class for {kind}")
    kind = kind.lower()
    kind_and_namespaced_mappings = read_resources_mapping_file().get(kind)

    if not kind_and_namespaced_mappings:
        LOGGER.error(f"{kind} not found in {RESOURCES_MAPPING_FILE}, Please run with --update-schema")
        if called_from_cli:
            sys.exit(1)
        else:
            return []

    resources = parse_explain(kind=kind)

    # When the kind exists in more than one API group, suffix each file with its group.
    use_output_file_suffix: bool = len(resources) > 1
    generated_files: list[str] = []
    for resource_dict in resources:
        output_file_suffix = resource_dict["group"].lower() if use_output_file_suffix else ""

        orig_filename, generated_py_file = generate_resource_file_from_dict(
            resource_dict=resource_dict,
            overwrite=overwrite,
            dry_run=dry_run,
            output_file=output_file,
            add_tests=add_tests,
            output_file_suffix=output_file_suffix,
            output_dir=output_dir,
        )

        if not dry_run:
            run_command(
                command=shlex.split(f"uvx pre-commit run --files {generated_py_file}"),
                verify_stderr=False,
                check=False,
            )

            # If the `_TEMP` copy ended up identical to the existing file, drop it.
            if orig_filename != generated_py_file and filecmp.cmp(orig_filename, generated_py_file):
                LOGGER.warning(f"File {orig_filename} was not updated, deleting {generated_py_file}")
                Path.unlink(Path(generated_py_file))

        generated_files.append(generated_py_file)

    return generated_files
def write_and_format_rendered(filepath: str, output: str) -> None:
    """
    Write rendered content to a file and run ruff format/check on it.

    Args:
        filepath (str): Destination file path.
        output (str): File content to write.
    """
    Path(filepath).write_text(output)

    # Format first, then lint; ruff failures are tolerated (check=False).
    for ruff_subcommand in ("format", "check"):
        run_command(
            command=shlex.split(f"uvx ruff {ruff_subcommand} {filepath}"),
            verify_stderr=False,
            check=False,
        )
def generate_class_generator_tests() -> None:
    """
    Regenerate `test_class_generator.py` from the manifests directory layout.

    Each sub-directory of TESTS_MANIFESTS_DIR is treated as a resource kind; a file
    ending with `_res.py` inside it is recorded as the expected generated resource.
    """
    tests_info: dict[str, list[dict[str, str]]] = {"template": []}
    dirs_to_ignore: list[str] = ["__pycache__"]

    for entry in os.listdir(TESTS_MANIFESTS_DIR):
        if entry in dirs_to_ignore:
            continue

        entry_path = os.path.join(TESTS_MANIFESTS_DIR, entry)
        if not os.path.isdir(entry_path):
            continue

        test_data = {"kind": entry}

        for manifest_file in os.listdir(entry_path):
            if manifest_file.endswith("_res.py"):
                test_data["res_file"] = manifest_file

        tests_info["template"].append(test_data)

    rendered = render_jinja_template(
        template_dict=tests_info,
        template_dir=TESTS_MANIFESTS_DIR,
        template_name="test_parse_explain.j2",
    )

    write_and_format_rendered(
        filepath=os.path.join(Path(TESTS_MANIFESTS_DIR).parent, "test_class_generator.py"),
        output=rendered,
    )
@cloup.command("Resource class generator", show_constraints=True)
@cloup.option(
    "-k",
    "--kind",
    type=click.STRING,
    help="""
    \b
    The Kind to generate the class for, Needs working cluster with admin privileges.
    multiple kinds can be sent separated by comma (without spaces)
    Example: -k Deployment,Pod,ConfigMap
""",
)
@cloup.option(
    "-o",
    "--output-file",
    help="The full filename path to generate a python resource file. If not sent, resource kind will be used",
    type=click.Path(),
)
@cloup.option(
    "--overwrite",
    is_flag=True,
    help="Output file overwrite existing file if passed",
)
@cloup.option("--dry-run", is_flag=True, help="Run the script without writing to file")
@cloup.option(
    "--add-tests",
    help=f"Add a test to `test_class_generator.py` and test files to `{TESTS_MANIFESTS_DIR}` dir",
    is_flag=True,
    show_default=True,
)
@cloup.option(
    "--update-schema",
    help="Update kind schema files",
    is_flag=True,
    show_default=True,
)
@cloup.constraint(
    If("update_schema", then=accept_none),
    ["add_tests", "dry_run", "kind", "output_file", "overwrite"],
)
@cloup.constraint(
    If(
        IsSet("add_tests"),
        then=accept_none,
    ),
    ["output_file", "dry_run", "update_schema", "overwrite"],
)
@cloup.constraint(require_one, ["kind", "update_schema"])
def main(
    kind: str,
    overwrite: bool,
    dry_run: bool,
    output_file: str,
    add_tests: bool,
    update_schema: bool,
) -> None:
    # CLI entry point: either refresh the schema files or generate classes for the
    # requested kind(s).
    if update_schema:
        return update_kind_schema()

    _kwargs: dict[str, Any] = {
        "overwrite": overwrite,
        "dry_run": dry_run,
        "output_file": output_file,
        "add_tests": add_tests,
    }

    kinds: list[str] = kind.split(",")
    futures: list[Future] = []

    with ThreadPoolExecutor() as executor:
        for _kind in kinds:
            _kwargs["kind"] = _kind

            # A single kind runs inline; multiple kinds are generated in parallel.
            if len(kinds) == 1:
                class_generator(**_kwargs)

            else:
                # Bug fix: submitted futures were never collected, so the
                # `as_completed` loop below iterated an empty list and worker
                # exceptions were silently discarded.
                futures.append(
                    executor.submit(
                        class_generator,
                        **_kwargs,
                    )
                )

        for _future in as_completed(futures):
            # Wait for all tasks and surface any worker exception.
            _future.result()

    if add_tests:
        generate_class_generator_tests()
        pytest.main(["-k", "test_class_generator"])
if __name__ == "__main__":
    # CLI entry point; arguments are parsed by the cloup/click decorators on main().
    main()