import os
import traceback
import shutil
from pathlib import Path
import json
from typing import List
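
# Build mkdocs navigation entries for connector readmes: the script scans a library
# repo for library.json files, copies the readme.md that sits next to each matching
# item into the docs tree, and injects nav entries for them into mkdocs.yml by
# replacing a placeholder string.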
logs = []
# get the environment variables
library_repo_path = os.environ["INPUT_LIBRARY_REPO_PATH"] # "library"
path_to_docs = os.environ["INPUT_DOCS_PATH"] # "docs"
nav_replacement_placeholder = os.environ["INPUT_REPLACEMENT_PLACEHOLDER"] # "\n - 'ConnectorsGetInsertedHere': ''"
readme_destination = os.environ["INPUT_README_DEST"] # "docs/docs/library_readmes/connectors"
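
# Note: for a Docker container action, GitHub Actions exposes each input declared in
# action.yml as an environment variable named INPUT_<UPPERCASED INPUT NAME>, which is
# why the values above are read from INPUT_* variables. The quoted strings in the
# comments above look like example values for those inputs rather than guaranteed defaults.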

class File:
    name = ''
    path = ''
    full_path = ''

    def __init__(self, name, path):
        self.name = name
        self.path = path
        self.full_path = "{}/{}".format(self.path, self.name)

class LibrayJsonFile(File):
    readme_path = ''
    readme_docs_path = ''  # set once the readme has been copied into the docs tree
    json = ''

def get_files(source_dir, search_pattern) -> List[File]:
    found_files = []
    for path in Path(source_dir).rglob(search_pattern):
        file = File(path.name, path.parent)
        found_files.append(file)
    return found_files

def json_has_tag(dict_var, tag, value):
    for k, v in dict_var.items():
        if k.lower() == tag.lower() and (value == "" or value in v):
            yield v
        elif isinstance(v, dict):
            for id_val in json_has_tag(v, tag, value):
                yield id_val
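
# Illustrative example (made-up data, not from a real library.json):
#   list(json_has_tag({"tags": {"Pipeline Stage": ["Source"]}}, "pipeline stage", "Source"))
#   -> [["Source"]]
# The key comparison is case-insensitive and an empty value ("") matches any value for the tag.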

def get_files_for_tag(json_data, folder_path: str, tag: str, tag_value: str, search_pattern: str):
    # return matching files from folder_path only when json_data contains the tag/value
    files = []
    for _ in json_has_tag(json_data, tag, tag_value):
        files = get_files(folder_path, search_pattern)
    return files

def has_tag(json_data, tag: str, tag_value: str):
    for _ in json_has_tag(json_data, tag, tag_value):
        return True
    return False

def load_json_file(path):
    if not os.path.exists(path):
        raise Exception("File {} not found".format(path))
    with open(path, "r") as f:
        contents = f.read()
    return json.loads(contents)

def replace_chr(value):
    # drop characters that are awkward in filenames (and spaces)
    exclude_list = "\\/*?. "
    found = []
    for v in value:
        if v not in exclude_list:
            found.append(v)
    return "".join(found)

def copy_files(files, target_dir):
    if not os.path.exists(target_dir):
        log(f"copy_files:: {target_dir} does not exist. Creating..")
        os.makedirs(target_dir, exist_ok=True)
    for file in files:
        # name the copied readme after the library item, stripped of unsafe characters
        new_filename = replace_chr(file.json["name"]) + ".md"
        dest = "{}/{}".format(target_dir, new_filename)
        if os.path.exists(dest):
            os.remove(dest)
        shutil.copy2(file.readme_path, dest)
        file.readme_docs_path = dest
    return files

def get_library_item_with_tag(files: List[File], tag: str, tag_value: str) -> List[LibrayJsonFile]:
    found: List[LibrayJsonFile] = []
    for file in files:
        json_data = load_json_file(file.full_path)
        for _ in json_has_tag(json_data, tag, tag_value):
            f = LibrayJsonFile(file.name, file.path)
            f.json = json_data
            found.append(f)
            break  # one entry per library file, even if the tag matches more than once
    return found

def get_named_files_associated_with_library_file(library_files: List[LibrayJsonFile], name: str, search_pattern: str) -> List[LibrayJsonFile]:
    for library_file in library_files:
        files = get_files(library_file.path, search_pattern)
        for file in files:
            if str(file.name).lower() == name.lower():
                library_file.readme_path = file.full_path
    return library_files

def get_item_by_tag(library_files: List[LibrayJsonFile], tag, tag_value):
    sources = []
    for t in library_files:
        if has_tag(t.json["tags"], tag, tag_value):
            sources.append(t)
    return sources

def build_nav_dict(library_files: List[LibrayJsonFile]):
    nav = {}
    for library_file in library_files:
        lib_id = library_file.json["libraryItemId"]
        nav[lib_id] = {
            "name": library_file.json["name"],
            "readme": library_file.readme_docs_path
        }
    return nav
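
# The resulting dict is keyed by libraryItemId, e.g. (illustrative values, assuming the
# example inputs above):
#   {"kafka-source": {"name": "Kafka Source",
#                     "readme": "docs/docs/library_readmes/connectors/KafkaSource.md"}}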

def build_nav(nav_dict, section_title):
    nav_replacement_lines = []
    nav_title_indentation = 6  # spaces
    spaces = ' ' * nav_title_indentation
    log(f"Adding nav entries for '{section_title}'")
    line = f"{spaces}- '{section_title}':"
    log(line)
    nav_replacement_lines.append(line)
    for n in nav_dict:
        path_to_readme = nav_dict[n]["readme"].replace("docs/", "")
        line = f"{spaces} - '{nav_dict[n]['name']}': '{path_to_readme}'"
        log(line)
        nav_replacement_lines.append(line)
    return nav_replacement_lines
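
# With the example inputs above, the generated lines look roughly like (illustrative name):
#   "      - 'Sources':"
#   "       - 'Kafka Source': 'library_readmes/connectors/KafkaSource.md'"
# Note that str.replace removes every "docs/" occurrence from the copied readme path, so
# "docs/docs/library_readmes/connectors/..." becomes "library_readmes/connectors/...".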

def gen_nav_replacement(tech_readmes, section_title, tag, tag_value):
    tagged_items = get_item_by_tag(tech_readmes, tag, tag_value)
    nav_dict = build_nav_dict(tagged_items)
    return build_nav(nav_dict, section_title)

def update_nav(nav_file_path, find_text, replacement_text):
    with open(nav_file_path, 'r') as file:
        file_data = file.read()
    file_data = file_data.replace(find_text, replacement_text)
    with open(nav_file_path, 'w') as file:
        file.write(file_data)
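
# update_nav does a plain string replacement, so the placeholder passed via
# INPUT_REPLACEMENT_PLACEHOLDER has to appear in mkdocs.yml exactly as given.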

def set_action_output(name: str, value: str):
    with open(os.environ["GITHUB_OUTPUT"], "a") as fh:
        fh.write(f"{name}={value}\n")

def log(message):
    logs.append(message)

def main():
    try:
        library_file_dictionary = get_files(library_repo_path, 'library.json')
        # filter library files down to a specific tag and value
        tech_connector_library_files = get_library_item_with_tag(library_file_dictionary, "type", "Tech connectors")
        # get readmes for those library items, filtering on tag and value
        tech_readmes = get_named_files_associated_with_library_file(tech_connector_library_files, "readme.md", "*.md")
        tech_readmes = copy_files(tech_readmes, readme_destination)
        # generate the nav replacements
        nav_replacement = []
        sources_nav_replacement = gen_nav_replacement(tech_readmes, "Sources", "Pipeline Stage", "Source")
        destinations_nav_replacement = gen_nav_replacement(tech_readmes, "Destinations", "Pipeline Stage", "Destination")
        # To support the Technologies category, or any other category that reuses a readme
        # (links to it a second or third time), we'd have to determine the categories first,
        # find the readme that goes with each one, and copy the file under a unique name;
        # otherwise mkdocs links every nav item to the last entry that references that md
        # file rather than the nav item that was clicked.
        # technologies_nav_replacement = gen_nav_replacement(tech_readmes, "Technologies", "Technology", "")
        # add them to the nav array
        nav_replacement.extend(sources_nav_replacement)
        nav_replacement.extend(destinations_nav_replacement)
        # nav_replacement.extend(technologies_nav_replacement)
        # log(f"Nav replacement built\n [{nav_replacement}]")
        # get the nav file
        nav_files = get_files(path_to_docs, 'mkdocs.yml')
        if len(nav_files) == 0:
            log("mkdocs.yml not found")
            raise Exception(f"mkdocs.yml not found in {path_to_docs}")
        # log(f"Updating nav file: {nav_files[0].full_path}")
        log(f"Yaml file path: {nav_files[0].full_path}")
        # join with new line
        n = "\n".join(nav_replacement)
        log(f"Nav replacement string: {n}")
        update_nav(nav_files[0].full_path, nav_replacement_placeholder, n)
    except Exception:
        # format_exc() returns the traceback as a string; print_exc() prints it and returns None
        print(f"Error: {traceback.format_exc()}")
        log(f"Error: {traceback.format_exc()}")
    finally:
        set_action_output("logs", logs)

if __name__ == "__main__":
    main()