Refactor to parse multiple files from the directory

Signed-off-by: Thomas A. Christensen II <25492070+MillironX@users.noreply.github.com>
Thomas A. Christensen II 2022-01-10 16:13:30 -06:00
parent ab23cd2e17
commit ccdb61ac27
Signed by: millironx
GPG key ID: 139C07724802BC5D
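With this change, the path argument given to the directive is treated as a directory: os.walk() recursively collects every file ending in .nf beneath it, so a whole pipeline can be documented with one directive invocation, e.g. ".. nfdocs:: path/to/pipeline" in a Sphinx reStructuredText page (the path shown is illustrative, not taken from this commit). Previously the argument had to point at a single Nextflow script, as the removed lines in the diff below show.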


@@ -1,4 +1,5 @@
 #!/usr/bin/env python
+import os
 import yaml
 from docutils import nodes
 from docutils.parsers.rst import Directive
@@ -53,79 +54,84 @@ class NFDocs(Directive):
     def run(self):
         # Take path as single argument for now
         nextflow_path = self.arguments[0]
-        with open(nextflow_path) as nextflow_file:
+        print(nextflow_path)

-            # Split by lines
-            nextflow_lines = nextflow_file.readlines()
+        # Create dictionaries for each of the block types
+        docstrings = {
+            "process": {},
+            "workflow": {},
+            "function": {}
+        }

-            # Declare some variables to keep track of where the docstrings begin and end
-            doc_start = 0
-            doc_end = 0
+        # Create any array to return from the plugin
+        return_nodes = []

-            # Declare dictionaries to keep track of the docstrings
-            docstring_positions = []
+        for root, dirs, files in os.walk(nextflow_path):
+            for f in files:
+                if f.endswith(".nf"):
+                    with open(os.path.join(root,f)) as nextflow_file:

-            # Calculate the start and end positions of each docstring
-            for i, line in enumerate(nextflow_lines):
-                # Check if this is a docstring
-                if line.startswith(self.DOC_STARTER):
-                    # It is: check the next and previous lines to see if this is part of a block
-                    line_previous = nextflow_lines[i-1]
-                    line_next = nextflow_lines[i+1]
-                    if not line_previous.startswith(self.DOC_STARTER):
-                        doc_start = i
-                    if not line_next.startswith(self.DOC_STARTER):
-                        doc_end = i
+                        # Split by lines
+                        nextflow_lines = nextflow_file.readlines()

-                    # Check if we've reached the end of a docstring block
-                    if doc_end == i:
-                        # Add this docstring position to the array
-                        docstring_positions.append(range(doc_start, doc_end+1))
+                        # Declare some variables to keep track of where the docstrings begin and end
+                        doc_start = 0
+                        doc_end = 0

-            # Create dictionaries for each of the block types
-            docstrings = {
-                "process": {},
-                "workflow": {},
-                "function": {}
-            }
+                        # Declare dictionaries to keep track of the docstrings
+                        docstring_positions = []

-            # Parse out the docstrings and put them in the appropriate dictionary
-            for pos in docstring_positions:
-                proc_name, proc_type = self.definition_type(nextflow_lines[pos[-1]+1])
-                doc_yaml = ""
-                for i in pos:
-                    doc_yaml = doc_yaml + nextflow_lines[i].replace(self.DOC_STARTER, "")
-                docstrings[proc_type][proc_name] = yaml.safe_load(doc_yaml)
+                        # Calculate the start and end positions of each docstring
+                        for i, line in enumerate(nextflow_lines):
+                            # Check if this is a docstring
+                            if line.startswith(self.DOC_STARTER):
+                                # It is: check the next and previous lines to see if this is part of a block
+                                line_previous = nextflow_lines[i-1]
+                                line_next = nextflow_lines[i+1]
+                                if not line_previous.startswith(self.DOC_STARTER):
+                                    doc_start = i
+                                if not line_next.startswith(self.DOC_STARTER):
+                                    doc_end = i

-            # Create any array to return from the plugin
-            return_nodes = []
+                                # Check if we've reached the end of a docstring block
+                                if doc_end == i:
+                                    # Add this docstring position to the array
+                                    docstring_positions.append(range(doc_start, doc_end+1))

-            # Try to convert each definition to a node
-            for block_type, block_docs in docstrings.items():
-                block_section = nodes.section()
-                block_section += nodes.title(text=block_type)
-                for proc_name, proc_docs in block_docs.items():
-                    proc_section = nodes.section()
-                    proc_section += nodes.title(text=proc_name)
-                    proc_section += nodes.paragraph(text=proc_docs["summary"])
-                    io_methods = ["input", "output"]
-                    for met in io_methods:
-                        if met in proc_docs.keys():
-                            io_section = nodes.section()
-                            io_section += nodes.title(text=met)
-                            io_list = nodes.bullet_list()
-                            for io in proc_docs[met]:
-                                io_list += self.params_to_list(io)
-                            io_section += io_list
-                            proc_section += io_section
-                            self.state_machine.document.note_implicit_target(io_section)
-                    self.state_machine.document.note_implicit_target(proc_section)
-                    block_section += proc_section
+                        # Parse out the docstrings and put them in the appropriate dictionary
+                        for pos in docstring_positions:
+                            proc_name, proc_type = self.definition_type(nextflow_lines[pos[-1]+1])
+                            doc_yaml = ""
+                            for i in pos:
+                                doc_yaml = doc_yaml + nextflow_lines[i].replace(self.DOC_STARTER, "")
+                            docstrings[proc_type][proc_name] = yaml.safe_load(doc_yaml)

-                self.state_machine.document.note_implicit_target(block_section)
-                return_nodes.append(block_section)
+        # Try to convert each definition to a node
+        for block_type, block_docs in docstrings.items():
+            block_section = nodes.section()
+            block_section += nodes.title(text=block_type)
+            for proc_name, proc_docs in block_docs.items():
+                proc_section = nodes.section()
+                proc_section += nodes.title(text=proc_name)
+                proc_section += nodes.paragraph(text=proc_docs["summary"])
+                io_methods = ["input", "output"]
+                for met in io_methods:
+                    if met in proc_docs.keys():
+                        io_section = nodes.section()
+                        io_section += nodes.title(text=met)
+                        io_list = nodes.bullet_list()
+                        for io in proc_docs[met]:
+                            io_list += self.params_to_list(io)
+                        io_section += io_list
+                        proc_section += io_section
+                        self.state_machine.document.note_implicit_target(io_section)
+                self.state_machine.document.note_implicit_target(proc_section)
+                block_section += proc_section

-            return return_nodes
+            self.state_machine.document.note_implicit_target(block_section)
+            return_nodes.append(block_section)

+        return return_nodes
+
 def setup(app):
     app.add_directive('nfdocs', NFDocs)
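For reference, the docstrings this code collects are contiguous comment blocks whose lines begin with DOC_STARTER; the prefix is stripped and the remainder is loaded as YAML with summary, input, and output keys, while the line immediately after the block is passed to definition_type() to name and classify the process, workflow, or function. The following standalone Python sketch (not part of the commit) shows that mapping; the "/// " prefix is an assumption for illustration, since DOC_STARTER's actual value is defined elsewhere in the plugin, and the exact shape of the input/output entries depends on params_to_list(), which is also outside this diff.

import yaml

DOC_STARTER = "/// "  # assumed prefix; the real value is defined elsewhere in the plugin

sample_lines = [
    "/// summary: Trim adapter sequences from raw reads\n",
    "/// input:\n",
    "///   - reads: Paired-end FASTQ files\n",
    "/// output:\n",
    "///   - trimmed: Adapter-trimmed FASTQ files\n",
    "process TRIM_READS {\n",
]

# Strip the prefix from the contiguous docstring block and load it as YAML,
# mirroring the "Parse out the docstrings" loop in run()
doc_yaml = "".join(
    line.replace(DOC_STARTER, "")
    for line in sample_lines
    if line.startswith(DOC_STARTER)
)
print(yaml.safe_load(doc_yaml))
# {'summary': 'Trim adapter sequences from raw reads',
#  'input': [{'reads': 'Paired-end FASTQ files'}],
#  'output': [{'trimmed': 'Adapter-trimmed FASTQ files'}]}

The loaded dictionary is what run() stores under docstrings[proc_type][proc_name] before converting each entry into docutils sections.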