Commit ce437432 authored by Pieter Donker

RTSD-79, add vhdl_style_fix script in tools dir

parent 2608ded8
#!/usr/bin/python3
import sys
# from collections import OrderedDict
from argparse import ArgumentParser, RawTextHelpFormatter
from textwrap import dedent
# from copy import copy
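# main(): read the file, run the enabled style checks over the lines in order and
# write the result back only when something changed.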
def main():
with open(args.filename, 'r') as fd:
data_in = fd.read()
sdata = data_in.splitlines()
rts = RemoveTrailingSpaces(sdata)
sdata = rts.get_data()
asao = AddSpacesAroundOperators(sdata)
sdata = asao.get_data()
# asad = AddSpacesAroundDelimiters(sdata)
# sdata = asad.get_data()
csl = CaseStandardLogic(sdata)
sdata = csl.get_data()
cc = CaseConstants(sdata)
sdata = cc.get_data()
ck = CaseKeywords(sdata)
sdata = ck.get_data()
ct = CaseTypes(sdata)
sdata = ct.get_data()
cc = CaseConversions(sdata)
sdata = cc.get_data()
cr = CaseResolutions(sdata)
sdata = cr.get_data()
ca = CaseAttributes(sdata)
sdata = ca.get_data()
rsbb = RemoveSpaceBeforeBracket(sdata)
sdata = rsbb.get_data()
# apa = AlignProcedureArgs(sdata)
# sdata = apa.get_data()
data = '\n'.join(sdata) + "\n"
if data_in != data:
if args.verbose:
print(f"fix {args.filename}")
with open(args.filename, 'w') as fd:
fd.write(data)
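# BaseCheck: common base for all checks; it holds the list of lines and provides
# splitline(), which drops the '--' comment part and tokenizes the code on self.splitchars.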
class BaseCheck:
def __init__(self, data):
self.splitchars = []
self.data = data
self.n_data = len(self.data)
def set_data(self, data):
self.data = data
self.n_data = len(self.data)
def get_data(self):
return self.data
def check(self):
pass
def splitline(self, line):
# remove comment
code = line.split("--")[0].strip()
words = []
word = ''
for ch in code:
if ch in self.splitchars:
if word:
words.append(word.strip())
word = ''
words.append(ch)
elif ch == '"':
word += ch
elif ch == "'":
word += ch
elif ch.isascii():
word += ch
else:
pass
if word:
words.append(word)
return words
class RemoveTrailingSpaces(BaseCheck):
def __init__(self, data):
super().__init__(data)
self.check()
def check(self):
for i in range(self.n_data):
self.data[i] = self.data[i].rstrip()
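# AddSpacesAroundOperators: put a space on both sides of operators such as ':', '=',
# '<', '>', '+', '-' and '&', while leaving strings, comments, signed literals and '**' alone.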
class AddSpacesAroundOperators(BaseCheck):
def __init__(self, data):
super().__init__(data)
self.tokens = Operators()
self.check()
def check(self):
for i in range(self.n_data):
if self.data[i].strip().startswith('--'):
continue
code_str = []
comment_str = []
operator = ''
isoperator = False
isstring = False
iscomment = False
for j in range(len(self.data[i])):
ch = self.data[i][j]
if iscomment:
comment_str.append(ch)
continue
if ch in ['"']:
if isoperator:
code_str.append(' ')
operator = ''
isoperator = False
isstring = not isstring
if isstring:
code_str.append(ch)
continue
if ch.isspace():
operator = ''
isoperator = False
elif ch.isidentifier() or ch in [")"]:
if isoperator:
code_str.append(' ')
isoperator = False
operator = ''
elif self.tokens.is_valid(ch):
if isoperator:
operator += ch
else:
isoperator = True
try:
if code_str[-1] not in [" ", "("]:
code_str.append(' ')
except IndexError:
pass
operator += ch
elif isoperator and (ch.isnumeric() or ch == "."):
of = False
for jj in range(j-2, -1, -1):
if self.data[i][jj] in [",", "(", "=", ":"]:
isoperator = False
break
if (self.data[i][jj].isidentifier()
or self.data[i][jj].isnumeric()):
of = True
break
for jj in range(j, len(self.data[i])):
if of or not isoperator:
break
if self.data[i][jj] in [",", ")", ";"]:
isoperator = False
break
if (self.data[i][jj].isspace()
or self.data[i][jj].isidentifier()):
break
if isoperator:
code_str.append(' ')
isoperator = False
else:
operator = ''
elif ch.isascii() and ch != ';':
if isoperator:
code_str.append(' ')
isoperator = False
operator = ''
code_str.append(ch)
if operator == "--":
comment_str = code_str[-3:]
code_str = code_str[:-3]
iscomment = True
operator = ''
code_line = ''.join(code_str).rstrip()
comment_line = ''.join(comment_str).strip()
for pattern in [" ** ", " **", "** "]:  # undo spacing added around the '**' operator
code_line = code_line.replace(pattern, "**")
self.data[i] = code_line
if comment_line:
self.data[i] += " " + comment_line
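# AddSpacesAroundDelimiters: same approach for the ':' delimiter; currently not enabled,
# the corresponding calls in main() are commented out.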
class AddSpacesAroundDelimiters(BaseCheck):
def __init__(self, data):
super().__init__(data)
self.tokens = Delimiters()
self.check()
def check(self):
for i in range(self.n_data):
if self.data[i].strip().startswith('--'):
continue
new_line = []
last_word = ''
isdelimiter = False
iscomment = False
for ch in self.data[i]:
if iscomment:
new_line.append(ch)
continue
if ch.isspace():
last_word = ' '
isdelimiter = False
elif ch.isidentifier():
if isdelimiter:
new_line.append(' ')
isdelimiter = False
last_word += ch
elif self.tokens.is_valid(ch):
if isdelimiter:
last_word += ch
else:
isdelimiter = True
if last_word != ' ':
new_line.append(' ')
last_word += ch
elif ch.isascii() and ch != ';':
if isdelimiter:
new_line.append(' ')
isdelimiter = False
last_word += ch
if last_word == "--":
iscomment = True
new_line.append(ch)
self.data[i] = ''.join(new_line)
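# The Case* checks below all follow the same pattern: tokenize each line with splitline()
# and lowercase the first occurrence of any word that matches their token list
# (reserved words, types, conversion functions, std_logic values, attributes, constants).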
class CaseKeywords(BaseCheck):
def __init__(self, data):
super().__init__(data)
self.splitchars = [" ", ".", ";", "(", ")"]
self.reserved_words = ReservedWords()
self.check()
def check(self):
for i in range(self.n_data):
line = self.data[i]
sline = self.splitline(line)
for word in sline:
if self.reserved_words.is_valid(word):
line = line.replace(word, word.lower(), 1)
self.data[i] = line
class CaseTypes(BaseCheck):
def __init__(self, data):
super().__init__(data)
self.splitchars = [" ", ".", ";", "(", ")"]
self.tokens = Types()
self.check()
def check(self):
for i in range(self.n_data):
line = self.data[i]
sline = self.splitline(line)
for word in sline:
if self.tokens.is_valid(word):
line = line.replace(word, word.lower(), 1)
self.data[i] = line
class CaseConversions(BaseCheck):
def __init__(self, data):
super().__init__(data)
self.splitchars = [" ", ".", ";", "(", ")"]
self.tokens = Conversions()
self.check()
def check(self):
for i in range(self.n_data):
line = self.data[i]
sline = self.splitline(line)
for word in sline:
if self.tokens.is_valid(word):
line = line.replace(word, word.lower(), 1)
self.data[i] = line
class CaseResolutions(BaseCheck):
def __init__(self, data):
super().__init__(data)
self.splitchars = [" ", ".", ";", "(", ")"]
self.tokens = Resolutions()
self.check()
def check(self):
for i in range(self.n_data):
line = self.data[i]
sline = self.splitline(line)
for word in sline:
if self.tokens.is_valid(word):
line = line.replace(word, word.lower(), 1)
self.data[i] = line
class CaseAttributes(BaseCheck):
def __init__(self, data):
super().__init__(data)
self.splitchars = [" ", ".", ";", "(", ")", "'"]
self.tokens = Attributes()
self.check()
def check(self):
for i in range(self.n_data):
line = self.data[i]
sline = self.splitline(line)
for j in range(len(sline)):
word = sline[j]
if self.tokens.is_valid(word):
if sline[j-1] == "'":
line = line.replace(word, word.lower(), 1)
self.data[i] = line
class CaseConstants(BaseCheck):
def __init__(self, data):
super().__init__(data)
self.splitchars = [" ", ".", ";", "(", ")", ","]
self.tokens = Constants()
self.check()
def check(self):
for i in range(self.n_data):
line = self.data[i]
sline = self.splitline(line)
for j in range(len(sline)):
word = sline[j]
if self.tokens.is_valid(word):
line = line.replace(word, word.lower(), 1)
self.data[i] = line
class CaseStandardLogic(BaseCheck):
def __init__(self, data):
super().__init__(data)
self.splitchars = [" ", ".", ";", "(", ")", ","]
self.tokens = StandardLogic()
self.check()
def check(self):
for i in range(self.n_data):
line = self.data[i]
sline = self.splitline(line)
for j in range(len(sline)):
word = sline[j]
if self.tokens.is_valid(word):
line = line.replace(word, word.lower(), 1)
self.data[i] = line
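# RemoveSpaceBeforeBracket: turn e.g. "std_logic_vector (7 downto 0)" into
# "std_logic_vector(7 downto 0)" for all known type names.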
class RemoveSpaceBeforeBracket(BaseCheck):
def __init__(self, data):
super().__init__(data)
self.types = Types()
self.check()
def check(self):
for i in range(self.n_data):
for t in self.types.tokens():
self.data[i] = self.data[i].replace(f"{t} (", f"{t}(")
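# AlignProcedureArgs: experimental column alignment of procedure argument lists;
# not enabled, the corresponding calls in main() are commented out.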
class AlignProcedureArgs(BaseCheck):
def __init__(self, data):
super().__init__(data)
self.check()
def check(self):
proc_span = [0, 0]
pos1 = 0
for i, _ in enumerate(self.data):
line = self.data[i]
if line.strip().startswith('procedure'):
if ":" in line:
proc_span[0] = i
pos1 = line.find('(') + 2
elif proc_span[0] > 0:
line = ' ' * pos1 + line.strip()
if proc_span[0] > 0 and (line.strip().endswith('is') or line.strip().endswith(');')):
proc_span[1] = i
self.data[i] = line
if proc_span[1] > proc_span[0]:
print(proc_span)
type_len = []
name_len = []
dir_len = []
for j in range(proc_span[0], proc_span[1]+1):
sline = self.data[j][pos1:].split()
if len(sline) >= 4:
type_len.append(len(sline[0]))
name_len.append(len(sline[1]))
dir_len.append(len(sline[3]))
else:
print(f"{j}: ERROR1: {self.data[j]} -> {sline}")
sys.stdout.flush()
type_len = max(type_len)
name_len = max(name_len)
dir_len = max(dir_len)
for j in range(proc_span[0], proc_span[1]+1):
line = self.data[j][:pos1]
sline = self.data[j][pos1:].split()
if len(sline) >= 5:
line += f"{sline[0]:{type_len}s} {sline[1]:{name_len}s} {sline[2]} {sline[3]:{dir_len}s} {' '.join(sline[4:])}"
else:
print(f"{j}: ERROR2: {self.data[j]} -> {sline}")
sys.stdout.flush()
self.data[j] = line
proc_span = [0, 0]
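# Token and its subclasses below are small case-insensitive word lists used by the checks
# above; for example Types().is_valid("STD_LOGIC") returns True.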
class Token:
"""
Base class for tokens
"""
def __init__(self, tokens):
self._tokens = tokens
def tokens(self):
return self._tokens
def is_valid(self, val):
if val.lower() in self._tokens:
return True
return False
class Types(Token):
def __init__(self):
super().__init__(["bit", "bit_vector", "integer", "natural", "positive", "boolean", "string",
"character", "real", "time", "delay_length",
"std_ulogic", "std_ulogic_vector", "std_logic", "std_logic_vector"])
class Conversions(Token):
def __init__(self):
super().__init__(["signed", "unsigned", "to_signed", "to_unsigned", "to_integer", "to_uint", "to_sint",
"to_ureal", "to_sreal", "to_uvec", "to_svec"])
class Resolutions(Token):
def __init__(self):
super().__init__(["'u'", "'x'", "'0'", "'1'", "'z'", "'w'", "'l'", "'h'", "'-'"])
class Constants(Token):
def __init__(self):
super().__init__(["true", "false"])
class StandardLogic(Token):
def __init__(self):
super().__init__(["ieee", "std_logic_1164", "numeric_std", "math_real", "std_logic_textio", "resize"])
class Attributes(Token):
def __init__(self):
super().__init__(["base", "left", "right", "high", "low", "ascending", "image", "value", "pos", "val", "succ",
"pred", "leftof", "rightof", "left", "left", "right", "right", "high", "high", "low", "low",
"range", "range", "reverse_range", "reverse_range", "length", "length", "ascending",
"ascending", "delayed", "stable", "stable", "quiet", "quiet", "transaction", "event",
"active", "last_event", "last_active", "last_value", "driving", "driving_value",
"simple_name", "instance_name", "path_name"])
class Operators(Token):
def __init__(self):
super().__init__(["*", "/", "+", "-", "&", "=", "<", ">", ":"])
class Delimiters(Token):
def __init__(self):
super().__init__([":"])
class ReservedWords(Token):
def __init__(self):
super().__init__(["abs", "access", "after", "alias", "all", "and", "architecture",
"array", "assert", "attribute", "begin", "block", "body", "buffer",
"bus", "case", "component", "configuration", "constant", "disconnect",
"downto", "else", "elsif", "end", "entity", "exit", "file", "for",
"function", "generate", "generic", "group", "guarded", "if", "impure",
"in", "inertial", "inout", "is", "label", "library", "linkage", "literal",
"loop", "map", "mod", "nand", "new", "next", "nor", "not", "null", "of",
"on", "open", "or", "others", "out", "package", "port", "postponed",
"procedure", "process", "pure", "range", "record", "register", "reject",
"rem", "report", "return", "rol", "ror", "select", "severity", "signal",
"shared", "sla", "sll", "sra", "srl", "subtype", "then", "to", "transport",
"type", "unaffected", "units", "until", "use", "variable", "wait", "when",
"while", "with", "xnor", "xor"])
if __name__ == "__main__":
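# example invocation (filename is illustrative): ./vhdl_style_fix.py my_entity.vhd --verbose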
# Parse command line arguments
parser = ArgumentParser(
description="".join(dedent("""\
vhdl style fixer:
* vhdl_style_fix.py filename -h
\n""")),
formatter_class=RawTextHelpFormatter)
parser.add_argument('filename', type=str, help="filename to fix")
parser.add_argument('-v', '--verbose', action='store_true', help="verbose output")
args = parser.parse_args()
main()
#!/usr/bin/python3
from os import getenv
from sys import exit, version_info
from time import sleep
from argparse import ArgumentParser, RawTextHelpFormatter
from textwrap import dedent
from subprocess import Popen, PIPE
from multiprocessing import Process, Queue
from queue import Empty
PYTHON_VERSION = version_info[0]
def run_cmd(cmd):
""" run_cmd()
run 'cmd' in a terminal and return the response
cmd: command to execute in the terminal
return: 'stdout', or 'Error, stderr' in case of an error
"""
proc = Popen(cmd, shell=True, stdout=PIPE, stderr=PIPE)
_stdout, _stderr = proc.communicate()
if _stderr:
stderr = _stderr.decode('utf-8') if PYTHON_VERSION == 3 else _stderr
print(stderr)
return 'Error, {}'.format(stderr)
stdout = _stdout.decode('utf-8') if PYTHON_VERSION == 3 else _stdout
return stdout
class Worker(Process):
""" Worker, used for file checking before giving it to vsg to fix.
generated files and files with inside message 'DO NOT EDIT' are skipped.
"""
def __init__(self, id, control, in_data, out_data, verbose=None):
Process.__init__(self)
self.id = id
self.control = control
self.in_data = in_data
self.out_data = out_data
self.verbose = False if verbose is None else verbose
self.stop = False
def is_generated(self, filename):
"""
Check whether 'generated' appears in the pathname.
If so, put a skip message on the output queue and return True, else return False.
"""
if "generated" in filename:
response = f"skip {filename}: (generated file)"
self.out_data.put(response)
return True
return False
def is_do_not_edit(self, filename):
"""
Check whether 'DO NOT EDIT' appears inside the file.
If so, put a skip message on the output queue and return True, else return False.
"""
with open(filename, 'r') as fd:
data = fd.read()
if "DO NOT EDIT" in data:
response = f"skip {filename}: (DO NOT EDIT)"
self.out_data.put(response)
return True
return False
def run_fix(self, filename):
"""
Use vhdl_style_fix.py to fix the file
"""
cmd = f"/home/donker/git/hdl/vhdl_style_fix.py {filename}"
if self.verbose:
cmd += " --verbose"
response = run_cmd(cmd)
self.out_data.put(response.strip())
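# run(): worker loop; watch the control queue for a 'stop' request, take the next filename
# from the input queue, skip generated and 'DO NOT EDIT' files, and fix the rest.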
def run(self):
while not self.stop:
try:
if not self.control.empty():
control = self.control.get()
if control == "stop":
self.stop = True
print(f"stop vsg worker {self.id}")
# get next vhd file to process
filename = self.in_data.get(block=False)
# skip the file if 'generated' is part of the full filename
if self.is_generated(filename):
continue
# skip the file if it contains a 'DO NOT EDIT' message
if self.is_do_not_edit(filename):
continue
# fix the vhd file with vhdl_style_fix.py
self.run_fix(filename)
except Empty:
sleep(0.001)
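# main(): find all *vhd files under the configured subdirectories of $HDL_WORK, hand them to
# a pool of Worker processes and collect the responses in vhdl_fix_all_response.txt.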
def main():
basedir = getenv("HDL_WORK")
print(f"basedir = {basedir}")
# dirs in basedir to check/fix
checkdirs = ["/libraries/base/", "/libraries/dsp/", "/libraries/io/",
"/libraries/technology/", "/boards/", "/applications/"]
# info for the workers, number of workers to use is given by cmdline arg --proc
n_workers = args.proc
workers = []
worker_control = [Queue() for i in range(n_workers)]
process_queue = Queue()
response_queue = Queue()
n_files = 0
n_files_done = 0
# start the workers
for id in range(n_workers):
if args.verbose:
print(f"start vsg worker {id}")
_p = Worker(id, worker_control[id], process_queue, response_queue)
workers.append(_p)
_p.start()
# put all vhd files in the process queue
for checkdir in checkdirs:
dirname = basedir + checkdir
if args.verbose:
print(f"dirname = {dirname}")
cmd = f'find {dirname} -name "*vhd"'
response = run_cmd(cmd)
for filename in response.splitlines():
process_queue.put(filename)
n_files += 1
if args.verbose:
print(f"put {n_files} in the worker queue")
fd = open("vhdl_fix_all_response.txt", 'w')
while True:
if response_queue.empty():
sleep(0.001)
else:
response = response_queue.get()
if response:
fd.write(response + '\n')
fd.flush()
if args.verbose:
print(response)
n_files_done += 1
if n_files == n_files_done:
if args.verbose:
print("All files processed, stop workers now")
for nr in range(n_workers):
worker_control[nr].put("stop")
sleep(0.5)
for worker in workers:
worker.terminate()
break
fd.close()
return 0
if __name__ == "__main__":
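# example invocation (script name assumed): ./vhdl_fix_all.py --proc 8 --verbose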
# Parse command line arguments
parser = ArgumentParser(
description="".join(dedent("""
run vhdl_style_fix.py on all hdl files found in the configured directories under $HDL_WORK:
with --proc the number of workers can be selected; output from this script
is put in 'vhdl_fix_all_response.txt' and also sent to stdout if --verbose is used.
\n""")),
formatter_class=RawTextHelpFormatter)
parser.add_argument('-p', '--proc', type=int, default=4, help="number of processes to use")
parser.add_argument('-v', '--verbose', action='store_true', help="verbose output")
args = parser.parse_args()
exit_code = main()
exit(exit_code)
#!/usr/bin/python3
from collections import OrderedDict
from argparse import ArgumentParser, RawTextHelpFormatter
from textwrap import dedent
from copy import copy
def main():
with open(args.filename, 'r') as fd:
old_data = fd.read()
parse = ParseData(old_data)
parse.parse()
new_data = parse.get_data()
if old_data != new_data:
print(f"fix {args.filename}")
with open(args.filename, 'w') as fd:
fd.write(new_data)
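# ParseData: character-by-character parser; it collects words and operators, lowercases
# anything that matches the type / operator / reserved-word lists and copies comments
# through unchanged.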
class ParseData:
def __init__(self, data):
self.types = Types()
self.operators = Operators()
self.reserved_words = ReservedWords()
self._comment = False
self._block_begin = ['--'] #, 'entity', 'generic', 'port', 'architecture']
self._block_end = [');', 'end']
self._data = data
self._new_data = []
self._last_ch = ''
self._word = ''
self._operator = ''
self._is_block_word = False
self._blocks = []
self._last_block = 'empty'
self._indent = 0
def get_data(self):
return ''.join(self._new_data)
# handle _word
def add_to_word(self, ch):
self._word += ch
self._is_block_word = self._word in self._block_begin
def get_word(self):
return self._word
def clear_word(self):
self._word = ''
self._is_block_word = False
def is_block_word(self):
return self._is_block_word
# handle _operator
def add_to_operator(self, ch):
self._operator += ch
def get_operator(self):
return self._operator
def clear_operator(self):
self._operator = ''
# handle _blocks info
def add_block(self, block):
self._blocks.append(block)
self._last_block = block
def delete_last_block(self):
self._blocks.pop()
try:
self._last_block = self._blocks[-1]
except IndexError:
self._last_block = 'empty'
def last_block(self):
return self._last_block
# handle new_data
def add_new_data(self, word):
for ch in word:
self._new_data.append(ch)
# parse data
def parse(self):
for ch in self._data:
# '--' is comment
if self._comment is True:
if ch == "\n":
self._comment = False
self.add_new_data(ch)
continue
# if ch == "\n":
# self.add_new_data("\n")
# continue
if self.get_operator() and (ch.isidentifier() or ch.isspace()):
self.add_new_data(self.get_operator())
self.clear_operator()
if ch.isidentifier():
self.add_to_word(ch)
continue
if self.get_word():
if self.types.is_valid(self.get_word()): # check if in types
self.add_new_data(self.get_word().lower())
elif self.operators.is_valid(self.get_word()): # check if in operators
self.add_new_data(self.get_word().lower())
elif self.reserved_words.is_valid(self.get_word()): # check if in reserved words
self.add_new_data(self.get_word().lower())
else:
self.add_new_data(self.get_word())
self.clear_word()
if self.operators.is_valid(ch):
self.add_to_operator(ch)
if self.get_operator() == '--':
self.add_new_data(self.get_operator())
self.clear_operator()
self._comment = True
continue
if self.get_operator():
self.add_new_data(self.get_operator())
self.clear_operator()
self.add_new_data(ch)
if self.is_block_word():
self.add_block(self.get_word())
continue
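# Entity: not used yet; the docstring below sketches the intended entity/generic/port layout.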
class Entity:
"""
entity [name] is
generic (
[g_*] : [type] := [val]; -- info
[g_*] : [type] := [val] -- info
);
port (
[port] : [dir] [type] := [val];
[port] : [dir] [type] := [val]
);
end [name];
"""
def __init__(self):
pass
class Types:
def __init__(self):
self.types = ["bit", "bit_vector", "integer", "natural", "positive", "boolean", "string",
"character", "real", "time", "delay_length",
"std_ulogic", "std_ulogic_vector", "std_logic", "std_logic_vector"]
def is_valid(self, val):
if val.lower() in self.types:
return True
return False
class Operators:
def __init__(self):
self.operators = ["**", "abs", "not", "*", "/", "mod", "rem", "+", "-", "+", "-", "&",
"sll", "srl", "sla", "sra", "rol", "ror", "=", "/=", "<", "<=", ">",
">=", "and", "or", "nand", "nor", "xor", "xnor"]
def is_valid(self, val):
if val.lower() in self.operators:
return True
return False
class ReservedWords:
def __init__(self):
self.reserved_words = ["abs", "access", "after", "alias", "all", "and", "architecture",
"array", "assert", "attribute", "begin", "block", "body", "buffer",
"bus", "case", "component", "configuration", "constant", "disconnect",
"downto", "else", "elsif", "end", "entity", "exit", "file", "for",
"function", "generate", "generic", "group", "guarded", "if", "impure",
"in", "inertial", "inout", "is", "label", "library", "linkage", "literal",
"loop", "map", "mod", "nand", "new", "next", "nor", "not", "null", "of",
"on", "open", "or", "others", "out", "package", "port", "postponed",
"procedure", "process", "pure", "range", "record", "register", "reject",
"rem", "report", "return", "rol", "ror", "select", "severity", "signal",
"shared", "sla", "sll", "sra", "srl", "subtype", "then", "to", "transport",
"type", "unaffected", "units", "until", "use", "variable", "wait", "when",
"while", "with", "xnor", "xor"]
def is_valid(self, val):
if val.lower() in self.reserved_words:
return True
return False
if __name__ == "__main__":
# Parse command line arguments
parser = ArgumentParser(
description="".join(dedent("""\
vhdl style fixer:
* vhdl_style_fix.py filename -h
\n""")),
formatter_class=RawTextHelpFormatter)
parser.add_argument('filename', type=str, help="filename to fix")
parser.add_argument('-v', '--verbose', action='store_true', help="verbose output")
args = parser.parse_args()
main()