diff --git a/.cmake-format.py b/.cmake-format.py
index 94457f5b233fd324995fef4e7a2a39e6bef5ec42..06299106236de92c7574d3784ed289687c283e66 100644
--- a/.cmake-format.py
+++ b/.cmake-format.py
@@ -3,238 +3,242 @@
 # ----------------------------------
 with section("parse"):
 
-  # Specify structure for custom cmake functions
-  additional_commands = { 'foo': { 'flags': ['BAR', 'BAZ'],
-             'kwargs': {'DEPENDS': '*', 'HEADERS': '*', 'SOURCES': '*'}}}
+    # Specify structure for custom cmake functions
+    additional_commands = {
+        "foo": {
+            "flags": ["BAR", "BAZ"],
+            "kwargs": {"DEPENDS": "*", "HEADERS": "*", "SOURCES": "*"},
+        }
+    }
 
-  # Override configurations per-command where available
-  override_spec = {}
+    # Override configurations per-command where available
+    override_spec = {}
 
-  # Specify variable tags.
-  vartags = []
+    # Specify variable tags.
+    vartags = []
 
-  # Specify property tags.
-  proptags = []
+    # Specify property tags.
+    proptags = []
 
 # -----------------------------
 # Options affecting formatting.
 # -----------------------------
 with section("format"):
 
-  # Disable formatting entirely, making cmake-format a no-op
-  disable = False
+    # Disable formatting entirely, making cmake-format a no-op
+    disable = False
 
-  # How wide to allow formatted cmake files
-  line_width = 80
+    # How wide to allow formatted cmake files
+    line_width = 80
 
-  # How many spaces to tab for indent
-  tab_size = 2
+    # How many spaces to tab for indent
+    tab_size = 2
 
-  # If true, lines are indented using tab characters (utf-8 0x09) instead of
-  # <tab_size> space characters (utf-8 0x20). In cases where the layout would
-  # require a fractional tab character, the behavior of the  fractional
-  # indentation is governed by <fractional_tab_policy>
-  use_tabchars = False
+    # If true, lines are indented using tab characters (utf-8 0x09) instead of
+    # <tab_size> space characters (utf-8 0x20). In cases where the layout would
+    # require a fractional tab character, the behavior of the fractional
+    # indentation is governed by <fractional_tab_policy>
+    use_tabchars = False
 
-  # If <use_tabchars> is True, then the value of this variable indicates how
-  # fractional indentions are handled during whitespace replacement. If set to
-  # 'use-space', fractional indentation is left as spaces (utf-8 0x20). If set
-  # to `round-up` fractional indentation is replaced with a single tab character
-  # (utf-8 0x09) effectively shifting the column to the next tabstop
-  fractional_tab_policy = 'use-space'
+    # If <use_tabchars> is True, then the value of this variable indicates how
+    # fractional indentations are handled during whitespace replacement. If set
+    # to 'use-space', fractional indentation is left as spaces (utf-8 0x20). If
+    # set to `round-up`, fractional indentation is replaced with a single tab
+    # character (utf-8 0x09), effectively shifting the column to the next tabstop
+    fractional_tab_policy = "use-space"
 
-  # If an argument group contains more than this many sub-groups (parg or kwarg
-  # groups) then force it to a vertical layout.
-  max_subgroups_hwrap = 2
+    # If an argument group contains more than this many sub-groups (parg or kwarg
+    # groups) then force it to a vertical layout.
+    max_subgroups_hwrap = 2
 
-  # If a positional argument group contains more than this many arguments, then
-  # force it to a vertical layout.
-  max_pargs_hwrap = 6
+    # If a positional argument group contains more than this many arguments, then
+    # force it to a vertical layout.
+    max_pargs_hwrap = 6
 
-  # If a cmdline positional group consumes more than this many lines without
-  # nesting, then invalidate the layout (and nest)
-  max_rows_cmdline = 2
+    # If a cmdline positional group consumes more than this many lines without
+    # nesting, then invalidate the layout (and nest)
+    max_rows_cmdline = 2
 
-  # If true, separate flow control names from their parentheses with a space
-  separate_ctrl_name_with_space = False
+    # If true, separate flow control names from their parentheses with a space
+    separate_ctrl_name_with_space = False
 
-  # If true, separate function names from parentheses with a space
-  separate_fn_name_with_space = False
+    # If true, separate function names from parentheses with a space
+    separate_fn_name_with_space = False
 
-  # If a statement is wrapped to more than one line, than dangle the closing
-  # parenthesis on its own line.
-  dangle_parens = False
+    # If a statement is wrapped to more than one line, then dangle the closing
+    # parenthesis on its own line.
+    dangle_parens = False
 
-  # If the trailing parenthesis must be 'dangled' on its on line, then align it
-  # to this reference: `prefix`: the start of the statement,  `prefix-indent`:
-  # the start of the statement, plus one indentation  level, `child`: align to
-  # the column of the arguments
-  dangle_align = 'prefix'
+    # If the trailing parenthesis must be 'dangled' on its own line, then align
+    # it to this reference: `prefix`: the start of the statement, `prefix-indent`:
+    # the start of the statement plus one indentation level, `child`: align to
+    # the column of the arguments
+    dangle_align = "prefix"
 
-  # If the statement spelling length (including space and parenthesis) is
-  # smaller than this amount, then force reject nested layouts.
-  min_prefix_chars = 4
+    # If the statement spelling length (including space and parenthesis) is
+    # smaller than this amount, then force reject nested layouts.
+    min_prefix_chars = 4
 
-  # If the statement spelling length (including space and parenthesis) is larger
-  # than the tab width by more than this amount, then force reject un-nested
-  # layouts.
-  max_prefix_chars = 10
+    # If the statement spelling length (including space and parenthesis) is larger
+    # than the tab width by more than this amount, then force reject un-nested
+    # layouts.
+    max_prefix_chars = 10
 
-  # If a candidate layout is wrapped horizontally but it exceeds this many
-  # lines, then reject the layout.
-  max_lines_hwrap = 2
+    # If a candidate layout is wrapped horizontally but it exceeds this many
+    # lines, then reject the layout.
+    max_lines_hwrap = 2
 
-  # What style line endings to use in the output.
-  line_ending = 'unix'
+    # What style line endings to use in the output.
+    line_ending = "unix"
 
-  # Format command names consistently as 'lower' or 'upper' case
-  command_case = 'canonical'
+    # Format command names consistently as 'lower' or 'upper' case
+    command_case = "canonical"
 
-  # Format keywords consistently as 'lower' or 'upper' case
-  keyword_case = 'unchanged'
+    # Format keywords consistently as 'lower' or 'upper' case
+    keyword_case = "unchanged"
 
-  # A list of command names which should always be wrapped
-  always_wrap = []
+    # A list of command names which should always be wrapped
+    always_wrap = []
 
-  # If true, the argument lists which are known to be sortable will be sorted
-  # lexicographicall
-  enable_sort = True
+    # If true, the argument lists which are known to be sortable will be sorted
+    # lexicographically
+    enable_sort = True
 
-  # If true, the parsers may infer whether or not an argument list is sortable
-  # (without annotation).
-  autosort = False
+    # If true, the parsers may infer whether or not an argument list is sortable
+    # (without annotation).
+    autosort = False
 
-  # By default, if cmake-format cannot successfully fit everything into the
-  # desired linewidth it will apply the last, most agressive attempt that it
-  # made. If this flag is True, however, cmake-format will print error, exit
-  # with non-zero status code, and write-out nothing
-  require_valid_layout = False
+    # By default, if cmake-format cannot successfully fit everything into the
+    # desired linewidth, it will apply the last, most aggressive attempt that
+    # it made. If this flag is True, however, cmake-format will print an error,
+    # exit with a non-zero status code, and write out nothing
+    require_valid_layout = False
 
-  # A dictionary mapping layout nodes to a list of wrap decisions. See the
-  # documentation for more information.
-  layout_passes = {}
+    # A dictionary mapping layout nodes to a list of wrap decisions. See the
+    # documentation for more information.
+    layout_passes = {}
 
 # ------------------------------------------------
 # Options affecting comment reflow and formatting.
 # ------------------------------------------------
 with section("markup"):
 
-  # What character to use for bulleted lists
-  bullet_char = '*'
+    # What character to use for bulleted lists
+    bullet_char = "*"
 
-  # What character to use as punctuation after numerals in an enumerated list
-  enum_char = '.'
+    # What character to use as punctuation after numerals in an enumerated list
+    enum_char = "."
 
-  # If comment markup is enabled, don't reflow the first comment block in each
-  # listfile. Use this to preserve formatting of your copyright/license
-  # statements.
-  first_comment_is_literal = False
+    # If comment markup is enabled, don't reflow the first comment block in each
+    # listfile. Use this to preserve formatting of your copyright/license
+    # statements.
+    first_comment_is_literal = False
 
-  # If comment markup is enabled, don't reflow any comment block which matches
-  # this (regex) pattern. Default is `None` (disabled).
-  literal_comment_pattern = None
+    # If comment markup is enabled, don't reflow any comment block which matches
+    # this (regex) pattern. Default is `None` (disabled).
+    literal_comment_pattern = None
 
-  # Regular expression to match preformat fences in comments default=
-  # ``r'^\s*([`~]{3}[`~]*)(.*)$'``
-  fence_pattern = '^\\s*([`~]{3}[`~]*)(.*)$'
+    # Regular expression to match preformat fences in comments default=
+    # ``r'^\s*([`~]{3}[`~]*)(.*)$'``
+    fence_pattern = "^\\s*([`~]{3}[`~]*)(.*)$"
 
-  # Regular expression to match rulers in comments default=
-  # ``r'^\s*[^\w\s]{3}.*[^\w\s]{3}$'``
-  ruler_pattern = '^\\s*[^\\w\\s]{3}.*[^\\w\\s]{3}$'
+    # Regular expression to match rulers in comments default=
+    # ``r'^\s*[^\w\s]{3}.*[^\w\s]{3}$'``
+    ruler_pattern = "^\\s*[^\\w\\s]{3}.*[^\\w\\s]{3}$"
 
-  # If a comment line matches starts with this pattern then it is explicitly a
-  # trailing comment for the preceeding argument. Default is '#<'
-  explicit_trailing_pattern = '#<'
+    # If a comment line starts with this pattern, then it is explicitly a
+    # trailing comment for the preceding argument. Default is '#<'
+    explicit_trailing_pattern = "#<"
 
-  # If a comment line starts with at least this many consecutive hash
-  # characters, then don't lstrip() them off. This allows for lazy hash rulers
-  # where the first hash char is not separated by space
-  hashruler_min_length = 10
+    # If a comment line starts with at least this many consecutive hash
+    # characters, then don't lstrip() them off. This allows for lazy hash rulers
+    # where the first hash char is not separated by a space
+    hashruler_min_length = 10
 
-  # If true, then insert a space between the first hash char and remaining hash
-  # chars in a hash ruler, and normalize its length to fill the column
-  canonicalize_hashrulers = True
+    # If true, then insert a space between the first hash char and remaining hash
+    # chars in a hash ruler, and normalize its length to fill the column
+    canonicalize_hashrulers = True
 
-  # enable comment markup parsing and reflow
-  enable_markup = False
+    # enable comment markup parsing and reflow
+    enable_markup = False
 
 # ----------------------------
 # Options affecting the linter
 # ----------------------------
 with section("lint"):
 
-  # a list of lint codes to disable
-  disabled_codes = []
+    # a list of lint codes to disable
+    disabled_codes = []
 
-  # regular expression pattern describing valid function names
-  function_pattern = '[0-9a-z_]+'
+    # regular expression pattern describing valid function names
+    function_pattern = "[0-9a-z_]+"
 
-  # regular expression pattern describing valid macro names
-  macro_pattern = '[0-9A-Z_]+'
+    # regular expression pattern describing valid macro names
+    macro_pattern = "[0-9A-Z_]+"
 
-  # regular expression pattern describing valid names for variables with global
-  # (cache) scope
-  global_var_pattern = '[A-Z][0-9A-Z_]+'
+    # regular expression pattern describing valid names for variables with global
+    # (cache) scope
+    global_var_pattern = "[A-Z][0-9A-Z_]+"
 
-  # regular expression pattern describing valid names for variables with global
-  # scope (but internal semantic)
-  internal_var_pattern = '_[A-Z][0-9A-Z_]+'
+    # regular expression pattern describing valid names for variables with global
+    # scope (but internal semantics)
+    internal_var_pattern = "_[A-Z][0-9A-Z_]+"
 
-  # regular expression pattern describing valid names for variables with local
-  # scope
-  local_var_pattern = '[a-z][a-z0-9_]+'
+    # regular expression pattern describing valid names for variables with local
+    # scope
+    local_var_pattern = "[a-z][a-z0-9_]+"
 
-  # regular expression pattern describing valid names for privatedirectory
-  # variables
-  private_var_pattern = '_[0-9a-z_]+'
+    # regular expression pattern describing valid names for private directory
+    # variables
+    private_var_pattern = "_[0-9a-z_]+"
 
-  # regular expression pattern describing valid names for public directory
-  # variables
-  public_var_pattern = '[A-Z][0-9A-Z_]+'
+    # regular expression pattern describing valid names for public directory
+    # variables
+    public_var_pattern = "[A-Z][0-9A-Z_]+"
 
-  # regular expression pattern describing valid names for function/macro
-  # arguments and loop variables.
-  argument_var_pattern = '[a-z][a-z0-9_]+'
+    # regular expression pattern describing valid names for function/macro
+    # arguments and loop variables.
+    argument_var_pattern = "[a-z][a-z0-9_]+"
 
-  # regular expression pattern describing valid names for keywords used in
-  # functions or macros
-  keyword_pattern = '[A-Z][0-9A-Z_]+'
+    # regular expression pattern describing valid names for keywords used in
+    # functions or macros
+    keyword_pattern = "[A-Z][0-9A-Z_]+"
 
-  # In the heuristic for C0201, how many conditionals to match within a loop in
-  # before considering the loop a parser.
-  max_conditionals_custom_parser = 2
+    # In the heuristic for C0201, how many conditionals to match within a loop
+    # before considering the loop a parser.
+    max_conditionals_custom_parser = 2
 
-  # Require at least this many newlines between statements
-  min_statement_spacing = 1
+    # Require at least this many newlines between statements
+    min_statement_spacing = 1
 
-  # Require no more than this many newlines between statements
-  max_statement_spacing = 2
-  max_returns = 6
-  max_branches = 12
-  max_arguments = 5
-  max_localvars = 15
-  max_statements = 50
+    # Require no more than this many newlines between statements
+    max_statement_spacing = 2
+    max_returns = 6
+    max_branches = 12
+    max_arguments = 5
+    max_localvars = 15
+    max_statements = 50
 
 # -------------------------------
 # Options affecting file encoding
 # -------------------------------
 with section("encode"):
 
-  # If true, emit the unicode byte-order mark (BOM) at the start of the file
-  emit_byteorder_mark = False
+    # If true, emit the unicode byte-order mark (BOM) at the start of the file
+    emit_byteorder_mark = False
 
-  # Specify the encoding of the input file. Defaults to utf-8
-  input_encoding = 'utf-8'
+    # Specify the encoding of the input file. Defaults to utf-8
+    input_encoding = "utf-8"
 
-  # Specify the encoding of the output file. Defaults to utf-8. Note that cmake
-  # only claims to support utf-8 so be careful when using anything else
-  output_encoding = 'utf-8'
+    # Specify the encoding of the output file. Defaults to utf-8. Note that cmake
+    # only claims to support utf-8, so be careful when using anything else
+    output_encoding = "utf-8"
 
 # -------------------------------------
 # Miscellaneous configurations options.
 # -------------------------------------
 with section("misc"):
 
-  # A dictionary containing any per-command configuration overrides. Currently
-  # only `command_case` is supported.
-  per_command = {}
+    # A dictionary containing any per-command configuration overrides. Currently
+    # only `command_case` is supported.
+    per_command = {}
diff --git a/base/__init__.py b/base/__init__.py
index f4c23df5e9bf0af997bcfb288b8be0b9b5eadd44..939317c67c538fe8a3a85788567d09146553ab44 100644
--- a/base/__init__.py
+++ b/base/__init__.py
@@ -12,7 +12,8 @@ import pyrap.tables as pt
 import numpy
 import pylab
 
-def plotflags (tabnames):
+
+def plotflags(tabnames):
     """Plot NDPPP Count results
 
     A flagging or count step in NDPPP can save the flagging percentages per
@@ -27,15 +28,17 @@ def plotflags (tabnames):
 
     """
     t = pt.table(tabnames)
-    if 'Frequency' in t.colnames():
-        t1 = t.sort ('Frequency')
-        pylab.plot (t1.getcol('Frequency'), t1.getcol('Percentage'))
-    elif 'Station' in t.colnames():
+    if "Frequency" in t.colnames():
+        t1 = t.sort("Frequency")
+        pylab.plot(t1.getcol("Frequency"), t1.getcol("Percentage"))
+    elif "Station" in t.colnames():
         percs = []
         names = []
-        for t1 in t.iter ('Station'):
-            percs.append (t1.getcol('Percentage').mean())
-            names.append (t1.getcell('Name', 0))
-        pylab.plot (numpy.array(percs), '+')
+        for t1 in t.iter("Station"):
+            percs.append(t1.getcol("Percentage").mean())
+            names.append(t1.getcell("Name", 0))
+        pylab.plot(numpy.array(percs), "+")
     else:
-        raise RuntimeError('Table appears not to be a NDPPP Count result; it does not contain a Frequency or Station column')
+        raise RuntimeError(
+            "Table appears not to be a NDPPP Count result; it does not contain a Frequency or Station column"
+        )
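
For reference, a minimal usage sketch of plotflags() as reformatted above. The
import path "base" and the table name "flagcounts.tab" are assumptions for
illustration; the function itself accepts any table written by an NDPPP count
or flagging step, as its docstring describes:

    import pylab
    from base import plotflags  # assumed import path for this package

    # "flagcounts.tab" is a hypothetical table saved by an NDPPP count step;
    # plotflags() plots percentage versus frequency or station.
    plotflags("flagcounts.tab")
    pylab.show()
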
diff --git a/ci/.produce-ci-metrics.py b/ci/.produce-ci-metrics.py
index 2039a52a69ca90c4ea6699ce4ca1a565d46bc281..cd8b68b422fd26428ebee82be02e709d173ddc38 100644
--- a/ci/.produce-ci-metrics.py
+++ b/ci/.produce-ci-metrics.py
@@ -11,79 +11,74 @@ import xml.dom.minidom
 if sys.version_info[0] < 3:
     b2s = lambda s: s
 else:
-    b2s = lambda s: s.decode('utf-8')
+    b2s = lambda s: s.decode("utf-8")
+
 
 def _has_subelement(element, subelement_name):
     return len(element.getElementsByTagName(subelement_name)) > 0
 
+
 def _has_error(test_case):
-    return _has_subelement(test_case, 'error')
+    return _has_subelement(test_case, "error")
+
 
 def _has_failure(test_case):
-    return _has_subelement(test_case, 'failure')
+    return _has_subelement(test_case, "failure")
+
 
 def get_git_sha():
-    if 'CI_COMMIT_SHA' in os.environ:
-        return os.environ['CI_COMMIT_SHA']
-    return b2s(subprocess.check_output(['git', 'rev-parse', 'HEAD']).strip())
+    if "CI_COMMIT_SHA" in os.environ:
+        return os.environ["CI_COMMIT_SHA"]
+    return b2s(subprocess.check_output(["git", "rev-parse", "HEAD"]).strip())
+
 
 def get_coverage_metrics(cov_xml_file):
     cov_dom = xml.dom.minidom.parse(cov_xml_file)
-    coverage = cov_dom.getElementsByTagName('coverage')[0]
-    coverage_line_rate = float(coverage.attributes['line-rate'].value)
-    return {
-        'percentage': coverage_line_rate * 100
-    }
+    coverage = cov_dom.getElementsByTagName("coverage")[0]
+    coverage_line_rate = float(coverage.attributes["line-rate"].value)
+    return {"percentage": coverage_line_rate * 100}
+
 
 def get_tests_metrics(utests_xml_file):
     utests_xml = xml.dom.minidom.parse(utests_xml_file)
-    test_cases = utests_xml.getElementsByTagName('testcase')
+    test_cases = utests_xml.getElementsByTagName("testcase")
     errors = len(list(filter(_has_error, test_cases)))
     failures = len(list(filter(_has_failure, test_cases)))
-    return {
-        'errors': errors,
-        'failed': failures,
-        'total': len(test_cases)
-    }
+    return {"errors": errors, "failed": failures, "total": len(test_cases)}
+
 
 def get_lint_metrics(lint_xml_file):
-    return {
-        'errors': 0,
-        'failures': 0,
-        'tests': 0
-    }
+    return {"errors": 0, "failures": 0, "tests": 0}
+
 
 def get_build_status(ci_metrics):
     now = time.time()
-    test_metrics = ci_metrics['tests']
-    if test_metrics['errors'] > 0 or test_metrics['failed'] > 0:
-        last_build_status = 'failed'
+    test_metrics = ci_metrics["tests"]
+    if test_metrics["errors"] > 0 or test_metrics["failed"] > 0:
+        last_build_status = "failed"
     else:
-        last_build_status = 'passed'
+        last_build_status = "passed"
 
     return {
-        'last': {
-            'status': last_build_status,
-            'timestamp': now
-        },
-        'green': {
-            'timestamp': now
-        }
+        "last": {"status": last_build_status, "timestamp": now},
+        "green": {"timestamp": now},
     }
 
+
 def produce_ci_metrics(build_dir):
-    cov_xml_file = os.path.join(build_dir, 'code-coverage.xml')
-    utests_xml_file = os.path.join(build_dir, 'unit-tests.xml')
-    lint_xml_file = os.path.join(build_dir, 'linting.xml')
+    cov_xml_file = os.path.join(build_dir, "code-coverage.xml")
+    utests_xml_file = os.path.join(build_dir, "unit-tests.xml")
+    lint_xml_file = os.path.join(build_dir, "linting.xml")
 
     ci_metrics = {
-        'commit_sha': get_git_sha(),
-        'coverage': get_coverage_metrics(cov_xml_file),
-        'tests': get_tests_metrics(utests_xml_file),
-        'lint': get_lint_metrics(lint_xml_file)
+        "commit_sha": get_git_sha(),
+        "coverage": get_coverage_metrics(cov_xml_file),
+        "tests": get_tests_metrics(utests_xml_file),
+        "lint": get_lint_metrics(lint_xml_file),
     }
-    ci_metrics['build-status'] = get_build_status(ci_metrics)
+    ci_metrics["build-status"] = get_build_status(ci_metrics)
     print(json.dumps(ci_metrics, indent=2))
 
-if __name__ == '__main__':
-    produce_ci_metrics(sys.argv[1])
\ No newline at end of file
+
+if __name__ == "__main__":
+    produce_ci_metrics(sys.argv[1])
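
For reference, the JSON document that produce_ci_metrics() prints has the shape
below. The keys mirror the dict built in the code above; the values here are
purely illustrative, not real output:

    # Illustrative structure only (values are placeholders):
    ci_metrics = {
        "commit_sha": "<git sha>",
        "coverage": {"percentage": 87.5},
        "tests": {"errors": 0, "failed": 1, "total": 42},
        "lint": {"errors": 0, "failures": 0, "tests": 0},
        "build-status": {
            "last": {"status": "failed", "timestamp": 1700000000.0},
            "green": {"timestamp": 1700000000.0},
        },
    }
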
diff --git a/ci/collect_metrics.py b/ci/collect_metrics.py
index ce6c27b63eaac076f4122e63c1507ac014bb92af..b584917974256a1436df9d6e747707f581177c3b 100644
--- a/ci/collect_metrics.py
+++ b/ci/collect_metrics.py
@@ -87,8 +87,8 @@ def count_junit_metrics(filename):
     log = logging.getLogger(LOGGER_NAME)
     try:
         root_elem = etree.parse(filename).getroot()
-        if root_elem.tag not in ['testsuites', 'testsuite']:
-            raise ValueError('Invalid JUnit XML file.')
+        if root_elem.tag not in ["testsuites", "testsuite"]:
+            raise ValueError("Invalid JUnit XML file.")
         stats = parse_junit_tree(root_elem)
         result = dict(errors=0, failures=0, tests=0, skipped=0)
         for key in result:
@@ -99,12 +99,16 @@ def count_junit_metrics(filename):
                     stats["testcase"][key],
                 )
             else:
-                result[key] = max(stats["testsuite"][key],
-                                  stats["testcase"][key])
+                result[key] = max(
+                    stats["testsuite"][key], stats["testcase"][key]
+                )
         result["total"] = result["tests"]
         del result["tests"]
     except Exception as expt:
-        log.exception("Exception caught parsing '%s', returning 0 since the CI does not allow any linting errors/warnings", filename)
+        log.exception(
+            "Exception caught parsing '%s', returning 0 since the CI does not allow any linting errors/warnings",
+            filename,
+        )
         result = dict(errors=0, failures=0, total=0, skipped=0)
 
     return result
@@ -174,8 +178,9 @@ def main():
                 # latest_pipeline_id = str(pipeline["id"])
                 latest_build_date = pipeline["created_at"]
                 latest_build_timestamp = datetime.timestamp(
-                    datetime.strptime(latest_build_date,
-                                      "%Y-%m-%dT%H:%M:%S.%fZ")
+                    datetime.strptime(
+                        latest_build_date, "%Y-%m-%dT%H:%M:%S.%fZ"
+                    )
                 )
                 break
     except Exception as err:
@@ -216,4 +221,4 @@ def init_logging(name="", level=logging.DEBUG):
 
 if __name__ == "__main__":
     init_logging()
-    main()
\ No newline at end of file
+    main()
diff --git a/ci/create_badges.py b/ci/create_badges.py
index d3953087a66e18058d42d1488cd0b40f14b1ea29..f4912e1520534ec8710a389164dc5bf26876bfc5 100644
--- a/ci/create_badges.py
+++ b/ci/create_badges.py
@@ -37,7 +37,11 @@ def main():  # pylint: disable=too-many-branches,too-many-statements
 
     # Create badge
     badge = anybadge.Badge(
-        label=label, value=value, default_color=color, value_prefix=" ", value_suffix=" "
+        label=label,
+        value=value,
+        default_color=color,
+        value_prefix=" ",
+        value_suffix=" ",
     )
 
     # Write badge
@@ -65,7 +69,11 @@ def main():  # pylint: disable=too-many-branches,too-many-statements
 
     # Create badge
     badge = anybadge.Badge(
-        label=label, value=value, default_color=color, value_prefix=" ", value_suffix=" "
+        label=label,
+        value=value,
+        default_color=color,
+        value_prefix=" ",
+        value_suffix=" ",
     )
 
     # Write badge
@@ -91,7 +99,11 @@ def main():  # pylint: disable=too-many-branches,too-many-statements
 
     # Create badge
     badge = anybadge.Badge(
-        label=label, value=value, default_color=color, value_prefix=" ", value_suffix=" "
+        label=label,
+        value=value,
+        default_color=color,
+        value_prefix=" ",
+        value_suffix=" ",
     )
 
     # Write badge
@@ -117,7 +129,11 @@ def main():  # pylint: disable=too-many-branches,too-many-statements
 
     # Create badge
     badge = anybadge.Badge(
-        label=label, value=value, default_color=color, value_prefix=" ", value_suffix=" "
+        label=label,
+        value=value,
+        default_color=color,
+        value_prefix=" ",
+        value_suffix=" ",
     )
 
     # Write badge
@@ -141,7 +157,11 @@ def main():  # pylint: disable=too-many-branches,too-many-statements
 
     # Create badge
     badge = anybadge.Badge(
-        label=label, value=value, default_color=color, value_prefix=" ", value_suffix=" "
+        label=label,
+        value=value,
+        default_color=color,
+        value_prefix=" ",
+        value_suffix=" ",
     )
 
     # Write badge
@@ -214,7 +234,11 @@ def main():  # pylint: disable=too-many-branches,too-many-statements
 
     # Create badge
     badge = anybadge.Badge(
-        label=label, value=value, default_color=color, value_prefix=" ", value_suffix=" "
+        label=label,
+        value=value,
+        default_color=color,
+        value_prefix=" ",
+        value_suffix=" ",
     )
 
     # Write badge
@@ -240,7 +264,11 @@ def main():  # pylint: disable=too-many-branches,too-many-statements
 
     # Create badge
     badge = anybadge.Badge(
-        label=label, value=value, default_color=color, value_prefix=" ", value_suffix=" "
+        label=label,
+        value=value,
+        default_color=color,
+        value_prefix=" ",
+        value_suffix=" ",
     )
 
     # Write badge
@@ -264,7 +292,11 @@ def main():  # pylint: disable=too-many-branches,too-many-statements
 
     # Create badge
     badge = anybadge.Badge(
-        label=label, value=value, default_color=color, value_prefix=" ", value_suffix=" "
+        label=label,
+        value=value,
+        default_color=color,
+        value_prefix=" ",
+        value_suffix=" ",
     )
 
     # Write badge
@@ -272,4 +304,4 @@ def main():  # pylint: disable=too-many-branches,too-many-statements
 
 
 if __name__ == "__main__":
-    main()
\ No newline at end of file
+    main()
diff --git a/ddecal/test/integration/tBdaDdeCal.py b/ddecal/test/integration/tBdaDdeCal.py
index 149ba29707000ce0b35b88ed15c844fb828a3ce3..6c6136d68ee09ce153968b9840dd735c066cffda 100755
--- a/ddecal/test/integration/tBdaDdeCal.py
+++ b/ddecal/test/integration/tBdaDdeCal.py
@@ -53,9 +53,15 @@ def create_skymodel():
         f.write(
             "FORMAT = Name, Type, Ra, Dec, I, MajorAxis, MinorAxis, PositionAngle, ReferenceFrequency='134e6', SpectralIndex='[0.0]'\r\n"
         )
-        f.write("center, POINT, 16:38:28.205000, +63.44.34.314000, 1, , , , , \r\n")
-        f.write("ra_off, POINT, 16:58:28.205000, +63.44.34.314000, 1, , , , , \r\n")
-        f.write("radec_off, POINT, 16:38:28.205000, +65.44.34.314000, 1, , , , , \r\n")
+        f.write(
+            "center, POINT, 16:38:28.205000, +63.44.34.314000, 1, , , , , \r\n"
+        )
+        f.write(
+            "ra_off, POINT, 16:58:28.205000, +63.44.34.314000, 1, , , , , \r\n"
+        )
+        f.write(
+            "radec_off, POINT, 16:38:28.205000, +65.44.34.314000, 1, , , , , \r\n"
+        )
 
 
 @pytest.fixture()
@@ -156,7 +162,11 @@ def test_only_predict(create_skymodel):
     )
 
     check_call(
-        [tcf.DP3EXE, "msout=PREDICT_DIR_1.MS", "predict.sources=[center, dec_off]"]
+        [
+            tcf.DP3EXE,
+            "msout=PREDICT_DIR_1.MS",
+            "predict.sources=[center, dec_off]",
+        ]
         + common_args
         + predict_args
     )
@@ -230,8 +240,14 @@ def test_uvwflagger(create_skymodel, create_corrupted_data_from_regular):
     # When uvw flagging is disabled, the NaNs in the solution file are only 9
     expected_flagged_solutions = 54
 
-    assert np.count_nonzero(np.isnan(amplitude_solutions)) == expected_flagged_solutions
-    assert np.count_nonzero(np.isnan(phase_solutions)) == expected_flagged_solutions
+    assert (
+        np.count_nonzero(np.isnan(amplitude_solutions))
+        == expected_flagged_solutions
+    )
+    assert (
+        np.count_nonzero(np.isnan(phase_solutions))
+        == expected_flagged_solutions
+    )
 
 
 # Only test a limited set of caltype + nchannels combinations, since testing
@@ -256,7 +272,9 @@ def test_uvwflagger(create_skymodel, create_corrupted_data_from_regular):
         # "rotation+diagonal", # part of fulljones -> not implemented
     ],
 )
-def test_caltype(create_skymodel, create_corrupted_data_from_regular, caltype_nchan):
+def test_caltype(
+    create_skymodel, create_corrupted_data_from_regular, caltype_nchan
+):
     """Test calibration for different calibration types"""
     caltype = caltype_nchan[:-1]
     nchan = int(caltype_nchan[-1])
@@ -282,13 +300,20 @@ def test_caltype(create_skymodel, create_corrupted_data_from_regular, caltype_nc
 
     h5 = h5py.File("solutions.h5", "r")
 
-    if caltype in ["scalar", "diagonal", "scalaramplitude", "diagonalamplitude"]:
+    if caltype in [
+        "scalar",
+        "diagonal",
+        "scalaramplitude",
+        "diagonalamplitude",
+    ]:
         amplitude_solutions = h5["sol000/amplitude000/val"]
 
         if caltype.startswith("scalar"):
             assert amplitude_solutions.attrs["AXES"] == b"time,freq,ant,dir"
         else:
-            assert amplitude_solutions.attrs["AXES"] == b"time,freq,ant,dir,pol"
+            assert (
+                amplitude_solutions.attrs["AXES"] == b"time,freq,ant,dir,pol"
+            )
 
         if nchan == 0:
             assert amplitude_solutions.shape[1] == 1
@@ -339,7 +364,12 @@ def test_subtract(create_skymodel, create_corrupted_data):
 
     residual = float(
         check_output(
-            [tcf.TAQLEXE, "-nopr", "-noph", "select gmax(abs(DATA)) from out.MS"]
+            [
+                tcf.TAQLEXE,
+                "-nopr",
+                "-noph",
+                "select gmax(abs(DATA)) from out.MS",
+            ]
         )
     )
 
diff --git a/ddecal/test/integration/tDDECal.py b/ddecal/test/integration/tDDECal.py
index 12115a584fa67e66b07bc15abef768e8a438c20e..88a29cafdc10997509d91691658e24d4d0467ab8 100755
--- a/ddecal/test/integration/tDDECal.py
+++ b/ddecal/test/integration/tDDECal.py
@@ -114,12 +114,17 @@ def create_corrupted_visibilities():
 
 
 @pytest.mark.parametrize(
-    "caltype", ["complexgain", "scalarcomplexgain", "amplitudeonly", "scalaramplitude"]
+    "caltype",
+    ["complexgain", "scalarcomplexgain", "amplitudeonly", "scalaramplitude"],
 )
 @pytest.mark.parametrize("solint", [0, 1, 2, 4])
 @pytest.mark.parametrize("nchan", [1, 2, 5])
 def test(
-    create_corrupted_visibilities, copy_data_to_model_data, caltype, solint, nchan
+    create_corrupted_visibilities,
+    copy_data_to_model_data,
+    caltype,
+    solint,
+    nchan,
 ):
     # Subtract corrupted visibilities using multiple predict steps
     check_call(
@@ -251,12 +256,13 @@ def test_calibration_with_dd_intervals(
                     >= reference_solutions["sol000/amplitude000/val"].shape[0]
                 ):
                     corresponding_index = (
-                        reference_solutions["sol000/amplitude000/val"].shape[0] - 1
+                        reference_solutions["sol000/amplitude000/val"].shape[0]
+                        - 1
                     )
 
-                values_reference = reference_solutions["sol000/amplitude000/val"][
-                    corresponding_index, :, :, direction_index, 0
-                ]
+                values_reference = reference_solutions[
+                    "sol000/amplitude000/val"
+                ][corresponding_index, :, :, direction_index, 0]
 
                 assert (abs(values_ddecal - values_reference) < 1.2).all()
 
@@ -331,12 +337,13 @@ def test_bug_ast_924(
                     >= reference_solutions["sol000/amplitude000/val"].shape[0]
                 ):
                     corresponding_index = (
-                        reference_solutions["sol000/amplitude000/val"].shape[0] - 1
+                        reference_solutions["sol000/amplitude000/val"].shape[0]
+                        - 1
                     )
 
-                values_reference = reference_solutions["sol000/amplitude000/val"][
-                    corresponding_index, :, :, direction_index, 0
-                ]
+                values_reference = reference_solutions[
+                    "sol000/amplitude000/val"
+                ][corresponding_index, :, :, direction_index, 0]
 
                 assert (abs(values_ddecal - values_reference) < 1.2).all()
 
@@ -347,7 +354,13 @@ def test_bug_ast_924(
 )
 @pytest.mark.parametrize(
     "caltype",
-    ["amplitudeonly", "scalaramplitude", "scalar", "diagonal", "diagonalamplitude"],
+    [
+        "amplitudeonly",
+        "scalaramplitude",
+        "scalar",
+        "diagonal",
+        "diagonalamplitude",
+    ],
 )
 def test_subtract_with_dd_intervals(
     create_corrupted_visibilities,
@@ -739,7 +752,9 @@ def test_bda_constaints():
     phase_bda = f_bda["sol000/phase000/val"]
     phase_no_bda = f_no_bda["sol000/phase000/val"]
 
-    np.testing.assert_allclose(ampl_bda, ampl_no_bda, rtol=0.05, atol=0, equal_nan=True)
+    np.testing.assert_allclose(
+        ampl_bda, ampl_no_bda, rtol=0.05, atol=0, equal_nan=True
+    )
     np.testing.assert_allclose(
         phase_bda, phase_no_bda, rtol=0.3, atol=0, equal_nan=True
     )
diff --git a/ddecal/test/integration/tIDGPredict.py b/ddecal/test/integration/tIDGPredict.py
index 6bffac24444ad91b9d3edfa4958dfd666b289552..0299bc4bba134ec6fb2480e0beebe2d467769e81 100755
--- a/ddecal/test/integration/tIDGPredict.py
+++ b/ddecal/test/integration/tIDGPredict.py
@@ -101,7 +101,9 @@ def test_input_with_single_sources(source, offset):
     except FileNotFoundError:
         pytest.skip("WSClean not available")
 
-    check_call(["wsclean", "-use-idg", "-predict", "-name", f"{source}", f"{MSIN}"])
+    check_call(
+        ["wsclean", "-use-idg", "-predict", "-name", f"{source}", f"{MSIN}"]
+    )
     check_output(
         [
             tcf.TAQLEXE,
@@ -112,7 +114,10 @@ def test_input_with_single_sources(source, offset):
     )
 
     # Predict source: $source offset: $offset using IDG
-    if "polygon" in open(f"{tcf.DDECAL_RESOURCEDIR}/{source}-{offset}.reg").read():
+    if (
+        "polygon"
+        in open(f"{tcf.DDECAL_RESOURCEDIR}/{source}-{offset}.reg").read()
+    ):
         check_call(
             [
                 tcf.DP3EXE,
diff --git a/ddecal/test/integration/tIDGPredict_ref.py b/ddecal/test/integration/tIDGPredict_ref.py
index 523b2be364247d5a34329647c05a063824e12e8d..6996d6ea65505dc656e3579539472e33294cca49 100755
--- a/ddecal/test/integration/tIDGPredict_ref.py
+++ b/ddecal/test/integration/tIDGPredict_ref.py
@@ -9,32 +9,31 @@ import os
 
 os.system("rm dummy-image.fits dummy-dirty.fits")
 # -channel-range 0 1 ensures the reference frequency is from the first channel.
-os.system("wsclean -size 512 512 -scale 0.01 -channel-range 0 1 -name dummy tDDECal.MS")
+os.system(
+    "wsclean -size 512 512 -scale 0.01 -channel-range 0 1 -name dummy tDDECal.MS"
+)
 
 sources = {
-    "radec": ( 400, 64 ),
-    "ra": ( 400, 256 ),
-    "dec": ( 256, 64 ),
-    "center": ( 256, 256 )
-    }
-brightness = {
-    "radec": 10,
-    "ra": 20,
-    "dec": 20,
-    "center": 10
-    }
-term_brightness = { 0:10, 1:20000, 2:30000 }
-
-fits_files=[]
+    "radec": (400, 64),
+    "ra": (400, 256),
+    "dec": (256, 64),
+    "center": (256, 256),
+}
+brightness = {"radec": 10, "ra": 20, "dec": 20, "center": 10}
+term_brightness = {0: 10, 1: 20000, 2: 30000}
+
+fits_files = []
 
 hdu = fits.open("dummy-image.fits")[0]
 
+
 def write_fits(name):
     filename = name + "-model.fits"
     os.system("rm -rf " + filename)
     hdu.writeto(filename)
     fits_files.append(filename)
 
+
 # Generate foursources.fits, which has all four sources.
 hdu.data *= 0
 
diff --git a/docker/ubuntu_22_04_base b/docker/ubuntu_22_04_base
index fd26111764fb165746f9b1a671e32c24277e86e2..452ba8e0ab6f4260b433bd5783b7439c69cc5a77 100644
--- a/docker/ubuntu_22_04_base
+++ b/docker/ubuntu_22_04_base
@@ -80,6 +80,7 @@ RUN wget -nv -O /WSRT_Measures.ztar ftp://ftp.astron.nl/outgoing/Measures/WSRT_M
     && rm /WSRT_Measures.ztar
 # Install pip dependencies
 RUN pip3 install \
+		black \
 		cmake-format \
 		h5py \
 		sphinx \
diff --git a/external/aocommon b/external/aocommon
index b422a1bcc4a51d12ac4f8a3f08c5885c776f4a6d..c126d20b28c8557375e12bc3039bb6a64c05f8b0 160000
--- a/external/aocommon
+++ b/external/aocommon
@@ -1 +1 @@
-Subproject commit b422a1bcc4a51d12ac4f8a3f08c5885c776f4a6d
+Subproject commit c126d20b28c8557375e12bc3039bb6a64c05f8b0
diff --git a/parmdb/test/integration/tShowSourceDb.py b/parmdb/test/integration/tShowSourceDb.py
index 666ef7cae83dfaab52ced5f8b924b7029bb1b7ea..f1d856338229386346df3f6a1619dfb75fd823ea 100644
--- a/parmdb/test/integration/tShowSourceDb.py
+++ b/parmdb/test/integration/tShowSourceDb.py
@@ -24,6 +24,7 @@ Script can be invoked in two ways:
 MSIN = "tDemix.in_MS"
 CWD = os.getcwd()
 
+
 @pytest.fixture(autouse=True)
 def source_env():
     os.chdir(CWD)
@@ -40,18 +41,35 @@ def source_env():
     os.chdir(CWD)
     shutil.rmtree(tmpdir)
 
+
 def test_skymodel_sourcedb_roundtrip():
     """Check that skymodel in default format is reproduced after makesourcedb and showsourcedb"""
 
     # sky.txt is not in the default format, create a skymodel in default format by going through sourcedb
     check_call([tcf.MAKESOURCEDBEXE, "in=tDemix_tmp/sky.txt", "out=sourcedb"])
     # The first line of showsourcedb is the application's announcement
-    skymodel_defaultformat_input = check_output([tcf.SHOWSOURCEDBEXE, "in=sourcedb", "mode=skymodel"]).decode('utf-8').split('\n',1)[-1]
+    skymodel_defaultformat_input = (
+        check_output([tcf.SHOWSOURCEDBEXE, "in=sourcedb", "mode=skymodel"])
+        .decode("utf-8")
+        .split("\n", 1)[-1]
+    )
     with open("tDemix_tmp/sky_defaultformat.txt", "w") as f:
         f.write(skymodel_defaultformat_input)
 
     # Now do the roundtrip test: make sourcedb and print the result in the default format
-    check_call([tcf.MAKESOURCEDBEXE, "in=tDemix_tmp/sky_defaultformat.txt", "out=sourcedb_defaultformat"])
-    skymodel_defaultformat_output = check_output([tcf.SHOWSOURCEDBEXE, "in=sourcedb_defaultformat", "mode=skymodel"]).decode('utf-8').split('\n',1)[-1]
+    check_call(
+        [
+            tcf.MAKESOURCEDBEXE,
+            "in=tDemix_tmp/sky_defaultformat.txt",
+            "out=sourcedb_defaultformat",
+        ]
+    )
+    skymodel_defaultformat_output = (
+        check_output(
+            [tcf.SHOWSOURCEDBEXE, "in=sourcedb_defaultformat", "mode=skymodel"]
+        )
+        .decode("utf-8")
+        .split("\n", 1)[-1]
+    )
 
     assert skymodel_defaultformat_input == skymodel_defaultformat_output
diff --git a/scripts/run-format.sh b/scripts/run-format.sh
index 1bfd615db1202b30c55e909d886a6eb2eda2c0e2..0d915e53cfed4cc31454155968389da0be3e384b 100755
--- a/scripts/run-format.sh
+++ b/scripts/run-format.sh
@@ -12,12 +12,6 @@ SOURCE_DIR=$(dirname "$0")/..
 #relative to SOURCE_DIR.
 EXCLUDE_DIRS=(external build CMake)
 
-#The patterns of the C++ source files, which clang-format should format.
-CXX_SOURCES=(*.cc *.h)
-
-#The patterns of the CMake source files, which cmake-format should format.
-CMAKE_SOURCES=(CMakeLists.txt *.cmake)
-
 #End script configuration.
 
 #The common formatting script has further documentation.
diff --git a/steps/test/integration/tApplyBeam.py b/steps/test/integration/tApplyBeam.py
index 59bc18841aa73b63f1a62b2916913be2bae3c1e0..c06c34d62c1344bf03b5ada733be230d0bae120c 100644
--- a/steps/test/integration/tApplyBeam.py
+++ b/steps/test/integration/tApplyBeam.py
@@ -109,7 +109,5 @@ def test_with_updateweights():
             f"applybeam.updateweights=true",
         ]
     )
-    taql_command = (
-        f"select from {MSIN} where all(near(WEIGHT_SPECTRUM, NEW_WEIGHT_SPECTRUM))"
-    )
+    taql_command = f"select from {MSIN} where all(near(WEIGHT_SPECTRUM, NEW_WEIGHT_SPECTRUM))"
     assert_taql(taql_command)
diff --git a/steps/test/integration/tBdaExpander.py b/steps/test/integration/tBdaExpander.py
index 47e3f16a10350991484110acad9663988cd114cd..f115c997275a8d3ea20c40f40ae3fb06614702c1 100644
--- a/steps/test/integration/tBdaExpander.py
+++ b/steps/test/integration/tBdaExpander.py
@@ -54,9 +54,15 @@ def create_skymodel():
         f.write(
             "FORMAT = Name, Type, Ra, Dec, I, MajorAxis, MinorAxis, PositionAngle, ReferenceFrequency='134e6', SpectralIndex='[0.0]'\r\n"
         )
-        f.write("center, POINT, 16:38:28.205000, + 63.44.34.314000, 10, , , , , \r\n")
-        f.write("ra_off, POINT, 16:38:28.205000, + 64.44.34.314000, 10, , , , , \r\n")
-        f.write("radec_off, POINT, 16:38:28.205000, +65.44.34.314000, 10, , , , , \r\n")
+        f.write(
+            "center, POINT, 16:38:28.205000, + 63.44.34.314000, 10, , , , , \r\n"
+        )
+        f.write(
+            "ra_off, POINT, 16:38:28.205000, + 64.44.34.314000, 10, , , , , \r\n"
+        )
+        f.write(
+            "radec_off, POINT, 16:38:28.205000, +65.44.34.314000, 10, , , , , \r\n"
+        )
 
     check_call([tcf.MAKESOURCEDBEXE, "in=test.skymodel", "out=test.sourcedb"])
 
@@ -143,6 +149,6 @@ def test_regular_buffer_writing():
             "checkparset=true",
             "msin=regular_buffer.MS",
             "msout=out.MS",
-            "steps=[]"
+            "steps=[]",
         ]
-    )
\ No newline at end of file
+    )
diff --git a/steps/test/integration/tBdaPredict.py b/steps/test/integration/tBdaPredict.py
index f772277f515a9116f31ce6f1b8d4632e2a322ce2..6fce28d00d070f4adb492f931d51d2fed640987e 100755
--- a/steps/test/integration/tBdaPredict.py
+++ b/steps/test/integration/tBdaPredict.py
@@ -56,7 +56,12 @@ def create_skymodel():
         )
 
     check_call(
-        [tcf.MAKESOURCEDBEXE, "in=test.skymodel", "out=test.sourcedb", "append=false"]
+        [
+            tcf.MAKESOURCEDBEXE,
+            "in=test.skymodel",
+            "out=test.sourcedb",
+            "append=false",
+        ]
     )
 
 
@@ -66,10 +71,17 @@ def create_skymodel_in_phase_center():
         f.write(
             "FORMAT = Name, Type, Ra, Dec, I, MajorAxis, MinorAxis, PositionAngle, ReferenceFrequency='134e6', SpectralIndex='[0.0]'\r\n"
         )
-        f.write(f"center, POINT, 01:37:41.299000, +33.09.35.132000, 10, , , , , \r\n")
+        f.write(
+            f"center, POINT, 01:37:41.299000, +33.09.35.132000, 10, , , , , \r\n"
+        )
 
     check_call(
-        [tcf.MAKESOURCEDBEXE, "in=test.skymodel", "out=test.sourcedb", "append=false"]
+        [
+            tcf.MAKESOURCEDBEXE,
+            "in=test.skymodel",
+            "out=test.sourcedb",
+            "append=false",
+        ]
     )
 
 
diff --git a/steps/test/integration/tDemix.py b/steps/test/integration/tDemix.py
index 8719699d722642e1778f68b57f65094b24ce938a..70bd7249e7628d479b0e6fe1a7832c954c630b43 100755
--- a/steps/test/integration/tDemix.py
+++ b/steps/test/integration/tDemix.py
@@ -40,7 +40,8 @@ common_args = [
     "demix.subtractsources=[CasA]",
 ]
 
-skymodel_arg="demix.skymodel='tDemix_tmp/{}'"
+skymodel_arg = "demix.skymodel='tDemix_tmp/{}'"
+
 
 @pytest.fixture(autouse=True)
 def source_env():
@@ -50,7 +51,13 @@ def source_env():
     os.chdir(tmpdir)
 
     untar_ms(f"{tcf.RESOURCEDIR}/{MSIN}.tgz")
-    check_call([tcf.MAKESOURCEDBEXE, "in=tDemix_tmp/sky.txt", "out=tDemix_tmp/sourcedb"])
+    check_call(
+        [
+            tcf.MAKESOURCEDBEXE,
+            "in=tDemix_tmp/sky.txt",
+            "out=tDemix_tmp/sourcedb",
+        ]
+    )
 
     # Tests are executed here
     yield
@@ -60,7 +67,7 @@ def source_env():
     shutil.rmtree(tmpdir)
 
 
-@pytest.mark.parametrize("skymodel", ['sky.txt', 'sourcedb'])
+@pytest.mark.parametrize("skymodel", ["sky.txt", "sourcedb"])
 def test_without_target(skymodel):
     check_call(
         [
@@ -73,13 +80,12 @@ def test_without_target(skymodel):
         + common_args
     )
 
-
     # Compare some columns of the output MS with the reference output.
     taql_command = f"select from tDemix_out.MS t1, tDemix_tmp/tDemix_ref1.MS t2 where not all(near(t1.DATA,t2.DATA,1e-3) || (isnan(t1.DATA) && isnan(t2.DATA)))  ||  not all(t1.FLAG = t2.FLAG)  ||  not all(near(t1.WEIGHT_SPECTRUM, t2.WEIGHT_SPECTRUM))  ||  not all(t1.LOFAR_FULL_RES_FLAG = t2.LOFAR_FULL_RES_FLAG)  ||  t1.ANTENNA1 != t2.ANTENNA1  ||  t1.ANTENNA2 != t2.ANTENNA2  ||  t1.TIME !~= t2.TIME"
     assert_taql(taql_command)
 
 
-@pytest.mark.parametrize("skymodel", ['sky.txt', 'sourcedb'])
+@pytest.mark.parametrize("skymodel", ["sky.txt", "sourcedb"])
 def test_with_target_projected_away(skymodel):
     check_call(
         [
@@ -92,13 +98,12 @@ def test_with_target_projected_away(skymodel):
         + common_args
     )
 
-
     # Compare some columns of the output MS with the reference output.
     taql_command = f"select from tDemix_out.MS t1, tDemix_tmp/tDemix_ref2.MS t2 where not all(near(t1.DATA,t2.DATA,1e-3) || (isnan(t1.DATA) && isnan(t2.DATA)))  ||  not all(t1.FLAG = t2.FLAG)  ||  not all(near(t1.WEIGHT_SPECTRUM, t2.WEIGHT_SPECTRUM))  ||  not all(t1.LOFAR_FULL_RES_FLAG = t2.LOFAR_FULL_RES_FLAG)  ||  t1.ANTENNA1 != t2.ANTENNA1  ||  t1.ANTENNA2 != t2.ANTENNA2  ||  t1.TIME !~= t2.TIME"
     assert_taql(taql_command)
 
 
-@pytest.mark.parametrize("skymodel", ['sky.txt', 'sourcedb'])
+@pytest.mark.parametrize("skymodel", ["sky.txt", "sourcedb"])
 def test_with_target(skymodel):
     check_call(
         [
@@ -131,7 +136,6 @@ def test_time_freq_resolution():
         + common_args
     )
 
-
     # Compare some columns of the output MS with the reference output.
     taql_command = f"select from tDemix_out.MS t1, tDemix_tmp/tDemix_ref1.MS t2 where not all(near(t1.DATA,t2.DATA,1e-3) || (isnan(t1.DATA) && isnan(t2.DATA)))  ||  not all(t1.FLAG = t2.FLAG)  ||  not all(near(t1.WEIGHT_SPECTRUM, t2.WEIGHT_SPECTRUM))  ||  not all(t1.LOFAR_FULL_RES_FLAG = t2.LOFAR_FULL_RES_FLAG)  ||  t1.ANTENNA1 != t2.ANTENNA1  ||  t1.ANTENNA2 != t2.ANTENNA2  ||  t1.TIME !~= t2.TIME"
     assert_taql(taql_command)
diff --git a/steps/test/integration/tMsIn.py b/steps/test/integration/tMsIn.py
index ffdb541c535fc556a0af3847669e042cfcd18173..e70c8a81da4a12e67f4d877e25bac415ca06ddea 100644
--- a/steps/test/integration/tMsIn.py
+++ b/steps/test/integration/tMsIn.py
@@ -141,7 +141,8 @@ def test_write_thread_enabled():
 
     assert re.search(b"use thread: *true", result)
     assert re.search(
-        b"(1[0-9]| [ 0-9])[0-9]\\.[0-9]% \\([ 0-9]{5} [m ]s\\) Creating task\n", result
+        b"(1[0-9]| [ 0-9])[0-9]\\.[0-9]% \\([ 0-9]{5} [m ]s\\) Creating task\n",
+        result,
     )
     assert re.search(
         b"(1[0-9]| [ 0-9])[0-9]\\.[0-9]% \\([ 0-9]{5} [m ]s\\) Writing \\(threaded\\)\n",
@@ -169,10 +170,12 @@ def test_write_thread_disabled():
     assert re.search(b"use thread: *false", result)
     assert (
         re.search(
-            b"(1[0-9]| [ 0-9])[0-9]\\.[0-9]% \\([ 0-9]{5} [m ]s\\) Creating task\n", result
+            b"(1[0-9]| [ 0-9])[0-9]\\.[0-9]% \\([ 0-9]{5} [m ]s\\) Creating task\n",
+            result,
         )
         == None
     )
     assert re.search(
-        b"(1[0-9]| [ 0-9])[0-9]\\.[0-9]% \\([ 0-9]{5} [m ]s\\) Writing\n", result
+        b"(1[0-9]| [ 0-9])[0-9]\\.[0-9]% \\([ 0-9]{5} [m ]s\\) Writing\n",
+        result,
     )
diff --git a/steps/test/integration/tMsOut.py b/steps/test/integration/tMsOut.py
index 5c7a23eab58da83f9f9bbdcb21bc6843a55dd106..f2bc3a697c40d62e5ad4433855d17a528d0b76fb 100644
--- a/steps/test/integration/tMsOut.py
+++ b/steps/test/integration/tMsOut.py
@@ -19,6 +19,7 @@ from utils import untar_ms, get_taql_result, check_output
 MSIN = "tNDPPP-generic.MS"
 CWD = os.getcwd()
 
+
 @pytest.fixture(autouse=True)
 def source_env():
     os.chdir(CWD)
@@ -52,16 +53,22 @@ def test_chunking():
     assert os.path.exists("chunktest-000.ms")
     assert os.path.exists("chunktest-001.ms")
     assert os.path.exists("chunktest-002.ms")
-    
+
     # Each should have two timesteps:
     taql_command = f"select unique TIME from chunktest-000.ms"
     result = get_taql_result(taql_command)
-    assert result == 'Unit: s\n29-Mar-2013/13:59:53.007\n29-Mar-2013/14:00:03.021'
-    
+    assert (
+        result == "Unit: s\n29-Mar-2013/13:59:53.007\n29-Mar-2013/14:00:03.021"
+    )
+
     taql_command = f"select unique TIME from chunktest-001.ms"
     result = get_taql_result(taql_command)
-    assert result == 'Unit: s\n29-Mar-2013/14:00:13.035\n29-Mar-2013/14:00:23.049'
+    assert (
+        result == "Unit: s\n29-Mar-2013/14:00:13.035\n29-Mar-2013/14:00:23.049"
+    )
 
     taql_command = f"select unique TIME from chunktest-002.ms"
     result = get_taql_result(taql_command)
-    assert result == 'Unit: s\n29-Mar-2013/14:00:33.063\n29-Mar-2013/14:00:43.076'
+    assert (
+        result == "Unit: s\n29-Mar-2013/14:00:33.063\n29-Mar-2013/14:00:43.076"
+    )
diff --git a/steps/test/integration/tMultiApplyCal.py b/steps/test/integration/tMultiApplyCal.py
index 7e640374facfd742f04df3b97754513b0b733f44..5789a93c4ff4fd54b508129b80c122ee44ce36f5 100644
--- a/steps/test/integration/tMultiApplyCal.py
+++ b/steps/test/integration/tMultiApplyCal.py
@@ -23,7 +23,9 @@ Script can be invoked in two ways:
 """
 
 MSIN = "tNDPPP-generic.MS"
-PARMDB_TGZ = "tApplyCal2.parmdb.tgz"  # Note: This archive contains tApplyCal.parmdb.
+PARMDB_TGZ = (
+    "tApplyCal2.parmdb.tgz"  # Note: This archive contains tApplyCal.parmdb.
+)
 PARMDB = "tApplyCal.parmdb"
 CWD = os.getcwd()
 
diff --git a/steps/test/integration/tNullStokes.py b/steps/test/integration/tNullStokes.py
index 8bfca87e9054b9ceb7323f05f04a6c8f00ed6bc9..cda2488b4fee18f4e97ac0b4ab2a263366960e36 100755
--- a/steps/test/integration/tNullStokes.py
+++ b/steps/test/integration/tNullStokes.py
@@ -71,15 +71,16 @@ def test_stokes_Qzero(create_model_data):
     assert_taql(taql_command)
 
     # Checking that Stokes I, U, V have not been changed
-    taql_command = f"select from (select mscal.stokes(STOKES_QDATA, 'I') as a, mscal.stokes(DATA, 'I') as b from {MSOUT}) where not all(near(a, b))" 
+    taql_command = f"select from (select mscal.stokes(STOKES_QDATA, 'I') as a, mscal.stokes(DATA, 'I') as b from {MSOUT}) where not all(near(a, b))"
     assert_taql(taql_command)
 
-    taql_command = f"select from (select mscal.stokes(STOKES_QDATA, 'U') as a, mscal.stokes(DATA, 'U') as b from {MSOUT}) where not all(near(a, b))" 
+    taql_command = f"select from (select mscal.stokes(STOKES_QDATA, 'U') as a, mscal.stokes(DATA, 'U') as b from {MSOUT}) where not all(near(a, b))"
     assert_taql(taql_command)
 
-    taql_command = f"select from (select mscal.stokes(STOKES_QDATA, 'V') as a, mscal.stokes(DATA, 'V') as b from {MSOUT}) where not all(near(a, b))" 
+    taql_command = f"select from (select mscal.stokes(STOKES_QDATA, 'V') as a, mscal.stokes(DATA, 'V') as b from {MSOUT}) where not all(near(a, b))"
     assert_taql(taql_command)
 
+
 def test_stokes_Uzero(create_model_data):
     check_call(
         [
@@ -97,11 +98,11 @@ def test_stokes_Uzero(create_model_data):
     assert_taql(taql_command)
 
     # Checking that Stokes I, Q, V have not been changed
-    taql_command = f"select from (select mscal.stokes(STOKES_UDATA, 'I') as a, mscal.stokes(DATA, 'I') as b from {MSOUT}) where not all(near(a, b))" 
+    taql_command = f"select from (select mscal.stokes(STOKES_UDATA, 'I') as a, mscal.stokes(DATA, 'I') as b from {MSOUT}) where not all(near(a, b))"
     assert_taql(taql_command)
 
-    taql_command = f"select from (select mscal.stokes(STOKES_UDATA, 'Q') as a, mscal.stokes(DATA, 'Q') as b from {MSOUT}) where not all(near(a, b))" 
+    taql_command = f"select from (select mscal.stokes(STOKES_UDATA, 'Q') as a, mscal.stokes(DATA, 'Q') as b from {MSOUT}) where not all(near(a, b))"
     assert_taql(taql_command)
 
-    taql_command = f"select from (select mscal.stokes(STOKES_UDATA, 'V') as a, mscal.stokes(DATA, 'V') as b from {MSOUT}) where not all(near(a, b))" 
+    taql_command = f"select from (select mscal.stokes(STOKES_UDATA, 'V') as a, mscal.stokes(DATA, 'V') as b from {MSOUT}) where not all(near(a, b))"
     assert_taql(taql_command)
diff --git a/steps/test/integration/tPhaseshiftPredict.py b/steps/test/integration/tPhaseshiftPredict.py
index 1a4976495b49c9e0520e8cb7aa3851664c3ab82a..3fad33403ba6b632828913ae9817fe2abd4ceebb 100755
--- a/steps/test/integration/tPhaseshiftPredict.py
+++ b/steps/test/integration/tPhaseshiftPredict.py
@@ -22,6 +22,7 @@ Script can be invoked in two ways:
 
 MSIN = "tNDPPP-generic.MS"
 
+
 @pytest.fixture(autouse=True)
 def source_env(tmpdir_factory):
     tmpdir = str(tmpdir_factory.mktemp("data"))
@@ -36,7 +37,6 @@ def source_env(tmpdir_factory):
     shutil.rmtree(tmpdir)
 
 
-
 testdata = [
     ("POINT", 0, 0, 0, 2, 10),
     ("GAUSSIAN", 600, 60, 0, 2, 10),
@@ -44,12 +44,22 @@ testdata = [
     ("GAUSSIAN", 600, 60, 30, 2, 0),
     ("GAUSSIAN", 600, 60, 30, 0, 10),
     ("GAUSSIAN", 600, 60, 30, 2, 10),
-    ("GAUSSIAN", 60, 60, 0, 2, 10)
+    ("GAUSSIAN", 60, 60, 0, 2, 10),
 ]
 
 
-@pytest.mark.parametrize("source_type,major_axis,minor_axis,orientation,offset_ra_hour,offset_dec_degree", testdata)
-def test_phaseshift_predict(source_type, major_axis, minor_axis, orientation, offset_ra_hour, offset_dec_degree):
+@pytest.mark.parametrize(
+    "source_type,major_axis,minor_axis,orientation,offset_ra_hour,offset_dec_degree",
+    testdata,
+)
+def test_phaseshift_predict(
+    source_type,
+    major_axis,
+    minor_axis,
+    orientation,
+    offset_ra_hour,
+    offset_dec_degree,
+):
     """
     - Phaseshift MS to the position of a source
     - Predict the source with the new phase center
@@ -60,14 +70,23 @@ def test_phaseshift_predict(source_type, major_axis, minor_axis, orientation, of
     MS_PHASECENTER_RA = "01h37m41.299"
     MS_PHASECENTER_DEC = "+033d09m35.132"
     assert -1 < offset_ra_hour < 22 and int(offset_ra_hour) == offset_ra_hour
-    assert -33 < offset_dec_degree < 56 and int(offset_dec_degree) == offset_dec_degree
-    source_position_ra = MS_PHASECENTER_RA.replace("01", f"{1 + offset_ra_hour:02d}")
-    source_position_dec = MS_PHASECENTER_DEC.replace("+033", f"{33 + offset_dec_degree:+04d}")
+    assert (
+        -33 < offset_dec_degree < 56
+        and int(offset_dec_degree) == offset_dec_degree
+    )
+    source_position_ra = MS_PHASECENTER_RA.replace(
+        "01", f"{1 + offset_ra_hour:02d}"
+    )
+    source_position_dec = MS_PHASECENTER_DEC.replace(
+        "+033", f"{33 + offset_dec_degree:+04d}"
+    )
     with open("test.skymodel", "w") as f:
-        f.write(f"""\
+        f.write(
+            f"""\
 FORMAT = Name, Type, Ra, Dec, I, MajorAxis, MinorAxis, Orientation, OrientationIsAbsolute
 dummysource, {source_type}, {source_position_ra}, {source_position_dec}, {major_axis}, {minor_axis}, 2, 112, True
-""")
+"""
+        )
 
     check_call(
         [
@@ -88,7 +107,9 @@ dummysource, {source_type}, {source_position_ra}, {source_position_dec}, {major_
         ]
     )
 
-    taql_command = f"select gmean(abs(DATA)) from out.MS WHERE ANTENNA1!=ANTENNA2"
+    taql_command = (
+        f"select gmean(abs(DATA)) from out.MS WHERE ANTENNA1!=ANTENNA2"
+    )
     residual = float(get_taql_result(taql_command))
     print("Residual:", residual)
     assert residual < 0.001
diff --git a/steps/test/integration/tPredict.py b/steps/test/integration/tPredict.py
index 23ed4b0b578591315778e54c993aa5532423f1d5..b9511c36ec60b6f7caf1a2defc2fd30ef8865da1 100644
--- a/steps/test/integration/tPredict.py
+++ b/steps/test/integration/tPredict.py
@@ -139,7 +139,9 @@ def test_without_and_with_time_smearing(use_time_smearing):
         )
 
     shutil.rmtree(sourcedb, ignore_errors=True)
-    check_call([tcf.MAKESOURCEDBEXE, "in=timesmearing.skymodel", f"out={sourcedb}"])
+    check_call(
+        [tcf.MAKESOURCEDBEXE, "in=timesmearing.skymodel", f"out={sourcedb}"]
+    )
     check_call(
         [
             tcf.DP3EXE,
@@ -173,6 +175,7 @@ def test_without_and_with_time_smearing(use_time_smearing):
         )
         assert_taql(taql_command, 2)
 
+
 @pytest.mark.parametrize("use_beam", [False, True])
 def test_without_and_with_beam_parallelbaseline(use_beam):
     predict_column = "PREDICT_beam" if use_beam else "PREDICT_nobeam"
diff --git a/steps/test/integration/tUVWFlagger.py b/steps/test/integration/tUVWFlagger.py
index 745494d1203980f197f16ecc26b8463c197f44d3..ec8e4de1830ccccec967ba0e078a2466d1f40275 100644
--- a/steps/test/integration/tUVWFlagger.py
+++ b/steps/test/integration/tUVWFlagger.py
@@ -85,7 +85,8 @@ def test_update_flags_inplace():
     )
     utils.assert_taql(count_flags_set, 168)
     assert re.search(
-        b"\nTotal flagged:     0.000%   \\(0 out of 1344 visibilities\\)\n\n\n", result
+        b"\nTotal flagged:     0.000%   \\(0 out of 1344 visibilities\\)\n\n\n",
+        result,
     )
 
 
diff --git a/steps/test/unit/mock/mockpystep.py b/steps/test/unit/mock/mockpystep.py
index 0d2e9e9151c609a1f85a2023ffdde8d50b0b5b47..bb256477e31d9ec75362c724e863769b4e885aef 100644
--- a/steps/test/unit/mock/mockpystep.py
+++ b/steps/test/unit/mock/mockpystep.py
@@ -8,8 +8,10 @@ import parameterset
 from pydp3 import Step
 import numpy as np
 
+
 class MockPyStep(Step):
     """Example python DPStep that multiplies DATA and WEIGHT_SPECTRUM"""
+
     def __init__(self, parset, prefix):
         """
         Set up the step (constructor). Read the parset here.