...
 
......@@ -24,6 +24,9 @@ if [ -d /build ]; then
else
d="$(realpath "$(dirname "$0")")"
#"
docker run --rm -ti -e DISPLAY=$DISPLAY -v /tmp/.X11-unix:/tmp/.X11-unix -v "$d:/build" --user $(id -u):$(id -g) registry.gitlab.com/canfd/server-tools/ghdl:gtkwave /build/run-docker-test "$@"
docker run --rm -ti -e DISPLAY=$DISPLAY -v /tmp/.X11-unix:/tmp/.X11-unix \
-v "$d:/build" --user $(id -u):$(id -g) --memory 6G \
registry.gitlab.com/canfd/server-tools/ghdl:gtkwave \
/build/run-docker-test "$@"
fi
......@@ -16,6 +16,12 @@ Features:
* `gtkw`: a path to a GTKW file with the waveform layout definition for gtkwave;
if set together with `wave`, this takes precedence. The specified gtkw
file is not modified.
* `dump_all_signals`: If true, dump all signals in GUI mode, not only those
included in the layout file. May be overridden by the
`--dumpall` command-line option. By default, it is set to
true, but for long-running tests with many signals it
may be necessary to set it to false to prevent `gtkwave`
from running out of memory while loading the waveforms.
* many more
## Using waveform layout files
......@@ -23,8 +29,12 @@ Features:
* Specify the file in YML config, either as `gtkw` or `wave` (tcl). Later, this
might be extended to native gtkw-generating python files.
* Run the tests with `--create-ghws`. This generates signal and type hierarchy.
You should run this each time you modify a signal in the layout (or add a signal both to code and to layout).
You should run this each time you modify a signal in the layout (or add a
signal both to code and to layout).
* Run in GUI mode, using the VUnit `-g` flag.
* If a layout file is specified and `dump_all_signals` is false (and
`--dumpall` is not used), only the signals specified in the layout file are
dumped.
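Restricting the dump keeps the GHW file small and gtkwave responsive on long
tests. The decision roughly reduces to the following sketch (the function and
parameter names are illustrative, not part of the runner's API; `cfg` mirrors
the YAML test config):

```python
def restrict_dump(cfg: dict, dumpall: bool) -> bool:
    """Return True when only layout-file signals should be dumped."""
    # A layout file must exist, the config must not ask for everything,
    # and --dumpall must not have been passed on the command line.
    has_layout = 'wave' in cfg or 'gtkw' in cfg
    return (has_layout
            and not cfg.get('dump_all_signals', True)
            and not dumpall)
```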
# How it works
......
--------------------------------------------------------------------------------
--
--
-- CTU CAN FD IP Core
-- Copyright (C) 2015-2018
--
--
-- Authors:
-- Ondrej Ille <ondrej.ille@gmail.com>
-- Martin Jerabek <martin.jerabek01@gmail.com>
--
-- Project advisors:
--
-- Project advisors:
-- Jiri Novak <jnovak@fel.cvut.cz>
-- Pavel Pisa <pisa@cmp.felk.cvut.cz>
--
--
-- Department of Measurement (http://meas.fel.cvut.cz/)
-- Faculty of Electrical Engineering (http://www.fel.cvut.cz)
-- Czech Technical University (http://www.cvut.cz/)
--
--
-- Permission is hereby granted, free of charge, to any person obtaining a copy
-- of this VHDL component and associated documentation files (the "Component"),
-- to deal in the Component without restriction, including without limitation
-- the rights to use, copy, modify, merge, publish, distribute, sublicense,
-- and/or sell copies of the Component, and to permit persons to whom the
-- Component is furnished to do so, subject to the following conditions:
--
--
-- The above copyright notice and this permission notice shall be included in
-- all copies or substantial portions of the Component.
--
--
-- THE COMPONENT IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-- IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-- FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
......@@ -32,11 +32,11 @@
-- LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-- FROM, OUT OF OR IN CONNECTION WITH THE COMPONENT OR THE USE OR OTHER DEALINGS
-- IN THE COMPONENT.
--
--
-- The CAN protocol is developed by Robert Bosch GmbH and protected by patents.
-- Anybody who wants to implement this IP core on silicon has to obtain a CAN
-- protocol license from Bosch.
--
--
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
......@@ -104,6 +104,7 @@ package CANtestLib is
-- Logger severity type (severities in increasing order)
type log_lvl_type is (
debug_l,
info_l,
warning_l,
error_l
......@@ -251,9 +252,9 @@ package CANtestLib is
-- SSP (Secondary Sampling Point) configuration options
type SSP_set_command_type is (
ssp_measured,
ssp_meas_n_offset,
ssp_offset
ssp_measured,
ssp_meas_n_offset,
ssp_offset
);
-- Use only TRV_DELAY
-- Use TRV_DELAY + fixed offset given by user
......@@ -466,7 +467,7 @@ package CANtestLib is
type SW_CAN_mask_filter_type is (
filter_A,
filter_B,
filter_C
filter_C
);
......@@ -482,7 +483,7 @@ package CANtestLib is
type SW_CAN_range_filter_config is record
ID_th_low : natural;
ID_th_high : natural;
ident_type : std_logic;
ident_type : std_logic;
acc_CAN_2_0 : boolean;
acc_CAN_FD : boolean;
end record;
......@@ -515,7 +516,7 @@ package CANtestLib is
----------------------------------------------------------------------------
-- Bit sequence generator
----------------------------------------------------------------------------
-- The longest possible CAN FD frame is around 700 bits. If each bit has
-- the opposite polarity to the previous one, this could use up to 700
-- entries. Have some reserve...
......@@ -585,7 +586,8 @@ package CANtestLib is
-- VUnit from configuration.
--
-- Log levels are bound to Vunit Logging library:
-- info_l - All logs are shown
-- debug_l - All logs are shown (even pass and trace)
-- info_l - info(), warning(), error(), failure() are shown
-- warning_l - warning(), error(), failure() are shown
-- error_l - error(), failure() are shown
--
......@@ -608,7 +610,7 @@ package CANtestLib is
procedure set_error_beh(
constant error_beh : in err_beh_type
);
----------------------------------------------------------------------------
-- Generates clock signal for the test with custom period, duty cycle and
......@@ -919,7 +921,7 @@ package CANtestLib is
----------------------------------------------------------------------------
-- Execute write access on Avalon memory bus via Avalon burst.
-- Execute write access on Avalon memory bus via Avalon burst.
-- Does not support unaligned accesses. Size of the burst is given by
-- length of "w_data".
--
......@@ -939,7 +941,7 @@ package CANtestLib is
----------------------------------------------------------------------------
-- Execute read access on Avalon memory bus via Avalon burst.
-- Execute read access on Avalon memory bus via Avalon burst.
-- Does not support unaligned accesses. Size of the burst is given by
-- length of "r_data".
--
......@@ -1378,7 +1380,7 @@ package CANtestLib is
--
-- Arguments:
-- bits Number of Bit times to wait for
-- exit_trans Exit when unit turns transmitter.
-- exit_trans Exit when unit turns transmitter.
-- exit_rec Exit when unit turns receiver.
-- ID Index of CTU CAN FD Core instance
-- mem_bus Avalon memory bus to execute the access on.
......@@ -1463,7 +1465,7 @@ package CANtestLib is
----------------------------------------------------------------------------
-- Set options of RX Buffer.
-- Set options of RX Buffer.
--
-- Arguments:
-- options Options to be applied on RX Buffer.
......@@ -1796,8 +1798,8 @@ package CANtestLib is
----------------------------------------------------------------------------
-- Read Timestamp from TIMESTAMP_LOW and TIMESTAMP_HIGH registers
--
-- Read Timestamp from TIMESTAMP_LOW and TIMESTAMP_HIGH registers
--
-- Arguments:
-- ts Variable in which timestamp will be stored
-- ID Index of CTU CAN FD Core instance.
......@@ -1807,13 +1809,13 @@ package CANtestLib is
variable ts : out std_logic_vector(63 downto 0);
constant ID : in natural range 0 to 15;
signal mem_bus : inout Avalon_mem_type
);
);
----------------------------------------------------------------------------
-- Configure SSP (Secondary Sampling Point): choose the applicable
-- SSP delay source and set the offset given by the user (if used).
--
-- SSP delay source and set the offset given by the user (if used).
--
-- Arguments:
-- ssp_source Select the required SSP delay source.
-- ssp_offset Number of clock cycles to wait.
......@@ -1821,8 +1823,8 @@ package CANtestLib is
-- mem_bus Avalon memory bus to execute the access on.
----------------------------------------------------------------------------
procedure CAN_configure_ssp(
variable ssp_source : in SSP_set_command_type;
variable ssp_offset_val : in std_logic_vector(6 downto 0);
variable ssp_source : in SSP_set_command_type;
variable ssp_offset_val : in std_logic_vector(6 downto 0);
constant ID : in natural range 0 to 15;
signal mem_bus : inout Avalon_mem_type
);
......@@ -2037,33 +2039,34 @@ package body CANtestLib is
wait for 0 ns;
end procedure;
procedure set_log_level(
constant log_level : in log_lvl_type
) is
begin
case log_level is
when error_l =>
show_all(display_handler);
hide(display_handler, debug);
hide(display_handler, info);
hide(display_handler, pass);
hide(display_handler, warning);
when warning_l =>
show_all(display_handler);
hide(display_handler, debug);
hide(display_handler, pass);
hide(display_handler, info);
when info_l =>
show_all(display_handler);
--hide(logger, display_handler, debug);
when others =>
failure("Unknwon log level.");
end case;
show_all(display_handler);
if log_level >= debug_l then
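-- debug_l shows everything; there is nothing to hide at this level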
null;
end if;
if log_level >= info_l then
hide(display_handler, pass);
hide(display_handler, trace);
null;
end if;
if log_level >= warning_l then
hide(display_handler, debug);
hide(display_handler, info);
end if;
if log_level >= error_l then
hide(display_handler, warning);
end if;
end procedure;
procedure set_error_beh(
constant error_beh : in err_beh_type
) is
......@@ -2073,7 +2076,7 @@ package body CANtestLib is
else
set_stop_level(failure);
end if;
end procedure;
end procedure;
procedure print_test_info(
......@@ -2086,13 +2089,7 @@ package body CANtestLib is
info("Test info:");
info("Number of iterations: " & integer'image(iterations));
if (log_level = info_l) then
info("Log level: INFO,WARNING,ERROR logs are shown!");
elsif (log_level = warning_l) then
info("Log level: WARNING,ERROR logs are shown!");
else
info("Log level: ERROR logs are shown!");
end if;
info("Log level: " & log_lvl_type'image(log_level));
set_log_level(log_level);
if (error_beh = go_on) then
......@@ -2384,11 +2381,11 @@ package body CANtestLib is
constant size : in aval_access_size
) return std_logic_vector is
begin
if (address'length < 2) then
error("Address to BE conversion. Invalid address");
end if;
if (size = BIT_32) then
return "1111";
end if;
......@@ -2426,10 +2423,10 @@ package body CANtestLib is
-- Check for access alignment
if (not aval_is_aligned(w_address, w_size)) then
warning("Unaligned Avalon write, Adress :" & to_hstring(w_address)
& " Size: " & aval_access_size'image(w_size));
& " Size: " & aval_access_size'image(w_size));
else
w_addr_padded(w_address'length - 1 downto 0) := w_address;
w_addr_padded(w_address'length - 1 downto 0) := w_address;
wait until falling_edge(mem_bus.clk_sys);
mem_bus.scs <= '1';
mem_bus.swr <= '1';
......@@ -2466,8 +2463,8 @@ package body CANtestLib is
warning("Unaligned Avalon Read, Adress :" & to_hstring(r_address) &
" Size: " & aval_access_size'image(r_size));
else
r_addr_padded(r_address'length - 1 downto 0) := r_address;
r_addr_padded(r_address'length - 1 downto 0) := r_address;
wait until falling_edge(mem_bus.clk_sys);
mem_bus.scs <= '1';
mem_bus.srd <= '1';
......@@ -2527,7 +2524,7 @@ package body CANtestLib is
to_hstring(w_address));
return;
end if;
if (not aval_is_valid_burst_size(w_data'length)) then
return;
end if;
......@@ -2541,7 +2538,7 @@ package body CANtestLib is
if (not stat_burst) then
increment := 4;
end if;
act_address(w_address'length - 1 downto 0) := w_address;
-- Iterate through the addresses
......@@ -2581,7 +2578,7 @@ package body CANtestLib is
to_hstring(r_address));
return;
end if;
if (not aval_is_valid_burst_size(r_data'length)) then
return;
end if;
......@@ -2838,7 +2835,7 @@ package body CANtestLib is
CAN_write(data, SETTINGS_ADR, ID, mem_bus, BIT_16);
end procedure;
procedure config_filter_frame_types(
constant ident_type : in std_logic;
constant acc_CAN_2_0 : in boolean;
......@@ -3087,7 +3084,7 @@ package body CANtestLib is
str_msg(89 to 117) := " RWCNT (read word count): ";
str_msg(118 to 127) :=
to_string(std_logic_vector(to_unsigned(frame.rwcnt, 10)));
to_string(std_logic_vector(to_unsigned(frame.rwcnt, 10)));
-- Data words
if (frame.rtr = NO_RTR_FRAME and frame.data_length > 0) then
......@@ -3559,7 +3556,7 @@ package body CANtestLib is
(bus_timing.prop_dbt + bus_timing.ph1_dbt +
bus_timing.ph2_dbt + 1);
end if;
-- Check Minimal Bit time
check(wait_time > 6, "Calculated Bit Time shorter than minimal!");
......@@ -4453,8 +4450,8 @@ package body CANtestLib is
procedure CAN_configure_ssp(
variable ssp_source : in SSP_set_command_type;
variable ssp_offset_val : in std_logic_vector(6 downto 0);
variable ssp_source : in SSP_set_command_type;
variable ssp_offset_val : in std_logic_vector(6 downto 0);
constant ID : in natural range 0 to 15;
signal mem_bus : inout Avalon_mem_type
) is
......@@ -4467,7 +4464,7 @@ package body CANtestLib is
when ssp_meas_n_offset =>
data(SSP_SRC_H downto SSP_SRC_L) := SSP_SRC_MEAS_N_OFFSET; --"01";
when ssp_offset =>
data(SSP_SRC_H downto SSP_SRC_L) := SSP_SRC_OFFSET; --"10";
data(SSP_SRC_H downto SSP_SRC_L) := SSP_SRC_OFFSET; --"10";
when others =>
error("Unsupported SSP type.");
end case;
......@@ -4491,7 +4488,7 @@ entity CAN_test is
-- Used only for "reference" test
constant data_path :in string :=
"test/reference/data_sets/log_500Kb_2Mb_80p_1K_samples_1"
"test/reference/data_sets/log_500Kb_2Mb_80p_1K_samples_1"
);
port (
......
......@@ -27,7 +27,7 @@ setup_logging()
from . import vunit_ifc
from . import test_unit, test_sanity, test_feature, test_reference
from vunit.ui import VUnit
from .test_common import add_common_sources, add_flags
from .test_common import add_common_sources, get_compile_options
#-------------------------------------------------------------------------------
......@@ -80,10 +80,12 @@ def create():
@click.option('--strict', 'strict', flag_value=1,
help='Return non-zero if an unconfigured test was found.')
@click.option('--no-strict', 'strict', flag_value=0)
@click.option('--dumpall', is_flag=True, flag_value=True, default=False,
help='In GUI mode, dump all signals, not only those included in the layout file.')
@click.option('--create-ghws/--no-create-ghws', default=False,
help='Only elaborate and create basic GHW files necessary for converting TCL layout files to GTKW files for gtkwave.')
@click.pass_obj
def test(obj, *, config, strict, create_ghws, vunit_args):
def test(obj, *, config, strict, create_ghws, dumpall, vunit_args):
"""Run the tests. Configuration is passed in YAML config file.
You may pass arguments directly to VUnit by appending them at the end of the command.
......@@ -143,14 +145,20 @@ def test(obj, *, config, strict, create_ghws, vunit_args):
tests = []
for cfg_key, factory in tests_classes:
if cfg_key in config:
tests.append(factory(ui, lib, config[cfg_key], build, base, create_ghws=create_ghws))
tests.append(factory(ui, lib, config[cfg_key], build, base,
create_ghws=create_ghws,
force_unrestricted_dump_signals=dumpall))
(func_cov_dir / "html").mkdir(parents=True, exist_ok=True)
(func_cov_dir / "coverage_data").mkdir(parents=True, exist_ok=True)
for t in tests:
t.add_sources()
add_flags(ui, lib, build)
c = get_compile_options()
for k, v in c.items():
lib.set_compile_option(k, v)
conf_ok = [t.configure() for t in tests]
# check for unknown tests
......@@ -198,27 +206,3 @@ def vunit_run(ui, build, out_basename) -> int:
f.write(c)
out.unlink()
return res
"""
+ vunit configurations
+ pass modelsim gui file via ui.set_sim_option("modelsim.init_file.gui", ...)
+ include the standard library files in ui.set_sim_option("modelsim.init_files.after_load", [...])
+ set TCOMP global variable
- allow preprocessed calls to log()
- use some log from vunit?
- use random from vunit?
+ use per-test default configurations (with set tcl files etc.), different sanity configurations
x pass encoded composite generics (sanity test)
+ use watchdog - pass the time in config: test_runner_watchdog(runner, 10 ms);
- bash completion for files & tests:
- click._bashcompletion.get_choices -> extend the if to check if the given argument is an instance of XXX
and implement completion method for that instance. Complete test names.
- feature tests
- sanity - optimize bus delay shift registers
"""
from vcd.gtkw import GTKWSave
import tkinter
from typing import List
from typing import List, Set
import re
import logging
import traceback
import functools
......@@ -25,6 +25,7 @@ def logexc(f):
class TclFuncs:
def __init__(self, gtkw: str, hierarchy):
self.gtkw = gtkw
self.used_signals = set() # type: Set[str]
self.hierarchy = hierarchy
# set up TCL
......@@ -65,6 +66,13 @@ class TclFuncs:
fqn = 'top.' + fqn
return fqn.replace('(', '[').replace(')', ']').lower()
def convsig_wave_opt(self, sig: str) -> str:
sig = re.sub(r'__([0-9]+)', r'(\1)', sig)
sig = re.sub(r'\([^)]+\)', '', sig)
if sig[0] != '/':
sig = '/'+sig
return sig
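# Hedged example of convsig_wave_opt: 'top/foo/bar__3' is first rewritten
# to 'top/foo/bar(3)', the parenthesized index is then stripped, and a
# leading '/' is prepended, yielding '/top/foo/bar' -- one whole signal
# per line of the generated wave-opt file.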
def _add_trace(self, signal, type, *, label: str, datafmt: str, expand: bool, **kwds):
if ghw_parse.is_record(type):
with self.gtkw.group(label, closed=not expand):
......@@ -72,6 +80,7 @@ class TclFuncs:
# do not pass label
self._add_trace(signal+'/'+iname, itype, datafmt=datafmt, expand=False, label=None, **kwds)
else:
self.used_signals.add(self.convsig_wave_opt(signal))
signal = self.convsig(signal)
self.gtkw.trace(signal, alias=label, datafmt=datafmt, **kwds)
......@@ -170,7 +179,7 @@ class TclFuncs:
self.gtkw.end_group(o.group)
def tcl2gtkw(tcl_wave, tcl_init_files: List[str], gtkw, ghw: Path):
def tcl2gtkw(tcl_wave, tcl_init_files: List[str], gtkw, ghw: Path) -> List[str]:
hierarchy = ghw_parse.parse(ghw)
with open(gtkw, 'wt') as f:
gtkw = GTKWSave(f)
......@@ -183,3 +192,5 @@ def tcl2gtkw(tcl_wave, tcl_init_files: List[str], gtkw, ghw: Path):
c.tcl.createcommand('run_simulation', lambda: None)
c.source(tcl_wave)
c.finalize()
used_signals = sorted(c.used_signals)
return used_signals
vunit_hdl
#vunit_hdl
git+git://github.com/mjerabek/vunit@ghdl-gtkwave#egg=vunit_hdl
pyvcd
attrs
jinja2
parsy
pyyaml
click
yattag
json2html
......@@ -8,11 +8,13 @@ from jinja2 import Environment, PackageLoader
from pprint import pprint
import random
from .gtkwave import tcl2gtkw
from typing import List
from typing import List, Tuple
import copy
import re
__all__ = ['add_sources', 'add_common_sources', 'get_common_modelsim_init_files',
'add_flags', 'dict_merge', 'vhdl_serialize', 'dump_sim_options',
'TestsBase', 'get_seed']
__all__ = ['add_sources', 'add_common_sources',
'dict_merge', 'vhdl_serialize', 'dump_sim_options',
'TestsBase', 'get_seed', 'OptionsDict']
d = Path(abspath(__file__)).parent
log = logging.getLogger(__name__)
......@@ -20,14 +22,53 @@ log = logging.getLogger(__name__)
jinja_env = Environment(loader=PackageLoader(__package__, 'data'), autoescape=False)
class OptionsDict(dict):
# def __getattr__(self, key):
# return self[key]
def __iadd__(self, upper: dict):
self.__merge(self, upper)
return self
def __add__(self, upper: dict) -> 'OptionsDict':
res = copy.deepcopy(self)
res += upper
return res
def __radd__(self, lower: dict) -> 'OptionsDict':
res = copy.deepcopy(lower)
res += self
return res
@classmethod
def __merge(cls, lower, upper) -> None:
if isinstance(lower, OptionsDict):
if not isinstance(upper, OptionsDict):
raise TypeError('Cannot merge {} and {}'.format(type(lower), type(upper)))
for k, v in upper.items():
if k not in lower:
lower[k] = v
else:
cls.__merge(lower[k], v)
elif isinstance(lower, list):
if not isinstance(upper, list):
raise TypeError('Cannot merge {} and {}'.format(type(lower), type(upper)))
lower.extend(upper)
else:
raise TypeError('Cannot merge {} and {}'.format(type(lower), type(upper)))
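# Usage sketch (hedged, illustrative values): list-valued options are
# concatenated on merge instead of being overwritten:
#   a = OptionsDict({'ghdl.sim_flags': ['--ieee-asserts=disable-at-0']})
#   b = OptionsDict({'ghdl.sim_flags': ['--wave=tb.ghw']})
#   (a + b)['ghdl.sim_flags']
#   # -> ['--ieee-asserts=disable-at-0', '--wave=tb.ghw']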
class TestsBase:
def __init__(self, ui, lib, config, build, base, create_ghws: bool):
def __init__(self, ui, lib, config, build, base, create_ghws: bool,
force_unrestricted_dump_signals: bool):
self.ui = ui
self.lib = lib
self.config = config
self.build = build
self.base = base
self.create_ghws = create_ghws
self.force_unrestricted_dump_signals = force_unrestricted_dump_signals
@property
def jinja_env(self):
......@@ -43,9 +84,18 @@ class TestsBase:
raise NotImplementedError()
def add_modelsim_gui_file(self, tb, cfg, name, tcl_init_files: List[str] = None) -> None:
if tcl_init_files is None:
tcl_init_files = get_common_modelsim_init_files()
def generate_init_tcl(self, fname: str, tcomp: str) -> OptionsDict:
tcl = self.build / fname
with tcl.open('wt', encoding='utf-8') as f:
print(dedent('''\
global TCOMP
set TCOMP {}
'''.format(tcomp)), file=f)
return OptionsDict({"modelsim.init_files.after_load": [str(tcl)]})
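# Usage sketch (hedged): callers merge the returned options into their
# accumulated sim_options, e.g.
#   sim_options += self.generate_init_tcl('modelsim_init_feature.tcl',
#                                         'tb_feature/test_comp')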
def add_modelsim_gui_file(self, tb, cfg, name, tcl_init_files: List[str]) -> OptionsDict:
"""Return sim_options to add to the testcase."""
sim_options = OptionsDict({'ghdl.sim_flags': []})
if 'wave' in cfg:
tcl = self.base / cfg['wave']
if not tcl.exists():
......@@ -65,23 +115,23 @@ class TestsBase:
get_test_results
'''.format(name)), file=f)
tb.set_sim_option("modelsim.init_file.gui", str(tcl))
sim_options["modelsim.init_file.gui"] = str(tcl)
if 'gtkw' in cfg:
gtkw = self.base / cfg['gtkw']
if not gtkw.exists():
log.warning('GTKW wave file {} not found'.format(cfg['gtkw']))
else:
gtkw = tcl.with_suffix('.gtkw')
tclfname = tcl.relative_to(self.base)
base = str(tclfname.with_suffix("")).replace('/', '__')
gtkw = self.build / (base+'.gtkw')
ghw_file = self.build / (tb.name+'.elab.ghw')
wave_opt_file = gtkw.with_suffix('.wevaopt.txt')
# We need the GHW file for TCL -> GTKW conversion. If we are
# generating them, there is no sense in actually doing
# the conversion now.
if self.create_ghws:
log.info('Will generate {}'.format(ghw_file))
sim_flags = get_common_sim_flags()
sim_flags += ['--wave=' + str(ghw_file)]
tb.set_sim_option("ghdl.sim_flags", sim_flags)
sim_options["ghdl.sim_flags"] += ['--wave=' + str(ghw_file)]
else:
if not ghw_file.exists():
log.warning("Cannot convert wave file {} to gtkw, because"
......@@ -89,17 +139,44 @@ class TestsBase:
"--create-ghws.".format(tclfname))
gtkw = None
else:
log.info('Converting wave file {} to gtkw ...'.format(tclfname))
tcl2gtkw(str(tcl), tcl_init_files, str(gtkw), ghw_file)
log.debug('Converting wave file {} to gtkw ...'.format(tclfname))
used_signals = tcl2gtkw(str(tcl), tcl_init_files, str(gtkw), ghw_file)
with wave_opt_file.open('wt') as f:
f.write('$ version 1.1\n')
f.writelines('\n'.join(used_signals))
if not cfg['dump_all_signals'] and not self.force_unrestricted_dump_signals:
log.info('Only signals included in the layout file '
'will be dumped. To see them all, run with '
'--dumpall.')
sim_options['ghdl.sim_flags'] += ['--read-wave-opt='+str(wave_opt_file)]
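# The wave-opt file written above follows the format GHDL's
# --read-wave-opt expects (hedged sketch): a version header followed by
# one full signal path per line, e.g.
#   $ version 1.1
#   /tb_feature/test_comp/clk_sys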
if gtkw:
try:
tb.set_sim_option("ghdl.gtkwave_flags", ['--save='+str(gtkw)])
tb.set_sim_option("ghdl.gtkwave_flags", [])
sim_options["ghdl.gtkwave_flags"] = ['--save='+str(gtkw)]
except ValueError:
try:
tb.set_sim_option("ghdl.gtkw_file", str(gtkw))
tb.set_sim_option("ghdl.gtkw_file", "")
sim_options["ghdl.gtkw_file"] = str(gtkw)
except ValueError:
log.warning('Setting GTKW file per test is not supported in this VUnit version.')
return OptionsDict(sim_options)
def get_default_sim_options(self) -> OptionsDict:
c, s = get_default_compile_and_sim_options()
return s
def add_psl_cov(self, name) -> OptionsDict:
name = re.sub(r'[^a-zA-Z0-9_-]', '_', name)
psl_path = self.build / "functional_coverage" / "coverage_data" \
/ "psl_cov_{}.json".format(name)
sim_flags = ["--psl-report={}".format(psl_path)]
return OptionsDict({"ghdl.sim_flags": sim_flags})
@staticmethod
def set_sim_options(tb, options: OptionsDict) -> None:
for k, v in options.items():
tb.set_sim_option(k, v)
def add_sources(lib, patterns) -> None:
for pattern in patterns:
......@@ -109,6 +186,7 @@ def add_sources(lib, patterns) -> None:
if f != "tb_wrappers.vhd":
lib.add_source_file(str(f))
def add_common_sources(lib, ui) -> None:
add_sources(lib, ['../src/**/*.vhd'])
ui.enable_check_preprocessing()
......@@ -116,38 +194,41 @@ def add_common_sources(lib, ui) -> None:
add_sources(lib, ['*.vhd', 'lib/*.vhd', 'models/*.vhd'])
def get_common_modelsim_init_files() -> List[str]:
modelsim_init_files = ['../lib/test_lib.tcl', 'modelsim_init.tcl']
modelsim_init_files = [str(d/x) for x in modelsim_init_files]
return modelsim_init_files
def get_common_sim_flags() -> List[str]:
return ["--ieee-asserts=disable-at-0"]
def add_flags(ui, lib, build) -> None:
unit_tests = lib.get_test_benches('*_unit_test', allow_empty=True)
for ut in unit_tests:
ut.scan_tests_from_file(str(build / "../unit/vunittb_wrapper.vhd"))
reference_tests = lib.get_test_benches('*reference*', allow_empty=True)
for rt in reference_tests:
rt.scan_tests_from_file(str(build / "../reference/vunit_reference_wrapper.vhd"))
#lib.add_compile_option("ghdl.flags", ["-Wc,-g"])
lib.add_compile_option("ghdl.flags", ["-fprofile-arcs", "-ftest-coverage", "-fpsl", "-g"])
elab_flags = ["-Wl,-lgcov", "-g"]
elab_flags.append("-Wl,--coverage")
elab_flags.append("-Wl,-no-pie")
elab_flags.append("-fpsl")
ui.set_sim_option("ghdl.elab_flags", elab_flags)
# Global simulation flags
sim_flags = get_common_sim_flags()
ui.set_sim_option("ghdl.sim_flags", sim_flags)
modelsim_init_files = get_common_modelsim_init_files()
ui.set_sim_option("modelsim.init_files.after_load", modelsim_init_files)
def get_default_compile_and_sim_options() -> Tuple[OptionsDict, OptionsDict]:
# TODO: move to config
debug = True
coverage = True
psl = True
compile_flags = [] # type: List[str]
elab_flags = ["-Wl,-no-pie"]
if debug:
compile_flags += ['-g']
elab_flags += ['-g']
if coverage:
compile_flags += ["-fprofile-arcs", "-ftest-coverage"]
elab_flags += ["-Wl,-lgcov", "-Wl,--coverage"]
if psl:
compile_flags += ['-fpsl']
elab_flags += ['-fpsl']
compile_options = OptionsDict()
compile_options["ghdl.flags"] = compile_flags
cmif = ['../lib/test_lib.tcl', 'modelsim_init.tcl']
common_modelsim_init_files = [str(d/x) for x in cmif]
sim_options = OptionsDict({
"ghdl.elab_flags": elab_flags,
"modelsim.init_files.after_load": common_modelsim_init_files,
"ghdl.sim_flags": ["--ieee-asserts=disable-at-0"],
})
return compile_options, sim_options
def get_compile_options() -> OptionsDict:
c, s = get_default_compile_and_sim_options()
return c
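# Usage sketch (hedged): compile options are applied once at library
# scope, sim options per testbench:
#   for k, v in get_compile_options().items():
#       lib.set_compile_option(k, v)
#   TestsBase.set_sim_options(tb, sim_options)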
def get_seed(cfg) -> int:
......
import logging
from pathlib import Path
from .test_common import add_sources, TestsBase, dict_merge, \
get_common_modelsim_init_files, get_seed
get_seed, OptionsDict
from textwrap import dedent
import re
......@@ -26,25 +26,14 @@ class FeatureTests(TestsBase):
tb = self.lib.get_test_benches('*tb_feature')[0]
tb.scan_tests_from_file(str(wrname))
def create_psl_cov_file_opt(self, name):
psl_path = "functional_coverage/coverage_data/psl_cov_feature_{}.json".format(name)
psl_flag = "--psl-report={}".format(psl_path)
return {"ghdl.sim_flags" : [psl_flag]}
def configure(self) -> bool:
tb = self.lib.get_test_benches('*tb_feature')[0]
default = self.config['default']
sim_options = self.get_default_sim_options()
# generate & set per-test modelsim tcl file
tcl = self.build / 'modelsim_init_feature.tcl'
with tcl.open('wt', encoding='utf-8') as f:
print(dedent('''\
global TCOMP
set TCOMP tb_feature/test_comp
'''), file=f)
init_files = get_common_modelsim_init_files()
init_files += [str(tcl)]
tb.set_sim_option("modelsim.init_files.after_load", init_files)
sim_options += self.generate_init_tcl('modelsim_init_feature.tcl', 'tb_feature/test_comp')
sim_options += self.add_modelsim_gui_file(tb, default, 'feature', sim_options['modelsim.init_files.after_load'])
for name, cfg in self.config['tests'].items():
if cfg is None:
......@@ -64,12 +53,13 @@ class FeatureTests(TestsBase):
'seed' : get_seed(cfg)
}
if (cfg['psl_coverage']):
psl_opts = self.create_psl_cov_file_opt(name)
tb.add_config(name, generics=generics, sim_options=psl_opts)
else:
tb.add_config(name, generics=generics)
self.add_modelsim_gui_file(tb, default, 'feature', init_files)
local_sim_options = OptionsDict()
if cfg['psl_coverage']:
local_sim_options += self.add_psl_cov('{}.{}'.format(tb.name, name))
local_sim_options = sim_options + local_sim_options
tb.add_config(name, generics=generics, sim_options=local_sim_options)
return self._check_for_unconfigured()
def _check_for_unconfigured(self) -> bool:
......
import os
import sys
from json2html import *
import random
import os.path
import logging
from os.path import join, abspath
from pathlib import Path
import json
from pathlib import Path
from yattag import Doc
from typing import Tuple
from typing import Tuple, List, Dict, Any, NewType
from json2html import *
TPslPoint = NewType('TPslPoint', Dict[str, Any])
test_dir = Path(Path(abspath(__file__)).parent).parent
build_dir = os.path.join(str(test_dir.absolute()), "build")
func_cov_dir = os.path.join(str(build_dir), "functional_coverage")
psl_dir = os.path.join(str(func_cov_dir), "coverage_data")
html_dir = os.path.join(str(func_cov_dir), "html")
test_dir = Path(__file__).parent.parent.absolute()
build_dir = test_dir.absolute() / "build"
func_cov_dir = build_dir / "functional_coverage"
psl_dir = func_cov_dir / "coverage_data"
html_dir = func_cov_dir / "html"
dut_top = " "
......@@ -22,411 +21,407 @@ log = logging.getLogger(__name__)
def merge_psl_coverage_files(out_file: str, in_file_prefix: str) -> None:
"""
Merge PSL coverage details from multiple files into a single file.
"""
if (out_file.startswith(in_file_prefix)):
raise ValueError("File name for merging should not have the same prefix as merged files")
json_out_path = os.path.join(func_cov_dir, out_file)
json_out_list = []
for filename in os.listdir(psl_dir):
if (not (filename.startswith(in_file_prefix) and \
filename.endswith(".json"))):
continue
"""
Merge PSL coverage details from multiple files into a single file.
"""
if out_file.startswith(in_file_prefix):
raise ValueError("File name for merging should not have the same prefix as merged files")
in_filename = os.path.join(psl_dir, filename)
print("Merging JSON PSL coverage from: {}\n".format(in_filename))
with open(in_filename, 'r') as json_in_file:
json_obj = json.load(json_in_file)
json_out_path = func_cov_dir / out_file
json_out_list = [] # type: List[TPslPoint]
for in_filename in psl_dir.glob('{}*.json'.format(in_file_prefix)):
log.info("Merging JSON PSL coverage from: {}\n".format(in_filename))
with in_filename.open('rt') as f:
json_obj = json.load(f)
# Add test name to each PSL point
for psl_point in json_obj["details"]:
psl_point["test"] = filename.strip(in_file_prefix).replace(".json","")
# Add test name to each PSL point
for psl_point in json_obj["details"]:
psl_point["test"] = in_filename.with_suffix('').name \
.strip(in_file_prefix)
json_out_list.extend(json_obj["details"])
json_out_list += json_obj["details"]
with open(json_out_path, 'w') as json_out_file:
json.dump(json_out_list, json_out_file, indent=1)
with json_out_path.open('wt') as f:
json.dump(json_out_list, f, indent=1)
def collapse_psl_coverage_files(non_collapsed):
"""
Collapse PSL coverage output from multiple testcase/testbench runs
into a single psl_coverage output.
If the DUT is instantiated in multiple testbenches, hierarchy levels
above "dut_top" are ignored and the corresponding points are collapsed.
E.g. if "dut_top" = "can_top_level", multiple instances of CTU CAN FD
will not generate multiple PSL outputs.
The collapsing policy is as follows:
- cover  - if at least one of the collapsed points is covered -> COVERED
- assert - if at least one of the collapsed points failed -> FAILED
Each covered cover point also gets the name of the testcase in which
it was covered appended.
"""
log.info("Collapsing PSL points with common hierarchy below: {}".format(dut_top))
collapsed = []
# We do stupid quadratic sort because we don't really care if it is gonna last 10
# or 40 seconds... If we ever get to the point that this takes too long we know
# that we have reeealy lot of PSL points and we turned into Semiconductor monster!
for psl_in in non_collapsed:
found = False
for psl_out in collapsed:
# Check if name in output list is equal to searched name from "dut_top"
# entity down. Skip if not
in_name = psl_in["name"].split(dut_top)[-1]
out_name = psl_out["name"].split(dut_top)[-1]
if (out_name != in_name):
continue
if (not ("colapsed_points" in psl_out)):
psl_out["colapsed_name"] = str(dut_top + in_name)
psl_out["colapsed_points"] = []
psl_out["colapsed_points"].append(psl_in)
# If any of colapsed points is covered -> whole point is covered
if (psl_in["status"] == "covered"):
psl_out["status"] = "covered"
psl_out["count"] += psl_in["count"]
# If any of colapsed points is failed -> whole point is failed
if (psl_in["status"] == "failed"):
psl_out["status"] = "failed"
# Assertion hits add up for both failed and passed
if (psl_out["directive"] == "assertion"):
psl_out["count"] += psl_in["count"]
found = True
break;
# Input point was not collapsed into any of output points -> Add directly
if (not found):
collapsed.append(psl_in)
return collapsed
def get_collapsed_file_name(psl_point) -> str:
"""
Create unique file name for collapsed PSL points
"""
file_name = dut_top + psl_point["name"].split(dut_top)[-1]
file_name = file_name.replace(".","_")
file_name = file_name.replace(" ","_")
file_name = file_name.replace(")","_")
file_name = file_name.replace("(","_")
file_name = file_name.replace("@","_")
file_name = file_name + "_" + str(psl_point["line"])
return file_name
"""
Collapse PSL coverage output from multiple testcase/testbench runs
into a single psl_coverage output.
If the DUT is instantiated in multiple testbenches, hierarchy levels
above "dut_top" are ignored and the corresponding points are collapsed.
E.g. if "dut_top" = "can_top_level", multiple instances of CTU CAN FD
will not generate multiple PSL outputs.
The collapsing policy is as follows:
- cover  - if at least one of the collapsed points is covered -> COVERED
- assert - if at least one of the collapsed points failed -> FAILED
Each covered cover point also gets the name of the testcase in which
it was covered appended.
"""
log.info("Collapsing PSL points with common hierarchy below: {}".format(dut_top))
collapsed = []
# We do stupid quadratic sort because we don't really care if it is gonna
# last 10 or 40 seconds... If we ever get to the point that this takes too
# long, we know that we have reeealy lot of PSL points and we turned into
# Semiconductor monster!
for psl_in in non_collapsed:
found = False
for psl_out in collapsed:
# Check if name in output list is equal to searched name from
# "dut_top" entity down. Skip if not
in_name = psl_in["name"].split(dut_top)[-1]
out_name = psl_out["name"].split(dut_top)[-1]
if (out_name != in_name):
continue
if (not ("colapsed_points" in psl_out)):
psl_out["colapsed_name"] = str(dut_top + in_name)
psl_out["colapsed_points"] = []
psl_out["colapsed_points"].append(psl_in)
# If any of colapsed points is covered -> whole point is covered
if (psl_in["status"] == "covered"):
psl_out["status"] = "covered"
psl_out["count"] += psl_in["count"]
# If any of colapsed points is failed -> whole point is failed
if (psl_in["status"] == "failed"):
psl_out["status"] = "failed"
# Assertion hits add up for both failed and passed
if (psl_out["directive"] == "assertion"):
psl_out["count"] += psl_in["count"]
found = True
break
# Input point was not collapsed into any of output points -> Add directly
if not found:
collapsed.append(psl_in)
return collapsed
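# Hedged example: with dut_top = 'can_top_level', two points named
#   'tb_a.dut.can_top_level.some_assert' and
#   'tb_b.wrapper.can_top_level.some_assert'
# share the same suffix below 'can_top_level' and collapse into one entry.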
def get_collapsed_file_name(psl_point: TPslPoint) -> str:
"""
Create unique file name for collapsed PSL points
"""
file_name = dut_top + psl_point["name"].split(dut_top)[-1]
file_name = file_name.replace(".", "_")
file_name = file_name.replace(" ", "_")
file_name = file_name.replace(")", "_")
file_name = file_name.replace("(", "_")
file_name = file_name.replace("@", "_")
file_name = file_name + "_" + str(psl_point["line"])
return file_name
def load_json_psl_coverage(filename: str):
"""
Load PSL Coverage JSON file to JSON object.
"""
psl_cov_path = os.path.join(func_cov_dir, filename)
# Read JSON string from file
log.info("Loading JSON PSL output: {}".format(psl_cov_path))
with open(psl_cov_path, 'r') as json_file:
return json.load(json_file)
def split_json_coverage_by_file(json):
"""
Parse input PSL Coverage JSON file. Group PSL endpoints by file.
Return dictionary in format:
{filename : psl_points} where psl_points is a list of PSL points in
filename.
"""
file_dict = {}
for psl_point in json:
# Create new list if first PSL of a file is parsed
if (not(psl_point["file"] in file_dict)):
file_dict[psl_point["file"]] = []
file_dict[psl_point["file"]].append(psl_point)
return file_dict
"""
Load PSL Coverage JSON file to JSON object.
"""
psl_cov_path = func_cov_dir / filename
# Read JSON string from file
log.info("Loading JSON PSL output: {}".format(psl_cov_path))
with psl_cov_path.open('rt') as json_file:
return json.load(json_file)
def split_json_coverage_by_file(json) -> Dict[Path, List[TPslPoint]]:
"""
Parse input PSL Coverage JSON file. Group PSL endpoints by file.
Return dictionary in format:
{filename : psl_points} where psl_points is a list of PSL points in
filename.
"""
file_dict = {} # type: Dict[Path, List[TPslPoint]]
for psl_point in json:
file = Path(psl_point["file"])
# Create new list if first PSL of a file is parsed
if file not in file_dict:
file_dict[file] = []
file_dict[file].append(psl_point)
return file_dict
def add_html_table_header(doc, tag, text, headers, back_color="White"):
"""
Add header to HTML table.
"""
with tag('tr'):
for header in headers:
with tag('th', bgcolor=back_color):
text(header)
def calc_coverage_results(psl_points, psl_type) -> Tuple[int,int]:
"""
Calculate coverage results from list of PSL points in JSON format.
"""
ok = 0
nok = 0
for psl_point in psl_points:
if (psl_point["directive"] != psl_type):
continue;
if (psl_point["status"] == "passed" or
psl_point["status"] == "covered"):
ok += 1
else:
nok +=1
return ok, nok
"""
Add header to HTML table.
"""
with tag('tr'):
for header in headers:
with tag('th', bgcolor=back_color):
text(header)
def calc_coverage_results(psl_points: List[TPslPoint], psl_type) -> Tuple[int, int]:
"""
Calculate coverage results from list of PSL points in JSON format.
"""
ok = 0
nok = 0
for psl_point in psl_points:
if (psl_point["directive"] != psl_type):
continue
if (psl_point["status"] == "passed" or
psl_point["status"] == "covered"):
ok += 1
else:
nok += 1
return ok, nok
def calc_coverage_color(coverage: float) -> str:
"""
Return color based on coverage result.
"""
if (coverage < 0 or coverage > 100):
raise ValueError("Invalid coverage input; should be between 0 and 100 %")
if (coverage > 90):
return "Lime"
elif (coverage > 80):
return "Orange"
elif (coverage > 70):
return "OrangeRed"
else:
return "Red"
def print_cov_cell_percentage(doc, tag, text, psl_points, coverage_type, merge_abs_vals) -> None:
"""
"""
ok, nok = calc_coverage_results(psl_points, coverage_type)
summ = max(1, ok + nok)
percents = ok/summ * 100
color = calc_coverage_color(percents)
if (merge_abs_vals):
if (ok + nok > 0):
with tag('td', bgcolor=color):
text("({}/{}) {}%".format(ok, summ, percents))
else:
with tag('td', bgcolor="Silver"):
text("NA")
else:
with tag('td'):
text(ok)
with tag('td'):
text(nok)
if (ok + nok > 0):
with tag('td', bgcolor=color):
text("{}%".format(percents))
else:
with tag('td', bgcolor="Silver"):
text("NA")
def add_psl_html_header(doc, tag, text, filename, psl_points):
"""
Create HTML page header with info about coverage data within list of
PSL points in JSON format.
"""
with tag('table', width='100%', border=0, cellspacing=0, cellpadding=0):
with tag('tr'):
with tag('th', ('class','title')):
with tag('font', size=10):
text("GHDL PSL Functional coverage report")
with tag('table', width='100%', border="1px solid black"):
headers = ["Filename"]
headers.append("Covered")
headers.append("Not-Covered")
headers.append("Functional coverage")
headers.append("Passed")
headers.append("Failed")
headers.append("Assertions passed")
add_html_table_header(doc, tag, text, headers, back_color="Aquamarine")
with tag('td'):
text(filename)
# Calculate results for each type
coverage_types = ["cover", "assertion"]
for coverage_type in coverage_types:
print_cov_cell_percentage(doc, tag, text, psl_points, \
coverage_type, merge_abs_vals=False)
def add_non_colapsed_psl_table_entry(doc, tag, text, psl_point, def_bg_color="White"):
"""
Add HTML table entry for non-collapsed PSL functional coverage point.
"""
with tag('td'):
text(psl_point["name"].split(".")[-1])
with tag('td'):
text(psl_point["test"])
with tag('td', width="50%", style="word-break:break-all;"):
text(dut_top + psl_point["name"])
with tag('td'):
text(psl_point["line"])
with tag('td'):
text(psl_point["count"])
if (psl_point["status"] == "covered" or \
psl_point["status"] == "passed"):
color = "Lime"
else:
color = "red"
with tag('td', ('bgcolor',color)):
text(psl_point["status"])
def add_colapsed_psl_table_entry(doc, tag, text, psl_point, def_bg_color="White"):
"""
Add an HTML table entry for a collapsed PSL functional coverage point.
Adds a link to the collapsed entries on a separate page.
"""
with tag('td'):
text(psl_point["name"].split(".")[-1])
with tag('td'):
file_name = get_collapsed_file_name(psl_point)
with tag('a', href=file_name+".html"):
text("Open collapsed tests")
with tag('td'):
text(dut_top + psl_point["name"].split(dut_top)[-1])
with tag('td'):
text(psl_point["line"])
with tag('td'):
text(psl_point["count"])
if (psl_point["status"] == "covered" or \
psl_point["status"] == "passed"):
color = "Lime"
else:
color = "red"
with tag('td', ('bgcolor',color)):
text(psl_point["status"])
def add_psl_table_entry(doc, tag, text, psl_point, def_bg_color="White"):
"""
Add a PSL point in JSON format to the HTML table. For collapsed entries,
the overall result is shown and a link to the collapsed points is inserted.
"""
# Add default entry (single or collapsed)
with tag('tr', ('bgcolor',def_bg_color)):
if ("colapsed_points" in psl_point):
add_colapsed_psl_table_entry(doc, tag, text, psl_point, def_bg_color="White")
else:
add_non_colapsed_psl_table_entry(doc, tag, text, psl_point, def_bg_color="White")
# Create separate page with collapsed PSL points for this PSL statement
# Add unique filename
if ("colapsed_points" in psl_point):
file_name = os.path.join(html_dir, get_collapsed_file_name(psl_point))
create_psl_file_page(file_name, psl_point["colapsed_points"])
def create_psl_file_page(filename: str, psl_points):
"""
Create HTML file with list of PSL coverage statements.
"""
parsed_file_name = os.path.basename(filename)
html_cov_path = os.path.join(html_dir,
"{}.html".format(parsed_file_name))
doc, tag, text = Doc().tagtext()
# Add Common header
add_psl_html_header(doc, tag, text, parsed_file_name, psl_points)
# Add "Cover" and "Assertion" points
psl_types = [{"name" : "Cover Points" , "type" : "cover"}, \
{"name" : "Assertions" , "type" : "assertion"}]
for psl_type in psl_types:
with tag('p'):
with tag('table', width='100%', border="1px solid black"):
with tag('caption'):
with tag('font', size=5):
text(psl_type["name"])
titles = ["PSL Point Name", "Test name", "Full Path Name", "Line", "Count", "Status"]
add_html_table_header(doc, tag, text, titles, back_color="Peru")
for psl_point in psl_points:
if (psl_point["directive"] == psl_type["type"]):
add_psl_table_entry(doc, tag, text, psl_point)
with open(html_cov_path, 'w', encoding='utf-8') as html_file:
html_file.write(doc.getvalue())
def create_psl_file_refs_table(doc, tag, text, psl_by_files):
"""
Create an HTML table entry for each file, calculate the per-file
coverage summary, and add a link to each file's page.
"""
for file_name, psl_list in psl_by_files.items():
with tag('tr'):
with tag('td'):
name = os.path.basename(file_name)
with tag('a', href= os.path.join("html", name + ".html")):
text(name)
coverage_types = ["cover", "assertion"]
for coverage_type in coverage_types:
print_cov_cell_percentage(doc, tag, text, psl_list, \
coverage_type, merge_abs_vals=True)
def create_psl_report(psl_by_files, psl_orig):
"""
Generate the PSL report. Each list within psl_by_files gets a separate
HTML page. A summary page is created from psl_orig.
"""
# Create HTML page for each source file
for file_name, psl_list in psl_by_files.items():
create_psl_file_page(file_name, psl_list)
html_rep_path = os.path.join(func_cov_dir, "functional_coverage_report.html")
doc, tag, text = Doc().tagtext()
# Add Common Header
add_psl_html_header(doc, tag, text, "TOP LEVEL", psl_orig)
with tag('p'):
with tag('table', width="100%", border="1px solid black"):
header = ["File name", "Coverage", "Asserts"]
add_html_table_header(doc, tag, text, header, back_color="Peru")
create_psl_file_refs_table(doc, tag, text, psl_by_files)
with open(html_rep_path, 'w', encoding='utf-8') as html_file:
html_file.write(doc.getvalue())
"""
Return color based on coverage result.
"""
if (coverage < 0 or coverage > 100):
raise ValueError("Invalid coverage input; should be between 0 and 100 %")
if (coverage > 90):
return "Lime"
elif (coverage > 80):
return "Orange"
elif (coverage > 70):
return "OrangeRed"
else:
return "Red"
def print_cov_cell_percentage(doc, tag, text, psl_points: List[TPslPoint],
coverage_type, merge_abs_vals) -> None:
"""
"""
ok, nok = calc_coverage_results(psl_points, coverage_type)
summ = max(1, ok + nok)
percents = ok/summ * 100
color = calc_coverage_color(percents)
if (merge_abs_vals):
if (ok + nok > 0):
with tag('td', bgcolor=color):
text("({}/{}) {:.1f}%".format(ok, summ, percents))
else:
with tag('td', bgcolor="Silver"):
text("NA")
else:
with tag('td'):
text(ok)
with tag('td'):
text(nok)
if (ok + nok > 0):
with tag('td', bgcolor=color):
text("{:.1f}%".format(percents))
else:
with tag('td', bgcolor="Silver"):
text("NA")
def add_psl_html_header(doc, tag, text, filename, psl_points: List[TPslPoint]):
"""
Create HTML page header with info about coverage data within list of
PSL points in JSON format.
"""
with tag('table', width='100%', border=0, cellspacing=0, cellpadding=0):
with tag('tr'):
with tag('th', ('class','title')):
with tag('font', size=10):
text("GHDL PSL Functional coverage report")
with tag('table', width='100%', border="1px solid black"):
headers = ["Filename"]
headers.append("Covered")
headers.append("Not-Covered")
headers.append("Functional coverage")
headers.append("Passed")
headers.append("Failed")
headers.append("Assertions passed")
add_html_table_header(doc, tag, text, headers, back_color="Aquamarine")
with tag('td'):
text(filename)
# Calculate results for each type
coverage_types = ["cover", "assertion"]
for coverage_type in coverage_types:
print_cov_cell_percentage(doc, tag, text, psl_points, \
coverage_type, merge_abs_vals=False)
def add_non_colapsed_psl_table_entry(doc, tag, text, psl_point: TPslPoint,
def_bg_color="White"):
"""
Add HTML table entry for non-collapsed PSL functional coverage point.
"""
with tag('td'):
text(psl_point["name"].split(".")[-1])
with tag('td'):