From ef519e366c68087d0e9fe8e0a278718854442163 Mon Sep 17 00:00:00 2001 From: Alex Wilson Date: Fri, 3 Feb 2023 00:25:18 -0700 Subject: [PATCH 01/13] Cache randomization variables and reuse on next call; Minor doc fixes Locally this appears to make randomization calls 25-50% faster for the first couple hundred calls, but falls off as the Boolector instances grow. Noted the 'and' and 'or' non-overloadable operators and an issue with one of the examples calling randomization() using an empty with block. --- doc/source/constraints.rst | 2 +- doc/source/features.rst | 1 + src/vsc/model/randomizer.py | 447 ++++++++++++++++++++++-------------- 3 files changed, 271 insertions(+), 179 deletions(-) diff --git a/doc/source/constraints.rst b/doc/source/constraints.rst index f196169..2e2cea9 100644 --- a/doc/source/constraints.rst +++ b/doc/source/constraints.rst @@ -81,7 +81,7 @@ condition without knowing the details of what that condition is. my_i = my_cls() - with my_i.randomize() + my_i.randomize() with my_i.randomize_with() as it: it.a_small() diff --git a/doc/source/features.rst b/doc/source/features.rst index 82aa415..f969bce 100644 --- a/doc/source/features.rst +++ b/doc/source/features.rst @@ -5,6 +5,7 @@ PyVSC Features Constraint Features +The short-circuit operators, ``and`` and ``or``, in Python can cannot be overloaded by PyVSC and shouldn't be used in constraints. 
========================= ====== ============= === =========== Feature PyVSC SystemVerilog PSS Description diff --git a/src/vsc/model/randomizer.py b/src/vsc/model/randomizer.py index db15b38..5ae840f 100644 --- a/src/vsc/model/randomizer.py +++ b/src/vsc/model/randomizer.py @@ -20,7 +20,7 @@ # @author: ballance -import random +import copy import sys import time from typing import List, Dict @@ -67,7 +67,9 @@ class Randomizer(RandIF): """Implements the core randomization algorithm""" EN_DEBUG = False - + + randomize_cache = {} + def __init__(self, randstate, debug=0, lint=0, solve_fail_debug=0, solve_info=None): self.randstate = randstate self.pretty_printer = ModelPrettyPrinter() @@ -83,13 +85,16 @@ def __init__(self, randstate, debug=0, lint=0, solve_fail_debug=0, solve_info=No # self.swizzler = SolveGroupSwizzlerRange(solve_info) self.swizzler = SolveGroupSwizzlerPartsel(randstate, solve_info, debug=self.debug) + self.cache = {} + # TODO Reset btor cache after so many uses maybe to release resources? 
+ self.cache_uses = 100 _state_p = [0,1] _rng = None - + def randomize(self, ri : RandInfo, bound_m : Dict[FieldModel,VariableBoundModel]): """Randomize the variables and constraints in a RandInfo collection""" - + if self.solve_info is not None: self.solve_info.n_randsets = len(ri.randsets()) @@ -113,7 +118,7 @@ def randomize(self, ri : RandInfo, bound_m : Dict[FieldModel,VariableBoundModel] for uf in ri.unconstrained(): print("Unconstrained: " + uf.fullname) - + # Assign values to the unconstrained fields first uc_rand = list(filter(lambda f:f.is_used_rand, ri.unconstrained())) for uf in uc_rand: @@ -140,125 +145,149 @@ def randomize(self, ri : RandInfo, bound_m : Dict[FieldModel,VariableBoundModel] # max_fields = 20 max_fields = 0 while rs_i < len(ri.randsets()): - btor = Boolector() - self.btor = btor - btor.Set_opt(pyboolector.BTOR_OPT_INCREMENTAL, True) - btor.Set_opt(pyboolector.BTOR_OPT_MODEL_GEN, True) + # If missing from cache, initialize/build btor and randset + # TODO What is going on with max_fields? It would break this + # caching setup. 
+ if rs_i not in self.cache: + btor = Boolector() + # self.btor = btor + btor.Set_opt(pyboolector.BTOR_OPT_INCREMENTAL, True) + btor.Set_opt(pyboolector.BTOR_OPT_MODEL_GEN, True) - start_rs_i = rs_i + start_rs_i = rs_i - constraint_l = [] - soft_constraint_l = [] + constraint_l = [] + soft_constraint_l = [] - # Collect up to max_fields fields to randomize at a time - n_fields = 0 - while rs_i < len(ri.randsets()): - rs = ri.randsets()[rs_i] - - rs_node_builder = RandSetNodeBuilder(btor) + # Collect up to max_fields fields to randomize at a time + n_fields = 0 + while rs_i < len(ri.randsets()): + rs = ri.randsets()[rs_i] - all_fields = rs.all_fields() - if self.debug > 0: - print("Pre-Randomize: RandSet[%d]" % rs_i) - for f in all_fields: - if f in bound_m.keys(): - print(" Field: %s is_rand=%s %s var=%s" % (f.fullname, str(f.is_used_rand), str(bound_m[f].domain.range_l), str(f.var))) - else: - print(" Field: %s is_rand=%s (unbounded)" % (f.fullname, str(f.is_used_rand))) - for c in rs.constraints(): - print(" Constraint: " + self.pretty_printer.do_print(c, show_exp=True, print_values=True)) - for c in rs.soft_constraints(): - print(" SoftConstraint: " + self.pretty_printer.do_print(c, show_exp=True, print_values=True)) - - if self.solve_info is not None: - self.solve_info.n_cfields += len(all_fields) + rs_node_builder = RandSetNodeBuilder(btor) - rs_node_builder.build(rs) - n_fields += len(all_fields) - - constraint_l.extend(list(map(lambda c:(c,c.build(btor, False)), rs.constraints()))) - soft_constraint_l.extend(list(map(lambda c:(c,c.build(btor, True)), rs.soft_constraints()))) - - # Sort the list in descending order so we know which constraints - # to prioritize - soft_constraint_l.sort(key=lambda c:c[0].priority, reverse=True) - - rs_i += 1 - if n_fields > max_fields or rs.order != -1: - break - - for c in constraint_l: - try: - btor.Assume(c[1]) - except Exception as e: - print("Exception: " + self.pretty_printer.print(c[0])) - raise e - - if self.solve_info 
is not None: - self.solve_info.n_sat_calls += 1 - - if btor.Sat() != btor.SAT: - # If the system doesn't solve with hard constraints added, - # then we may as well bail now - active_randsets = [] - for rs in ri.randsets(): - active_randsets.append(rs) - for f in rs.all_fields(): - f.dispose() - - if self.solve_fail_debug > 0: - raise SolveFailure( - "solve failure", - self.create_diagnostics(active_randsets)) - else: - raise SolveFailure( - "solve failure", - "Solve failure: set 'solve_fail_debug=1' for more details") - else: - # Lock down the hard constraints that are confirmed - # to be valid + all_fields = rs.all_fields() + if self.debug > 0: + print("Pre-Randomize: RandSet[%d]" % rs_i) + for f in all_fields: + if f in bound_m.keys(): + print(" Field: %s is_rand=%s %s var=%s" % (f.fullname, str(f.is_used_rand), str(bound_m[f].domain.range_l), str(f.var))) + else: + print(" Field: %s is_rand=%s (unbounded)" % (f.fullname, str(f.is_used_rand))) + for c in rs.constraints(): + print(" Constraint: " + self.pretty_printer.do_print(c, show_exp=True, print_values=True)) + for c in rs.soft_constraints(): + print(" SoftConstraint: " + self.pretty_printer.do_print(c, show_exp=True, print_values=True)) + + if self.solve_info is not None: + self.solve_info.n_cfields += len(all_fields) + + rs_node_builder.build(rs) + n_fields += len(all_fields) + + constraint_l.extend(list(map(lambda c:(c,c.build(btor, False)), rs.constraints()))) + soft_constraint_l.extend(list(map(lambda c:(c,c.build(btor, True)), rs.soft_constraints()))) + + # Sort the list in descending order so we know which constraints + # to prioritize + soft_constraint_l.sort(key=lambda c:c[0].priority, reverse=True) + + # TODO: Is this part of a disabled feature to solve randsets together? 
+ # rs_i += 1 + if n_fields > max_fields or rs.order != -1: + break + for c in constraint_l: - btor.Assert(c[1]) - - # If there are soft constraints, add these now - if len(soft_constraint_l) > 0: - for c in soft_constraint_l: try: btor.Assume(c[1]) except Exception as e: - from ..visitors.model_pretty_printer import ModelPrettyPrinter - print("Exception: " + ModelPrettyPrinter.print(c[0])) + print("Exception: " + self.pretty_printer.print(c[0])) raise e if self.solve_info is not None: - self.solve_info.n_sat_calls += 1 + self.solve_info.n_sat_calls += 1 + if btor.Sat() != btor.SAT: - # All the soft constraints cannot be satisfied. We'll need to - # add them incrementally - if self.debug > 0: - print("Note: some of the %d soft constraints could not be satisfied" % len(soft_constraint_l)) - - for c in soft_constraint_l: - btor.Assume(c[1]) - - if self.solve_info is not None: - self.solve_info.n_sat_calls += 1 - if btor.Sat() == btor.SAT: - if self.debug > 0: - print("Note: soft constraint %s (%d) passed" % ( - self.pretty_printer.print(c[0]), c[0].priority)) - btor.Assert(c[1]) - else: - if self.debug > 0: - print("Note: soft constraint %s (%d) failed" % ( - self.pretty_printer.print(c[0]), c[0].priority)) + # If the system doesn't solve with hard constraints added, + # then we may as well bail now + active_randsets = [] + for rs in ri.randsets(): + active_randsets.append(rs) + for f in rs.all_fields(): + f.dispose() + + if self.solve_fail_debug > 0: + raise SolveFailure( + "solve failure", + self.create_diagnostics(active_randsets)) + else: + raise SolveFailure( + "solve failure", + "Solve failure: set 'solve_fail_debug=1' for more details") else: - # All the soft constraints could be satisfied. 
Assert them now - if self.debug > 0: - print("Note: all %d soft constraints could be satisfied" % len(soft_constraint_l)) - for c in soft_constraint_l: + # Lock down the hard constraints that are confirmed + # to be valid + for c in constraint_l: btor.Assert(c[1]) - + + # If there are soft constraints, add these now + if len(soft_constraint_l) > 0: + for c in soft_constraint_l: + try: + btor.Assume(c[1]) + except Exception as e: + from ..visitors.model_pretty_printer import ModelPrettyPrinter + print("Exception: " + ModelPrettyPrinter.print(c[0])) + raise e + + if self.solve_info is not None: + self.solve_info.n_sat_calls += 1 + if btor.Sat() != btor.SAT: + # All the soft constraints cannot be satisfied. We'll need to + # add them incrementally + if self.debug > 0: + print("Note: some of the %d soft constraints could not be satisfied" % len(soft_constraint_l)) + + for c in soft_constraint_l: + btor.Assume(c[1]) + + if self.solve_info is not None: + self.solve_info.n_sat_calls += 1 + if btor.Sat() == btor.SAT: + if self.debug > 0: + print("Note: soft constraint %s (%d) passed" % ( + self.pretty_printer.print(c[0]), c[0].priority)) + btor.Assert(c[1]) + else: + if self.debug > 0: + print("Note: soft constraint %s (%d) failed" % ( + self.pretty_printer.print(c[0]), c[0].priority)) + else: + # All the soft constraints could be satisfied. 
Assert them now + if self.debug > 0: + print("Note: all %d soft constraints could be satisfied" % len(soft_constraint_l)) + for c in soft_constraint_l: + btor.Assert(c[1]) + + # Changes made to the randset are covered by the randomization_cache + # Cache btor reference for use later + # TODO Use @dataclass instead of a dict of str + self.cache[rs_i] = {} + self.cache[rs_i]['btor'] = btor + else: + # Setup some necessary variables + start_rs_i = rs_i + rs = ri.randsets()[rs_i] + btor = self.cache[rs_i]['btor'] + # self.btor = btor + all_fields = rs.all_fields() + + rs_i += 1 + + # TODO Boolector Push()/Pop() do not release orphan expressions/nodes and + # cause Sat() calls to slow down and leak memory. Add release feature to PyBoolector? + btor.Push() + # btor.Sat() x = start_rs_i while x < rs_i: @@ -270,19 +299,24 @@ def randomize(self, ri : RandInfo, bound_m : Dict[FieldModel,VariableBoundModel] # Finalize the value of the field x = start_rs_i - reset_v = DynamicExprResetVisitor() + # TODO What cleanup do we need if caching? + # reset_v = DynamicExprResetVisitor() while x < rs_i: rs = ri.randsets()[x] for f in rs.all_fields(): + f: FieldScalarModel = f f.post_randomize() - f.set_used_rand(False, 0) - f.dispose() # Get rid of the solver var, since we're done with it - f.accept(reset_v) + # TODO What does this all do? Do we need to clean up if we're going to reuse this agian? + # f.set_used_rand(False, 0) + # f.dispose() # Get rid of the solver var, since we're done with it + # f.accept(reset_v) # for f in rs.nontarget_field_s: # f.dispose() - for c in rs.constraints(): - c.accept(reset_v) - RandSetDisposeVisitor().dispose(rs) + # TODO What does this all do? Do we need to clean up if we're going to reuse this agian? + # for c in rs.constraints(): + # c.accept(reset_v) + # TODO What does this all do? Do we need to clean up if we're going to reuse this agian? 
+ # RandSetDisposeVisitor().dispose(rs) if self.debug > 0: print("Post-Randomize: RandSet[%d]" % x) @@ -298,7 +332,9 @@ def randomize(self, ri : RandInfo, bound_m : Dict[FieldModel,VariableBoundModel] print(" SoftConstraint: " + self.pretty_printer.do_print(c, show_exp=True, print_values=True)) x += 1 - + + # TODO Cleanup boolector state. See Push() comment above. + btor.Pop() end = int(round(time.time() * 1000)) @@ -524,74 +560,129 @@ def do_randomize( randomize_start(srcinfo, field_model_l, constraint_l) else: solve_info = None - - clear_soft_priority = ClearSoftPriorityVisitor() - - for f in field_model_l: - f.set_used_rand(True, 0) - clear_soft_priority.clear(f) - - if debug > 0: - print("Initial Model:") - for fm in field_model_l: - print(" " + ModelPrettyPrinter.print(fm)) - - # First, invoke pre_randomize on all elements + + # Create a unique string for the cache dict key + # TODO filename and lineno probably aren't necessary + # TODO Is there a better way to do this? Cache this by object ref in caller instead? 
+ call_hash = f'{srcinfo.filename}:{str(srcinfo.lineno)}\n' for fm in field_model_l: - fm.pre_randomize() - - if constraint_l is None: - constraint_l = [] - - for c in constraint_l: - clear_soft_priority.clear(c) + call_hash += ModelPrettyPrinter.print(fm) + if constraint_l is not None: + for c in constraint_l: + call_hash += ModelPrettyPrinter.print(c, show_exp=True) - # Collect all variables (pre-array) and establish bounds - bounds_v = VariableBoundVisitor() - bounds_v.process(field_model_l, constraint_l, False) + if call_hash in Randomizer.randomize_cache: + bounds_v = Randomizer.randomize_cache[call_hash]['bounds_v'] + ri = Randomizer.randomize_cache[call_hash]['ri'] + r = Randomizer.randomize_cache[call_hash]['r'] + constraint_l = Randomizer.randomize_cache[call_hash]['constraints_l'] - # TODO: need to handle inline constraints that impact arrays - constraints_len = len(constraint_l) - for fm in field_model_l: - constraint_l.extend(ArrayConstraintBuilder.build( - fm, bounds_v.bound_m)) - # Now, handle dist constraints - DistConstraintBuilder.build(randstate, fm) - - for c in constraint_l: - constraint_l.extend(ArrayConstraintBuilder.build( - c, bounds_v.bound_m)) - # Now, handle dist constraints - DistConstraintBuilder.build(randstate, c) - - # If we made changes during array remodeling, - # re-run bounds checking on the updated model -# if len(constraint_l) != constraints_len: - bounds_v.process(field_model_l, constraint_l) - - if debug > 0: - print("Final Model:") + clear_soft_priority = ClearSoftPriorityVisitor() + + # Reset cached field_model_l vars to be rand again + for f in field_model_l: + f.set_used_rand(True, 0) + clear_soft_priority.clear(f) + + # First, invoke pre_randomize on all elements for fm in field_model_l: - print(" " + ModelPrettyPrinter.print(fm)) - for c in constraint_l: - print(" " + ModelPrettyPrinter.print(c, show_exp=True)) + fm.pre_randomize() -# if lint > 0: -# LintVisitor().lint( -# field_model_l, -# constraint_l) - + 
Randomizer.fast_randomize(srcinfo, field_model_l, solve_info, bounds_v, r, ri) - r = Randomizer( - randstate, - solve_info=solve_info, - debug=debug, - lint=lint, - solve_fail_debug=solve_fail_debug) -# if Randomizer._rng is None: -# Randomizer._rng = random.Random(random.randrange(sys.maxsize)) - ri = RandInfoBuilder.build(field_model_l, constraint_l, Randomizer._rng) + else: + # Make copy of field and constraint models + field_model_l_copy = copy.deepcopy(field_model_l) + # Keep field_model_l copy unique except for the val references so + # that __getattr__ or however value lookup works still work + # TODO Is there a better way to pass on a clone/copy? + for fc, f in zip(field_model_l_copy, field_model_l): + for vc, v in zip(fc.field_l, f.field_l): + vc.val = v.val + field_model_l = field_model_l_copy + if constraint_l is not None: + # print('constraint_l', dill.detect.baditems(constraint_l)) + constraint_l = copy.deepcopy(constraint_l) + + clear_soft_priority = ClearSoftPriorityVisitor() + for f in field_model_l: + f.set_used_rand(True, 0) + clear_soft_priority.clear(f) + + if debug > 0: + print("Initial Model:") + for fm in field_model_l: + print(" " + ModelPrettyPrinter.print(fm)) + + # First, invoke pre_randomize on all elements + for fm in field_model_l: + fm.pre_randomize() + + if constraint_l is None: + constraint_l = [] + + for c in constraint_l: + clear_soft_priority.clear(c) + + # Collect all variables (pre-array) and establish bounds + bounds_v = VariableBoundVisitor() + bounds_v.process(field_model_l, constraint_l, False) + + # TODO: need to handle inline constraints that impact arrays + constraints_len = len(constraint_l) + for fm in field_model_l: + constraint_l.extend(ArrayConstraintBuilder.build( + fm, bounds_v.bound_m)) + # Now, handle dist constraints + DistConstraintBuilder.build(randstate, fm) + + for c in constraint_l: + constraint_l.extend(ArrayConstraintBuilder.build( + c, bounds_v.bound_m)) + # Now, handle dist constraints + 
DistConstraintBuilder.build(randstate, c) + + # If we made changes during array remodeling, + # re-run bounds checking on the updated model +# if len(constraint_l) != constraints_len: + bounds_v.process(field_model_l, constraint_l) + + if debug > 0: + print("Final Model:") + for fm in field_model_l: + print(" " + ModelPrettyPrinter.print(fm)) + for c in constraint_l: + print(" " + ModelPrettyPrinter.print(c, show_exp=True)) + +# if lint > 0: +# LintVisitor().lint( +# field_model_l, +# constraint_l) + + r = Randomizer( + randstate, + solve_info=solve_info, + debug=debug, + lint=lint, + solve_fail_debug=solve_fail_debug) +# if Randomizer._rng is None: +# Randomizer._rng = random.Random(random.randrange(sys.maxsize)) + ri = RandInfoBuilder.build(field_model_l, constraint_l, Randomizer._rng) + + # TODO Unecessary function refactor? + Randomizer.fast_randomize(srcinfo, field_model_l, solve_info, bounds_v, r, ri) + + # Cache all interesting variables for later + # TODO Use @dataclass instead of a dict of str + Randomizer.randomize_cache[call_hash] = {} + Randomizer.randomize_cache[call_hash]['bounds_v'] = bounds_v + Randomizer.randomize_cache[call_hash]['ri'] = ri + Randomizer.randomize_cache[call_hash]['r'] = r + Randomizer.randomize_cache[call_hash]['constraints_l'] = constraint_l + + @staticmethod + def fast_randomize(srcinfo, field_model_l, solve_info, bounds_v, r, ri): try: r.randomize(ri, bounds_v.bound_m) finally: From 5819bb9932e320e85d6698e6cc6d192213d97ee3 Mon Sep 17 00:00:00 2001 From: Alex Wilson Date: Fri, 3 Feb 2023 22:27:53 -0700 Subject: [PATCH 02/13] Drop randomization cache key to mitigate Boolector model slowdown Around 100 uses seemed to be the sweet spot for the constraints I was testing, but that probably depends on how many Assume() and Sat() calls each randomize call results in. 
--- src/vsc/model/randomizer.py | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/src/vsc/model/randomizer.py b/src/vsc/model/randomizer.py index 5ae840f..b5ba718 100644 --- a/src/vsc/model/randomizer.py +++ b/src/vsc/model/randomizer.py @@ -86,7 +86,8 @@ def __init__(self, randstate, debug=0, lint=0, solve_fail_debug=0, solve_info=No # self.swizzler = SolveGroupSwizzlerRange(solve_info) self.swizzler = SolveGroupSwizzlerPartsel(randstate, solve_info, debug=self.debug) self.cache = {} - # TODO Reset btor cache after so many uses maybe to release resources? + # TODO Reset btor cache after so many uses to circumvent Boolector instance + # slowing down as more expressions permanently become part of the model/formula. self.cache_uses = 100 _state_p = [0,1] @@ -571,6 +572,12 @@ def do_randomize( for c in constraint_l: call_hash += ModelPrettyPrinter.print(c, show_exp=True) + if call_hash in Randomizer.randomize_cache: + r = Randomizer.randomize_cache[call_hash]['r'] + r.cache_uses -= 1 + if r.cache_uses <= 0: + del Randomizer.randomize_cache[call_hash] + if call_hash in Randomizer.randomize_cache: bounds_v = Randomizer.randomize_cache[call_hash]['bounds_v'] ri = Randomizer.randomize_cache[call_hash]['ri'] From f69d692fdf2d795c4e05791069520c9675db0028 Mon Sep 17 00:00:00 2001 From: Alex Wilson Date: Sat, 4 Feb 2023 19:32:20 -0700 Subject: [PATCH 03/13] Fix randomization caching on with constraints Performing deepcopy independently on fields and with constraints resulted in different FieldScalarModels which create separate and duplicate btor vars for the same intended var. This deepcopies them together and expands the FieldScalarModel deepcopy to keep val as a reference so that value lookups continue to work. 
--- src/vsc/model/field_scalar_model.py | 15 ++++++++++++++- src/vsc/model/randomizer.py | 15 +++------------ 2 files changed, 17 insertions(+), 13 deletions(-) diff --git a/src/vsc/model/field_scalar_model.py b/src/vsc/model/field_scalar_model.py index 0e0c423..ca996d4 100644 --- a/src/vsc/model/field_scalar_model.py +++ b/src/vsc/model/field_scalar_model.py @@ -19,6 +19,7 @@ # # @author: ballance +from copy import deepcopy from vsc.model.field_model import FieldModel from vsc.model.rand_gen_data import RandGenData @@ -44,7 +45,19 @@ def __init__(self, self.rand_if = rand_if self.var = None self.val = ValueScalar(0) - + + def __deepcopy__(self, memo): + cls = self.__class__ + result = cls.__new__(cls) + memo[id(self)] = result + for k, v in self.__dict__.items(): + setattr(result, k, deepcopy(v, memo)) + + # NOTE Deepcopy everything except val, keep it as reference so top level can get value + result.val = self.val + + return result + def set_used_rand(self, is_rand, level=0): # Field is considered rand when # - It is a root field, on which 'randomize' is called diff --git a/src/vsc/model/randomizer.py b/src/vsc/model/randomizer.py index b5ba718..ee3154d 100644 --- a/src/vsc/model/randomizer.py +++ b/src/vsc/model/randomizer.py @@ -598,18 +598,9 @@ def do_randomize( Randomizer.fast_randomize(srcinfo, field_model_l, solve_info, bounds_v, r, ri) else: - # Make copy of field and constraint models - field_model_l_copy = copy.deepcopy(field_model_l) - # Keep field_model_l copy unique except for the val references so - # that __getattr__ or however value lookup works still work - # TODO Is there a better way to pass on a clone/copy? 
- for fc, f in zip(field_model_l_copy, field_model_l): - for vc, v in zip(fc.field_l, f.field_l): - vc.val = v.val - field_model_l = field_model_l_copy - if constraint_l is not None: - # print('constraint_l', dill.detect.baditems(constraint_l)) - constraint_l = copy.deepcopy(constraint_l) + # Make copy of field and constraint models, together to keep FieldScalarModels the same + # TODO The deepcopy() in FieldScalarModel keeps the val reference, is that the best way? + (field_model_l, constraint_l) = copy.deepcopy((field_model_l, constraint_l)) clear_soft_priority = ClearSoftPriorityVisitor() From e1eb9084385a95a32ee9c061363301cbcbf3d948 Mon Sep 17 00:00:00 2001 From: Alex Wilson Date: Sun, 5 Feb 2023 20:28:19 -0700 Subject: [PATCH 04/13] Modify call_hash to use obj ids and mode bits; Cache inspect frame The pretty print of the fields and constraints to use as a key in the randomization cache is expensive. Since each call has unique objects for the fields and constraints we can use those instead. This also grabs the obj id of the with constraint (block?) and the expressions/constraints within. Saving the inspect.stack() frame in the randobj_interposer causes deepcopy/pickle issues in the Randomization call. I'm not entirely sure why yet, but saving the filename str and lineno int in the SourceInfo and saving that to reuse later instead seems safer and avoids the fairly large penalty I saw in py-spy. --- src/vsc/model/randomizer.py | 25 ++++++++++++++++++------- src/vsc/rand_obj.py | 10 ++++++++-- 2 files changed, 26 insertions(+), 9 deletions(-) diff --git a/src/vsc/model/randomizer.py b/src/vsc/model/randomizer.py index ee3154d..aa446ad 100644 --- a/src/vsc/model/randomizer.py +++ b/src/vsc/model/randomizer.py @@ -562,15 +562,26 @@ def do_randomize( else: solve_info = None - # Create a unique string for the cache dict key - # TODO filename and lineno probably aren't necessary - # TODO Is there a better way to do this? Cache this by object ref in caller instead? 
- call_hash = f'{srcinfo.filename}:{str(srcinfo.lineno)}\n' + # Create a unique string for this call based on object ids and mode bits + # Is there more than rand_mode and constraint_mode to cover here? + # TODO Can we cache the base constraints so that with constraints have a prebuilt + # model and such to build off of? + call_hash = '' for fm in field_model_l: - call_hash += ModelPrettyPrinter.print(fm) + # Each field and its rand_mode + for f in fm.field_l: + call_hash += f'{hex(id(f))}-{f.fullname}-{f.rand_mode=}\n' + # Each constraint block and whether it's enabled + for cm in fm.constraint_model_l: + call_hash += f'{hex(id(cm))}-{cm.name}-{cm.enabled=}\n' if constraint_l is not None: - for c in constraint_l: - call_hash += ModelPrettyPrinter.print(c, show_exp=True) + # Each with constraint(block?) and its expressions + # TODO Is this missing anything? Dynamic expressions? Too aggressive? + for cm in constraint_l: + call_hash += f'{hex(id(cm))}-{cm.name}-{cm.enabled=}\n' + for c in cm.constraint_l: + call_hash += f'{hex(id(c))}\n' + call_hash = call_hash if call_hash in Randomizer.randomize_cache: r = Randomizer.randomize_cache[call_hash]['r'] diff --git a/src/vsc/rand_obj.py b/src/vsc/rand_obj.py index 04d92db..b428071 100644 --- a/src/vsc/rand_obj.py +++ b/src/vsc/rand_obj.py @@ -58,6 +58,10 @@ def __call__(self, T): class randobj_interposer(T): def __init__(self, *args, **kwargs): + # Used to cache data from inspect.stack() since that is + # an expensive call. Saving the frame itself isn't pickle-safe. 
+ self.src_info = None + ro_i = self._get_ro_int() ro_i.srcinfo = srcinfo @@ -155,13 +159,15 @@ def randomize(self, lint=0, solve_fail_debug=0): ro_int = self._get_ro_int() - frame = inspect.stack()[1] + if self.src_info == None: + frame = inspect.stack()[1] + self.src_info = SourceInfo(frame.filename, frame.lineno) model = self.get_model() try: Randomizer.do_randomize( ro_int.get_randstate(), - SourceInfo(frame.filename, frame.lineno), + self.src_info, [model], debug=debug, lint=lint, From ba1e85cf8c3d17e384a8124d80120a8144320efa Mon Sep 17 00:00:00 2001 From: Alex Wilson Date: Sun, 5 Feb 2023 21:39:19 -0700 Subject: [PATCH 05/13] Fix re-randomizing unconstrained variables Usually unconstrained variables get marked as having been randomized, but since caching reuses them we don't want to mark them. Or at least this fixes that, but leaving a TODO to check back. --- src/vsc/model/randomizer.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/src/vsc/model/randomizer.py b/src/vsc/model/randomizer.py index aa446ad..2f7bc98 100644 --- a/src/vsc/model/randomizer.py +++ b/src/vsc/model/randomizer.py @@ -137,9 +137,10 @@ def randomize(self, ri : RandInfo, bound_m : Dict[FieldModel,VariableBoundModel] # TODO: are there any cases where these could be ranges? idx = self.randstate.randint(0, len(range_l)-1) uf.set_val(range_l[idx][0]) - + + # TODO We want to re-randomize unconstrained variables when caching. Do we need to lock? # Lock so we don't overwrite - uf.set_used_rand(False) + # uf.set_used_rand(False) rs_i = 0 start_rs_i = 0 From 01800dc4b6d9175db15ba09208103d35b9886487 Mon Sep 17 00:00:00 2001 From: Alex Wilson Date: Wed, 8 Feb 2023 14:10:07 -0700 Subject: [PATCH 06/13] Fix FieldArrayModel accesses; Dist constraint caching; Misc cleanup Mulitple HACKs in this that need to be reconsidered, but this passes all tests except the covergroup ones and the nested / sub constraint test. 
--- src/vsc/model/field_array_model.py | 12 +- src/vsc/model/randomizer.py | 225 +++++++++++++++++++++++------ src/vsc/rand_obj.py | 6 +- src/vsc/types.py | 14 +- ve/unit/test_constraint_dist.py | 2 +- 5 files changed, 201 insertions(+), 58 deletions(-) diff --git a/src/vsc/model/field_array_model.py b/src/vsc/model/field_array_model.py index 3388f83..cb10292 100644 --- a/src/vsc/model/field_array_model.py +++ b/src/vsc/model/field_array_model.py @@ -48,7 +48,17 @@ def __init__(self, is_rand_sz) self.size.parent = self self._set_size(0) - + + # Update this field at the end of each randomization + # TODO Remove this field from the deepcopy? + self.latest_field_l = None + + def get_field_l(self): + """Retrieve the latest field_l""" + if self.latest_field_l is not None: + return self.latest_field_l + return self.field_l + def append(self, fm): super().add_field(fm) self._set_size(len(self.field_l)) diff --git a/src/vsc/model/randomizer.py b/src/vsc/model/randomizer.py index 2f7bc98..40ec3c6 100644 --- a/src/vsc/model/randomizer.py +++ b/src/vsc/model/randomizer.py @@ -21,6 +21,7 @@ import copy +from dataclasses import dataclass import sys import time from typing import List, Dict @@ -30,6 +31,8 @@ from vsc.constraints import constraint, soft from vsc.model.bin_expr_type import BinExprType from vsc.model.constraint_model import ConstraintModel +from vsc.model.constraint_override_model import ConstraintOverrideModel +from vsc.model.constraint_foreach_model import ConstraintForeachModel from vsc.model.constraint_soft_model import ConstraintSoftModel from vsc.model.expr_bin_model import ExprBinModel from vsc.model.expr_fieldref_model import ExprFieldRefModel @@ -62,7 +65,6 @@ from vsc.model.solvegroup_swizzler_partsel import SolveGroupSwizzlerPartsel from vsc.impl.ctor import glbl_debug, glbl_solvefail_debug - class Randomizer(RandIF): """Implements the core randomization algorithm""" @@ -70,6 +72,11 @@ class Randomizer(RandIF): randomize_cache = {} + # HACK This is meant 
to keep constraint object IDs unique by preventing garbage collection + # and cycling IDs. Generating unique IDs regardless of lifetimes or better hashing that + # explores the entire constraint might be better. + constraint_keep = [] + def __init__(self, randstate, debug=0, lint=0, solve_fail_debug=0, solve_info=None): self.randstate = randstate self.pretty_printer = ModelPrettyPrinter() @@ -85,10 +92,10 @@ def __init__(self, randstate, debug=0, lint=0, solve_fail_debug=0, solve_info=No # self.swizzler = SolveGroupSwizzlerRange(solve_info) self.swizzler = SolveGroupSwizzlerPartsel(randstate, solve_info, debug=self.debug) - self.cache = {} + self.btor_cache = {} # TODO Reset btor cache after so many uses to circumvent Boolector instance # slowing down as more expressions permanently become part of the model/formula. - self.cache_uses = 100 + self.btor_cache_uses = 100 _state_p = [0,1] _rng = None @@ -150,8 +157,9 @@ def randomize(self, ri : RandInfo, bound_m : Dict[FieldModel,VariableBoundModel] # If missing from cache, initialize/build btor and randset # TODO What is going on with max_fields? It would break this # caching setup. - if rs_i not in self.cache: + if rs_i not in self.btor_cache: btor = Boolector() + # TODO Is self.btor used anywhere? # self.btor = btor btor.Set_opt(pyboolector.BTOR_OPT_INCREMENTAL, True) btor.Set_opt(pyboolector.BTOR_OPT_MODEL_GEN, True) @@ -273,14 +281,13 @@ def randomize(self, ri : RandInfo, bound_m : Dict[FieldModel,VariableBoundModel] # Changes made to the randset are covered by the randomization_cache # Cache btor reference for use later - # TODO Use @dataclass instead of a dict of str - self.cache[rs_i] = {} - self.cache[rs_i]['btor'] = btor + self.btor_cache[rs_i] = btor else: # Setup some necessary variables start_rs_i = rs_i rs = ri.randsets()[rs_i] - btor = self.cache[rs_i]['btor'] + btor = self.btor_cache[rs_i] + # TODO Is self.btor used anywhere? 
# self.btor = btor all_fields = rs.all_fields() @@ -298,7 +305,7 @@ def randomize(self, ri : RandInfo, bound_m : Dict[FieldModel,VariableBoundModel] ri.randsets()[x], bound_m) x += 1 - + # Finalize the value of the field x = start_rs_i # TODO What cleanup do we need if caching? @@ -563,52 +570,58 @@ def do_randomize( else: solve_info = None + if constraint_l is None: + constraint_l = [] + + # HACK Fill out field_l in FieldArrayModels so that look ups work + # This breaks deepcopy since it'll now have deepcopy references... + for fm in field_model_l: + if hasattr(fm, 'field_l'): + for f in fm.field_l: + if hasattr(f, 'field_l'): + f.latest_field_l = None + + # Generate dist constraints early for generating call hash + (field_model_l_og, constraint_l_og) = (field_model_l, constraint_l) # copy.deepcopy((field_model_l, constraint_l)) + randstate_copy = copy.deepcopy(randstate) + for fm in field_model_l_og: + DistConstraintBuilder.build(randstate_copy, fm) + for c in constraint_l_og: + DistConstraintBuilder.build(randstate_copy, c) + # Create a unique string for this call based on object ids and mode bits # Is there more than rand_mode and constraint_mode to cover here? # TODO Can we cache the base constraints so that with constraints have a prebuilt # model and such to build off of? - call_hash = '' - for fm in field_model_l: - # Each field and its rand_mode - for f in fm.field_l: - call_hash += f'{hex(id(f))}-{f.fullname}-{f.rand_mode=}\n' - # Each constraint block and whether it's enabled - for cm in fm.constraint_model_l: - call_hash += f'{hex(id(cm))}-{cm.name}-{cm.enabled=}\n' - if constraint_l is not None: - # Each with constraint(block?) and its expressions - # TODO Is this missing anything? Dynamic expressions? Too aggressive? 
- for cm in constraint_l: - call_hash += f'{hex(id(cm))}-{cm.name}-{cm.enabled=}\n' - for c in cm.constraint_l: - call_hash += f'{hex(id(c))}\n' - call_hash = call_hash + # call_hash = Randomizer.get_id_call_hash(field_model_l, constraint_l, field_model_l_copy, constraint_l_copy) + call_hash = Randomizer.get_pretty_call_hash(randstate, field_model_l_og, constraint_l_og) + + for fm in field_model_l_og: + ConstraintOverrideRollbackVisitor.rollback(fm) if call_hash in Randomizer.randomize_cache: - r = Randomizer.randomize_cache[call_hash]['r'] - r.cache_uses -= 1 - if r.cache_uses <= 0: + cache = Randomizer.randomize_cache[call_hash] + cache.r.btor_cache_uses -= 1 + if cache.r.btor_cache_uses <= 0: del Randomizer.randomize_cache[call_hash] if call_hash in Randomizer.randomize_cache: - bounds_v = Randomizer.randomize_cache[call_hash]['bounds_v'] - ri = Randomizer.randomize_cache[call_hash]['ri'] - r = Randomizer.randomize_cache[call_hash]['r'] - constraint_l = Randomizer.randomize_cache[call_hash]['constraints_l'] + cache = Randomizer.randomize_cache[call_hash] clear_soft_priority = ClearSoftPriorityVisitor() # Reset cached field_model_l vars to be rand again - for f in field_model_l: + # TODO This is untested. Are there deepcopy issues here? + for f in cache.field_model_l: f.set_used_rand(True, 0) clear_soft_priority.clear(f) # First, invoke pre_randomize on all elements - for fm in field_model_l: + # TODO This is untested. What happens to pre_randomize() on a deepcopy? + for fm in cache.field_model_l: fm.pre_randomize() - Randomizer.fast_randomize(srcinfo, field_model_l, solve_info, bounds_v, r, ri) - + Randomizer.try_randomize(srcinfo, cache.field_model_l, solve_info, cache.bounds_v, cache.r, cache.ri) else: # Make copy of field and constraint models, together to keep FieldScalarModels the same # TODO The deepcopy() in FieldScalarModel keeps the val reference, is that the best way? 
@@ -629,9 +642,6 @@ def do_randomize( for fm in field_model_l: fm.pre_randomize() - if constraint_l is None: - constraint_l = [] - for c in constraint_l: clear_soft_priority.clear(c) @@ -641,16 +651,19 @@ def do_randomize( # TODO: need to handle inline constraints that impact arrays constraints_len = len(constraint_l) + # TODO dist are handled as soft constraints that caching doesn't recalculate... for fm in field_model_l: constraint_l.extend(ArrayConstraintBuilder.build( fm, bounds_v.bound_m)) # Now, handle dist constraints + # TODO Does this depend on the ArrayConstraintBuilder above? DistConstraintBuilder.build(randstate, fm) for c in constraint_l: constraint_l.extend(ArrayConstraintBuilder.build( c, bounds_v.bound_m)) # Now, handle dist constraints + # TODO Does this depend on the ArrayConstraintBuilder above? DistConstraintBuilder.build(randstate, c) # If we made changes during array remodeling, @@ -681,18 +694,127 @@ def do_randomize( ri = RandInfoBuilder.build(field_model_l, constraint_l, Randomizer._rng) # TODO Unecessary function refactor? - Randomizer.fast_randomize(srcinfo, field_model_l, solve_info, bounds_v, r, ri) + Randomizer.try_randomize(srcinfo, field_model_l, solve_info, bounds_v, r, ri) # Cache all interesting variables for later - # TODO Use @dataclass instead of a dict of str - Randomizer.randomize_cache[call_hash] = {} - Randomizer.randomize_cache[call_hash]['bounds_v'] = bounds_v - Randomizer.randomize_cache[call_hash]['ri'] = ri - Randomizer.randomize_cache[call_hash]['r'] = r - Randomizer.randomize_cache[call_hash]['constraints_l'] = constraint_l + Randomizer.randomize_cache[call_hash] = rand_cache_entry(bounds_v, ri, r, field_model_l, constraint_l) + + # HACK Fill out field_l in FieldArrayModels so that look ups work + # This breaks deepcopy since it'll now have deepcopy references... 
+ field_model_l = Randomizer.randomize_cache[call_hash].field_model_l + for fm_new, fm_og in zip(field_model_l, field_model_l_og): + if hasattr(fm_og, 'field_l'): + for f_new, f_og in zip(fm_new.field_l, fm_og.field_l): + if hasattr(f_og, 'field_l'): + f_og.latest_field_l = f_new.field_l + @staticmethod - def fast_randomize(srcinfo, field_model_l, solve_info, bounds_v, r, ri): + def get_pretty_call_hash(randstate, field_model_l, constraint_l): + call_hash = '' + call_hash += f'{hex(id(randstate))}\n' + if field_model_l is not None: + for fm in field_model_l: + # TODO This pretty print is an expensive call. Need a better way + # to construct a unique ID/hash that doesn't depend on + # object lifetimes + call_hash += ModelPrettyPrinter.print(fm) + # Each constraint block and whether it's enabled + if hasattr(fm, 'field_l'): + for f in fm.field_l: + call_hash += f'{f.fullname}-{f.rand_mode=}\n' + # Each constraint block and whether it's enabled + if hasattr(fm, 'constraint_model_l'): + for cm in fm.constraint_model_l: + call_hash += f'{cm.name}-{cm.enabled=}\n' + if hasattr(fm, 'constraint_model_l'): + for cm in fm.constraint_model_l: + # TODO dist constraint hack + # T + for c in cm.constraint_l: + if isinstance(c, ConstraintOverrideModel): + # dist_value = c.new_constraint.constraint_l[-1].expr.rhs.val().toString() + # call_hash += f'{cm.name}-{dist_value=}\n' + call_hash += f'{hex(id(c.new_constraint))}\n' + if isinstance(c, ConstraintForeachModel): + for fe_c in c.constraint_l: + if isinstance(fe_c, ConstraintOverrideModel): + call_hash += f'{hex(id(fe_c.new_constraint))}\n' + + if constraint_l is not None: + # Each with constraint(block?) and its expressions + # TODO Is this missing anything? Dynamic expressions? Too aggressive? 
+ for cm in constraint_l: + call_hash += ModelPrettyPrinter.print(cm) + # HACK Place with constraints inside list forever to prevent obj ID reuse + Randomizer.constraint_keep.append(cm) + call_hash += f'{hex(id(cm))}-{cm.name}-{cm.enabled=}\n' + for c in cm.constraint_l: + call_hash += f'{hex(id(c))}\n' + for c in cm.constraint_l: + # TODO dist constraint hack + if isinstance(c, ConstraintOverrideModel): + # dist_value = c.new_constraint.constraint_l[-1].expr.rhs.val().toString() + # call_hash += f'{c.name}-{dist_value=}\n' + call_hash += f'{hex(id(c.new_constraint))}\n' + if isinstance(c, ConstraintForeachModel): + for fe_c in c.constraint_l: + if isinstance(fe_c, ConstraintOverrideModel): + call_hash += f'{hex(id(fe_c.new_constraint))}\n' + return call_hash + + @staticmethod + def get_id_call_hash(field_model_l, constraint_l, fml_copy, cl_copy): + call_hash = '' + if field_model_l is not None: + for fm in field_model_l: + # Each field and its rand_mode + # TODO call_hash += f'{hex(id(fm))}-{fm.fullname}\n' + if hasattr(fm, 'field_l'): + for f in fm.field_l: + call_hash += f'{hex(id(f))}-{f.fullname}-{f.rand_mode=}\n' + # Each constraint block and whether it's enabled + if hasattr(fm, 'constraint_model_l'): + for cm in fm.constraint_model_l: + call_hash += f'{hex(id(cm))}-{cm.name}-{cm.enabled=}\n' + for fm in fml_copy: + # Each constraint block and whether it's enabled + if hasattr(fm, 'constraint_model_l'): + for cm in fm.constraint_model_l: + # TODO dist constraint hack + for c in cm.constraint_l: + if isinstance(c, ConstraintOverrideModel): + # dist_value = c.new_constraint.constraint_l[-1].expr.rhs.val().toString() + # call_hash += f'{cm.name}-{dist_value=}\n' + call_hash += f'{hex(id(c.new_constraint))}\n' + if isinstance(c, ConstraintForeachModel): + for fe_c in c.constraint_l: + if isinstance(fe_c, ConstraintOverrideModel): + call_hash += f'{hex(id(fe_c.new_constraint))}\n' + if constraint_l is not None: + # Each with constraint(block?) 
and its expressions + # TODO Is this missing anything? Dynamic expressions? Too aggressive? + for cm in constraint_l: + Randomizer.constraint_keep.append(cm) + call_hash += f'{hex(id(cm))}-{cm.name}-{cm.enabled=}\n' + for c in cm.constraint_l: + call_hash += f'{hex(id(c))}\n' + + for cm in cl_copy: + for c in cm.constraint_l: + # TODO dist constraint hack + if isinstance(c, ConstraintOverrideModel): + # dist_value = c.new_constraint.constraint_l[-1].expr.rhs.val().toString() + # call_hash += f'{c.name}-{dist_value=}\n' + call_hash += f'{hex(id(c.new_constraint))}\n' + if isinstance(c, ConstraintForeachModel): + for fe_c in c.constraint_l: + if isinstance(fe_c, ConstraintOverrideModel): + call_hash += f'{hex(id(fe_c.new_constraint))}\n' + return call_hash + + @staticmethod + def try_randomize(srcinfo, field_model_l, solve_info, bounds_v, r, ri): try: r.randomize(ri, bounds_v.bound_m) finally: @@ -708,4 +830,11 @@ def fast_randomize(srcinfo, field_model_l, solve_info, bounds_v, r, ri): # Process constraints to identify variable/constraint sets - + +@dataclass +class rand_cache_entry: + bounds_v: VariableBoundVisitor + ri: RandInfo + r: Randomizer + field_model_l: list + constraint_l: list \ No newline at end of file diff --git a/src/vsc/rand_obj.py b/src/vsc/rand_obj.py index b428071..d16784e 100644 --- a/src/vsc/rand_obj.py +++ b/src/vsc/rand_obj.py @@ -85,7 +85,11 @@ def __init__(self, *args, **kwargs): if ro_i.ctor_level == 0: self.build_field_model(None) pop_srcinfo_mode() - + + # Prevent parent references from deepcopying + def __deepcopy__(self, memo): + return self + # Add the interposer class ret = type(T.__name__, (randobj_interposer,), dict()) diff --git a/src/vsc/types.py b/src/vsc/types.py index 4a72b80..a75e5f6 100644 --- a/src/vsc/types.py +++ b/src/vsc/types.py @@ -964,11 +964,11 @@ def __contains__(self, lhs): if self.is_enum: ei : EnumInfo = self.t.enum_i val = ei.e2v(lhs) - for f in model.field_l: + for f in model.get_field_l(): if int(f.get_val()) 
== val: return True elif self.is_scalar: - for f in model.field_l: + for f in model.get_field_l(): if int(f.get_val()) == int(lhs): return True else: @@ -987,7 +987,7 @@ def __next__(self): raise StopIteration() else: # The model's view is always masked 2's complement - v = int(self.model.field_l[self.idx].get_val()) + v = int(self.model.get_field_l()[self.idx].get_val()) if self.l.t.is_signed: if (v & (1 << (self.l.t.width-1))) != 0: @@ -1041,10 +1041,10 @@ def __getitem__(self, k): else: if self.is_enum: ei : EnumInfo = self.t.enum_i - return ei.v2e(model.field_l[k].get_val()) + return ei.v2e(model.get_field_l()[k].get_val()) elif self.is_scalar: # The model's view is always masked 2's complement - v = int(model.field_l[k].get_val()) + v = int(model.get_field_l()[k].get_val()) if self.t.is_signed: if (v & (1 << (self.t.width-1))) != 0: @@ -1073,12 +1073,12 @@ def __str__(self): for i in range(self.size): if i > 0: ret += ", " - ret += str(ei.v2e(model.field_l[i].get_val())) + ret += str(ei.v2e(model.get_field_l()[i].get_val())) elif self.is_scalar: for i in range(self.size): if i > 0: ret += ", " - ret += model.field_l[i].get_val().toString() + ret += model.get_field_l()[i].get_val().toString() else: for i in range(self.size): if i > 0: diff --git a/ve/unit/test_constraint_dist.py b/ve/unit/test_constraint_dist.py index 2391b49..d4bce47 100644 --- a/ve/unit/test_constraint_dist.py +++ b/ve/unit/test_constraint_dist.py @@ -277,7 +277,7 @@ def dist_a(self): for i in range(4): hist.append([0]*4) - for i in range(400): + for _ in range(400): my.randomize(debug=0) for i in range(4): v = my.a[i] From 0627b6be807193265903a6bb29054d1b5443a271 Mon Sep 17 00:00:00 2001 From: Alex Wilson Date: Fri, 10 Feb 2023 15:38:11 -0700 Subject: [PATCH 07/13] Reorder set_val calls in post_rand; Add non-rand values to call_hash The segmented_randomization test was reusing constraints with nested vsc.attr objects, which are non-rand and change the constraints with each call if they are 
different. Added a bit to the pretty printer to grab that. The order of set_val in nested post_randomize calls was setting values from the parent randomization call after the post-rand randomization call. This worked before caching b/c they were the same Boolector var, but now they are distinct. Swapped the order in FieldCompositeModel since it seemed to be backward. --- src/vsc/model/field_composite_model.py | 10 ++++++---- src/vsc/model/randomizer.py | 4 ++++ src/vsc/visitors/model_pretty_printer.py | 5 ++++- 3 files changed, 14 insertions(+), 5 deletions(-) diff --git a/src/vsc/model/field_composite_model.py b/src/vsc/model/field_composite_model.py index 7dad5f5..b2404a8 100644 --- a/src/vsc/model/field_composite_model.py +++ b/src/vsc/model/field_composite_model.py @@ -142,13 +142,15 @@ def pre_randomize(self): def post_randomize(self): """Called during the randomization process to propagate `post_randomize` event""" - + + # If randomize is called inside post_randomize with caching enabled, which + # value should be written? Prioritize writing current field values before re-randomizing. 
+ for f in self.field_l: + f.post_randomize() + # Perform a phase callback if available if self.is_used_rand and self.rand_if is not None: self.rand_if.do_post_randomize() - - for f in self.field_l: - f.post_randomize() def accept(self, v): v.visit_composite_field(self) diff --git a/src/vsc/model/randomizer.py b/src/vsc/model/randomizer.py index 40ec3c6..01eff33 100644 --- a/src/vsc/model/randomizer.py +++ b/src/vsc/model/randomizer.py @@ -581,6 +581,10 @@ def do_randomize( if hasattr(f, 'field_l'): f.latest_field_l = None + # The call_hash needs the value of non-rand fields in the pretty printer + for f in field_model_l: + f.set_used_rand(True, 0) + # Generate dist constraints early for generating call hash (field_model_l_og, constraint_l_og) = (field_model_l, constraint_l) # copy.deepcopy((field_model_l, constraint_l)) randstate_copy = copy.deepcopy(randstate) diff --git a/src/vsc/visitors/model_pretty_printer.py b/src/vsc/visitors/model_pretty_printer.py index 2b134b8..0036b23 100644 --- a/src/vsc/visitors/model_pretty_printer.py +++ b/src/vsc/visitors/model_pretty_printer.py @@ -245,7 +245,10 @@ def visit_expr_fieldref(self, e : vm.ExprFieldRefModel): else: self.write(e.fm.fullname + "(" + str(int(e.fm.get_val())) + ")") else: - self.write(e.fm.fullname) + if e.fm.is_used_rand: + self.write(e.fm.fullname) + else: + self.write(e.fm.fullname + '(' + str(int(e.fm.get_val())) + ')') def visit_expr_unary(self, e : vm.ExprUnaryModel): self.write(UnaryExprType.toString(e.op)) From 5d693a1e3f7a03e0473a254e7963c42abbdf9216 Mon Sep 17 00:00:00 2001 From: Alex Wilson Date: Fri, 10 Feb 2023 17:26:46 -0700 Subject: [PATCH 08/13] Disable randomize cache for GeneratorModel; Another PrettyPrint change --- src/vsc/model/randomizer.py | 104 +++++++++++++---------- src/vsc/visitors/model_pretty_printer.py | 10 +-- 2 files changed, 63 insertions(+), 51 deletions(-) diff --git a/src/vsc/model/randomizer.py b/src/vsc/model/randomizer.py index 01eff33..f3ba46b 100644 --- 
a/src/vsc/model/randomizer.py +++ b/src/vsc/model/randomizer.py @@ -40,6 +40,7 @@ from vsc.model.expr_model import ExprModel from vsc.model.field_model import FieldModel from vsc.model.field_scalar_model import FieldScalarModel +from vsc.model.generator_model import GeneratorModel from vsc.model.model_visitor import ModelVisitor from vsc.model.rand_if import RandIF from vsc.model.rand_info import RandInfo @@ -572,44 +573,54 @@ def do_randomize( if constraint_l is None: constraint_l = [] - - # HACK Fill out field_l in FieldArrayModels so that look ups work - # This breaks deepcopy since it'll now have deepcopy references... + + cache_enabled = True for fm in field_model_l: - if hasattr(fm, 'field_l'): - for f in fm.field_l: - if hasattr(f, 'field_l'): - f.latest_field_l = None - - # The call_hash needs the value of non-rand fields in the pretty printer - for f in field_model_l: - f.set_used_rand(True, 0) - - # Generate dist constraints early for generating call hash - (field_model_l_og, constraint_l_og) = (field_model_l, constraint_l) # copy.deepcopy((field_model_l, constraint_l)) - randstate_copy = copy.deepcopy(randstate) - for fm in field_model_l_og: - DistConstraintBuilder.build(randstate_copy, fm) - for c in constraint_l_og: - DistConstraintBuilder.build(randstate_copy, c) - - # Create a unique string for this call based on object ids and mode bits - # Is there more than rand_mode and constraint_mode to cover here? - # TODO Can we cache the base constraints so that with constraints have a prebuilt - # model and such to build off of? 
- # call_hash = Randomizer.get_id_call_hash(field_model_l, constraint_l, field_model_l_copy, constraint_l_copy) - call_hash = Randomizer.get_pretty_call_hash(randstate, field_model_l_og, constraint_l_og) - - for fm in field_model_l_og: - ConstraintOverrideRollbackVisitor.rollback(fm) - - if call_hash in Randomizer.randomize_cache: - cache = Randomizer.randomize_cache[call_hash] - cache.r.btor_cache_uses -= 1 - if cache.r.btor_cache_uses <= 0: - del Randomizer.randomize_cache[call_hash] + # Skip GeneratorModel since it adds soft constraints + if isinstance(fm, GeneratorModel): + cache_enabled = False + break - if call_hash in Randomizer.randomize_cache: + if cache_enabled: + # HACK Fill out field_l in FieldArrayModels so that look ups work + # This breaks deepcopy since it'll now have deepcopy references... + for fm in field_model_l: + if hasattr(fm, 'field_l'): + for f in fm.field_l: + if hasattr(f, 'field_l'): + f.latest_field_l = None + + # The call_hash needs the value of non-rand fields in the pretty printer + for f in field_model_l: + f.set_used_rand(True, 0) + + # Generate dist constraints early for generating call hash + (field_model_l_og, constraint_l_og) = (field_model_l, constraint_l) + # Save state so that rebuilding dist constraints is exactly the same for the copy + state = randstate.rng.getstate() + for fm in field_model_l_og: + DistConstraintBuilder.build(randstate, fm) + for c in constraint_l_og: + DistConstraintBuilder.build(randstate, c) + randstate.rng.setstate(state) + + # Create a unique string for this call based on object ids and mode bits + # Is there more than rand_mode and constraint_mode to cover here? + # TODO Can we cache the base constraints so that with constraints have a prebuilt + # model and such to build off of? 
+ # call_hash = Randomizer.get_id_call_hash(field_model_l, constraint_l, field_model_l_copy, constraint_l_copy) + call_hash = Randomizer.get_pretty_call_hash(randstate, field_model_l_og, constraint_l_og) + + for fm in field_model_l_og: + ConstraintOverrideRollbackVisitor.rollback(fm) + + if call_hash in Randomizer.randomize_cache: + cache = Randomizer.randomize_cache[call_hash] + cache.r.btor_cache_uses -= 1 + if cache.r.btor_cache_uses <= 0: + del Randomizer.randomize_cache[call_hash] + + if cache_enabled and call_hash in Randomizer.randomize_cache: cache = Randomizer.randomize_cache[call_hash] clear_soft_priority = ClearSoftPriorityVisitor() @@ -701,16 +712,18 @@ def do_randomize( Randomizer.try_randomize(srcinfo, field_model_l, solve_info, bounds_v, r, ri) # Cache all interesting variables for later - Randomizer.randomize_cache[call_hash] = rand_cache_entry(bounds_v, ri, r, field_model_l, constraint_l) + if cache_enabled: + Randomizer.randomize_cache[call_hash] = rand_cache_entry(bounds_v, ri, r, field_model_l, constraint_l) # HACK Fill out field_l in FieldArrayModels so that look ups work # This breaks deepcopy since it'll now have deepcopy references... - field_model_l = Randomizer.randomize_cache[call_hash].field_model_l - for fm_new, fm_og in zip(field_model_l, field_model_l_og): - if hasattr(fm_og, 'field_l'): - for f_new, f_og in zip(fm_new.field_l, fm_og.field_l): - if hasattr(f_og, 'field_l'): - f_og.latest_field_l = f_new.field_l + if cache_enabled: + field_model_l = Randomizer.randomize_cache[call_hash].field_model_l + for fm_new, fm_og in zip(field_model_l, field_model_l_og): + if hasattr(fm_og, 'field_l'): + for f_new, f_og in zip(fm_new.field_l, fm_og.field_l): + if hasattr(f_og, 'field_l'): + f_og.latest_field_l = f_new.field_l @staticmethod @@ -722,7 +735,7 @@ def get_pretty_call_hash(randstate, field_model_l, constraint_l): # TODO This pretty print is an expensive call. 
Need a better way # to construct a unique ID/hash that doesn't depend on # object lifetimes - call_hash += ModelPrettyPrinter.print(fm) + call_hash += ModelPrettyPrinter.print(fm, print_values=True) # Each constraint block and whether it's enabled if hasattr(fm, 'field_l'): for f in fm.field_l: @@ -749,7 +762,7 @@ def get_pretty_call_hash(randstate, field_model_l, constraint_l): # Each with constraint(block?) and its expressions # TODO Is this missing anything? Dynamic expressions? Too aggressive? for cm in constraint_l: - call_hash += ModelPrettyPrinter.print(cm) + call_hash += ModelPrettyPrinter.print(cm, print_values=True) # HACK Place with constraints inside list forever to prevent obj ID reuse Randomizer.constraint_keep.append(cm) call_hash += f'{hex(id(cm))}-{cm.name}-{cm.enabled=}\n' @@ -832,7 +845,6 @@ def try_randomize(srcinfo, field_model_l, solve_info, bounds_v, r, ri): for fm in field_model_l: fm.post_randomize() - # Process constraints to identify variable/constraint sets @dataclass diff --git a/src/vsc/visitors/model_pretty_printer.py b/src/vsc/visitors/model_pretty_printer.py index 0036b23..0af7571 100644 --- a/src/vsc/visitors/model_pretty_printer.py +++ b/src/vsc/visitors/model_pretty_printer.py @@ -240,15 +240,15 @@ def visit_expr_fieldref(self, e : vm.ExprFieldRefModel): for i, f in enumerate(e.fm.field_l): if i > 0: self.write(", ") - self.write(str(int(f.get_val()))) + try: + self.write(str(int(f.get_val()))) + except NotImplementedError: + self.write('?') self.write("]") else: self.write(e.fm.fullname + "(" + str(int(e.fm.get_val())) + ")") else: - if e.fm.is_used_rand: - self.write(e.fm.fullname) - else: - self.write(e.fm.fullname + '(' + str(int(e.fm.get_val())) + ')') + self.write(e.fm.fullname) def visit_expr_unary(self, e : vm.ExprUnaryModel): self.write(UnaryExprType.toString(e.op)) From b610af8616b596c8738326054cd2ed5922e18438 Mon Sep 17 00:00:00 2001 From: Alex Wilson Date: Sat, 11 Feb 2023 19:01:23 -0700 Subject: [PATCH 09/13] Fix 
pre_rand issues and add unit test; Minor cleanup I think it's possible and intended that pre_randomize could change the constraints for a randomize call, so it should be called every time before generating the call_hash. This causes some issues with deepcopy since rand_obj was trying to prevent deepcopying self in the parent reference of other objects. Workaround for now is just to skip deepcopying the Boolector var object in FieldScalarModel. This might not be the correct thing to do, but passes unit tests for now. --- src/vsc/model/field_scalar_model.py | 7 ++- src/vsc/model/randomizer.py | 78 ++++------------------------- src/vsc/rand_obj.py | 4 -- ve/unit/test_pre_post_randomize.py | 31 ++++++++++++ 4 files changed, 46 insertions(+), 74 deletions(-) diff --git a/src/vsc/model/field_scalar_model.py b/src/vsc/model/field_scalar_model.py index ca996d4..363dfa3 100644 --- a/src/vsc/model/field_scalar_model.py +++ b/src/vsc/model/field_scalar_model.py @@ -51,7 +51,12 @@ def __deepcopy__(self, memo): result = cls.__new__(cls) memo[id(self)] = result for k, v in self.__dict__.items(): - setattr(result, k, deepcopy(v, memo)) + if k not in ['var']: + setattr(result, k, deepcopy(v, memo)) + + # TODO This is a workaround for the deepcopy in do_randomize somewhere getting + # access to Boolector objects, which in turn can't be deepcopied.
+ result.var = None # NOTE Deepcopy everything except val, keep it as reference so top level can get value result.val = self.val diff --git a/src/vsc/model/randomizer.py b/src/vsc/model/randomizer.py index f3ba46b..53ee794 100644 --- a/src/vsc/model/randomizer.py +++ b/src/vsc/model/randomizer.py @@ -574,6 +574,15 @@ def do_randomize( if constraint_l is None: constraint_l = [] + clear_soft_priority = ClearSoftPriorityVisitor() + + for f in field_model_l: + f.set_used_rand(True, 0) + clear_soft_priority.clear(f) + + for fm in field_model_l: + fm.pre_randomize() + cache_enabled = True for fm in field_model_l: # Skip GeneratorModel since it adds soft constraints @@ -590,10 +599,6 @@ def do_randomize( if hasattr(f, 'field_l'): f.latest_field_l = None - # The call_hash needs the value of non-rand fields in the pretty printer - for f in field_model_l: - f.set_used_rand(True, 0) - # Generate dist constraints early for generating call hash (field_model_l_og, constraint_l_og) = (field_model_l, constraint_l) # Save state so that rebuilding dist constraints is exactly the same for the copy @@ -631,32 +636,17 @@ def do_randomize( f.set_used_rand(True, 0) clear_soft_priority.clear(f) - # First, invoke pre_randomize on all elements - # TODO This is untested. What happens to pre_randomize() on a deepcopy? - for fm in cache.field_model_l: - fm.pre_randomize() - Randomizer.try_randomize(srcinfo, cache.field_model_l, solve_info, cache.bounds_v, cache.r, cache.ri) else: # Make copy of field and constraint models, together to keep FieldScalarModels the same # TODO The deepcopy() in FieldScalarModel keeps the val reference, is that the best way? 
(field_model_l, constraint_l) = copy.deepcopy((field_model_l, constraint_l)) - clear_soft_priority = ClearSoftPriorityVisitor() - - for f in field_model_l: - f.set_used_rand(True, 0) - clear_soft_priority.clear(f) - if debug > 0: print("Initial Model:") for fm in field_model_l: print(" " + ModelPrettyPrinter.print(fm)) - # First, invoke pre_randomize on all elements - for fm in field_model_l: - fm.pre_randomize() - for c in constraint_l: clear_soft_priority.clear(c) @@ -780,56 +770,6 @@ def get_pretty_call_hash(randstate, field_model_l, constraint_l): call_hash += f'{hex(id(fe_c.new_constraint))}\n' return call_hash - @staticmethod - def get_id_call_hash(field_model_l, constraint_l, fml_copy, cl_copy): - call_hash = '' - if field_model_l is not None: - for fm in field_model_l: - # Each field and its rand_mode - # TODO call_hash += f'{hex(id(fm))}-{fm.fullname}\n' - if hasattr(fm, 'field_l'): - for f in fm.field_l: - call_hash += f'{hex(id(f))}-{f.fullname}-{f.rand_mode=}\n' - # Each constraint block and whether it's enabled - if hasattr(fm, 'constraint_model_l'): - for cm in fm.constraint_model_l: - call_hash += f'{hex(id(cm))}-{cm.name}-{cm.enabled=}\n' - for fm in fml_copy: - # Each constraint block and whether it's enabled - if hasattr(fm, 'constraint_model_l'): - for cm in fm.constraint_model_l: - # TODO dist constraint hack - for c in cm.constraint_l: - if isinstance(c, ConstraintOverrideModel): - # dist_value = c.new_constraint.constraint_l[-1].expr.rhs.val().toString() - # call_hash += f'{cm.name}-{dist_value=}\n' - call_hash += f'{hex(id(c.new_constraint))}\n' - if isinstance(c, ConstraintForeachModel): - for fe_c in c.constraint_l: - if isinstance(fe_c, ConstraintOverrideModel): - call_hash += f'{hex(id(fe_c.new_constraint))}\n' - if constraint_l is not None: - # Each with constraint(block?) and its expressions - # TODO Is this missing anything? Dynamic expressions? Too aggressive? 
- for cm in constraint_l: - Randomizer.constraint_keep.append(cm) - call_hash += f'{hex(id(cm))}-{cm.name}-{cm.enabled=}\n' - for c in cm.constraint_l: - call_hash += f'{hex(id(c))}\n' - - for cm in cl_copy: - for c in cm.constraint_l: - # TODO dist constraint hack - if isinstance(c, ConstraintOverrideModel): - # dist_value = c.new_constraint.constraint_l[-1].expr.rhs.val().toString() - # call_hash += f'{c.name}-{dist_value=}\n' - call_hash += f'{hex(id(c.new_constraint))}\n' - if isinstance(c, ConstraintForeachModel): - for fe_c in c.constraint_l: - if isinstance(fe_c, ConstraintOverrideModel): - call_hash += f'{hex(id(fe_c.new_constraint))}\n' - return call_hash - @staticmethod def try_randomize(srcinfo, field_model_l, solve_info, bounds_v, r, ri): try: diff --git a/src/vsc/rand_obj.py b/src/vsc/rand_obj.py index d16784e..ce29f49 100644 --- a/src/vsc/rand_obj.py +++ b/src/vsc/rand_obj.py @@ -86,10 +86,6 @@ def __init__(self, *args, **kwargs): self.build_field_model(None) pop_srcinfo_mode() - # Prevent parent references from deepcopying - def __deepcopy__(self, memo): - return self - # Add the interposer class ret = type(T.__name__, (randobj_interposer,), dict()) diff --git a/ve/unit/test_pre_post_randomize.py b/ve/unit/test_pre_post_randomize.py index 76dd9e4..a07cd82 100644 --- a/ve/unit/test_pre_post_randomize.py +++ b/ve/unit/test_pre_post_randomize.py @@ -44,3 +44,34 @@ def post_randomize(self): self.assertEqual(len(my.temp), i+1) print("<-- randomize(%d)" % i) + + def test_pre_rand(self): + @vsc.randobj + class my_s(object): + def __init__(self): + self.a = vsc.rand_bit_t(8) + + @vsc.constraint + def a_five_c(self): + self.a == 5 + + @vsc.constraint + def a_not_five_c(self): + self.a != 5 + + def pre_randomize(self): + self.a_not_five_c.constraint_mode(not self.a_not_five_c.constraint_model.enabled) + self.a_five_c.constraint_mode(not self.a_five_c.constraint_model.enabled) + + my = my_s() + + # Alternate between constraints in pre_rand to test randomize 
caching + my.a_not_five_c.constraint_mode(False) + + for i in range(5): + my.randomize() + print(my.a, my.a_five_c.constraint_model.enabled) + if my.a_five_c.constraint_model.enabled: + self.assertEqual(my.a, 5) + else: + self.assertNotEqual(my.a, 5) \ No newline at end of file From 8841f32f4da8a0ea2e6c5e147a33bc5dec03952d Mon Sep 17 00:00:00 2001 From: Alex Wilson Date: Mon, 13 Feb 2023 13:33:57 -0700 Subject: [PATCH 10/13] Add cache_enabled bit --- src/vsc/model/randomizer.py | 61 +++++++++++++++++++------------------ src/vsc/rand_obj.py | 13 +++++--- 2 files changed, 41 insertions(+), 33 deletions(-) diff --git a/src/vsc/model/randomizer.py b/src/vsc/model/randomizer.py index 53ee794..b5d5308 100644 --- a/src/vsc/model/randomizer.py +++ b/src/vsc/model/randomizer.py @@ -101,7 +101,7 @@ def __init__(self, randstate, debug=0, lint=0, solve_fail_debug=0, solve_info=No _state_p = [0,1] _rng = None - def randomize(self, ri : RandInfo, bound_m : Dict[FieldModel,VariableBoundModel]): + def randomize(self, ri : RandInfo, bound_m : Dict[FieldModel,VariableBoundModel], cache_enabled: bool): """Randomize the variables and constraints in a RandInfo collection""" if self.solve_info is not None: @@ -158,7 +158,7 @@ def randomize(self, ri : RandInfo, bound_m : Dict[FieldModel,VariableBoundModel] # If missing from cache, initialize/build btor and randset # TODO What is going on with max_fields? It would break this # caching setup. - if rs_i not in self.btor_cache: + if not cache_enabled or rs_i not in self.btor_cache: btor = Boolector() # TODO Is self.btor used anywhere? 
# self.btor = btor @@ -282,7 +282,8 @@ def randomize(self, ri : RandInfo, bound_m : Dict[FieldModel,VariableBoundModel] # Changes made to the randset are covered by the randomization_cache # Cache btor reference for use later - self.btor_cache[rs_i] = btor + if cache_enabled: + self.btor_cache[rs_i] = btor else: # Setup some necessary variables start_rs_i = rs_i @@ -294,9 +295,9 @@ def randomize(self, ri : RandInfo, bound_m : Dict[FieldModel,VariableBoundModel] rs_i += 1 - # TODO Boolector Push()/Pop() do not release orphan expressions/nodes and - # cause Sat() calls to slow down and leak memory. Add release feature to PyBoolector? - btor.Push() + # Boolector Push/Pop does _not_ release old swizzler expressions. + if cache_enabled: + btor.Push() # btor.Sat() x = start_rs_i @@ -309,24 +310,24 @@ def randomize(self, ri : RandInfo, bound_m : Dict[FieldModel,VariableBoundModel] # Finalize the value of the field x = start_rs_i - # TODO What cleanup do we need if caching? - # reset_v = DynamicExprResetVisitor() while x < rs_i: rs = ri.randsets()[x] for f in rs.all_fields(): f: FieldScalarModel = f f.post_randomize() - # TODO What does this all do? Do we need to clean up if we're going to reuse this agian? - # f.set_used_rand(False, 0) - # f.dispose() # Get rid of the solver var, since we're done with it - # f.accept(reset_v) -# for f in rs.nontarget_field_s: -# f.dispose() - # TODO What does this all do? Do we need to clean up if we're going to reuse this agian? - # for c in rs.constraints(): - # c.accept(reset_v) - # TODO What does this all do? Do we need to clean up if we're going to reuse this agian? - # RandSetDisposeVisitor().dispose(rs) + + # TODO Does some of this need to be done while caching, too? 
+ if not cache_enabled: + reset_v = DynamicExprResetVisitor() + for f in rs.all_fields(): + f.set_used_rand(False, 0) + f.dispose() # Get rid of the solver var, since we're done with it + f.accept(reset_v) +# for f in rs.nontarget_field_s: +# f.dispose() + for c in rs.constraints(): + c.accept(reset_v) + RandSetDisposeVisitor().dispose(rs) if self.debug > 0: print("Post-Randomize: RandSet[%d]" % x) @@ -343,8 +344,9 @@ def randomize(self, ri : RandInfo, bound_m : Dict[FieldModel,VariableBoundModel] x += 1 - # TODO Cleanup boolector state. See Push() comment above. - btor.Pop() + # Release swizzler assertions + if cache_enabled: + btor.Pop() end = int(round(time.time() * 1000)) @@ -563,7 +565,8 @@ def do_randomize( constraint_l : List[ConstraintModel] = None, debug=0, lint=0, - solve_fail_debug=0): + solve_fail_debug=0, + cache_enabled=True): if profile_on(): solve_info = SolveInfo() solve_info.totaltime = time.time() @@ -583,7 +586,6 @@ def do_randomize( for fm in field_model_l: fm.pre_randomize() - cache_enabled = True for fm in field_model_l: # Skip GeneratorModel since it adds soft constraints if isinstance(fm, GeneratorModel): @@ -628,7 +630,7 @@ def do_randomize( if cache_enabled and call_hash in Randomizer.randomize_cache: cache = Randomizer.randomize_cache[call_hash] - clear_soft_priority = ClearSoftPriorityVisitor() + # clear_soft_priority = ClearSoftPriorityVisitor() # Reset cached field_model_l vars to be rand again # TODO This is untested. Are there deepcopy issues here? 
@@ -636,11 +638,12 @@ def do_randomize( f.set_used_rand(True, 0) clear_soft_priority.clear(f) - Randomizer.try_randomize(srcinfo, cache.field_model_l, solve_info, cache.bounds_v, cache.r, cache.ri) + Randomizer.try_randomize(srcinfo, cache.field_model_l, solve_info, cache.bounds_v, cache.r, cache.ri, cache_enabled) else: # Make copy of field and constraint models, together to keep FieldScalarModels the same # TODO The deepcopy() in FieldScalarModel keeps the val reference, is that the best way? - (field_model_l, constraint_l) = copy.deepcopy((field_model_l, constraint_l)) + if cache_enabled: + (field_model_l, constraint_l) = copy.deepcopy((field_model_l, constraint_l)) if debug > 0: print("Initial Model:") @@ -699,7 +702,7 @@ def do_randomize( ri = RandInfoBuilder.build(field_model_l, constraint_l, Randomizer._rng) # TODO Unecessary function refactor? - Randomizer.try_randomize(srcinfo, field_model_l, solve_info, bounds_v, r, ri) + Randomizer.try_randomize(srcinfo, field_model_l, solve_info, bounds_v, r, ri, cache_enabled) # Cache all interesting variables for later if cache_enabled: @@ -771,9 +774,9 @@ def get_pretty_call_hash(randstate, field_model_l, constraint_l): return call_hash @staticmethod - def try_randomize(srcinfo, field_model_l, solve_info, bounds_v, r, ri): + def try_randomize(srcinfo, field_model_l, solve_info, bounds_v, r, ri, cache_enabled): try: - r.randomize(ri, bounds_v.bound_m) + r.randomize(ri, bounds_v.bound_m, cache_enabled) finally: # Rollback any constraints we've replaced for arrays if solve_info is not None: diff --git a/src/vsc/rand_obj.py b/src/vsc/rand_obj.py index ce29f49..c64cd55 100644 --- a/src/vsc/rand_obj.py +++ b/src/vsc/rand_obj.py @@ -157,7 +157,8 @@ def set_randstate(self, rs): def randomize(self, debug=0, lint=0, - solve_fail_debug=0): + solve_fail_debug=0, + cache_enabled=True): ro_int = self._get_ro_int() if self.src_info == None: frame = inspect.stack()[1] @@ -171,7 +172,8 @@ def randomize(self, [model], debug=debug, 
lint=lint, - solve_fail_debug=solve_fail_debug) + solve_fail_debug=solve_fail_debug, + cache_enabled=cache_enabled) except SolveFailure as e: print(e.diagnostics) raise e @@ -271,7 +273,8 @@ def __exit__(self, t, v, tb): [c], debug=self.debug, lint=self.lint, - solve_fail_debug=self.solve_fail_debug) + solve_fail_debug=self.solve_fail_debug, + cache_enabled=self.cache_enabled) except SolveFailure as e: print(e.diagnostics) raise e @@ -279,12 +282,14 @@ def __exit__(self, t, v, tb): def randomize_with(self, debug=0, lint=0, - solve_fail_debug=0): + solve_fail_debug=0, + cache_enabled=True): # Ensure the 'model' data structures have been built self.get_model() self.debug = debug self.lint = lint self.solve_fail_debug = solve_fail_debug + self.cache_enabled=cache_enabled return self From de3eca313b5d4ebf7db1fcff55ec1164316b162f Mon Sep 17 00:00:00 2001 From: Alex Wilson Date: Wed, 15 Feb 2023 17:08:54 -0700 Subject: [PATCH 11/13] Skip caching on dist constraints; Comment cleanup Generating the dist constraints and then trying to grab the object ids for ConstraintOverrideModels inside constraints didn't seem very robust. Now it just looks for the ' dist { ' string the pretty printer uses to detect dist constraints and skip caching on them. Seems cleaner for now to just skip them. --- src/vsc/model/randomizer.py | 83 ++++++++----------------------------- 1 file changed, 18 insertions(+), 65 deletions(-) diff --git a/src/vsc/model/randomizer.py b/src/vsc/model/randomizer.py index b5d5308..dcf6cdb 100644 --- a/src/vsc/model/randomizer.py +++ b/src/vsc/model/randomizer.py @@ -73,11 +73,6 @@ class Randomizer(RandIF): randomize_cache = {} - # HACK This is meant to keep constraint object IDs unique by preventing garbage collection - # and cycling IDs. Generating unique IDs regardless of lifetimes or better hashing that - # explores the entire constraint might be better. 
- constraint_keep = [] - def __init__(self, randstate, debug=0, lint=0, solve_fail_debug=0, solve_info=None): self.randstate = randstate self.pretty_printer = ModelPrettyPrinter() @@ -587,7 +582,7 @@ def do_randomize( fm.pre_randomize() for fm in field_model_l: - # Skip GeneratorModel since it adds soft constraints + # Skip GeneratorModel since it adds new soft constraints each call, not cache friendly if isinstance(fm, GeneratorModel): cache_enabled = False break @@ -600,44 +595,27 @@ def do_randomize( for f in fm.field_l: if hasattr(f, 'field_l'): f.latest_field_l = None - - # Generate dist constraints early for generating call hash + # Save off original variables for FieldArrayModel hack after randomize (field_model_l_og, constraint_l_og) = (field_model_l, constraint_l) - # Save state so that rebuilding dist constraints is exactly the same for the copy - state = randstate.rng.getstate() - for fm in field_model_l_og: - DistConstraintBuilder.build(randstate, fm) - for c in constraint_l_og: - DistConstraintBuilder.build(randstate, c) - randstate.rng.setstate(state) # Create a unique string for this call based on object ids and mode bits - # Is there more than rand_mode and constraint_mode to cover here? + # TODO Is there more than rand_mode and constraint_mode to cover here? # TODO Can we cache the base constraints so that with constraints have a prebuilt # model and such to build off of? 
- # call_hash = Randomizer.get_id_call_hash(field_model_l, constraint_l, field_model_l_copy, constraint_l_copy) call_hash = Randomizer.get_pretty_call_hash(randstate, field_model_l_og, constraint_l_og) - - for fm in field_model_l_og: - ConstraintOverrideRollbackVisitor.rollback(fm) - - if call_hash in Randomizer.randomize_cache: - cache = Randomizer.randomize_cache[call_hash] - cache.r.btor_cache_uses -= 1 - if cache.r.btor_cache_uses <= 0: - del Randomizer.randomize_cache[call_hash] + # Skip dist constraints b/c they cost building bounds and array/dist constraints first + # HACK What's the best way to detect if there are dist constraints? + if ' dist { ' in call_hash: + cache_enabled = False + else: + if call_hash in Randomizer.randomize_cache: + cache = Randomizer.randomize_cache[call_hash] + cache.r.btor_cache_uses -= 1 + if cache.r.btor_cache_uses <= 0: + del Randomizer.randomize_cache[call_hash] if cache_enabled and call_hash in Randomizer.randomize_cache: cache = Randomizer.randomize_cache[call_hash] - - # clear_soft_priority = ClearSoftPriorityVisitor() - - # Reset cached field_model_l vars to be rand again - # TODO This is untested. Are there deepcopy issues here? - for f in cache.field_model_l: - f.set_used_rand(True, 0) - clear_soft_priority.clear(f) - Randomizer.try_randomize(srcinfo, cache.field_model_l, solve_info, cache.bounds_v, cache.r, cache.ri, cache_enabled) else: # Make copy of field and constraint models, together to keep FieldScalarModels the same @@ -727,9 +705,9 @@ def get_pretty_call_hash(randstate, field_model_l, constraint_l): for fm in field_model_l: # TODO This pretty print is an expensive call. Need a better way # to construct a unique ID/hash that doesn't depend on - # object lifetimes + # object lifetimes. Can some of this be cached? 
call_hash += ModelPrettyPrinter.print(fm, print_values=True) - # Each constraint block and whether it's enabled + # Each variable and whether it's rand if hasattr(fm, 'field_l'): for f in fm.field_l: call_hash += f'{f.fullname}-{f.rand_mode=}\n' @@ -737,40 +715,15 @@ def get_pretty_call_hash(randstate, field_model_l, constraint_l): if hasattr(fm, 'constraint_model_l'): for cm in fm.constraint_model_l: call_hash += f'{cm.name}-{cm.enabled=}\n' - if hasattr(fm, 'constraint_model_l'): - for cm in fm.constraint_model_l: - # TODO dist constraint hack - # T - for c in cm.constraint_l: - if isinstance(c, ConstraintOverrideModel): - # dist_value = c.new_constraint.constraint_l[-1].expr.rhs.val().toString() - # call_hash += f'{cm.name}-{dist_value=}\n' - call_hash += f'{hex(id(c.new_constraint))}\n' - if isinstance(c, ConstraintForeachModel): - for fe_c in c.constraint_l: - if isinstance(fe_c, ConstraintOverrideModel): - call_hash += f'{hex(id(fe_c.new_constraint))}\n' + # TODO Is there a way to detect or quickly generate dist constraints? if constraint_l is not None: # Each with constraint(block?) and its expressions # TODO Is this missing anything? Dynamic expressions? Too aggressive? 
         for cm in constraint_l:
             call_hash += ModelPrettyPrinter.print(cm, print_values=True)
-            # HACK Place with constraints inside list forever to prevent obj ID reuse
-            Randomizer.constraint_keep.append(cm)
-            call_hash += f'{hex(id(cm))}-{cm.name}-{cm.enabled=}\n'
-            for c in cm.constraint_l:
-                call_hash += f'{hex(id(c))}\n'
-            for c in cm.constraint_l:
-                # TODO dist constraint hack
-                if isinstance(c, ConstraintOverrideModel):
-                    # dist_value = c.new_constraint.constraint_l[-1].expr.rhs.val().toString()
-                    # call_hash += f'{c.name}-{dist_value=}\n'
-                    call_hash += f'{hex(id(c.new_constraint))}\n'
-                if isinstance(c, ConstraintForeachModel):
-                    for fe_c in c.constraint_l:
-                        if isinstance(fe_c, ConstraintOverrideModel):
-                            call_hash += f'{hex(id(fe_c.new_constraint))}\n'
+            call_hash += f'{cm.name}-{cm.enabled=}\n'
+        # TODO Is there a way to detect or quickly generate dist constraints?
         return call_hash
 
     @staticmethod

From c22f9a0940a959a405094c316503ac7a125ea9bd Mon Sep 17 00:00:00 2001
From: Alex Wilson
Date: Fri, 17 Feb 2023 13:11:52 -0700
Subject: [PATCH 12/13] Add min num of calls before caching; Change FieldArrayModel caching hack

Testing out risc-dv discovered a memory leak issue where caching every
call and then being deepcopied would explode memory. Adding a min number
of calls before caching prevents that from happening. The real fix would
be to look into deepcopy and make sure the cache is dropped (or even
shared?) when deepcopied.

The risc-dv pygen tests also discovered more issues with FieldArrayModel
and syncing the top level references to FieldScalarModel to the cached /
deepcopied instances. This still looks hacky, but is restricted to just
the Randomizer class when caching is enabled.

Lots of little whitespace and TODO/HACK message cleanup.
--- src/vsc/model/field_array_model.py | 12 +---- src/vsc/model/randomizer.py | 76 ++++++++++++++++-------------- src/vsc/types.py | 14 +++--- 3 files changed, 48 insertions(+), 54 deletions(-) diff --git a/src/vsc/model/field_array_model.py b/src/vsc/model/field_array_model.py index cb10292..3388f83 100644 --- a/src/vsc/model/field_array_model.py +++ b/src/vsc/model/field_array_model.py @@ -48,17 +48,7 @@ def __init__(self, is_rand_sz) self.size.parent = self self._set_size(0) - - # Update this field at the end of each randomization - # TODO Remove this field from the deepcopy? - self.latest_field_l = None - - def get_field_l(self): - """Retrieve the latest field_l""" - if self.latest_field_l is not None: - return self.latest_field_l - return self.field_l - + def append(self, fm): super().add_field(fm) self._set_size(len(self.field_l)) diff --git a/src/vsc/model/randomizer.py b/src/vsc/model/randomizer.py index dcf6cdb..3bda1fb 100644 --- a/src/vsc/model/randomizer.py +++ b/src/vsc/model/randomizer.py @@ -19,9 +19,9 @@ # # @author: ballance - import copy from dataclasses import dataclass +import random import sys import time from typing import List, Dict @@ -31,8 +31,6 @@ from vsc.constraints import constraint, soft from vsc.model.bin_expr_type import BinExprType from vsc.model.constraint_model import ConstraintModel -from vsc.model.constraint_override_model import ConstraintOverrideModel -from vsc.model.constraint_foreach_model import ConstraintForeachModel from vsc.model.constraint_soft_model import ConstraintSoftModel from vsc.model.expr_bin_model import ExprBinModel from vsc.model.expr_fieldref_model import ExprFieldRefModel @@ -72,6 +70,7 @@ class Randomizer(RandIF): EN_DEBUG = False randomize_cache = {} + randomize_call_count = {} def __init__(self, randstate, debug=0, lint=0, solve_fail_debug=0, solve_info=None): self.randstate = randstate @@ -141,18 +140,18 @@ def randomize(self, ri : RandInfo, bound_m : Dict[FieldModel,VariableBoundModel] idx = 
self.randstate.randint(0, len(range_l)-1) uf.set_val(range_l[idx][0]) - # TODO We want to re-randomize unconstrained variables when caching. Do we need to lock? # Lock so we don't overwrite - # uf.set_used_rand(False) + if not cache_enabled: + uf.set_used_rand(False) rs_i = 0 start_rs_i = 0 + # TODO What is going on with max_fields? It would probably + # break this caching setup. # max_fields = 20 max_fields = 0 while rs_i < len(ri.randsets()): # If missing from cache, initialize/build btor and randset - # TODO What is going on with max_fields? It would break this - # caching setup. if not cache_enabled or rs_i not in self.btor_cache: btor = Boolector() # TODO Is self.btor used anywhere? @@ -202,7 +201,7 @@ def randomize(self, ri : RandInfo, bound_m : Dict[FieldModel,VariableBoundModel] # rs_i += 1 if n_fields > max_fields or rs.order != -1: break - + for c in constraint_l: try: btor.Assume(c[1]) @@ -237,7 +236,7 @@ def randomize(self, ri : RandInfo, bound_m : Dict[FieldModel,VariableBoundModel] btor.Assert(c[1]) # If there are soft constraints, add these now - if len(soft_constraint_l) > 0: + if len(soft_constraint_l) > 0: for c in soft_constraint_l: try: btor.Assume(c[1]) @@ -247,7 +246,7 @@ def randomize(self, ri : RandInfo, bound_m : Dict[FieldModel,VariableBoundModel] raise e if self.solve_info is not None: - self.solve_info.n_sat_calls += 1 + self.solve_info.n_sat_calls += 1 if btor.Sat() != btor.SAT: # All the soft constraints cannot be satisfied. 
We'll need to # add them incrementally @@ -258,7 +257,7 @@ def randomize(self, ri : RandInfo, bound_m : Dict[FieldModel,VariableBoundModel] btor.Assume(c[1]) if self.solve_info is not None: - self.solve_info.n_sat_calls += 1 + self.solve_info.n_sat_calls += 1 if btor.Sat() == btor.SAT: if self.debug > 0: print("Note: soft constraint %s (%d) passed" % ( @@ -275,7 +274,7 @@ def randomize(self, ri : RandInfo, bound_m : Dict[FieldModel,VariableBoundModel] for c in soft_constraint_l: btor.Assert(c[1]) - # Changes made to the randset are covered by the randomization_cache + # Changes made to the randset are covered by the randomization_cache # Cache btor reference for use later if cache_enabled: self.btor_cache[rs_i] = btor @@ -588,13 +587,16 @@ def do_randomize( break if cache_enabled: - # HACK Fill out field_l in FieldArrayModels so that look ups work - # This breaks deepcopy since it'll now have deepcopy references... + # HACK Clear out field_l in FieldArrayModel from previous cache for fm in field_model_l: if hasattr(fm, 'field_l'): for f in fm.field_l: - if hasattr(f, 'field_l'): - f.latest_field_l = None + if hasattr(f, 'field_l') and hasattr(f, 'old_field_l'): + # Revert to original value + f.field_l = f.old_field_l + elif hasattr(f, 'field_l'): + # Save off old, original value + f.old_field_l = f.field_l # Save off original variables for FieldArrayModel hack after randomize (field_model_l_og, constraint_l_og) = (field_model_l, constraint_l) @@ -608,6 +610,15 @@ def do_randomize( if ' dist { ' in call_hash: cache_enabled = False else: + if call_hash not in Randomizer.randomize_call_count: + Randomizer.randomize_call_count[call_hash] = 0 + Randomizer.randomize_call_count[call_hash] += 1 + + # Don't cache until this call_hash was seen N times + if Randomizer.randomize_call_count[call_hash] < 2: + cache_enabled = False + + # Reset cache entry after N uses due to Boolector model growth if call_hash in Randomizer.randomize_cache: cache = 
Randomizer.randomize_cache[call_hash] cache.r.btor_cache_uses -= 1 @@ -623,33 +634,30 @@ def do_randomize( if cache_enabled: (field_model_l, constraint_l) = copy.deepcopy((field_model_l, constraint_l)) - if debug > 0: - print("Initial Model:") + if debug > 0: + print("Initial Model:") for fm in field_model_l: print(" " + ModelPrettyPrinter.print(fm)) for c in constraint_l: clear_soft_priority.clear(c) - # Collect all variables (pre-array) and establish bounds + # Collect all variables (pre-array) and establish bounds bounds_v = VariableBoundVisitor() bounds_v.process(field_model_l, constraint_l, False) # TODO: need to handle inline constraints that impact arrays constraints_len = len(constraint_l) - # TODO dist are handled as soft constraints that caching doesn't recalculate... for fm in field_model_l: constraint_l.extend(ArrayConstraintBuilder.build( fm, bounds_v.bound_m)) # Now, handle dist constraints - # TODO Does this depend on the ArrayConstraintBuilder above? DistConstraintBuilder.build(randstate, fm) for c in constraint_l: constraint_l.extend(ArrayConstraintBuilder.build( c, bounds_v.bound_m)) # Now, handle dist constraints - # TODO Does this depend on the ArrayConstraintBuilder above? DistConstraintBuilder.build(randstate, c) # If we made changes during array remodeling, @@ -658,7 +666,7 @@ def do_randomize( bounds_v.process(field_model_l, constraint_l) if debug > 0: - print("Final Model:") + print("Final Model:") for fm in field_model_l: print(" " + ModelPrettyPrinter.print(fm)) for c in constraint_l: @@ -672,36 +680,35 @@ def do_randomize( r = Randomizer( randstate, solve_info=solve_info, - debug=debug, - lint=lint, + debug=debug, + lint=lint, solve_fail_debug=solve_fail_debug) # if Randomizer._rng is None: # Randomizer._rng = random.Random(random.randrange(sys.maxsize)) ri = RandInfoBuilder.build(field_model_l, constraint_l, Randomizer._rng) - # TODO Unecessary function refactor? 
Randomizer.try_randomize(srcinfo, field_model_l, solve_info, bounds_v, r, ri, cache_enabled) - # Cache all interesting variables for later + # Cache all interesting variables for later if cache_enabled: Randomizer.randomize_cache[call_hash] = rand_cache_entry(bounds_v, ri, r, field_model_l, constraint_l) - # HACK Fill out field_l in FieldArrayModels so that look ups work - # This breaks deepcopy since it'll now have deepcopy references... + # HACK Fill out field_l in FieldArrayModels so that array lookups work in model if cache_enabled: field_model_l = Randomizer.randomize_cache[call_hash].field_model_l for fm_new, fm_og in zip(field_model_l, field_model_l_og): if hasattr(fm_og, 'field_l'): for f_new, f_og in zip(fm_new.field_l, fm_og.field_l): if hasattr(f_og, 'field_l'): - f_og.latest_field_l = f_new.field_l + f_og.field_l = f_new.field_l + @staticmethod def get_pretty_call_hash(randstate, field_model_l, constraint_l): call_hash = '' call_hash += f'{hex(id(randstate))}\n' - if field_model_l is not None: + if field_model_l is not None: for fm in field_model_l: # TODO This pretty print is an expensive call. Need a better way # to construct a unique ID/hash that doesn't depend on @@ -715,15 +722,12 @@ def get_pretty_call_hash(randstate, field_model_l, constraint_l): if hasattr(fm, 'constraint_model_l'): for cm in fm.constraint_model_l: call_hash += f'{cm.name}-{cm.enabled=}\n' - # TODO Is there a way to detect or quickly generate dist constraints? - if constraint_l is not None: - # Each with constraint(block?) and its expressions - # TODO Is this missing anything? Dynamic expressions? Too aggressive? + if constraint_l is not None: + # Each with constraint(block?), its expressions, and enabled bit for cm in constraint_l: call_hash += ModelPrettyPrinter.print(cm, print_values=True) call_hash += f'{cm.name}-{cm.enabled=}\n' - # TODO Is there a way to detect or quickly generate dist constraints? 
return call_hash @staticmethod diff --git a/src/vsc/types.py b/src/vsc/types.py index a75e5f6..4a72b80 100644 --- a/src/vsc/types.py +++ b/src/vsc/types.py @@ -964,11 +964,11 @@ def __contains__(self, lhs): if self.is_enum: ei : EnumInfo = self.t.enum_i val = ei.e2v(lhs) - for f in model.get_field_l(): + for f in model.field_l: if int(f.get_val()) == val: return True elif self.is_scalar: - for f in model.get_field_l(): + for f in model.field_l: if int(f.get_val()) == int(lhs): return True else: @@ -987,7 +987,7 @@ def __next__(self): raise StopIteration() else: # The model's view is always masked 2's complement - v = int(self.model.get_field_l()[self.idx].get_val()) + v = int(self.model.field_l[self.idx].get_val()) if self.l.t.is_signed: if (v & (1 << (self.l.t.width-1))) != 0: @@ -1041,10 +1041,10 @@ def __getitem__(self, k): else: if self.is_enum: ei : EnumInfo = self.t.enum_i - return ei.v2e(model.get_field_l()[k].get_val()) + return ei.v2e(model.field_l[k].get_val()) elif self.is_scalar: # The model's view is always masked 2's complement - v = int(model.get_field_l()[k].get_val()) + v = int(model.field_l[k].get_val()) if self.t.is_signed: if (v & (1 << (self.t.width-1))) != 0: @@ -1073,12 +1073,12 @@ def __str__(self): for i in range(self.size): if i > 0: ret += ", " - ret += str(ei.v2e(model.get_field_l()[i].get_val())) + ret += str(ei.v2e(model.field_l[i].get_val())) elif self.is_scalar: for i in range(self.size): if i > 0: ret += ", " - ret += model.get_field_l()[i].get_val().toString() + ret += model.field_l[i].get_val().toString() else: for i in range(self.size): if i > 0: From cc0f365380595a1b05412be48fe3935d25af6dc8 Mon Sep 17 00:00:00 2001 From: Alex Wilson Date: Wed, 22 Feb 2023 12:27:20 -0700 Subject: [PATCH 13/13] Add initial rand seed to call hash string Sometimes the rand stability test would fail due to a call hash repeating triggering the cache_enabled path. 
This is probably due to the randstate obj ID being reused, so this adds the initial seed to that as well. Not sure if this is the proper fix yet. --- src/vsc/model/rand_state.py | 4 +++- src/vsc/model/randomizer.py | 6 ++++-- ve/unit/test_constraint_dist.py | 4 ++-- 3 files changed, 9 insertions(+), 5 deletions(-) diff --git a/src/vsc/model/rand_state.py b/src/vsc/model/rand_state.py index 0c07500..33360f0 100644 --- a/src/vsc/model/rand_state.py +++ b/src/vsc/model/rand_state.py @@ -9,11 +9,13 @@ class RandState(object): def __init__(self, seed): self.rng = random.Random() - self.rng.seed(f"{seed}") + self.init_seed = f"{seed}" + self.rng.seed(self.init_seed) def clone(self) -> 'RandState': randState = RandState("") randState.rng.setstate(self.rng.getstate()) + randState.init_seed = self.init_seed return randState def rand_u(self): diff --git a/src/vsc/model/randomizer.py b/src/vsc/model/randomizer.py index 3bda1fb..01479fd 100644 --- a/src/vsc/model/randomizer.py +++ b/src/vsc/model/randomizer.py @@ -43,6 +43,7 @@ from vsc.model.rand_if import RandIF from vsc.model.rand_info import RandInfo from vsc.model.rand_info_builder import RandInfoBuilder +from vsc.model.rand_state import RandState from vsc.model.variable_bound_model import VariableBoundModel from vsc.visitors.array_constraint_builder import ArrayConstraintBuilder from vsc.visitors.constraint_override_rollback_visitor import ConstraintOverrideRollbackVisitor @@ -553,7 +554,7 @@ def _collect_failing_constraints(self, @staticmethod def do_randomize( - randstate, + randstate: RandState, srcinfo : SourceInfo, field_model_l : List[FieldModel], constraint_l : List[ConstraintModel] = None, @@ -705,9 +706,10 @@ def do_randomize( @staticmethod - def get_pretty_call_hash(randstate, field_model_l, constraint_l): + def get_pretty_call_hash(randstate: RandState, field_model_l, constraint_l): call_hash = '' call_hash += f'{hex(id(randstate))}\n' + call_hash += f'{randstate.init_seed=}\n' if field_model_l is not None: 
for fm in field_model_l: # TODO This pretty print is an expensive call. Need a better way diff --git a/ve/unit/test_constraint_dist.py b/ve/unit/test_constraint_dist.py index d4bce47..1490b05 100644 --- a/ve/unit/test_constraint_dist.py +++ b/ve/unit/test_constraint_dist.py @@ -690,7 +690,7 @@ def show_it(obj, rs): print() def run_obj(obj): - cnt = 6 + cnt = 100 print(f'Using {type(obj).__name__}') print('------------------') @@ -703,4 +703,4 @@ def run_obj(obj): obj = With_Dist() run_obj(obj) - \ No newline at end of file +