We attempt to replicate the Monoid proof, but using our new workflow and the local provers.

In [1]:
import $cp.bin.`provingground-core-jvm-3d48753.fat.jar`
Out[1]:
import $cp.$                                           
In [2]:
import provingground._ , interface._, HoTT._, learning._ 
repl.pprinter() = {
  val p = repl.pprinter()
  p.copy(
    additionalHandlers = p.additionalHandlers.orElse {
      translation.FansiShow.fansiHandler
    }
  )
}
Out[2]:
import provingground._ , interface._, HoTT._, learning._ 
In [3]:
import provingground._ , interface._, HoTT._
import learning._
import library._, MonoidSimple._
Out[3]:
import provingground._ , interface._, HoTT._

import learning._

import library._, MonoidSimple._
In [4]:
val tg = TermGenParams(lmW=0, piW=0)
val ts = TermState(dist1, dist1.map(_.typ))
Out[4]:
tg: TermGenParams = TermGenParams(
  0.1,
  0.1,
  0.1,
  0.0,
  0.0,
  0.05,
  0.05,
  0.05,
  0.0,
  0.0,
  0.0,
  0.0,
  0.3,
  0.7,
  0.5,
  0.0,
  0.0,
  0.0
)
ts: TermState = TermState(
  FiniteDistribution(
    Vector(
      Weighted(e_l, 0.047619047619047616),
      Weighted(e_r, 0.047619047619047616),
      Weighted(mul, 0.047619047619047616),
      Weighted(eqM, 0.047619047619047616),
      Weighted(axiom_{eqM(a)(a)}, 0.047619047619047616),
      Weighted(axiom_{(eqM(a)(b) \to eqM(b)(a))}, 0.047619047619047616),
      Weighted(
        axiom_{(eqM(a)(b) \to (eqM(b)(c) \to eqM(a)(c)))},
        0.047619047619047616
      ),
      Weighted(axiom_{eqM(mul(e_l)(a))(a)}, 0.047619047619047616),
      Weighted(axiom_{eqM(mul(a)(e_r))(a)}, 0.047619047619047616),
      Weighted(eqM, 0.2857142857142857),
      Weighted(mul, 0.2857142857142857)
    )
  ),
  FiniteDistribution(
    Vector(
      Weighted(M, 0.047619047619047616),
      Weighted(M, 0.047619047619047616),
      Weighted((M → (M → M)), 0.047619047619047616),
      Weighted((M → (M → 𝒰 )), 0.047619047619047616),
      Weighted(∏(a : M){ eqM(a)(a) }, 0.047619047619047616),
      Weighted(
        ∏(a : M){ ∏(b : M){ (eqM(a)(b) → eqM(b)(a)) } },
        0.047619047619047616
      ),
      Weighted(
        ∏(a : M){ ∏(b : M){ ∏(c : M){ (eqM(a)(b) → (eqM(b)(c) → eqM(a)(c))) } } },
        0.047619047619047616
      ),
      Weighted(∏(a : M){ eqM(mul(e_l)(a))(a) }, 0.047619047619047616),
      Weighted(∏(a : M){ eqM(mul(a)(e_r))(a) }, 0.047619047619047616),
      Weighted((M → (M → 𝒰 )), 0.2857142857142857),
      Weighted((M → (M → M)), 0.2857142857142857)
...
In [5]:
import monix.execution.Scheduler.Implicits.global
Out[5]:
import monix.execution.Scheduler.Implicits.global 
In [6]:
val lp = LocalProver(ts).noIsles.sharpen(10)
Out[6]:
lp: LocalProver = LocalProver(
  TermState(
    FiniteDistribution(
      Vector(
        Weighted(e_l, 0.047619047619047616),
        Weighted(e_r, 0.047619047619047616),
        Weighted(mul, 0.047619047619047616),
        Weighted(eqM, 0.047619047619047616),
        Weighted(axiom_{eqM(a)(a)}, 0.047619047619047616),
        Weighted(axiom_{(eqM(a)(b) \to eqM(b)(a))}, 0.047619047619047616),
        Weighted(
          axiom_{(eqM(a)(b) \to (eqM(b)(c) \to eqM(a)(c)))},
          0.047619047619047616
        ),
        Weighted(axiom_{eqM(mul(e_l)(a))(a)}, 0.047619047619047616),
        Weighted(axiom_{eqM(mul(a)(e_r))(a)}, 0.047619047619047616),
        Weighted(eqM, 0.2857142857142857),
        Weighted(mul, 0.2857142857142857)
      )
    ),
    FiniteDistribution(
      Vector(
        Weighted(M, 0.047619047619047616),
        Weighted(M, 0.047619047619047616),
        Weighted((M → (M → M)), 0.047619047619047616),
        Weighted((M → (M → 𝒰 )), 0.047619047619047616),
        Weighted(∏(a : M){ eqM(a)(a) }, 0.047619047619047616),
        Weighted(
          ∏(a : M){ ∏(b : M){ (eqM(a)(b) → eqM(b)(a)) } },
          0.047619047619047616
        ),
        Weighted(
          ∏(a : M){ ∏(b : M){ ∏(c : M){ (eqM(a)(b) → (eqM(b)(c) → eqM(a)(c))) } } },
          0.047619047619047616
        ),
        Weighted(∏(a : M){ eqM(mul(e_l)(a))(a) }, 0.047619047619047616),
        Weighted(∏(a : M){ eqM(mul(a)(e_r))(a) }, 0.047619047619047616),
        Weighted((M → (M → 𝒰 )), 0.2857142857142857),
...
In [7]:
val lemT = lp.lemmas
Out[7]:
lemT: monix.eval.Task[Vector[(Typ[Term], Double)]] = Async(
  <function2>,
  false,
  true,
  true
)
In [8]:
val lemF = lemT.runToFuture
lemF: monix.execution.CancelableFuture[Vector[(Typ[Term], Double)]] = Success(
  Vector(
    (eqM(e_l)(mul(e_l)(e_r)), 0.0011464127623522545),
    (eqM(e_l)(mul(e_l)(e_l)), 0.0011464127623522545),
    (eqM(e_r)(mul(e_l)(e_r)), 0.0011464127623522545),
    (eqM(e_r)(mul(e_r)(e_r)), 0.0011464127623522545),
    (eqM(e_r)(e_r), 1.4502068672697299E-5),
    (eqM(e_l)(e_l), 1.4502068672697299E-5),
    (eqM(mul(e_l)(e_r))(e_l), 1.2959421862468418E-5),
    (eqM(mul(e_l)(e_l))(e_l), 1.2959421862468418E-5),
    (eqM(mul(e_l)(e_r))(e_r), 1.2959421862468418E-5),
    (eqM(mul(e_r)(e_r))(e_r), 1.2959421862468418E-5),
    (eqM(mul(e_l)(e_r))(mul(e_l)(mul(e_l)(e_r))), 1.2720945176056593E-5),
    (eqM(mul(e_r)(e_l))(mul(e_l)(mul(e_r)(e_l))), 1.2720945176056593E-5),
    (eqM(mul(e_r)(e_r))(mul(e_l)(mul(e_r)(e_r))), 1.2720945176056593E-5),
    (eqM(mul(e_l)(e_l))(mul(e_l)(mul(e_l)(e_l))), 1.2720945176056593E-5),
    (eqM(mul(e_l)(e_r))(mul(e_l)(e_r)), 1.1645910816131709E-5),
    (eqM(mul(e_r)(e_l))(mul(e_r)(e_l)), 1.1645910816131709E-5),
    (eqM(mul(e_l)(e_l))(mul(e_l)(e_l)), 1.1645910816131709E-5),
    (eqM(mul(e_r)(e_r))(mul(e_r)(e_r)), 1.1645910816131709E-5)
  )
)
In [9]:
val tangLpT = lp.proofTangent()
Out[9]:
tangLpT: monix.eval.Task[LocalTangentProver] = FlatMap(
  FlatMap(
    Async(<function2>, false, true, true),
    provingground.learning.LocalProverStep$$Lambda$2821/298946064@190c0b53
  ),
  provingground.learning.LocalProver$$Lambda$2822/309394961@2954245e
)
In [10]:
val thmsT = tangLpT.flatMap(_.theoremsByStatement)
Out[10]:
thmsT: monix.eval.Task[FiniteDistribution[Typ[Term]]] = FlatMap(
  FlatMap(
    FlatMap(
      Async(<function2>, false, true, true),
      provingground.learning.LocalProverStep$$Lambda$2821/298946064@190c0b53
    ),
    provingground.learning.LocalProver$$Lambda$2822/309394961@2954245e
  ),
  ammonite.$sess.cmd9$Helper$$Lambda$2835/317800253@7be17ff9
)
In [11]:
val thmsF = thmsT.runToFuture
thmsF: monix.execution.CancelableFuture[FiniteDistribution[Typ[Term]]] = Failure(
  java.util.concurrent.TimeoutException: Task timed-out after 12 minutes of inactivity
)
In [12]:
thmsF.value
Out[12]:
res11: Option[scala.util.Try[FiniteDistribution[Typ[Term]]]] = None
In [13]:
val tangLpT0 = tangLpT.map(_.copy(cutoff = math.pow(10, -3)))
Out[13]:
tangLpT0: monix.eval.Task[LocalTangentProver] = Map(
  FlatMap(
    FlatMap(
      Async(<function2>, false, true, true),
      provingground.learning.LocalProverStep$$Lambda$2821/298946064@190c0b53
    ),
    provingground.learning.LocalProver$$Lambda$2822/309394961@2954245e
  ),
  ammonite.$sess.cmd12$Helper$$Lambda$2889/1942287874@15f5d189,
  0
)
In [14]:
val thmsT0 = tangLpT0.flatMap(_.theoremsByStatement)
Out[14]:
thmsT0: monix.eval.Task[FiniteDistribution[Typ[Term]]] = FlatMap(
  Map(
    FlatMap(
      FlatMap(
        Async(<function2>, false, true, true),
        provingground.learning.LocalProverStep$$Lambda$2821/298946064@190c0b53
      ),
      provingground.learning.LocalProver$$Lambda$2822/309394961@2954245e
    ),
    ammonite.$sess.cmd12$Helper$$Lambda$2889/1942287874@15f5d189,
    0
  ),
  ammonite.$sess.cmd13$Helper$$Lambda$2893/1861179875@286ad0c
)
In [15]:
val thmsF0 = thmsT0.runToFuture
thmsF0: monix.execution.CancelableFuture[FiniteDistribution[Typ[Term]]] = Success(
  FiniteDistribution(
    Vector(
      Weighted(eqM(e_r)(e_r), 0.2004378885521395),
      Weighted(eqM(mul(e_l)(e_r))(mul(e_l)(e_r)), 8.942367376597229E-4),
      Weighted(eqM(mul(e_l)(e_r))(e_l), 0.004920889764529055),
      Weighted(
        eqM(mul(e_l)(e_r))(mul(e_l)(mul(e_l)(e_r))),
        2.356598569957204E-4
      ),
      Weighted(eqM(mul(e_l)(e_l))(mul(e_l)(e_r)), 8.942367376597229E-4),
      Weighted(eqM(e_l)(e_l), 0.2004378885521395),
      Weighted(∏(a : M){ eqM(mul(e_l)(a))(a) }, 0.026244220822213234),
      Weighted(eqM(mul(e_l)(e_l))(e_l), 0.004920889764529055),
      Weighted(M, 0.05248844164442647),
      Weighted(eqM(e_l)(mul(e_l)(e_r)), 0.005053873310466531),
      Weighted((M → (M → M)), 0.18370954575549264),
      Weighted(eqM(mul(e_l)(e_r))(mul(e_r)(e_r)), 8.942367376597229E-4),
      Weighted(
        ∏(a : M){ ∏(b : M){ ∏(c : M){ (eqM(a)(b) → (eqM(b)(c) → eqM(a)(c))) } } },
        0.026244220822213234
      ),
      Weighted(
        ∏(a : M){ ∏(b : M){ (eqM(a)(b) → eqM(b)(a)) } },
        0.026244220822213234
      ),
      Weighted(eqM(mul(e_r)(e_l))(mul(e_r)(e_l)), 8.942367376597229E-4),
      Weighted(∏(a : M){ eqM(a)(a) }, 0.026244220822213234),
      Weighted(eqM(mul(e_l)(e_l))(mul(e_l)(e_l)), 8.942367376597229E-4),
      Weighted(∏(a : M){ eqM(mul(a)(e_r))(a) }, 0.026244220822213234),
      Weighted(eqM(mul(e_l)(e_r))(mul(e_l)(e_l)), 8.942367376597229E-4),
      Weighted((M → (M → 𝒰 )), 0.18370954575549264),
      Weighted(eqM(e_l)(mul(e_l)(e_l)), 0.005053873310466531),
      Weighted(eqM(mul(e_r)(e_r))(mul(e_l)(e_r)), 8.942367376597229E-4),
      Weighted(eqM(mul(e_l)(e_r))(e_r), 0.004920889764529055),
      Weighted(eqM(mul(e_r)(e_r))(e_r), 0.004920889764529055),
      Weighted(eqM(mul(e_r)(e_r))(mul(e_r)(e_r)), 8.942367376597229E-4),
      Weighted(eqM(e_r)(mul(e_l)(e_r)), 0.005053873310466531),
...
In [16]:
thmsF0.map(_(eqM(l)(r)))
res15: monix.execution.CancelableFuture[Double] = Success(0.0)
In [17]:
val tangLpT1 = tangLpT.map(_.copy(cutoff = 5*  math.pow(10, -3)))
val thmsT1 = tangLpT1.flatMap(_.theoremsByStatement)
val goal = eqM(l)(r)
val goalT = thmsT1.map(_(goal))
Out[17]:
tangLpT1: monix.eval.Task[LocalTangentProver] = Map(
  FlatMap(
    FlatMap(
      Async(<function2>, false, true, true),
      provingground.learning.LocalProverStep$$Lambda$2821/298946064@190c0b53
    ),
    provingground.learning.LocalProver$$Lambda$2822/309394961@2954245e
  ),
  ammonite.$sess.cmd16$Helper$$Lambda$2968/585079881@eb2ed8b,
  0
)
thmsT1: monix.eval.Task[FiniteDistribution[Typ[Term]]] = FlatMap(
  Map(
    FlatMap(
      FlatMap(
        Async(<function2>, false, true, true),
        provingground.learning.LocalProverStep$$Lambda$2821/298946064@190c0b53
      ),
      provingground.learning.LocalProver$$Lambda$2822/309394961@2954245e
    ),
    ammonite.$sess.cmd16$Helper$$Lambda$2968/585079881@eb2ed8b,
    0
  ),
  ammonite.$sess.cmd16$Helper$$Lambda$2969/343570645@726c59bb
)
goal: Typ[Term] = eqM(e_l)(e_r)
goalT: monix.eval.Task[Double] = Map(
  FlatMap(
    Map(
      FlatMap(
        FlatMap(
          Async(<function2>, false, true, true),
          provingground.learning.LocalProverStep$$Lambda$2821/298946064@190c0b53
        ),
        provingground.learning.LocalProver$$Lambda$2822/309394961@2954245e
      ),
      ammonite.$sess.cmd16$Helper$$Lambda$2968/585079881@eb2ed8b,
      0
    ),
    ammonite.$sess.cmd16$Helper$$Lambda$2969/343570645@726c59bb
  ),
  ammonite.$sess.cmd16$Helper$$Lambda$2971/1656662693@24a94a38,
  0
)
In [18]:
val goalF = goalT.runToFuture
goalF: monix.execution.CancelableFuture[Double] = Success(0.0)
In [19]:
val tangLpT2 = tangLpT.map(_.copy(cutoff =  math.pow(10, -4)))
val thmsT2 = tangLpT2.flatMap(_.theoremsByStatement)
val goalT2 = thmsT2.map(_(goal))
Out[19]:
tangLpT2: monix.eval.Task[LocalTangentProver] = Map(
  FlatMap(
    FlatMap(
      Async(<function2>, false, true, true),
      provingground.learning.LocalProverStep$$Lambda$2821/298946064@190c0b53
    ),
    provingground.learning.LocalProver$$Lambda$2822/309394961@2954245e
  ),
  ammonite.$sess.cmd18$Helper$$Lambda$2992/1782248102@473e4a43,
  0
)
thmsT2: monix.eval.Task[FiniteDistribution[Typ[Term]]] = FlatMap(
  Map(
    FlatMap(
      FlatMap(
        Async(<function2>, false, true, true),
        provingground.learning.LocalProverStep$$Lambda$2821/298946064@190c0b53
      ),
      provingground.learning.LocalProver$$Lambda$2822/309394961@2954245e
    ),
    ammonite.$sess.cmd18$Helper$$Lambda$2992/1782248102@473e4a43,
    0
  ),
  ammonite.$sess.cmd18$Helper$$Lambda$2993/1120699014@42247789
)
goalT2: monix.eval.Task[Double] = Map(
  FlatMap(
    Map(
      FlatMap(
        FlatMap(
          Async(<function2>, false, true, true),
          provingground.learning.LocalProverStep$$Lambda$2821/298946064@190c0b53
        ),
        provingground.learning.LocalProver$$Lambda$2822/309394961@2954245e
      ),
      ammonite.$sess.cmd18$Helper$$Lambda$2992/1782248102@473e4a43,
      0
    ),
    ammonite.$sess.cmd18$Helper$$Lambda$2993/1120699014@42247789
  ),
  ammonite.$sess.cmd18$Helper$$Lambda$2994/2064783468@13bf6399,
  0
)
In [20]:
val goalF2 = goalT2.runToFuture
goalF2: monix.execution.CancelableFuture[Double] = Failure(
  java.util.concurrent.TimeoutException: Task timed-out after 12 minutes of inactivity
)

Conclusions

  • Searching the entire weighted set of tangent lemmas fails: with the coarser cutoffs (10⁻³ and 5·10⁻³) the run completes but assigns the goal weight 0.0, while the finer cutoff (10⁻⁴) times out.
  • Hence it is crucial to run concurrent searches — say with the cutoff specified for the maximum-weight lemma and then scaled in proportion to each lemma's weight.
In [21]:
val lem = eqM(l)(op(l)(r))
Out[21]:
lem: Typ[Term] = eqM(e_l)(mul(e_l)(e_r))
In [22]:
val cmlT = lemT.map(_.filter(_._1 == lem)).runToFuture
cmlT: monix.execution.CancelableFuture[Vector[(Typ[Term], Double)]] = Success(Vector((eqM(e_l)(mul(e_l)(e_r)), 0.0011464127623522545)))
In [23]:
val tangS = ts.tangent("proof" :: lem)
Out[23]:
tangS: TermState = TermState(
  FiniteDistribution(Vector(Weighted(proof, 1.0))),
  FiniteDistribution(Vector()),
  Vector(),
  FiniteDistribution(Vector()),
  FiniteDistribution(Vector()),
  Empty
)
In [24]:
val lpTangT  = lp.distTangentProver(FiniteDistribution.unif("proof" :: lem))
Out[24]:
lpTangT: monix.eval.Task[LocalTangentProver] = FlatMap(
  Map(
    Async(<function2>, false, true, true),
    provingground.learning.LocalProver$$Lambda$3034/2014331541@6c10bd29,
    0
  ),
  provingground.learning.LocalProver$$Lambda$3035/114348400@75de2a33
)
In [25]:
val targGoalT = lpTangT.flatMap(lpt => lpt.theoremsByStatement).map(_(goal))
Out[25]:
targGoalT: monix.eval.Task[Double] = Map(
  FlatMap(
    FlatMap(
      Map(
        Async(<function2>, false, true, true),
        provingground.learning.LocalProver$$Lambda$3034/2014331541@6c10bd29,
        0
      ),
      provingground.learning.LocalProver$$Lambda$3035/114348400@75de2a33
    ),
    ammonite.$sess.cmd24$Helper$$Lambda$3040/2098991595@4607ba37
  ),
  ammonite.$sess.cmd24$Helper$$Lambda$3041/1245697299@4f7802ea,
  0
)
In [26]:
val targGoalF = targGoalT.runToFuture
targGoalF: monix.execution.CancelableFuture[Double] = Failure(
  java.util.concurrent.TimeoutException: Task timed-out after 12 minutes of inactivity
)

Final Conclusions

  • Even seeding the tangent prover with the correct lemma failed, though lower cutoffs could still be tried.
  • We must narrow the generation coefficients so that only (unified) function application is used.
  • We should also run small sanity tests on the tangent provers themselves, as there could be bugs there.