We attempt to replicate the Monoid proof, but with our new workflow and with the local provers, using only unified applications.

In [1]:
import $cp.bin.`provingground-core-jvm-3d48753.fat.jar`
Out[1]:
import $cp.$                                           
In [2]:
import provingground._ , interface._, HoTT._, learning._ 
repl.pprinter() = {
  val p = repl.pprinter()
  p.copy(
    additionalHandlers = p.additionalHandlers.orElse {
      translation.FansiShow.fansiHandler
    }
  )
}
Out[2]:
import provingground._ , interface._, HoTT._, learning._ 
In [3]:
import provingground._ , interface._, HoTT._
import learning._
import library._, MonoidSimple._
Out[3]:
import provingground._ , interface._, HoTT._

import learning._

import library._, MonoidSimple._
In [4]:
val tg = TermGenParams.zero.copy(unAppW = 0.2)
val ts = TermState(dist1, dist1.map(_.typ))
Out[4]:
tg: TermGenParams = TermGenParams(
  0.0,
  0.2,
  0.0,
  0.0,
  0.0,
  0.0,
  0.0,
  0.0,
  0.0,
  0.0,
  0.0,
  0.0,
  0.3,
  0.7,
  0.5,
  0.0,
  0.0,
  0.0
)
ts: TermState = TermState(
  FiniteDistribution(
    Vector(
      Weighted(e_l, 0.047619047619047616),
      Weighted(e_r, 0.047619047619047616),
      Weighted(mul, 0.047619047619047616),
      Weighted(eqM, 0.047619047619047616),
      Weighted(axiom_{eqM(a)(a)}, 0.047619047619047616),
      Weighted(axiom_{(eqM(a)(b) \to eqM(b)(a))}, 0.047619047619047616),
      Weighted(
        axiom_{(eqM(a)(b) \to (eqM(b)(c) \to eqM(a)(c)))},
        0.047619047619047616
      ),
      Weighted(axiom_{eqM(mul(e_l)(a))(a)}, 0.047619047619047616),
      Weighted(axiom_{eqM(mul(a)(e_r))(a)}, 0.047619047619047616),
      Weighted(eqM, 0.2857142857142857),
      Weighted(mul, 0.2857142857142857)
    )
  ),
  FiniteDistribution(
    Vector(
      Weighted(M, 0.047619047619047616),
      Weighted(M, 0.047619047619047616),
      Weighted((M → (M → M)), 0.047619047619047616),
      Weighted((M → (M → 𝒰 )), 0.047619047619047616),
      Weighted(∏(a : M){ eqM(a)(a) }, 0.047619047619047616),
      Weighted(
        ∏(a : M){ ∏(b : M){ (eqM(a)(b) → eqM(b)(a)) } },
        0.047619047619047616
      ),
      Weighted(
        ∏(a : M){ ∏(b : M){ ∏(c : M){ (eqM(a)(b) → (eqM(b)(c) → eqM(a)(c))) } } },
        0.047619047619047616
      ),
      Weighted(∏(a : M){ eqM(mul(e_l)(a))(a) }, 0.047619047619047616),
      Weighted(∏(a : M){ eqM(mul(a)(e_r))(a) }, 0.047619047619047616),
      Weighted((M → (M → 𝒰 )), 0.2857142857142857),
      Weighted((M → (M → M)), 0.2857142857142857)
...
In [5]:
val lp = LocalProver(ts, tg)
Out[5]:
lp: LocalProver = LocalProver(
  TermState(
    FiniteDistribution(
      Vector(
        Weighted(e_l, 0.047619047619047616),
        Weighted(e_r, 0.047619047619047616),
        Weighted(mul, 0.047619047619047616),
        Weighted(eqM, 0.047619047619047616),
        Weighted(axiom_{eqM(a)(a)}, 0.047619047619047616),
        Weighted(axiom_{(eqM(a)(b) \to eqM(b)(a))}, 0.047619047619047616),
        Weighted(
          axiom_{(eqM(a)(b) \to (eqM(b)(c) \to eqM(a)(c)))},
          0.047619047619047616
        ),
        Weighted(axiom_{eqM(mul(e_l)(a))(a)}, 0.047619047619047616),
        Weighted(axiom_{eqM(mul(a)(e_r))(a)}, 0.047619047619047616),
        Weighted(eqM, 0.2857142857142857),
        Weighted(mul, 0.2857142857142857)
      )
    ),
    FiniteDistribution(
      Vector(
        Weighted(M, 0.047619047619047616),
        Weighted(M, 0.047619047619047616),
        Weighted((M → (M → M)), 0.047619047619047616),
        Weighted((M → (M → 𝒰 )), 0.047619047619047616),
        Weighted(∏(a : M){ eqM(a)(a) }, 0.047619047619047616),
        Weighted(
          ∏(a : M){ ∏(b : M){ (eqM(a)(b) → eqM(b)(a)) } },
          0.047619047619047616
        ),
        Weighted(
          ∏(a : M){ ∏(b : M){ ∏(c : M){ (eqM(a)(b) → (eqM(b)(c) → eqM(a)(c))) } } },
          0.047619047619047616
        ),
        Weighted(∏(a : M){ eqM(mul(e_l)(a))(a) }, 0.047619047619047616),
        Weighted(∏(a : M){ eqM(mul(a)(e_r))(a) }, 0.047619047619047616),
        Weighted((M → (M → 𝒰 )), 0.2857142857142857),
...
In [6]:
import monix.execution.Scheduler.Implicits.global
Out[6]:
import monix.execution.Scheduler.Implicits.global 
In [7]:
val lemT = lp.lemmas
Out[7]:
lemT: monix.eval.Task[Vector[(Typ[Term], Double)]] = Async(
  <function2>,
  false,
  true,
  true
)
In [8]:
val lemF = lemT.runToFuture
lemF: monix.execution.CancelableFuture[Vector[(Typ[Term], Double)]] = Success(
  Vector(
    (eqM(e_r)(e_r), 1.0578745194563616E-4),
    (eqM(e_l)(e_l), 1.0578745194563616E-4),
    (eqM(e_l)(mul(e_l)(e_r)), 3.9209752320968364E-5),
    (eqM(e_l)(mul(e_l)(e_l)), 3.9209752320968364E-5),
    (eqM(e_r)(mul(e_l)(e_r)), 3.9209752320968364E-5),
    (eqM(e_r)(mul(e_r)(e_r)), 3.9209752320968364E-5),
    (eqM(mul(e_l)(e_r))(e_l), 1.1673930754169824E-5),
    (eqM(mul(e_l)(e_l))(e_l), 1.1673930754169824E-5),
    (eqM(mul(e_l)(e_r))(e_r), 1.1673930754169824E-5),
    (eqM(mul(e_r)(e_r))(e_r), 1.1673930754169824E-5)
  )
)
In [9]:
val lem = eqM(l)(op(l)(r))
val tangS = ts.tangent("proof" :: lem)
Out[9]:
lem: Typ[Term] = eqM(e_l)(mul(e_l)(e_r))
tangS: TermState = TermState(
  FiniteDistribution(Vector(Weighted(proof, 1.0))),
  FiniteDistribution(Vector()),
  Vector(),
  FiniteDistribution(Vector()),
  FiniteDistribution(Vector()),
  Empty
)
In [10]:
val goal = eqM(l)(r)
val lpTangT  = lp.distTangentProver(FiniteDistribution.unif("proof" :: lem))
val targGoalT = lpTangT.flatMap(lpt => lpt.theoremsByStatement).map(_(goal))
Out[10]:
goal: Typ[Term] = eqM(e_l)(e_r)
lpTangT: monix.eval.Task[LocalTangentProver] = FlatMap(
  Map(
    Async(<function2>, false, true, true),
    provingground.learning.LocalProver$$Lambda$3055/1967470927@749bd4b2,
    0
  ),
  provingground.learning.LocalProver$$Lambda$3056/91428110@151a7801
)
targGoalT: monix.eval.Task[Double] = Map(
  FlatMap(
    FlatMap(
      Map(
        Async(<function2>, false, true, true),
        provingground.learning.LocalProver$$Lambda$3055/1967470927@749bd4b2,
        0
      ),
      provingground.learning.LocalProver$$Lambda$3056/91428110@151a7801
    ),
    ammonite.$sess.cmd9$Helper$$Lambda$3057/2060264848@6f468bdc
  ),
  ammonite.$sess.cmd9$Helper$$Lambda$3058/1264621820@3985c81c,
  0
)
In [11]:
val targGoalF = targGoalT.runToFuture
targGoalF: monix.execution.CancelableFuture[Double] = Success(0.0358205554070143)

Conclusions

  • When we draw attention to what we want in an extreme way, we do at least get the result.
  • A caveat is that the lemmas were misidentified here.
In [18]:
val tg1 = TermGenParams.zero.copy(appW = 0.1, unAppW = 0.1)
Out[18]:
tg1: TermGenParams = TermGenParams(
  0.1,
  0.1,
  0.0,
  0.0,
  0.0,
  0.0,
  0.0,
  0.0,
  0.0,
  0.0,
  0.0,
  0.0,
  0.3,
  0.7,
  0.5,
  0.0,
  0.0,
  0.0
)
In [19]:
val lp1 = LocalProver(ts, tg1)
Out[19]:
lp1: LocalProver = LocalProver(
  TermState(
    FiniteDistribution(
      Vector(
        Weighted(e_l, 0.047619047619047616),
        Weighted(e_r, 0.047619047619047616),
        Weighted(mul, 0.047619047619047616),
        Weighted(eqM, 0.047619047619047616),
        Weighted(axiom_{eqM(a)(a)}, 0.047619047619047616),
        Weighted(axiom_{(eqM(a)(b) \to eqM(b)(a))}, 0.047619047619047616),
        Weighted(
          axiom_{(eqM(a)(b) \to (eqM(b)(c) \to eqM(a)(c)))},
          0.047619047619047616
        ),
        Weighted(axiom_{eqM(mul(e_l)(a))(a)}, 0.047619047619047616),
        Weighted(axiom_{eqM(mul(a)(e_r))(a)}, 0.047619047619047616),
        Weighted(eqM, 0.2857142857142857),
        Weighted(mul, 0.2857142857142857)
      )
    ),
    FiniteDistribution(
      Vector(
        Weighted(M, 0.047619047619047616),
        Weighted(M, 0.047619047619047616),
        Weighted((M → (M → M)), 0.047619047619047616),
        Weighted((M → (M → 𝒰 )), 0.047619047619047616),
        Weighted(∏(a : M){ eqM(a)(a) }, 0.047619047619047616),
        Weighted(
          ∏(a : M){ ∏(b : M){ (eqM(a)(b) → eqM(b)(a)) } },
          0.047619047619047616
        ),
        Weighted(
          ∏(a : M){ ∏(b : M){ ∏(c : M){ (eqM(a)(b) → (eqM(b)(c) → eqM(a)(c))) } } },
          0.047619047619047616
        ),
        Weighted(∏(a : M){ eqM(mul(e_l)(a))(a) }, 0.047619047619047616),
        Weighted(∏(a : M){ eqM(mul(a)(e_r))(a) }, 0.047619047619047616),
        Weighted((M → (M → 𝒰 )), 0.2857142857142857),
...
In [20]:
val lemT1 = lp1.lemmas
Out[20]:
lemT1: monix.eval.Task[Vector[(Typ[Term], Double)]] = Async(
  <function2>,
  false,
  true,
  true
)
In [21]:
val lemF1 = lemT1.runToFuture
lemF1: monix.execution.CancelableFuture[Vector[(Typ[Term], Double)]] = Success(
  Vector(
    (eqM(e_r)(e_r), 0.0027509184472828325),
    (eqM(e_l)(e_l), 0.0027509184472828325),
    (eqM(e_l)(mul(e_l)(e_r)), 0.0012526844961492322),
    (eqM(e_l)(mul(e_l)(e_l)), 0.0012526844961492322),
    (eqM(e_r)(mul(e_l)(e_r)), 0.0012526844961492322),
    (eqM(e_r)(mul(e_r)(e_r)), 0.0012526844961492322)
  )
)
In [22]:
val lpTangT1  = lp1.distTangentProver(FiniteDistribution.unif("proof" :: lem))
val targGoalT1 = lpTangT1.flatMap(lpt => lpt.theoremsByStatement).map(_(goal))
Out[22]:
lpTangT1: monix.eval.Task[LocalTangentProver] = FlatMap(
  Map(
    Async(<function2>, false, true, true),
    provingground.learning.LocalProver$$Lambda$3055/1967470927@383b4579,
    0
  ),
  provingground.learning.LocalProver$$Lambda$3056/91428110@54a814f7
)
targGoalT1: monix.eval.Task[Double] = Map(
  FlatMap(
    FlatMap(
      Map(
        Async(<function2>, false, true, true),
        provingground.learning.LocalProver$$Lambda$3055/1967470927@383b4579,
        0
      ),
      provingground.learning.LocalProver$$Lambda$3056/91428110@54a814f7
    ),
    ammonite.$sess.cmd21$Helper$$Lambda$3234/916371667@3e84a7d0
  ),
  ammonite.$sess.cmd21$Helper$$Lambda$3235/315745996@3dbbc5b4,
  0
)
In [23]:
val targGoalF1 = targGoalT1.runToFuture
targGoalF1: monix.execution.CancelableFuture[Double] = Success(0.03607619269409383)

Conclusions: round two

  • Having both applications and unified ones is a reasonable thing to try.
  • The theorem was proved in this case.
  • However, the lemmas were again not identified properly.