Generating with successive refinements

The goal is to keep generating term distributions, starting from just Type, Unit, Zero and Star, with successively sharper provers until we time out.
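
In outline, the cells below (In [20]-[22]) do the following, with ts the initial TermState set up in In [19]; the comments here are added for orientation only:

def lp(n: Int) = LocalProver(ts).sharpen(math.pow(2, n)) // prover sharpened by a factor of 2^n
def ns(n: Int) = lp(n).nextState.map{ns => n -> ns}      // evolved state, tagged with its refinement level n
val bt = Utils.bestTask((1 to 30).map(ns))               // keep the deepest level that completes (what Utils.bestTask appears to do)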

In [1]:
import $cp.bin.`provingground-core-jvm-09effc3.fat.jar`
Out[1]:
import $cp.$                                           
In [2]:
import provingground._ , interface._, HoTT._, learning._ 
// install the fansi-based pretty-printer so terms and types display readably
repl.pprinter() = {
  val p = repl.pprinter()
  p.copy(
    additionalHandlers = p.additionalHandlers.orElse {
      translation.FansiShow.fansiHandler
    }
  )
}
Out[2]:
import provingground._ , interface._, HoTT._, learning._ 
In [19]:
val terms = FiniteDistribution.unif[Term](Type, Unit, Zero, Star) // uniform over the four starting terms
val typs = FiniteDistribution[Typ[Term]](Type -> 0.8, Unit -> 0.05, Zero -> 0.15) // starting types, weighted towards the universe
val ts = TermState(terms, typs) // initial state for the prover
Out[19]:
terms: FiniteDistribution[Term] = FiniteDistribution(
  Vector(
    Weighted(𝒰 , 0.25),
    Weighted(Unit, 0.25),
    Weighted(Zero, 0.25),
    Weighted(Star, 0.25)
  )
)
typs: FiniteDistribution[Typ[Term]] = FiniteDistribution(
  Vector(Weighted(𝒰 , 0.8), Weighted(Unit, 0.05), Weighted(Zero, 0.15))
)
ts: TermState = TermState(
  FiniteDistribution(
    Vector(
      Weighted(𝒰 , 0.25),
      Weighted(Unit, 0.25),
      Weighted(Zero, 0.25),
      Weighted(Star, 0.25)
    )
  ),
  FiniteDistribution(
    Vector(Weighted(𝒰 , 0.8), Weighted(Unit, 0.05), Weighted(Zero, 0.15))
  ),
  Vector(),
  FiniteDistribution(Vector()),
  FiniteDistribution(Vector()),
  Empty
)
In [20]:
import monix.execution.Scheduler.Implicits.global
def lp(n: Int) = LocalProver(ts).sharpen(math.pow(2, n))
Out[20]:
import monix.execution.Scheduler.Implicits.global

defined function lp
In [21]:
def ns(n: Int) = lp(n).nextState.map{ns => n -> ns}
Out[21]:
defined function ns
In [22]:
val bt = Utils.bestTask((1 to 30).map(ns))
Out[22]:
bt: monix.eval.Task[Option[(Int, TermState)]] = FlatMap(
  FlatMap(
    Map(
      Async(<function2>, false, true, true),
      ammonite.$sess.cmd20$Helper$$Lambda$3316/1857030182@487cdc04,
      0
    ),
    <function1>
  ),
  provingground.Utils$$$Lambda$2797/1331520012@313b87a8
)
In [23]:
val bf = bt.runToFuture
In [24]:
val A = "A" :: Type
val tsT = bt.map(_.get._2)
Out[24]:
A: Typ[Term] = A
tsT: monix.eval.Task[TermState] = Map(
  FlatMap(
    FlatMap(
      Map(
        Async(<function2>, false, true, true),
        ammonite.$sess.cmd20$Helper$$Lambda$3316/1857030182@487cdc04,
        0
      ),
      <function1>
    ),
    provingground.Utils$$$Lambda$2797/1331520012@313b87a8
  ),
  ammonite.$sess.cmd23$Helper$$Lambda$3327/584491755@78827119,
  0
)
In [25]:
val typT = tsT.map(_.typs)
Out[25]:
typT: monix.eval.Task[FiniteDistribution[Typ[Term]]] = Map(
  FlatMap(
    FlatMap(
      Map(
        Async(<function2>, false, true, true),
        ammonite.$sess.cmd20$Helper$$Lambda$3316/1857030182@487cdc04,
        0
      ),
      <function1>
    ),
    provingground.Utils$$$Lambda$2797/1331520012@313b87a8
  ),
  scala.Function1$$Lambda$275/1928301845@cf5ea27,
  1
)
In [26]:
val tpEntF = typT.map(_.entropyVec).runToFuture
In [27]:
val B = "B" :: Type
val MP = A ~>: (B ~>: (A ->: (A ->: B) ->: B))
Out[27]:
B: Typ[Term] = B
MP: GenFuncTyp[Typ[Term], FuncLike[Typ[Term], Func[Term, Func[Func[Term, Term], Term]]]] = ∏(A : 𝒰 ){ ∏(B : 𝒰 ){ (A → ((A → B) → B)) } }
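
Here MP is the propositions-as-types statement of Modus Ponens, $\prod_{A : \mathcal{U}} \prod_{B : \mathcal{U}} A \to ((A \to B) \to B)$; the next cell asks for its weight in the generated distribution of types.
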
In [28]:
typT.map(_(MP)).runToFuture
In [29]:
val idTyp = A ~>: (A ->: A)
typT.map(_(idTyp)).runToFuture

Conclusions

  • Repeated running of tasks works fine.
  • However, we see that single-shot generation from this initial state is pretty weak.
In [30]:
// same refinement, but starting from a state containing only the variables A and B
def lp1(n: Int) = LocalProver(TermState(FiniteDistribution.unif(A, B), FiniteDistribution.unif(A, B))).sharpen(math.pow(2, n))
Out[30]:
defined function lp1
In [31]:
def ns1(n: Int) = lp1(n).nextState.map{ns => n -> ns}
Out[31]:
defined function ns1
In [41]:
val bT1 = Utils.bestTask((1 to 30).map(ns1))
Out[41]:
bT1: monix.eval.Task[Option[(Int, TermState)]] = FlatMap(
  FlatMap(
    Map(
      Async(<function2>, false, true, true),
      ammonite.$sess.cmd30$Helper$$Lambda$3446/1142216442@2a9f3987,
      0
    ),
    <function1>
  ),
  provingground.Utils$$$Lambda$2797/1331520012@786eee75
)
In [42]:
val bF1 = bT1.runToFuture
In [43]:
val mp1T = bT1.map(_.get._2.typs(A ->: (A ->: B) ->: B))
Out[43]:
mp1T: monix.eval.Task[Double] = Map(
  FlatMap(
    FlatMap(
      Map(
        Async(<function2>, false, true, true),
        ammonite.$sess.cmd30$Helper$$Lambda$3446/1142216442@2a9f3987,
        0
      ),
      <function1>
    ),
    provingground.Utils$$$Lambda$2797/1331520012@786eee75
  ),
  ammonite.$sess.cmd42$Helper$$Lambda$3457/424448485@f71aa6e,
  0
)
In [44]:
val mp1F = mp1T.runToFuture
In [45]:
val typs1T = bT1.map(_.get._2.typs)
Out[45]:
typs1T: monix.eval.Task[FiniteDistribution[Typ[Term]]] = Map(
  FlatMap(
    FlatMap(
      Map(
        Async(<function2>, false, true, true),
        ammonite.$sess.cmd30$Helper$$Lambda$3446/1142216442@2a9f3987,
        0
      ),
      <function1>
    ),
    provingground.Utils$$$Lambda$2797/1331520012@786eee75
  ),
  ammonite.$sess.cmd44$Helper$$Lambda$3464/1416231077@4a8a7522,
  0
)
In [46]:
typs1T.map(_(A ->: B)).runToFuture

Conclusion 2

When the initial state had the variables $A$ and $B$, this process did discover Modus Ponens (though it took a long time, running through 12 refinement steps).
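
As a follow-up check (a sketch reusing only values defined above; it was not run in this session, and the name entropies1F is just illustrative), the types in that best state can also be listed by entropy, as was done for the first run in In [26]:

// entropy-sorted view of the types generated in the best run over A and B
val entropies1F = bT1.map(_.get._2.typs.entropyVec).runToFuture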