Memoized liberal goal chomping

  • We run a variant of the most basic strategic prover: a goal chomper that keeps trying a succession of goals.
  • The change from last time is that terms are remembered and looked up (memoization; see the sketch after this list).
  • Termination is when all goals are finished, with failures recorded.
  • Proving is only by generation, with generation including backward-reasoning rules.
  • So far the goal chomper is untested; we test it here.
  • We also test updating displays etc.
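
All the memoization here goes through Monix's Task.memoize. As a standalone reminder of its semantics (a minimal sketch using only Monix, independent of the prover):

import monix.eval.Task
import monix.execution.Scheduler.Implicits.global
import scala.concurrent.Await
import scala.concurrent.duration._

var evaluations = 0
// without memoize, each run would re-evaluate the body; with it, the first
// result is cached and shared by every later run
val cached = Task { evaluations += 1; evaluations }.memoize
Await.result(cached.runToFuture, 1.second) // 1
Await.result(cached.runToFuture, 1.second) // still 1: cached, not re-run
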
In [1]:
import $cp.bin.`provingground-core-jvm-c665e0185a.fat.jar`
import provingground._ , interface._, HoTT._, learning._ 
repl.pprinter() = {
  val p = repl.pprinter()
  p.copy(
    additionalHandlers = p.additionalHandlers.orElse {
      translation.FansiShow.fansiHandler
    }
  )
}
Out[1]:
import $cp.$                                              

import provingground._ , interface._, HoTT._, learning._ 
In [2]:
// an initial state with a few basic terms and types
val terms = FiniteDistribution.unif[Term](Unit, Zero, Star)
val typs = FiniteDistribution.unif[Typ[Term]](Type, Unit, Zero)
val ts = TermState(terms, typs)
// a blank state, generating from the universe alone
val ts0 = TermState(FiniteDistribution(), FiniteDistribution.unif(Type))
// generation parameters, with weight 0.05 for the solver
val tg = TermGenParams(solverW = 0.05)
Out[2]:
terms: FiniteDistribution[Term] = FiniteDistribution(
  Vector(
    Weighted(Unit, 0.3333333333333333),
    Weighted(Zero, 0.3333333333333333),
    Weighted(Star, 0.3333333333333333)
  )
)
typs: FiniteDistribution[Typ[Term]] = FiniteDistribution(
  Vector(
    Weighted(𝒰 , 0.3333333333333333),
    Weighted(Unit, 0.3333333333333333),
    Weighted(Zero, 0.3333333333333333)
  )
)
ts: TermState = TermState(
  FiniteDistribution(
    Vector(
      Weighted(Unit, 0.3333333333333333),
      Weighted(Zero, 0.3333333333333333),
      Weighted(Star, 0.3333333333333333)
    )
  ),
  FiniteDistribution(
    Vector(
      Weighted(𝒰 , 0.3333333333333333),
      Weighted(Unit, 0.3333333333333333),
      Weighted(Zero, 0.3333333333333333)
    )
  ),
  Vector(),
  FiniteDistribution(Vector()),
  FiniteDistribution(Vector()),
  Empty
)
ts0: TermState = TermState(
  FiniteDistribution(Vector()),
  FiniteDistribution(Vector(Weighted(𝒰 , 1.0))),
  Vector(),
  FiniteDistribution(Vector()),
  FiniteDistribution(Vector()),
  Empty
)
tg: TermGenParams = TermGenParams(
  0.1,
  0.1,
  0.1,
  0.1,
  0.1,
  0.05,
  0.05,
  0.05,
  0.0,
  0.0,
  0.0,
  0.0,
  0.3,
  0.7,
  0.5,
  0.0,
  0.05,
  0.0,
  OrElse(
    OrElse(OrElse(OrElse(<function1>, <function1>), <function1>), <function1>),
    <function1>
  )
)
In [3]:
import monix.execution.Scheduler.Implicits.global
val lp = LocalProver(ts, tg).sharpen(10)
val lp0 = LocalProver(ts0).sharpen(10)
Out[3]:
import monix.execution.Scheduler.Implicits.global

lp: LocalProver = LocalProver(
  TermState(
    FiniteDistribution(
      Vector(
        Weighted(Unit, 0.3333333333333333),
        Weighted(Zero, 0.3333333333333333),
        Weighted(Star, 0.3333333333333333)
      )
    ),
    FiniteDistribution(
      Vector(
        Weighted(𝒰 , 0.3333333333333333),
        Weighted(Unit, 0.3333333333333333),
        Weighted(Zero, 0.3333333333333333)
      )
    ),
    Vector(),
    FiniteDistribution(Vector()),
    FiniteDistribution(Vector()),
    Empty
  ),
  TermGenParams(
    0.1,
    0.1,
    0.1,
    0.1,
    0.1,
    0.05,
    0.05,
    0.05,
    0.0,
    0.0,
    0.0,
    0.0,
    0.3,
    0.7,
    0.5,
    0.0,
    0.05,
...
lp0: LocalProver = LocalProver(
  TermState(
    FiniteDistribution(Vector()),
    FiniteDistribution(Vector(Weighted(𝒰 , 1.0))),
    Vector(),
    FiniteDistribution(Vector()),
    FiniteDistribution(Vector()),
    Empty
  ),
  TermGenParams(
    0.1,
    0.1,
    0.1,
    0.1,
    0.1,
    0.05,
    0.05,
    0.05,
    0.0,
    0.0,
    0.0,
    0.0,
    0.3,
    0.7,
    0.5,
    0.0,
    0.0,
    0.0,
    OrElse(
      OrElse(OrElse(OrElse(<function1>, <function1>), <function1>), <function1>),
      <function1>
    )
  ),
  1.0E-5,
  12 minutes,
  1.01,
  1.0,
  10000,
...
In [4]:
// the statements that are neither proved nor disproved, sorted by entropy;
// memoized so every task that uses this shares a single computation
val unknownsT = lp0.unknownStatements.map(_.entropyVec.map(_.elem)).memoize
val unF = unknownsT.runToFuture
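
For reference, the ordering from entropyVec should agree with sorting the distribution's pmf by descending weight, i.e. by ascending entropy -log(p). A sketch, assuming only the FiniteDistribution.pmf and Weighted(elem, weight) shapes visible in the outputs above:

// the same ordering by hand: highest-weight (lowest-entropy) types first
val byWeightT = lp0.unknownStatements.map(_.pmf.sortBy(w => -w.weight).map(_.elem))
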
In [5]:
import StrategicProvers._
import almond.display._
val chompView = Markdown("## Results from Goal chomping\n")

Goal chomping status

  • current goal : Some((@a : 𝒰 _0 ) ~> ((((𝒰 _0) β†’ (@a)) , (𝒰 _0))))
  • successes : 63
  • failures : 1
  • terms : 10985
  • equation-nodes: 1116040
  • last success : Some(Vector((βˆ‘((@a : 𝒰 _0) ↦ ((@a) β†’ (Zero))),0.5,[((Zero) , (rec(Zero)(Zero))) : 0.01127994556862484, ((Zero) , ((@a : Zero) ↦ (@a))) : 0.00974783102948558])))
  • last failure : Some((𝒰 _0) β†’ ((((@b : 𝒰 _0 ) ~> (@b)) , (𝒰 _0))))
Out[5]:
import StrategicProvers._

import almond.display._
In [6]:
// `update` and `md` come from StrategicProvers: a hook the chomper invokes as
// it progresses, and the current status rendered as markdown
update = (_) => chompView.withContent(md).update()
update(())
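
The display refresh is thus a reassignable hook invoked by a long-running computation. In generic form (a sketch of the pattern, not StrategicProvers' internals):

// a long computation exposes a mutable callback; callers install display
// refreshes without the computation knowing about the notebook front end
object Worker {
  var onStep: Unit => Unit = (_) => ()
  def step(): Unit = { /* do one unit of work */ onStep(()) }
}
Worker.onStep = (_) => println("display refreshed")
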
In [7]:
// chomp through the unknown statements in order; memoized so the future below
// and any later queries share one run
val chT = unknownsT.flatMap(typs => liberalChomper(lp, typs, accumTerms = Set())).memoize
Out[7]:
chT: monix.eval.Task[(Vector[Successes], Vector[Typ[Term]], Set[EquationNode], Set[Term], Vector[Typ[Term]])] = Async(<function2>, false, true, true)
In [8]:
val chF = chT.runToFuture
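
Once this completes, the result tuple can be summarized. The component labels below are inferred from chT's type and from the status display above (successes, failures, equation nodes, accumulated terms, remaining goals); they are a hedged reading, not documented API:

// hypothetical summary of the chomping run; the labels are guesses
val summaryF = chT.map { case (succs, fails, eqns, accTerms, remaining) =>
  s"successes: ${succs.size}, failures: ${fails.size}, " +
  s"equation-nodes: ${eqns.size}, terms: ${accTerms.size}, remaining: ${remaining.size}"
}.runToFuture
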
In [9]:
val failHandle = Markdown(failures.reverse.mkString("## Failures\n\n * ", "\n * ", "\n"))

Failures

  • (𝒰 _0) β†’ ((((@b : 𝒰 _0 ) ~> (@b)) , (𝒰 _0)))
In [10]:
update = (_) => {
    // refresh both the status view and the failures list
    chompView.withContent(md).update()
    failHandle.withContent(failures.reverse.mkString("## Failures\n\n * ", "\n * ", "\n")).update()
}
In [11]:
update(())
In [17]:
// reconstruct the recorded failure by hand: Q = 𝒰 β†’ ((∏(A : 𝒰){ A }) × 𝒰)
val A = "A" :: Type
val P = (A ~>: A) && Type
val Q = Type ->: P
Out[17]:
A: Typ[Term] = A
P: ProdTyp[FuncLike[Typ[Term], Term], Typ[Term]] = ∏(A : 𝒰 ){ A }×𝒰 
Q: FuncTyp[Typ[Term], PairTerm[FuncLike[Typ[Term], Term], Typ[Term]]] = (𝒰  β†’ ∏(A : 𝒰 ){ A }×𝒰 )
In [18]:
failures.headOption == Some(Q)
Out[18]:
res17: Boolean = true
In [19]:
terms.filter(_.typ == P)
Out[19]:
res18: FiniteDistribution[Term] = FiniteDistribution(Vector())
In [20]:
terms.filter(_.typ == (A ~>: A))
Out[20]:
res19: FiniteDistribution[Term] = FiniteDistribution(Vector())
In [24]:
// the final terms as read off from the equations (via the expression evaluator)
val termsT = lp.expressionEval.map(_.finalTerms).memoize
Out[24]:
termsT: monix.eval.Task[FiniteDistribution[Term]] = Async(
  <function2>,
  false,
  true,
  true
)
In [25]:
termsT.map(s => s.filter(_.typ == (A ~>: A))).runToFuture
In [26]:
termsT.map(_.map(_.typ).entropyVec).runToFuture
In [28]:
val NP = negate(A ~>: A)
Out[28]:
NP: Typ[Term] = βˆ‘(A : 𝒰 ){ (A β†’ Zero) }
In [29]:
terms.filter(_.typ == NP)
Out[29]:
res28: FiniteDistribution[Term] = FiniteDistribution(Vector())
In [30]:
termsT.map(s => s.filter(_.typ == NP)).runToFuture
In [31]:
unknownsT.map(ts => ts.contains(A ~>: A)).runToFuture
In [32]:
successes.head
Out[32]:
res31: Successes = Vector(
  (
    βˆ‘(@a : 𝒰 ){ (@a β†’ Zero) },
    0.5,
    FiniteDistribution(
      Vector(
        Weighted((Zero , rec_{ Zero ; Zero }), 0.01127994556862484),
        Weighted((Zero , (@a : Zero) ↦ @a), 0.00974783102948558)
      )
    )
  )
)
In [33]:
successes.headOption.map(v => v.head._1)
Out[33]:
res32: Option[Typ[Term]] = Some(βˆ‘(@a : 𝒰 ){ (@a β†’ Zero) })
In [34]:
successes.headOption.map(v => v.head._1) == Some(NP)
Out[34]:
res33: Boolean = true
In [35]:
failures.headOption.map(t => negate(t))
Out[35]:
res34: Option[Typ[Term]] = Some(((𝒰  β†’ ∏(@b : 𝒰 ){ @b }×𝒰 ) β†’ Zero))
In [36]:
val B = "B" :: Type
negate(A ->: B)
Out[36]:
B: Typ[Term] = B
res35_1: Typ[Term] = ((A β†’ B) β†’ Zero)

Conclusions

  • Memoization did not work, in part because the correct things were not memoized.
  • Specifically, a term that was part of the successes was not memoized.
  • We should look not just at the finalTerms from the equations, but at the nextState.
  • We should also improve negation for implications whose conclusion is independent of the premise. Sketches of both fixes follow.
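
A minimal sketch of the first fix, assuming LocalProver exposes the evolved state as nextState (as the bullet above suggests), with the terms field seen in the printed TermStates:

// query the evolved state directly rather than only the equation-derived terms
val allTermsT = lp.nextState.map(_.terms).memoize

And a sketch of the proposed negation rule: when the conclusion does not involve the premise, negate A β†’ B to the product A × ¬B instead of (A β†’ B) β†’ Zero. Here negateIndep is a hypothetical helper, not library API, built from && and negate as used above:

// hypothetical: negate an independent implication as premise × negated conclusion
def negateIndep(dom: Typ[Term], codom: Typ[Term]): Typ[Term] =
  dom && negate(codom)
negateIndep(A, B) // A × (B β†’ Zero)
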