Codomain targeting.

This is a small experiment to see whether targeting a type as the codomain of functions in the generating set leads to higher weight for terms of that type. We include functions that act as distractions.

In [1]:
import $cp.bin.`provingground-core-jvm-9cc21d9ac6.fat.jar`
import provingground._ , interface._, HoTT._, learning._ 
repl.pprinter() = {
  val p = repl.pprinter()
  p.copy(
    additionalHandlers = p.additionalHandlers.orElse {
      translation.FansiShow.fansiHandler
    }
  )
}
Out[1]:
import $cp.$                                              

import provingground._ , interface._, HoTT._, learning._ 
In [2]:
val M = "M" :: Type
val mul = "mul" :: (M ->: M ->: M)
val eqM = "eqM" :: (M ->: M ->: Type)
val m = "m" :: M
val n = "n" :: M
Out[2]:
M: Typ[Term] = M
mul: Func[Term, Func[Term, Term]] = mul
eqM: Func[Term, Func[Term, Typ[Term]]] = eqM
m: Term = m
n: Term = n
In [3]:
val ts = TermState(FiniteDistribution.unif(mul, eqM, m, n), FiniteDistribution.unif(mul.typ, eqM.typ, M))
Out[3]:
ts: TermState = TermState(
  FiniteDistribution(
    Vector(
      Weighted(mul, 0.25),
      Weighted(eqM, 0.25),
      Weighted(m, 0.25),
      Weighted(n, 0.25)
    )
  ),
  FiniteDistribution(
    Vector(
      Weighted((M → (M → M)), 0.3333333333333333),
      Weighted((M → (M → 𝒰 )), 0.3333333333333333),
      Weighted(M, 0.3333333333333333)
    )
  ),
  Vector(),
  FiniteDistribution(Vector()),
  FiniteDistribution(Vector()),
  Empty
)
In [4]:
val lp1 = LocalProver(ts, tg = TermGenParams()).noIsles
val lp2 = LocalProver(ts, tg = TermGenParams(typAsCodW = 0.1)).noIsles
Out[4]:
lp1: LocalProver = LocalProver(
  TermState(
    FiniteDistribution(
      Vector(
        Weighted(mul, 0.25),
        Weighted(eqM, 0.25),
        Weighted(m, 0.25),
        Weighted(n, 0.25)
      )
    ),
    FiniteDistribution(
      Vector(
        Weighted((M → (M → M)), 0.3333333333333333),
        Weighted((M → (M → 𝒰 )), 0.3333333333333333),
        Weighted(M, 0.3333333333333333)
      )
    ),
    Vector(),
    FiniteDistribution(Vector()),
    FiniteDistribution(Vector()),
    Empty
  ),
  TermGenParams(
    0.1,
    0.1,
    0.1,
    0.0,
    0.0,
    0.05,
    0.05,
    0.0,
    0.0,
    0.0,
    0.0,
    0.0,
    0.3,
    0.7,
    0.5,
    0.0,
...
lp2: LocalProver = LocalProver(
  TermState(
    FiniteDistribution(
      Vector(
        Weighted(mul, 0.25),
        Weighted(eqM, 0.25),
        Weighted(m, 0.25),
        Weighted(n, 0.25)
      )
    ),
    FiniteDistribution(
      Vector(
        Weighted((M → (M → M)), 0.3333333333333333),
        Weighted((M → (M → 𝒰 )), 0.3333333333333333),
        Weighted(M, 0.3333333333333333)
      )
    ),
    Vector(),
    FiniteDistribution(Vector()),
    FiniteDistribution(Vector()),
    Empty
  ),
  TermGenParams(
    0.1,
    0.1,
    0.1,
    0.0,
    0.0,
    0.05,
    0.05,
    0.0,
    0.0,
    0.0,
    0.1,
    0.0,
    0.3,
    0.7,
    0.5,
    0.0,
...
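
The only difference between the two provers is the codomain-targeting weight, visible in the printed TermGenParams above (0.0 for lp1 versus 0.1 for lp2). A minimal check, assuming the tg and typAsCodW case-class fields are accessible (as the named arguments above suggest):

// confirm that lp2 turns codomain targeting on while lp1 leaves it off
println(lp1.tg.typAsCodW) // expected: 0.0
println(lp2.tg.typAsCodW) // expected: 0.1
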
In [5]:
val fd1T = lp1.varDist(TermGeneratorNodes.termsWithTyp(M))
Out[5]:
fd1T: monix.eval.Task[FiniteDistribution[Term]] = Map(
  FlatMap(
    Async(<function2>, true, true, true),
    monix.eval.Task$$Lambda$2561/832550944@1882ecc8
  ),
  scala.Function1$$Lambda$335/412925308@16068149,
  1
)
In [6]:
val fd2T = lp2.varDist(TermGeneratorNodes.termsWithTyp(M))
Out[6]:
fd2T: monix.eval.Task[FiniteDistribution[Term]] = Map(
  FlatMap(
    Async(<function2>, true, true, true),
    monix.eval.Task$$Lambda$2561/832550944@1a46478f
  ),
  scala.Function1$$Lambda$335/412925308@5eaa213b,
  1
)
In [7]:
import monix.execution.Scheduler.Implicits.global
fd1T.map(_.entropyVec).runToFuture
In [8]:
fd2T.map(_.entropyVec).runToFuture
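
The entropy vectors are computed asynchronously above and their values are not reproduced here. A minimal sketch for comparing them directly, assuming monix's blocking runSyncUnsafe (JVM only) is acceptable in this session:

import scala.concurrent.duration._

// block until the two tasks finish, then print the entropy vectors
// for terms of type M under the two provers
val ev1 = fd1T.map(_.entropyVec).runSyncUnsafe(10.minutes)
val ev2 = fd2T.map(_.entropyVec).runSyncUnsafe(10.minutes)
println("without codomain targeting:\n" + ev1.mkString("\n"))
println("with codomain targeting:\n" + ev2.mkString("\n"))
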
In [9]:
Unify.targetCodomain(mul, M)
Out[9]:
res8: Option[Term] = Some(($dxlc : M) ↦ ($dxlb : M) ↦ mul($dxlb)($dxlc))
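
As a sanity check on the distraction (a sketch, not run here): eqM has final codomain 𝒰 rather than M, so one expects targeting M as its codomain to fail.

// expected: None, since the final codomain of eqM is a universe, not M
Unify.targetCodomain(eqM, M)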

Conclusion

We see that with codomain targeting we get a much sharper distribution over terms of the targeted type. We should use this for better instantiation in the Achal problem.