Simple learning after sums of term probabilities

This is to check behaviour after making the sums of term probabilities constant by taking the gradient component perpendicular to these sums.

In [1]:
import $cp.bin.`provingground-core-jvm-dcc4d5d.fat.jar`
Out[1]:
import $cp.$                                           
In [2]:
import provingground._ , interface._, HoTT._, learning._ 
// Install an additional pretty-printer handler so that HoTT terms are
// rendered with fansi styling in the REPL output.
repl.pprinter() = {
  val p = repl.pprinter()
  p.copy(
    additionalHandlers = p.additionalHandlers.orElse {
      translation.FansiShow.fansiHandler
    }
  )
}
Out[2]:
import provingground._ , interface._, HoTT._, learning._ 
In [3]:
// Two type variables, a term of type A, and a function f : A -> B.
val A = "A" :: Type
val B = "B" :: Type
val a = "a" :: A
val f = "f"  :: (A ->: B)
// Prover seeded with a uniform distribution on {a, f(a), f}; the type
// distribution heavily favours B (0.99 vs 0.01 for A).
// hW = 0, klW = 10 — presumably this disables the entropy term of the
// cost and emphasises the KL term; NOTE(review): confirm against
// LocalProver's parameter semantics. noIsles switches off island generation.
val lp0 = LocalProver(TermState(FiniteDistribution.unif(a, f(a), f), FiniteDistribution(A -> 0.01, B -> 0.99)), hW = 0, klW = 10).noIsles
Out[3]:
A: Typ[Term] = A
B: Typ[Term] = B
a: Term = a
f: Func[Term, Term] = f
lp0: LocalProver = LocalProver(
  TermState(
    FiniteDistribution(
      Vector(
        Weighted(a, 0.3333333333333333),
        Weighted(f(a), 0.3333333333333333),
        Weighted(f, 0.3333333333333333)
      )
    ),
    FiniteDistribution(Vector(Weighted(A, 0.01), Weighted(B, 0.99))),
    Vector(),
    FiniteDistribution(Vector()),
    FiniteDistribution(Vector()),
    Empty
  ),
  TermGenParams(
    0.1,
    0.1,
    0.1,
    0.0,
    0.0,
    0.05,
    0.05,
    0.0,
    0.0,
    0.0,
    0.0,
    0.0,
    0.3,
    0.7,
    0.5,
    0.0,
    0.0,
    0.0
  ),
  1.0E-4,
  12 minutes,
  1.01,
  1.0,
...
In [4]:
import monix.execution.Scheduler.Implicits.global
Out[4]:
import monix.execution.Scheduler.Implicits.global
In [5]:
// Run the generator iteration for 100 steps and keep every 15th snapshot
// (indices 0, 15, 30, ...) to observe how the generator weights evolve.
// NOTE(review): the arguments (0, 1, 0.000001, e.finalDist) to
// generatorIterant are not documented here — confirm their meaning
// (likely learning-rate/cutoff parameters) against the library.
val gT0 = lp0.expressionEval.flatMap(e => e.generatorIterant(0, 1, 0.000001, e.finalDist).take(100).toListL.map(_.zipWithIndex.filter(_._2 % 15 == 0)) )
Out[5]:
gT0: monix.eval.Task[List[(FiniteDistribution[Term], Int)]] = FlatMap(
  Async(<function2>, false, true, true),
  ammonite.$sess.cmd4$Helper$$Lambda$2723/1016219062@2ca9ec62
)
In [6]:
// Kick off the Task on the implicit global scheduler; the future's value
// is the sampled list of (distribution, step-index) pairs.
val gF0 = gT0.runToFuture
gF0: monix.execution.CancelableFuture[List[(FiniteDistribution[Term], Int)]] = Success(
  List(
    (
      FiniteDistribution(
        Vector(
          Weighted(f(a), 0.34762395733133417),
          Weighted(f, 0.3194693144987902),
          Weighted(a, 0.3329067281698756)
        )
      ),
      0
    ),
    (
      FiniteDistribution(
        Vector(
          Weighted(f(a), 0.3476239573313341),
          Weighted(f, 0.3194692909272),
          Weighted(a, 0.3329067517414659)
        )
      ),
      15
    ),
    (
      FiniteDistribution(
        Vector(
          Weighted(f(a), 0.3476239573313341),
          Weighted(f, 0.3194692909272),
          Weighted(a, 0.3329067517414659)
        )
      ),
      30
    ),
    (
      FiniteDistribution(
        Vector(
          Weighted(f(a), 0.3476239573313341),
          Weighted(f, 0.3194692909272),
          Weighted(a, 0.3329067517414659)
        )
...
In [7]:
// Second prover: generators {a, f(a)} uniform, type distribution now
// favouring A (0.8 vs 0.2 for B); default hW/klW, islands disabled.
val lp1 = LocalProver(TermState(FiniteDistribution.unif(a, f(a)), FiniteDistribution(A -> 0.8, B -> 0.2))).noIsles
Out[7]:
lp1: LocalProver = LocalProver(
  TermState(
    FiniteDistribution(Vector(Weighted(a, 0.5), Weighted(f(a), 0.5))),
    FiniteDistribution(Vector(Weighted(A, 0.8), Weighted(B, 0.2))),
    Vector(),
    FiniteDistribution(Vector()),
    FiniteDistribution(Vector()),
    Empty
  ),
  TermGenParams(
    0.1,
    0.1,
    0.1,
    0.0,
    0.0,
    0.05,
    0.05,
    0.0,
    0.0,
    0.0,
    0.0,
    0.0,
    0.3,
    0.7,
    0.5,
    0.0,
    0.0,
    0.0
  ),
  1.0E-4,
  12 minutes,
  1.01,
  1.0,
  10000,
  10,
  1.0,
  1.0
)
In [8]:
// Tune lp1's generators and run on the global scheduler; with A favoured,
// the weight is expected to concentrate on `a`.
val tF = lp1.tunedGenerators.runToFuture
tF: monix.execution.CancelableFuture[FiniteDistribution[Term]] = Success(
  FiniteDistribution(
    Vector(
      Weighted(f(a), 0.018886148869072937),
      Weighted(a, 0.9811138511309271)
    )
  )
)
In [9]:
// Same tuning for lp0 (which favours B); compare against tF above.
val tF0 = lp0.tunedGenerators.runToFuture
tF0: monix.execution.CancelableFuture[FiniteDistribution[Term]] = Success(
  FiniteDistribution(
    Vector(
      Weighted(f(a), 0.4737319922622689),
      Weighted(f, 0.2051787292005911),
      Weighted(a, 0.32108927853714)
    )
  )
)

Conclusions

  • The behaviour is now as expected, though the tuning is slow.
  • While the initial values remained fixed for a while in the earlier experiments, other values may have been changing, with those changes propagating later.