In [1]:
import $cp.bin.`provingground-core-jvm.fat.jar`
Out[1]:
import $cp.$                                   

Fat jars work: including a fat jar on the classpath, rather than publishing the project locally, has worked well.

In [2]:
import provingground._ , interface._, HoTT._, learning._
Out[2]:
import provingground._ , interface._, HoTT._, learning._ 
In [3]:
// Augment the REPL's pretty-printer so HoTT terms render with fansi colouring:
// keep all existing handlers, falling through to the fansi handler when none match.
repl.pprinter() = {
  val p = repl.pprinter()
  p.copy(
    additionalHandlers = p.additionalHandlers.orElse {
      translation.FansiShow.fansiHandler
    }
  )
}
In [4]:
// Uniform initial distribution (weight 0.25 each, per Out[4]) over four base
// terms: the universe 𝒰, the types Unit and Zero, and the term Star.
val terms = FiniteDistribution.unif[Term](Type, Unit, Zero, Star)
Out[4]:
terms: FiniteDistribution[Term] = FiniteDistribution(
  Vector(
    Weighted(𝒰 , 0.25),
    Weighted(Unit, 0.25),
    Weighted(Zero, 0.25),
    Weighted(Star, 0.25)
  )
)
In [5]:
// Uniform distribution over the three types only (Star is a term, not a type),
// so each gets weight 1/3 as seen in Out[5].
val typs = FiniteDistribution.unif[Typ[Term]](Type, Unit, Zero)
Out[5]:
typs: FiniteDistribution[Typ[Term]] = FiniteDistribution(
  Vector(
    Weighted(𝒰 , 0.3333333333333333),
    Weighted(Unit, 0.3333333333333333),
    Weighted(Zero, 0.3333333333333333)
  )
)
In [6]:
// Initial prover state built from the term and type distributions above; the
// remaining TermState fields default to empty (see Out[6]).
val ts = TermState(terms, typs)
Out[6]:
ts: TermState = TermState(
  FiniteDistribution(
    Vector(
      Weighted(𝒰 , 0.25),
      Weighted(Unit, 0.25),
      Weighted(Zero, 0.25),
      Weighted(Star, 0.25)
    )
  ),
  FiniteDistribution(
    Vector(
      Weighted(𝒰 , 0.3333333333333333),
      Weighted(Unit, 0.3333333333333333),
      Weighted(Zero, 0.3333333333333333)
    )
  ),
  Vector(),
  FiniteDistribution(Vector()),
  FiniteDistribution(Vector()),
  Empty
)
In [7]:
// A prover over the initial state; cutoff = 0.01 — presumably the probability
// threshold below which generated terms are pruned — TODO confirm against
// LocalProver docs. Other generation parameters take defaults (Out[7]).
val lp0 = LocalProver(ts, cutoff = 0.01)
Out[7]:
lp0: LocalProver = LocalProver(
  TermState(
    FiniteDistribution(
      Vector(
        Weighted(𝒰 , 0.25),
        Weighted(Unit, 0.25),
        Weighted(Zero, 0.25),
        Weighted(Star, 0.25)
      )
    ),
    FiniteDistribution(
      Vector(
        Weighted(𝒰 , 0.3333333333333333),
        Weighted(Unit, 0.3333333333333333),
        Weighted(Zero, 0.3333333333333333)
      )
    ),
    Vector(),
    FiniteDistribution(Vector()),
    FiniteDistribution(Vector()),
    Empty
  ),
  TermGenParams(
    0.1,
    0.1,
    0.1,
    0.1,
    0.1,
    0.05,
    0.05,
    0.05,
    0.0,
    0.0,
    0.0,
    0.0,
    0.3,
    0.7,
    0.5,
    0.0,
...
In [8]:
// The evolved state as a lazy monix Task — nothing runs until the Task is
// executed (Out[8] shows only the unevaluated FlatMap structure).
val nsT = lp0.nextState
Out[8]:
nsT: monix.eval.Task[TermState] = FlatMap(
  Map(
    Async(<function2>, false, true, true),
    provingground.learning.LocalProver$$Lambda$2573/891349477@334f7016,
    0
  ),
  provingground.learning.LocalProver$$Lambda$2574/712541575@3c2cac8a
)
In [9]:
import monix.execution.Scheduler.Implicits.global
Out[9]:
import monix.execution.Scheduler.Implicits.global
In [10]:
// Start execution on the global monix Scheduler (imported in In[9]),
// converting the Task into a CancelableFuture.
val nsF = nsT.runToFuture
In [11]:
// Non-blocking peek at the future; None in Out[11] means the computation had
// not completed when inspected.
nsF.value
Out[11]:
res10: Option[scala.util.Try[TermState]] = None
In [12]:
// A default-cutoff prover sharpened by a factor of 10 — presumably tightening
// the cutoff to explore more terms — TODO confirm `sharpen` semantics.
val lp = LocalProver(ts).sharpen(10)
// Task computing theorems grouped/weighted by statement (unevaluated until run).
val thmsT = lp.theoremsByStatement 
// Kick off the computation on the global scheduler.
val thmsF = thmsT.runToFuture
In [13]:
import scala.concurrent._, scala.util._
// A trivially completed unit Future, with the ExecutionContext passed explicitly.
val fu = Future(())(ExecutionContext.global)
// Chain onto the theorems future; flatMap's implicit ExecutionContext is
// satisfied by the monix global Scheduler imported in In[9].
val fut = fu.flatMap(_ => thmsF)
In [14]:
// Map the eventual theorem distribution to its entropy vector — presumably
// statements ranked by entropy — TODO confirm `entropyVec` semantics.
thmsF.map(_.entropyVec)
In [15]:
// Sanity check that asynchronous results render in the notebook.
// NOTE(review): Thread.sleep inside a Future blocks a pool thread — fine for a
// demo, avoid in real code.
Future{Thread.sleep(1000); 5}(ExecutionContext.global)
In [16]:
// Same check with a HoTT value (the function type 𝒰 → 𝒰), confirming the
// fansi pretty-printer also applies to asynchronously delivered results.
Future{Thread.sleep(1000); Type ->: Type}(ExecutionContext.global)