(* In Andromeda everything the user writes is "meta-level" programming. *)
(* ML-level natural numbers *)
mltype rec mlnat =
  | zero
  | succ of mlnat
end
(* We can use the meta-level numbers to define a program which computes n-fold iterations
   of f. Here we give an explicit type of iterate because the ML-type inference cannot tell
   whether f is an ML-function or an object-level term. *)
let rec iterate f x n : judgment → judgment → mlnat → judgment =
  match n with
  | zero ⇒ x
  | succ ?n ⇒ f (iterate f x n)
  end
constant A : Type
constant a : A
constant g : A → A
do iterate g a (succ (succ (succ zero)))
(* output: ⊢ g (g (g a)) : A *)
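(* A side note on how this computes: the ML-level recursion unfolds immediately,
     iterate g a (succ (succ (succ zero)))
       ⇒ g (iterate g a (succ (succ zero)))
       ⇒ g (g (iterate g a (succ zero)))
       ⇒ g (g (g (iterate g a zero)))
       ⇒ g (g (g a))
   which is exactly the judgment printed above. *)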
(* Defining iteration of functions at object-level is a different business. We first
   axiomatize the natural numbers. *)
constant nat : Type
constant O : nat
constant S : nat → nat
constant nat_rect : Π (P : nat → Type) (s0 : P O) (s : Π (n : nat), P n → P (S n)) (m : nat), P m
constant nat_iota_O :
  Π (P : nat → Type) (s0 : P O) (s : Π (n : nat), P n → P (S n)),
  nat_rect P s0 s O ≡ s0
constant nat_iota_S :
  Π (P : nat → Type) (s0 : P O) (s : Π (n : nat), P n → P (S n)) (m : nat),
  nat_rect P s0 s (S m) ≡ s m (nat_rect P s0 s m)
(* We use the standard library to get correct normalization of nat_rect *)
now reducing = add_reducing nat_rect [lazy, lazy, lazy, eager]
now betas = add_betas [nat_iota_O, nat_iota_S]
(* Iteration at object-level is defined using nat_rect, as it is
   just the non-dependent version of induction. *)
constant iter : ∏ (X : Type), (X → X) → X → nat → X
constant iter_def :
  ∏ (X : Type) (f : X → X) (x : X) (n : nat),
  iter X f x n ≡ nat_rect (λ _, X) x (λ _ y, f y) n
(* We tell the standard library about the definition of iter *)
now betas = add_beta iter_def
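(* A side calculation: unfolding iter_def and then nat_iota_S gives the expected
   successor rule
     iter X f x (S m) ≡ nat_rect (λ _, X) x (λ _ y, f y) (S m)
                      ≡ (λ _ y, f y) m (nat_rect (λ _, X) x (λ _ y, f y) m)
                      ≡ f (iter X f x m)
   which is why registering iter_def as a beta rule lets whnf compute iterations below. *)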
(* An object-level iteration of g can be done as follows. *)
do iter A g a (S (S (S O)))
(* output: ⊢ iter A g a (S (S (S O))) : A *)
(* We can use the whnf function from the standard library to normalize a term. By default
   it performs weak head-normalization. But here we *locally* tell it to aggressively
   normalize arguments of g. *)
do
  now reducing = add_reducing g [eager] in
  whnf (iter A g a (S (S (S O))))
(* output: ⊢ refl (g (g (g a))) : iter A g a (S (S (S O))) ≡ g (g (g a)) *)
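(* Note: whnf returns a judgment whose type is an equation between the original term and
   its normal form, so the normal form can be extracted by matching on the right-hand
   side of that equation, as in the last example below. *)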
(* We can also pattern-match on terms at ML-level. *)
do
  match g (g (g a)) with
  | ⊢ (?h (?h ?x)) ⇒ h x
  end
(* output ⊢ g (g a) : A *)
(* We can match anywhere, also under a λ-expression. Because match is ML-level it computes
   immediately, i.e., it actually matches against the bound variable x. (The bound variable
   is not an application, so the first branch fails and the second one produces g x.) *)
do
  (λ x : A,
    match x with
    | ⊢ ?f ?y ⇒ y
    | ⊢ ?z ⇒ g z
    end) (g a)
(* output ⊢ (λ (x : A), g x) (g a) : A *)
(* Now suppose we want to construct λ h, h (h (h a)) using iteration. Since λ-expressions
   are still just ML-level programs which compute judgments, we can do it as follows using
   the ML-level iteration. *)
do λ h : A → A, iterate h a (succ (succ (succ zero)))
(* output: ⊢ λ (h : A → A), h (h (h a)) : (A → A) → A *)
(* If we use object-level iteration we get a different term which
   is still equal to the one we wanted. *)
do λ h : A → A, iter A h a (S (S (S O)))
(* output: ⊢ λ (h : A → A), iter A h a (S (S (S O))) : (A → A) → A *)
(* The standard library is smart enough to see that the two terms
   are equal. (It has funext built in.) *)
do
  let u = λ (h : A → A), iterate h a (succ (succ (succ zero)))
  and v = λ (h : A → A), iter A h a (S (S (S O))) in
  refl u : u ≡ v
(* output:
   ⊢ refl (λ (h : A → A), h (h (h a)))
     : (λ (h : A → A), h (h (h a))) ≡
       (λ (h : A → A), iter A h a (S (S (S O))))
*)
(* We could use object-level iteration and normalization to compute
   the desired term as follows. *)
do
  λ h : A → A,
    now reducing = add_reducing h [eager] in
    match whnf (iter A h a (S (S (S O)))) with
    | ⊢ _ : _ ≡ ?e ⇒ e
    end
(* output: ⊢ λ (h : A → A), h (h (h a)) : (A → A) → A *)
You can tell which tuples of constants are harmless by showing that their types are contractible, as I mentioned earlier. That's what it takes for them to be eliminated. And this is a semantic condition, not a built-in syntactic approximation of it. In general, we abhor syntactic conditions because they are necessarily limiting.
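For reference, here is (a propositional version of) the condition, sketched in Lean notation; the name IsContr is illustrative rather than anything from Andromeda's library:

-- A type A is contractible when it has a point to which every element is equal.
def IsContr (A : Type) : Prop :=
  ∃ a : A, ∀ x : A, x = a

Showing this for the type of a postulated constant is what justifies eliminating it.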
You should in general use `constant` for postulates and `assume` for things you want tracked. (I realize now that the above example does not have any `assume`s.)
Whether we can have different kinds of judgments is a very interesting question. It would be great if we could, but then I think we'd move much more in the direction of LF. I am not saying that's bad, just that it requires a bit of thought.
Do you see the gist comments in your GitHub notifications? I don't. It might not be possible to follow them, which is a bummer. In that case, it would be better to use Andromeda issues for discussions like this one.
No, I don't see anything in my github notifications.
It's true that syntactic conditions are more limited than semantic ones, but the point of a syntactic condition is that it's something you can check. The ultimate semantic condition is "P is true", but of course you can't check that directly; you have to give a proof, which is a syntactic thing whose correctness can be verified. I would be really unhappy if I had to prove every time I make a definition that it was well-defined; it's precisely the limitedness of syntactic conditions that allows us to prove metatheorems about them once and for all, like the admissibility of definitional extensions.
Is there an actual difference in meaning between `constant` and `assume`?
Finally, I think I still don't understand how variable binding works. Can you go through the funky handler example Gaetan posted on your blog
try lambda x : A, raise (Error x)
with Error e => e
end
and explain exactly what happens at every step and what the types of everything are?
Thanks! This business of joining contexts will take some getting used to, but I think I sort of see how it works.
Regarding definitions, remarks like "whether they are consistent is your problem" make me a bit queasy. I realize the point here is to allow the user to implement an arbitrary theory which might or might not be consistent, but it seems to me that we need some way to clearly separate the activity of "specifying a theory", when we allow ourselves to make postulates and don't expect the system to check their consistency, from the activity of "working inside that theory", when we do want the system to verify that we aren't introducing any new postulates by accident. Coq, for instance, separates "Axiom" from "Definition" for this reason, and I can "Print Assumptions" to see which axioms a theorem depends on. But if every "Definition" is actually just a pair of "Axioms", how can I separate out the ones that are "real" axioms? The point of having only a single definition for each term is that we can prove a meta-theorem ("delta-reduction"?) that any such definition can be eliminated; for more questionable kinds of "definitions" there is no such meta-theorem.
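Concretely, the separation looks roughly like this (a sketch in Lean syntax rather than Coq, mirroring the constants from the gist above; the names are illustrative):

-- "Real" postulates, analogous to the constants above: they are tracked as axioms.
axiom A : Type
axiom a : A
axiom g : A → A

-- An ordinary definition: eliminable by unfolding (delta-reduction), so it adds no assumptions.
def iterate3 (f : A → A) (x : A) : A := f (f (f x))

def t : A := iterate3 g a

-- Analogue of Coq's "Print Assumptions": reports which axioms t depends on.
#print axioms t   -- lists the axioms A, a, and g

Here the axiom-printing command reports only the postulates; the definition iterate3 never shows up, precisely because a definition can always be unfolded away.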
An unrelated question: I gather that every "judgment" is the same sort of judgment, namely a typing judgment for some term in some context. Is it possible for the user to extend Andromeda with other kinds of judgments? For instance, various people and I have been thinking about type theories with a judgment for "cofibrations"; could that be implemented in Andromeda?
Also: is it possible to "subscribe" to a gist so that I get email notifications when someone posts to it, the way I do when I comment on a github issue? It's annoying to have to keep coming back here and reloading the page.