From 3f7c22fafd0ed7e22504146f07be0cd0ac145651 Mon Sep 17 00:00:00 2001 From: Sergei Winitzki Date: Mon, 15 Jul 2024 15:21:53 +0200 Subject: [PATCH] Continue reformatting the book (#101) * wip * wip * wip * fix closing parentheses (closes #102) * wip * begin editing chapter 6 * wip editing chapter 6 * wip * wip * wip * wip corrections in chapter 9 * last corrections from the printed book * wip * corrections in chapter 9 * wip * better layout in chapter 6 * wip * fix script for no-number equations * corrections in chapter 10 * wip chapter 11 * finished corrections in chapter 11 * corrections in chapter 12 * wip * update pdf * wip * wip --- sofp-src/cover/sofp-back-cover-no-bg.tex.src | 2 +- sofp-src/lyx/sofp-applicative.lyx | 859 +++++---- sofp-src/lyx/sofp-curry-howard.lyx | 11 +- sofp-src/lyx/sofp-disjunctions.lyx | 2 +- sofp-src/lyx/sofp-essay2.lyx | 113 +- sofp-src/lyx/sofp-essay3.lyx | 4 +- sofp-src/lyx/sofp-filterable.lyx | 1397 ++++++++------- sofp-src/lyx/sofp-functors.lyx | 1637 +++++++++++------- sofp-src/lyx/sofp-induction.lyx | 50 +- sofp-src/lyx/sofp-monads.lyx | 1345 ++++++++------ sofp-src/lyx/sofp-preface.lyx | 160 +- sofp-src/lyx/sofp-reasoning.lyx | 478 +++-- sofp-src/lyx/sofp-summary.lyx | 162 +- sofp-src/lyx/sofp-transformers.lyx | 358 ++-- sofp-src/lyx/sofp-traversable.lyx | 499 +++--- sofp-src/lyx/sofp-typeclasses.lyx | 1101 +++++++----- sofp-src/lyx/sofp.lyx | 3 +- sofp-src/scripts/make_pdflatex_sources.sh | 2 +- sofp-src/tex/chapter3-picture.pdf | Bin 6720 -> 6720 bytes sofp-src/tex/sofp-applicative.tex | 688 ++++---- sofp-src/tex/sofp-back-cover-no-bg.tex | 6 +- sofp-src/tex/sofp-curry-howard.tex | 6 +- sofp-src/tex/sofp-disjunctions.tex | 4 +- sofp-src/tex/sofp-essay2.tex | 144 +- sofp-src/tex/sofp-essay3.tex | 4 +- sofp-src/tex/sofp-filterable.tex | 1007 +++++------ sofp-src/tex/sofp-free-type.tex | 41 +- sofp-src/tex/sofp-functors.tex | 982 +++++------ sofp-src/tex/sofp-induction.tex | 28 +- sofp-src/tex/sofp-monads.tex | 929 +++++----- sofp-src/tex/sofp-preface.tex | 53 +- sofp-src/tex/sofp-reasoning.tex | 274 ++- sofp-src/tex/sofp-summary.tex | 45 +- sofp-src/tex/sofp-transformers.tex | 323 ++-- sofp-src/tex/sofp-traversable.tex | 423 +++-- sofp-src/tex/sofp-typeclasses.tex | 926 +++++----- sofp-src/tex/sofp.tex | 12 +- talk_slides/fp_system_f_omega_dhall.lyx | 1261 ++++++++++++++ 38 files changed, 8673 insertions(+), 6666 deletions(-) create mode 100644 talk_slides/fp_system_f_omega_dhall.lyx diff --git a/sofp-src/cover/sofp-back-cover-no-bg.tex.src b/sofp-src/cover/sofp-back-cover-no-bg.tex.src index fd122722d..939a73161 100644 --- a/sofp-src/cover/sofp-back-cover-no-bg.tex.src +++ b/sofp-src/cover/sofp-back-cover-no-bg.tex.src @@ -22,7 +22,7 @@ functions and types; the Curry-Howard correspondence; laws, structural analysis, and code for functors, monads, and other typeclasses based on exponential-polynomial data types; techniques of symbolic derivation and proof; free typeclass constructions; and -parametricity theorems. +practical applications of parametricity. 
Long and difficult, yet boring explanations are logically developed in excruciating detail through NUMBEROFCODESNIPPETS diff --git a/sofp-src/lyx/sofp-applicative.lyx b/sofp-src/lyx/sofp-applicative.lyx index 77eac5b4a..e8c56947a 100644 --- a/sofp-src/lyx/sofp-applicative.lyx +++ b/sofp-src/lyx/sofp-applicative.lyx @@ -508,34 +508,7 @@ zip \end_inset -Using this type signature, the -\begin_inset listings -inline true -status open - -\begin_layout Plain Layout -\noindent - -zip -\end_layout - -\end_inset - - operation may be implemented for many type constructors, not only for -\begin_inset listings -inline true -status open - -\begin_layout Plain Layout -\noindent - -List -\end_layout - -\end_inset - --like collections. - In order to ensure that the implementation of +In order to ensure that the implementation of \begin_inset listings inline true status open @@ -562,20 +535,7 @@ zip \end_inset operation later in this chapter. - For now, let us look at some examples of implementing a -\begin_inset listings -inline true -status open - -\begin_layout Plain Layout -\noindent - -zip -\end_layout - -\end_inset - - operation. + For now, let us look at some examples. \end_layout \begin_layout Subsubsection @@ -923,7 +883,7 @@ Branch \end_inset -, the value +, we should replicate the value \begin_inset listings inline true status open @@ -935,7 +895,7 @@ x \end_inset - must be replicated to match the subtree of + as needed to match the subtree of \begin_inset listings inline true status open @@ -1030,11 +990,11 @@ times f$ ] ] \size default - with replicated + with \begin_inset Formula $b$ \end_inset -. + replicated 3 times. \end_layout \begin_layout Subparagraph @@ -1648,7 +1608,7 @@ flatMap(x => expr) \end_inset - cannot start evaluating + cannot evaluate \begin_inset listings inline true status open @@ -1855,13 +1815,11 @@ def map2[A, B, C](ra: Result[A], rb: Result[B])(f: (A, B) => C): Result[C] \begin_inset Quotes eld \end_inset - -\backslash -n +; \begin_inset Quotes erd \end_inset - + e2) // Messages are separated by a newline. + + e2) // Messages are separated by a semicolon. \end_layout \begin_layout Plain Layout @@ -1937,12 +1895,7 @@ scala> for { \begin_layout Plain Layout -res1: Either[String, Int] = Left(error: 1 / 0 -\end_layout - -\begin_layout Plain Layout - -error: 2 / 0) +res1: Either[String, Int] = Left(error: 1 / 0; error: 2 / 0) \end_layout \end_inset @@ -2186,7 +2139,7 @@ zip \end_inset -, we find only one difference: the code of +, we find that the code of \begin_inset listings inline true status open @@ -3495,8 +3448,7 @@ status open \begin_layout Plain Layout -final case class MyData(userId: Long, userName: String, userEmails: List[String] -) +final case class MyData(userId: Long, name: String, emails: List[String]) \end_layout \end_inset @@ -4121,7 +4073,7 @@ zip3 \end_inset -We can now implement a general function ( +We can write a general function ( \begin_inset Formula $\text{zip}_{n}$ \end_inset @@ -4219,9 +4171,9 @@ mapN \begin_inset Formula $A$ \end_inset -), we would need to use techniques of dependent-type programming, which - is beyond the scope of this book. - We will now describe a simpler solution that implements +), we would need to use techniques of type-level programming, which is beyond + the scope of this book. 
+ We will now describe a simpler solution implementing \begin_inset listings inline true status open @@ -4562,7 +4514,7 @@ def fmap2[A, B, C](f: A => B => C): L[A] => L[B] => L[C] = { la: L[A] => \end_inset Written in the point-free style using the code notation, this definition - looks like this: + becomes: \begin_inset Formula \[ \text{fmap}_{2}(f)\triangleq f^{\uparrow L}\bef\text{ap}_{L}\quad. @@ -4952,7 +4904,7 @@ map \end_inset because -\begin_inset Formula $L^{A}$ +\begin_inset Formula $L$ \end_inset does not have a @@ -5184,8 +5136,12 @@ type Reader[A] = E => A // The fixed type E must be already defined. \begin_layout Plain Layout -def zip[A, B](ra: Reader[A], rb: Reader[B]): Reader[(A, B)] = { e => (ra(e), - rb(e)) } +def zip[A, B](ra: Reader[A], rb: Reader[B]): Reader[(A, B)] = { +\end_layout + +\begin_layout Plain Layout + + e => (ra(e), rb(e)) } \end_layout \end_inset @@ -5210,7 +5166,12 @@ status open \begin_layout Plain Layout def map2[A, B, C](ra: Reader[A], rb: Reader[B])(f: A => B => C): Reader[C] - = { e => f(ra(e))(rb(e)) } + = { +\end_layout + +\begin_layout Plain Layout + + e => f(ra(e))(rb(e)) } \end_layout \end_inset @@ -5305,22 +5266,27 @@ status open \begin_layout Plain Layout def map2[A, B, C](ra: Reader[A], rb: Reader[B])(f: A => B => C): Reader[C] - = for { + = \end_layout \begin_layout Plain Layout - x <- ra + for { \end_layout \begin_layout Plain Layout - y <- rb + x <- ra \end_layout \begin_layout Plain Layout -} yield f(x)(y) + y <- rb +\end_layout + +\begin_layout Plain Layout + + } yield f(x)(y) \end_layout \end_inset @@ -5425,7 +5391,7 @@ Reader \end_inset - effects are always independent, and a + effects are always independent, and its \begin_inset listings inline true status open @@ -6357,8 +6323,7 @@ status open \begin_layout Plain Layout -import io.chymyst.ch._ // Import some symbols from the `curryhoward` - library. +import io.chymyst.ch._ // Import from the `curryhoward` library. \end_layout \begin_layout Plain Layout @@ -6792,19 +6757,7 @@ map \end_inset - and -\begin_inset listings -inline true -status open - -\begin_layout Plain Layout - -map2 -\end_layout - -\end_inset - - methods exist because + method exists because \begin_inset listings inline true status open @@ -6874,7 +6827,7 @@ other)(_ / _) \begin_layout Plain Layout -} // May need to define more operations here. +} // We may define more operations here. \end_layout \end_inset @@ -9001,7 +8954,7 @@ implicit class ParserMoreZipOps[A](parserA: P[A]) { \begin_layout Plain Layout - } // + } \end_layout \begin_layout Plain Layout @@ -9276,7 +9229,7 @@ res9: Int = 11 \end_inset -The recursion stops only because the operation +The recursion stops because the operation \begin_inset Quotes eld \end_inset @@ -10134,7 +10087,7 @@ xmap2 \end_inset if appropriate) for the following type constructors -\begin_inset Formula $F^{A}$ +\begin_inset Formula $F$ \end_inset : @@ -10185,11 +10138,11 @@ xmap2 \begin_inset Formula $F^{A}\triangleq A\rightarrow A\times Z$ \end_inset - where + when \begin_inset Formula $Z$ \end_inset - is a + has a \begin_inset listings inline true status open @@ -10201,7 +10154,7 @@ Monoid \end_inset -. + instance. 
\end_layout \begin_layout Subsubsection @@ -10238,14 +10191,28 @@ zip \end_inset - method for a ternary tree + method for a +\series bold +ternary tree +\series default + +\begin_inset Index idx +status open + +\begin_layout Plain Layout +ternary tree +\end_layout + +\end_inset + + \begin_inset listings inline true status open \begin_layout Plain Layout -T3[A] +T3 \end_layout \end_inset @@ -10523,11 +10490,11 @@ regexp extractor \end_inset as a type constructor -\begin_inset Formula $R^{A}$ +\begin_inset Formula $R$ \end_inset - describing extraction of various data from strings; the extracted data - has type + describing extraction of various data from a string, given a regular expression. + The extracted data has type \begin_inset listings inline true status open @@ -10565,7 +10532,7 @@ map2 \end_inset for -\begin_inset Formula $R^{A}$ +\begin_inset Formula $R$ \end_inset . @@ -11096,7 +11063,7 @@ zip \begin_inset Formula $\text{map}_{2}:L^{A}\times L^{B}\rightarrow\left(A\times B\rightarrow C\right)\rightarrow L^{C}$ \end_inset - satisfying the naturality law: + satisfying the following naturality law: \begin_inset Formula \[ \text{map}_{2}\,(p^{:L^{A}}\times q^{:L^{B}})(f^{:A\times B\rightarrow C})\triangleright(g^{:C\rightarrow D})^{\uparrow L}=\text{map}_{2}\,(p\times q)(f\bef g)\quad, @@ -11160,16 +11127,16 @@ zip \end_inset -Then we need to show that +We need to show that \begin_inset Formula $\text{map}_{2}^{\prime}=\text{map}_{2}$ \end_inset . - We apply + Apply \begin_inset Formula $\text{map}_{2}^{\prime}$ \end_inset - to arbitrary arguments and write: + to arbitrary arguments: \begin_inset Formula \begin{align*} & \text{map}_{2}^{\prime}\,(p\times q)(f)=(p\times q)\triangleright\text{zip}\triangleright f^{\uparrow L}\\ @@ -11247,12 +11214,12 @@ map2 \end_inset -Then we need to show that +We need to show that \begin_inset Formula $\text{zip}^{\prime}=\text{zip}$ \end_inset . - We apply + Apply \begin_inset Formula $\text{zip}^{\prime}$ \end_inset @@ -11264,7 +11231,7 @@ Then we need to show that \begin_inset Formula $q$ \end_inset - and write: +: \begin_inset Formula \begin{align*} & \text{zip}^{\prime}\,(p^{:L^{A}}\times q^{:L^{B}})=\text{map}_{2}\,(p\times q)(\text{id}^{:A\times B\rightarrow A\times B})=(p\times q)\triangleright\text{zip}\triangleright\text{id}^{\uparrow L}\\ @@ -11358,7 +11325,7 @@ noprefix "false" \end_inset : a function with one type parameter (a natural transformation) is equivalent - to a function with two type parameters if a naturality law holds with respect + to a function with two type parameters obeying a naturality law with respect to one of those type parameters. Here \begin_inset listings @@ -11480,7 +11447,7 @@ ap \begin_inset Formula $L$ \end_inset - (i.e., functions of type + (i.e., values of type \begin_inset Formula $L^{A\rightarrow B}$ \end_inset @@ -11551,7 +11518,7 @@ map2 \end_inset . 
- Let us write the relationship between + The relationship between \begin_inset listings inline true status open @@ -11575,7 +11542,7 @@ fmap2 \end_inset - in the code notation: + is: \begin_inset Formula \[ \xymatrix{\xyScaleY{0.4pc}\xyScaleX{2pc} & F^{B\rightarrow C}\ar[rd]\sp(0.5){\text{ap}^{B,C}}\\ @@ -11674,31 +11641,7 @@ For any functor \begin_inset Formula $L$ \end_inset - for which -\begin_inset listings -inline true -status open - -\begin_layout Plain Layout - -fmap2 -\end_layout - -\end_inset - - or -\begin_inset listings -inline true -status open - -\begin_layout Plain Layout - -ap -\end_layout - -\end_inset - - can be implemented, the type of functions +, the type of functions \begin_inset listings inline true status open @@ -11790,7 +11733,7 @@ ap \begin_inset Formula $\text{fmap}_{2}:\left(A\rightarrow B\rightarrow C\right)\rightarrow L^{A}\rightarrow L^{B}\rightarrow L^{C}$ \end_inset - satisfying the naturality law, + satisfying the following naturality law, \begin_inset Formula \[ \text{fmap}_{2}\,(g^{:X\rightarrow A}\bef f^{:A\rightarrow B\rightarrow C})(p^{:L^{X}})=\text{fmap}_{2}\,(f)(p\triangleright g^{\uparrow L})\quad, @@ -11854,16 +11797,16 @@ ap \end_inset -Then we need to show that +We need to show that \begin_inset Formula $\text{fmap}_{2}^{\prime}=\text{fmap}_{2}$ \end_inset . - We apply + Apply \begin_inset Formula $\text{fmap}_{2}^{\prime}$ \end_inset - to arbitrary arguments and write: + to arbitrary arguments: \begin_inset Formula \begin{align*} & \text{fmap}_{2}^{\prime}\,(f^{:A\rightarrow B\rightarrow C})(p^{:L^{A}})=p\triangleright f^{\uparrow L}\triangleright\text{ap}\\ @@ -12322,7 +12265,7 @@ eval \end_inset -To prove this property, apply both sides to arbitrary +To prove this property, apply both sides to a pair of arbitrary \begin_inset Formula $a^{:A}$ \end_inset @@ -12402,16 +12345,16 @@ ap \end_inset -Then we need to show that +We need to show that \begin_inset Formula $\text{zip}^{\prime}=\text{zip}$ \end_inset . - We apply + Apply \begin_inset Formula $\text{zip}^{\prime}$ \end_inset - to arbitrary arguments and write: + to arbitrary arguments: \begin_inset Formula \begin{align*} \text{expect to equal }\text{zip}\,(p\times q):\quad & \text{zip}^{\prime}\,(p^{:L^{A}}\times q^{:L^{B}})=\text{ap}\,(p\triangleright\text{pair}^{\uparrow L})(q)\\ @@ -12554,7 +12497,7 @@ zip \end_inset -Then we need to show that +We need to show that \begin_inset Formula $\text{ap}^{\prime}=\text{ap}$ \end_inset @@ -12896,7 +12839,7 @@ abstract class Zippable[L[_]: Functor] { \end_inset -Instead of using +Instead of \begin_inset listings inline true status open @@ -12936,19 +12879,19 @@ ap \end_layout \begin_layout Standard -In addition to these methods, it is helpful to require a +In addition to \begin_inset listings inline true status open \begin_layout Plain Layout -pure +zip \end_layout \end_inset - method for the functor +, it is helpful to require that the functor \begin_inset listings inline true status open @@ -12960,7 +12903,19 @@ L \end_inset -. + should have a +\begin_inset listings +inline true +status open + +\begin_layout Plain Layout + +pure +\end_layout + +\end_inset + + method. 
The resulting typeclass \begin_inset Index idx status open @@ -13045,7 +13000,7 @@ Applicative \end_inset - typeclass contains just these two methods: + typeclass contains just those two methods: \begin_inset listings inline false status open @@ -13072,7 +13027,7 @@ trait Applicative[L[_]] { \end_inset -Other methods ( +The functions \begin_inset listings inline true status open @@ -13096,7 +13051,7 @@ ap \end_inset -, +, and \begin_inset listings inline true status open @@ -13108,8 +13063,7 @@ pure \end_inset -) can be defined separately (as extension methods) using the functor instance - for + can be defined separately (as extension methods) using \begin_inset listings inline true status open @@ -13121,7 +13075,7 @@ L \end_inset -. +'s functor instance. However, this definition of the \begin_inset listings inline true @@ -13134,32 +13088,8 @@ Applicative \end_inset - typeclass can be used also with type constructors -\begin_inset listings -inline true -status open - -\begin_layout Plain Layout - -L[A] -\end_layout - -\end_inset - - that are not covariant in -\begin_inset listings -inline true -status open - -\begin_layout Plain Layout - -A -\end_layout - -\end_inset - -. - So, we will use this definition later in this chapter. + typeclass can be used also with type constructors that are not covariant. + So, we will prefer this typeclass definition in this book. \end_layout \begin_layout Subsection @@ -14136,30 +14066,6 @@ status open zip \end_layout -\end_inset - - that will follow once we express -\begin_inset listings -inline true -status open - -\begin_layout Plain Layout - -map2 -\end_layout - -\end_inset - - via -\begin_inset listings -inline true -status open - -\begin_layout Plain Layout - -zip -\end_layout - \end_inset . @@ -14388,7 +14294,7 @@ The functions \end_inset . - With these equivalences in mind, we rewrite the associativity law in a + With those equivalences implied, we rewrite the associativity law in a simpler form: \begin_inset Index idx status open @@ -14456,7 +14362,7 @@ noprefix "false" \begin_inset Formula $\cong$ \end_inset - denotes equality up to the type equivalence. + denotes equality up to a type equivalence. To obtain a real equation, one would need to apply \begin_inset Formula $\varepsilon_{1,23}^{\uparrow L}$ \end_inset @@ -14502,7 +14408,7 @@ noprefix "false" We also save time because we do not write out a number of tuple-swapping functions. To avoid errors, derivations using this technique must first check that - all types match up to tuple-swapping isomorphisms. + all types match (up to tuple-swapping isomorphisms). \end_layout \begin_layout Standard @@ -14815,7 +14721,7 @@ noprefix "false" \end_inset ). - So, we have justified the simplification of the left identity law to Eq. + So, we have justified the equivalence of the left identity law and Eq. \begin_inset space ~ \end_inset @@ -14841,7 +14747,7 @@ Since \begin_inset Formula $\text{ilu}^{\uparrow L}$ \end_inset - implements the type equivalence + implements a type equivalence \begin_inset Formula $L^{B}\cong L^{\bbnum 1\times B}$ \end_inset @@ -14984,7 +14890,7 @@ map2 \end_inset - can be simplified when formulated via + are simpler when formulated via \begin_inset listings inline true status open @@ -15394,7 +15300,7 @@ noprefix "false" \begin_layout Standard An applicative functor -\begin_inset Formula $L^{\bullet}$ +\begin_inset Formula $L$ \end_inset is @@ -15729,7 +15635,7 @@ noprefix "false" . 
We cannot expect to be able to parse different parts of a file in parallel, because correct parsing often depends on the success or failure of parsing - of previous portions of the data. + of previous text. \end_layout \begin_layout Standard @@ -15771,7 +15677,7 @@ status open \begin_layout Plain Layout -RDD[_] +RDD \end_layout \end_inset @@ -15789,7 +15695,7 @@ The \family typewriter Spark \family default - library does not support values of type + library does not support working with values of type \begin_inset listings inline true status open @@ -15802,7 +15708,7 @@ RDD[RDD[A]] \end_inset . - The + The class \begin_inset listings inline true status open @@ -15814,7 +15720,7 @@ RDD \end_inset - class's + has a method called \begin_inset listings inline true status open @@ -15826,7 +15732,7 @@ cartesian \end_inset - method has the type signature corresponding to + with the type signature of \begin_inset listings inline true status open @@ -15859,7 +15765,7 @@ flatMap \begin_inset Quotes erd \end_inset - exists but does not have the type signature of a monad's + exists but does not have the type signature required for a monad's \begin_inset listings inline true status open @@ -15869,6 +15775,18 @@ status open flatMap \end_layout +\end_inset + + for +\begin_inset listings +inline true +status open + +\begin_layout Plain Layout + +RDD +\end_layout + \end_inset . @@ -16354,7 +16272,7 @@ noprefix "false" \series bold (2) \series default - Begin with the right-hand side of Eq. + Apply the commutativity law to the right-hand side of Eq. \begin_inset space ~ \end_inset @@ -16368,7 +16286,7 @@ noprefix "false" \end_inset -) and apply the commutativity law: +): \begin_inset Formula \begin{align*} & r\,\,\text{zip}\,\,(\gunderline q\,\,\text{zip}\,\,\gunderline p)=\gunderline r\,\,\text{zip}\,\,\gunderline{(p\,\,\text{zip}\,\,q)}=(p\,\,\text{zip}\,\,q)\,\,\text{zip}\,\,r\\ @@ -16733,7 +16651,11 @@ Three type constructions are based on using just type parameters: a constant \end_layout \begin_layout Standard -Given a fixed monoid type +Given a fixed +\emph on +monoid +\emph default + type \begin_inset Formula $Z$ \end_inset @@ -16837,7 +16759,11 @@ Comparing this with the corresponding monad construction, we note that \begin_inset Formula $L^{A}\triangleq Z$ \end_inset - is not a monad unless + is +\emph on +not +\emph default + a monad unless \begin_inset Formula $Z=\bbnum 1$ \end_inset @@ -17074,7 +17000,7 @@ To verify the right identity law: \end_layout \begin_layout Standard -To verify the associativity law, first substitute the definition of +To verify the associativity law, substitute the definition of \begin_inset Formula $\text{zip}_{L}$ \end_inset @@ -17093,7 +17019,7 @@ To verify the associativity law, first substitute the definition of \end_inset -Now rewrite the right-hand side in a similar way: +Now rewrite the other side in a similar way: \begin_inset Formula \begin{align*} & \quad\text{right-hand side}:\quad\\ @@ -17370,7 +17296,7 @@ To verify the commutativity law of \begin_inset Formula $L$ \end_inset - assuming it holds for + when it holds for \begin_inset Formula $F$ \end_inset @@ -17406,7 +17332,7 @@ Co-products \begin_layout Standard The co-product -\begin_inset Formula $F^{A}+G^{A}$ +\begin_inset Formula $F+G$ \end_inset of two arbitrary applicative functors @@ -17431,7 +17357,7 @@ zip \end_inset method cannot be implemented for -\begin_inset Formula $F^{A}+G^{A}$ +\begin_inset Formula $F+G$ \end_inset was shown in Example @@ -17484,7 +17410,7 @@ noprefix "false" \begin_layout 
Standard If -\begin_inset Formula $F^{\bullet}$ +\begin_inset Formula $F$ \end_inset is applicative and @@ -17532,11 +17458,11 @@ wrapped unit \end_inset is a commutative monoid and -\begin_inset Formula $F^{\bullet}$ +\begin_inset Formula $F$ \end_inset is commutative then -\begin_inset Formula $L^{\bullet}$ +\begin_inset Formula $L$ \end_inset is also commutative. @@ -17668,7 +17594,7 @@ Since each of the arguments \end_inset may be in one of the two parts of the disjunction type -\begin_inset Formula $Z+F^{\bullet}$ +\begin_inset Formula $Z+F^{A}$ \end_inset , we have 8 cases. @@ -17726,7 +17652,7 @@ zip \end_inset . - So, we need to consider the following two cases: + It means that we need to consider the following two cases: \end_layout \begin_layout Standard @@ -17752,7 +17678,7 @@ zip . In this case, any arguments of type -\begin_inset Formula $\bbnum 0+F^{\bullet}$ +\begin_inset Formula $\bbnum 0+F^{A}$ \end_inset are ignored by @@ -17797,7 +17723,7 @@ zip \end_inset ) of type -\begin_inset Formula $\bbnum 0+F^{\bullet}$ +\begin_inset Formula $\bbnum 0+F^{A}$ \end_inset by the empty value @@ -17885,7 +17811,7 @@ status open \end_inset are of type -\begin_inset Formula $\bbnum 0+F^{\bullet}$ +\begin_inset Formula $\bbnum 0+F^{A}$ \end_inset . @@ -17950,7 +17876,7 @@ By assumption, \end_inset . - Next, we need the code for the lifted + Next, we need the code for the lifted function \begin_inset Formula $\text{swap}^{\uparrow L}$ \end_inset @@ -17990,10 +17916,6 @@ The difference between the sides disappears if ). -\begin_inset Formula $\square$ -\end_inset - - \end_layout \begin_layout Subsubsection @@ -18019,7 +17941,7 @@ noprefix "false" \begin_layout Standard If -\begin_inset Formula $F^{\bullet}$ +\begin_inset Formula $F$ \end_inset is applicative then @@ -18059,11 +17981,11 @@ wu . If -\begin_inset Formula $F^{\bullet}$ +\begin_inset Formula $F$ \end_inset is commutative then -\begin_inset Formula $L^{\bullet}$ +\begin_inset Formula $L$ \end_inset is also commutative. 
@@ -18074,7 +17996,7 @@ Proof \end_layout \begin_layout Standard -We will use Statement +We defer to Statement \begin_inset space ~ \end_inset @@ -18088,7 +18010,7 @@ noprefix "false" \end_inset -, where the same properties are demonstrated for a more general functor + below, where the same properties are demonstrated for a more general functor \begin_inset Formula $L^{A}\triangleq H^{A}+F^{A}$ \end_inset @@ -18112,8 +18034,8 @@ noprefix "false" \end_inset - and obtain the present statement because the compatibility law holds automatica -lly for +, which is justified because the compatibility law holds automatically for + \begin_inset Formula $\text{ex}_{H}\triangleq\text{id}$ \end_inset @@ -18745,7 +18667,7 @@ The following statement generalizes the construction \end_inset where -\begin_inset Formula $H^{\bullet}$ +\begin_inset Formula $H$ \end_inset is applicative and at the same time co-pointed (see Section @@ -18964,7 +18886,7 @@ noprefix "false" \end_inset -), we write the definition of +), take the definition of \begin_inset Formula $\text{zip}_{H}$ \end_inset @@ -19119,7 +19041,7 @@ noprefix "false" \end_inset ) shows that -\begin_inset Formula $F^{\bullet}$ +\begin_inset Formula $F$ \end_inset is applicative: it is a product of a constant functor ( @@ -19164,7 +19086,7 @@ zip \end_inset The functor -\begin_inset Formula $F^{\bullet}$ +\begin_inset Formula $F$ \end_inset is co-pointed because it has a fully parametric @@ -19256,15 +19178,15 @@ The functor \end_inset is applicative if -\begin_inset Formula $F^{\bullet}$ +\begin_inset Formula $F$ \end_inset and -\begin_inset Formula $H^{\bullet}$ +\begin_inset Formula $H$ \end_inset are applicative and in addition -\begin_inset Formula $H^{\bullet}$ +\begin_inset Formula $H$ \end_inset is co-pointed with a method @@ -19287,7 +19209,7 @@ noprefix "false" ). The applicative methods of -\begin_inset Formula $L^{\bullet}$ +\begin_inset Formula $L$ \end_inset are defined by: @@ -19315,15 +19237,15 @@ The method . If -\begin_inset Formula $F^{\bullet}$ +\begin_inset Formula $F$ \end_inset and -\begin_inset Formula $H^{\bullet}$ +\begin_inset Formula $H$ \end_inset are commutative applicative functors then -\begin_inset Formula $L^{\bullet}$ +\begin_inset Formula $L$ \end_inset is also commutative. @@ -19335,7 +19257,7 @@ Proof \begin_layout Standard The lifting to -\begin_inset Formula $L^{\bullet}$ +\begin_inset Formula $L$ \end_inset is defined by: @@ -19436,7 +19358,7 @@ Since the identity laws of \hline H^{B} & h\rightarrow\text{zip}_{H}(\text{wu}_{H}\times h) & \bbnum 0\\ F^{B} & \bbnum 0 & f\rightarrow\text{zip}_{F}((\text{wu}_{H}\triangleright\text{ex}_{H}\triangleright\text{pu}_{F})\times f) \end{array}\\ - & \quad=\,\begin{array}{|c||cc|} + & =\,\begin{array}{|c||cc|} & H^{\bbnum 1\times B} & F^{\bbnum 1\times B}\\ \hline H^{B} & \text{ilu}^{\uparrow H} & \bbnum 0\\ F^{B} & \bbnum 0 & \text{ilu}^{\uparrow F} @@ -19588,15 +19510,19 @@ The two sides are equal due to the associativity law of \end_inset after converting arguments of type -\begin_inset Formula $H^{\bullet}+0$ +\begin_inset Formula $H^{A}+0$ \end_inset to type -\begin_inset Formula $F^{\bullet}$ +\begin_inset Formula $F^{A}$ \end_inset when needed. 
We may define this conversion as a helper function +\begin_inset Quotes eld +\end_inset + + \begin_inset listings inline true status open @@ -19608,6 +19534,10 @@ toF \end_inset + +\begin_inset Quotes erd +\end_inset + : \begin_inset Formula \begin{align*} @@ -19693,7 +19623,8 @@ The two sides are equal due to the associativity law of \end_inset . - The two situations are symmetric, so let us consider the first one: + The two situations are symmetric, so it suffices to consider the first + one: \begin_inset Formula \begin{align*} \text{left-hand side}:\quad & \text{zip}_{L}(p\times\text{zip}_{L}(q\times r))\triangleright\varepsilon_{1,23}^{\uparrow L}\\ @@ -19899,7 +19830,7 @@ The two sides are now equal. \begin_layout Standard The constructions shown so far define applicative methods for all polynomial - functors. + functors: \end_layout \begin_layout Subsubsection @@ -19929,7 +19860,7 @@ noprefix "false" (a) \series default Any polynomial functor -\begin_inset Formula $L^{A}$ +\begin_inset Formula $L$ \end_inset whose fixed types are monoids can be made into an applicative functor. @@ -19942,15 +19873,15 @@ noprefix "false" (b) \series default If -\begin_inset Formula $P^{A}$ +\begin_inset Formula $P$ \end_inset and -\begin_inset Formula $Q^{A}$ +\begin_inset Formula $Q$ \end_inset are polynomial functors with monoidal fixed types and -\begin_inset Formula $R^{A}$ +\begin_inset Formula $R$ \end_inset is any applicative functor then @@ -19966,7 +19897,7 @@ noprefix "false" (c) \series default A recursive polynomial functor -\begin_inset Formula $L^{\bullet}$ +\begin_inset Formula $L$ \end_inset defined by @@ -19974,11 +19905,11 @@ noprefix "false" \end_inset is applicative if -\begin_inset Formula $S^{\bullet,\bullet}$ +\begin_inset Formula $S$ \end_inset and -\begin_inset Formula $F^{\bullet}$ +\begin_inset Formula $F$ \end_inset are polynomial (bi)functors with monoidal fixed types. @@ -19998,7 +19929,7 @@ noprefix "false" (c) \series default are commutative monoids, -\begin_inset Formula $L^{\bullet}$ +\begin_inset Formula $L$ \end_inset will be also commutative. @@ -20014,7 +19945,7 @@ Proof (a) \series default Any polynomial functor -\begin_inset Formula $L^{A}$ +\begin_inset Formula $L$ \end_inset is built (in at least one way) by combining fixed types and the type parameter @@ -20106,11 +20037,11 @@ with some fixed types \end_inset and some polynomial functors -\begin_inset Formula $S_{1}^{A}$ +\begin_inset Formula $S_{1}$ \end_inset , -\begin_inset Formula $S_{2}^{A}$ +\begin_inset Formula $S_{2}$ \end_inset . @@ -20127,19 +20058,19 @@ P^{A}+Q^{A}\times R^{A}=Z_{1}+A\times S_{1}^{A}+(Z_{2}+A\times S_{2}^{A})\times \end_inset Since -\begin_inset Formula $S_{1}^{A}$ +\begin_inset Formula $S_{1}$ \end_inset and -\begin_inset Formula $S_{2}^{A}$ +\begin_inset Formula $S_{2}$ \end_inset are polynomial functors of smaller degree than -\begin_inset Formula $P^{A}$ +\begin_inset Formula $P$ \end_inset and -\begin_inset Formula $Q^{A}$ +\begin_inset Formula $Q$ \end_inset , we may assume by induction that the property we are proving will already @@ -20200,11 +20131,11 @@ noprefix "false" \begin_inset Formula $F^{A}\triangleq Z_{1}+Z_{2}\times R^{A}$ \end_inset - is applicative and + is applicative and that \begin_inset Formula $F^{A}+A\times G^{A}\cong L^{A}$ \end_inset - is applicative. + is also applicative. 
\end_layout \begin_layout Standard @@ -20222,12 +20153,12 @@ noprefix "false" \end_inset is an applicative functor, we may use the inductive assumption that -\begin_inset Formula $L^{A}$ +\begin_inset Formula $L$ \end_inset is applicative when used in the recursive position (i.e., as the second argument of -\begin_inset Formula $S^{\bullet,\bullet}$ +\begin_inset Formula $S$ \end_inset ). @@ -20246,16 +20177,16 @@ noprefix "false" \end_inset , -\begin_inset Formula $L^{F^{A}}$ +\begin_inset Formula $L\circ F$ \end_inset is applicative when used in that position. Denoting -\begin_inset Formula $N^{A}\triangleq L^{F^{A}}$ +\begin_inset Formula $N\triangleq L\circ F$ \end_inset , we now rewrite -\begin_inset Formula $L^{A}$ +\begin_inset Formula $L$ \end_inset as: @@ -20280,7 +20211,7 @@ It remains to prove that \begin_layout Standard The polynomial bifunctor -\begin_inset Formula $S^{\bullet,\bullet}$ +\begin_inset Formula $S$ \end_inset can be expressed as: @@ -20292,18 +20223,14 @@ S^{A,R}=P_{0}^{A}+R\times(P_{1}^{A}+R\times(...\times(P_{n-1}^{A}+R\times P_{n}^ \end_inset where -\begin_inset Formula $P_{0}^{A}$ +\begin_inset Formula $P_{0}$ \end_inset , ..., -\begin_inset Formula $P_{n}^{A}$ -\end_inset - - are some polynomial functors (with respect to the type parameter -\begin_inset Formula $A$ +\begin_inset Formula $P_{n}$ \end_inset -) with monoidal fixed types. + are some polynomial functors with monoidal fixed types. We need to set \begin_inset Formula $R=N^{A}$ \end_inset @@ -20444,11 +20371,7 @@ Reader \end_inset - functor ( -\begin_inset Formula $L^{A}\triangleq R\rightarrow A$ -\end_inset - -) has a + functor has a \begin_inset listings inline true status open @@ -20517,7 +20440,7 @@ Reader \end_inset , where -\begin_inset Formula $H^{\bullet}$ +\begin_inset Formula $H$ \end_inset is an arbitrary contrafunctor. @@ -20539,7 +20462,7 @@ zip of this type. However, commutativity is not guaranteed for arbitrary -\begin_inset Formula $H^{\bullet}$ +\begin_inset Formula $H$ \end_inset . @@ -20614,7 +20537,7 @@ noprefix "false" \end_inset - + (is this wrong?) \end_layout \begin_layout Plain Layout @@ -21456,7 +21379,7 @@ noprefix "false" \end_layout \begin_layout Standard -Since all these constructions preserve commutative applicative functors, +Since all these constructions preserve commutativity of applicative functors, we conclude that \begin_inset Formula $L$ \end_inset @@ -21837,7 +21760,7 @@ noprefix "false" \end_layout \begin_layout Plain Layout -It remains to consider the case when the three arguments are of the form +It remains to consider the case when the three arguments are of the form: \begin_inset Formula \[ @@ -22000,11 +21923,11 @@ noprefix "false" \begin_layout Standard When -\begin_inset Formula $H^{A}$ +\begin_inset Formula $H$ \end_inset is a constant functor, the resulting -\begin_inset Formula $L^{\bullet}$ +\begin_inset Formula $L$ \end_inset is a monad (Statement @@ -22051,8 +21974,8 @@ zip not \emph default compatible with its monad methods. 
- To see this, it is sufficient to note that even the simplest tree-like - monad (the binary tree, + To see this, it suffices to note that even a simple tree-like monad (the + binary tree, \begin_inset Formula $L^{A}\triangleq A+L^{A}\times L^{A}$ \end_inset @@ -22062,7 +21985,7 @@ not \end_inset , however, is commutative because it is built from -\begin_inset Formula $H^{A}\triangleq1$ +\begin_inset Formula $H^{A}\triangleq\bbnum 1$ \end_inset and @@ -22201,7 +22124,7 @@ noprefix "false" \begin_inset Formula $P^{A}\triangleq F^{G^{A}+H^{A}\times P^{A}}$ \end_inset - instead. +. \end_layout \begin_layout Subparagraph @@ -22219,7 +22142,7 @@ Proof is built via known type constructions. At the top level, -\begin_inset Formula $L^{A}$ +\begin_inset Formula $L$ \end_inset is the co-pointed co-product construction (Statement @@ -22237,16 +22160,16 @@ noprefix "false" \end_inset ) with functors -\begin_inset Formula $G^{\bullet}$ +\begin_inset Formula $G$ \end_inset and -\begin_inset Formula $H^{\bullet}\times F^{L^{\bullet}}$ +\begin_inset Formula $H\times(F\circ L)$ \end_inset . The functor -\begin_inset Formula $H^{\bullet}\times F^{L^{\bullet}}$ +\begin_inset Formula $H\times(F\circ L)$ \end_inset is co-pointed because @@ -22269,7 +22192,7 @@ noprefix "false" ). The compatibility law holds for -\begin_inset Formula $H^{\bullet}\times F^{L^{\bullet}}$ +\begin_inset Formula $H\times(F\circ L)$ \end_inset due to Exercise @@ -22288,7 +22211,7 @@ noprefix "false" . The functor -\begin_inset Formula $F^{L^{\bullet}}$ +\begin_inset Formula $F\circ L$ \end_inset is applicative because it is a composition of @@ -22300,11 +22223,11 @@ noprefix "false" \end_inset . - As usual, we may assume that recursive uses of + (As usual, we assume that recursive uses of \begin_inset Formula $L$ \end_inset -'s methods will satisfy all required laws. +'s methods will satisfy all required laws.) \end_layout \begin_layout Standard @@ -22462,7 +22385,7 @@ noprefix "false" \end_inset ). - To establish the type equivalence + To establish a type equivalence \begin_inset Formula $P^{A}\cong F^{L^{A}}$ \end_inset @@ -22481,7 +22404,7 @@ noprefix "false" \end_inset below, where we need to set -\begin_inset Formula $R^{\bullet}\triangleq F^{\bullet}$ +\begin_inset Formula $R\triangleq F$ \end_inset , @@ -22871,7 +22794,7 @@ biapplicative bifunctor \end_inset -\begin_inset Formula $P^{A,R}$ +\begin_inset Formula $P$ \end_inset having a @@ -22889,7 +22812,7 @@ bizip method with this type signature: \begin_inset Formula \[ -\text{bizip}_{P}^{A,B,F^{\bullet}}:P^{A,F^{A}}\times P^{B,F^{B}}\rightarrow P^{A\times B,F^{A}\times F^{B}}\quad(\text{for all functors }F)\quad. +\text{bizip}_{P}^{A,B,F}:P^{A,F^{A}}\times P^{B,F^{B}}\rightarrow P^{A\times B,F^{A}\times F^{B}}\quad(\text{for all functors }F)\quad. \] \end_inset @@ -22959,11 +22882,11 @@ To find what biapplicative bifunctors exist, one could continue with structural analysis (considering products -\begin_inset Formula $P_{1}^{\bullet,\bullet}\times P_{2}^{\bullet,\bullet}$ +\begin_inset Formula $P_{1}\times P_{2}$ \end_inset , co-products -\begin_inset Formula $P_{1}^{\bullet,\bullet}+P_{2}^{\bullet,\bullet}$ +\begin_inset Formula $P_{1}+P_{2}$ \end_inset , and so on). 
@@ -23087,11 +23010,11 @@ noprefix "false" \begin_layout Standard Given two functors -\begin_inset Formula $R^{\bullet}$ +\begin_inset Formula $R$ \end_inset and -\begin_inset Formula $S^{\bullet}$ +\begin_inset Formula $S$ \end_inset , define two recursive types @@ -23448,7 +23371,7 @@ noprefix "false" \begin_layout Standard A contrafunctor -\begin_inset Formula $C^{\bullet}$ +\begin_inset Formula $C$ \end_inset is @@ -23483,9 +23406,9 @@ wu \begin_inset Formula \begin{align} & \text{zip}_{C}:C^{A}\times C^{B}\rightarrow C^{A\times B}\quad,\quad\quad\text{wu}_{C}:C^{\bbnum 1}\quad,\nonumber \\ - & \quad\text{associativity law}:\quad\\ + & \quad\text{associativity law}:\quad\nonumber \\ & \text{zip}_{C}(p\times\text{zip}_{C}(q\times r))\triangleright\tilde{\varepsilon}_{1,23}^{\downarrow C}=\text{zip}_{C}(\text{zip}_{C}(p\times q)\times r)\triangleright\tilde{\varepsilon}_{12,3}^{\downarrow C}\quad,\label{eq:applicative-contrafunctor-associativity-law}\\ - & \quad\text{left and right identity laws}:\quad\\ + & \quad\text{left and right identity laws}:\quad\nonumber \\ & \text{zip}_{C}(\text{wu}_{C}\times p)\triangleright\text{ilu}^{\downarrow C}=p\quad,\quad\quad\text{zip}_{C}(p\times\text{wu}_{C})\triangleright\text{iru}^{\downarrow C}=p\quad.\label{eq:applicative-contrafunctor-identity-laws} \end{align} @@ -23934,7 +23857,7 @@ To verify the commutativity law for \begin_inset Formula $C$ \end_inset -, we assume that the law holds for +, we assume that the same law holds for \begin_inset Formula $F$ \end_inset @@ -24005,11 +23928,11 @@ noprefix "false" \begin_layout Standard If -\begin_inset Formula $F^{\bullet}$ +\begin_inset Formula $F$ \end_inset and -\begin_inset Formula $G^{\bullet}$ +\begin_inset Formula $G$ \end_inset are applicative contrafunctors then the contrafunctor @@ -24090,11 +24013,11 @@ noprefix "false" \begin_layout Standard If -\begin_inset Formula $F^{\bullet}$ +\begin_inset Formula $F$ \end_inset and -\begin_inset Formula $G^{\bullet}$ +\begin_inset Formula $G$ \end_inset are applicative contrafunctors then the contrafunctor @@ -24299,7 +24222,11 @@ G^{A}\times G^{B} & \bbnum 0 & \text{zip}_{G} This function will sometimes ignore its argument when that argument has type -\begin_inset Formula $\bbnum 0+G^{\bullet}$ +\begin_inset Formula $\bbnum 0+G^{A}$ +\end_inset + + or +\begin_inset Formula $\bbnum 0+G^{B}$ \end_inset . @@ -24336,7 +24263,7 @@ We know that \end_inset will sometimes ignore its argument of type -\begin_inset Formula $\bbnum 0+G^{\bullet}$ +\begin_inset Formula $\bbnum 0+G^{B}$ \end_inset , and yet we need to guarantee that no information is lost from the argument @@ -24373,7 +24300,7 @@ With this choice, we can now verify the left identity law: F^{\bbnum 1}\times G^{B} & p\times\_^{:G^{B}}\rightarrow p\triangleright\pi_{1}^{\downarrow F} & \bbnum 0\\ G^{\bbnum 1}\times F^{B} & \_^{:G^{\bbnum 1}}\times q^{:F^{B}}\rightarrow q\triangleright\pi_{2}^{\downarrow F} & \bbnum 0\\ G^{\bbnum 1}\times G^{B} & \bbnum 0 & \text{zip}_{G} -\end{array}\bef\text{ilu}^{\downarrow C}\\ +\end{array}\,\bef\text{ilu}^{\downarrow C}\\ & =p\triangleright\,\begin{array}{|c||cc|} & F^{\bbnum 1\times B} & G^{\bbnum 1\times B}\\ \hline F^{B} & \pi_{2}^{\downarrow F} & \bbnum 0\\ @@ -24586,11 +24513,11 @@ zip \end_inset operation to be of type -\begin_inset Formula $F^{\bullet}+\bbnum 0$ +\begin_inset Formula $F^{A\times B}+\bbnum 0$ \end_inset or -\begin_inset Formula $\bbnum 0+G^{\bullet}$ +\begin_inset Formula $\bbnum 0+G^{A\times B}$ \end_inset . 
@@ -24603,7 +24530,7 @@ zip \end_inset is type -\begin_inset Formula $\bbnum 0+G^{\bullet}$ +\begin_inset Formula $\bbnum 0+G^{A\times B}$ \end_inset only when both @@ -24647,8 +24574,8 @@ So, if all of \begin_inset Formula $r$ \end_inset - are of type -\begin_inset Formula $\bbnum 0+G^{\bullet}$ + are of types +\begin_inset Formula $\bbnum 0+G^{...}$ \end_inset , the associativity law of @@ -24672,8 +24599,8 @@ So, if all of \begin_inset Formula $r$ \end_inset - are of type -\begin_inset Formula $F^{\bullet}+\bbnum 0$ + are of types +\begin_inset Formula $F^{...}+\bbnum 0$ \end_inset , the associativity law of @@ -24729,7 +24656,7 @@ We can now compute \end_inset , which always returns values of type -\begin_inset Formula $F^{\bullet}+\bbnum 0$ +\begin_inset Formula $F^{A\times(B\times C)}+\bbnum 0$ \end_inset : @@ -24793,7 +24720,7 @@ and to apply the tuple-rearranging isomorphisms. \begin{align*} & \text{zip}_{F}(f\times(h\triangleright\pi_{2}^{\downarrow F}))\triangleright\tilde{\varepsilon}_{1,23}^{\downarrow F}\\ & \quad=\text{zip}_{F}(f\times h)\triangleright(a\times(b\times c)\rightarrow a\times c)^{\downarrow F}\bef(a\times b\times c\rightarrow a\times(b\times c))^{\downarrow F}\\ - & \quad=\text{zip}_{F}(f\times h)\triangleright(a\times b\times c\rightarrow a\times c)^{\downarrow F}\quad.\\ + & \quad=\text{zip}_{F}(f\times h)\triangleright(a\times b\times c\rightarrow a\times c)^{\downarrow F}\quad,\\ & \text{zip}_{F}((f\triangleright\pi_{1}^{\downarrow F})\times h)\triangleright\tilde{\varepsilon}_{12,3}^{\downarrow F}\\ & \quad=\text{zip}_{F}(f\times h)\triangleright((a\times b)\times c\rightarrow a\times c)^{\downarrow F}\bef(a\times b\times c\rightarrow(a\times b)\times c)^{\downarrow F}\\ & \quad=\text{zip}_{F}(f\times h)\triangleright(a\times b\times c\rightarrow a\times c)^{\downarrow F}\quad. @@ -24893,8 +24820,8 @@ noprefix "false" shows simple examples where a function type construction fails to produce applicative functors. - However, the function type construction works for a wide class of applicative - contrafunctors: + However, this construction works for a wide class of applicative contrafunctors +: \end_layout \begin_layout Subsubsection @@ -24920,11 +24847,11 @@ noprefix "false" \begin_layout Standard If -\begin_inset Formula $G^{\bullet}$ +\begin_inset Formula $G$ \end_inset is an applicative contrafunctor and -\begin_inset Formula $H^{\bullet}$ +\begin_inset Formula $H$ \end_inset is @@ -24937,11 +24864,11 @@ any functor is applicative. If -\begin_inset Formula $G^{\bullet}$ +\begin_inset Formula $G$ \end_inset is commutative then -\begin_inset Formula $P^{\bullet}$ +\begin_inset Formula $P$ \end_inset is also commutative. 
@@ -25066,7 +24993,7 @@ To verify the associativity law, we use properties such as \begin_inset Formula $\varepsilon_{12,3}\bef\pi_{2}=\pi_{3}$ \end_inset - and so on: +, etc.: \begin_inset Formula \begin{align*} & h^{:H^{A\times B\times C}}\triangleright\tilde{\varepsilon}_{1,23}^{\uparrow H}\triangleright\text{zip}_{P}(p\times\text{zip}_{P}(q\times r))\triangleright\tilde{\varepsilon}_{1,23}^{\downarrow G}\\ @@ -25087,11 +25014,11 @@ The two sides are now equal due to the assumed associativity law of \end_layout \begin_layout Standard -It remains to verify the commutativity law, assuming that +It remains to verify the commutativity law when \begin_inset Formula $\text{zip}_{G}$ \end_inset - satisfies that law: + obeys that law: \begin_inset Formula \begin{align*} & \text{zip}_{P}(q\times p)=\Delta\bef(\pi_{1}^{\uparrow H}\boxtimes\pi_{2}^{\uparrow H})\bef(q\boxtimes p)\bef\text{zip}_{G}\quad,\\ @@ -25203,15 +25130,15 @@ noprefix "false" \end_inset . - This new type constructor ( + In this book, such type constructors \begin_inset Formula $Q$ \end_inset -) is also called a + are also called \begin_inset Quotes eld \end_inset -profunctor +profunctors \begin_inset Quotes erd \end_inset @@ -25349,7 +25276,7 @@ map \end_inset -like methods for -\begin_inset Formula $U^{\bullet}$ +\begin_inset Formula $U$ \end_inset . @@ -25490,7 +25417,7 @@ noprefix "false" \begin_layout Standard A profunctor -\begin_inset Formula $P^{\bullet}$ +\begin_inset Formula $P$ \end_inset is @@ -25679,7 +25606,7 @@ pure \end_inset means that the profunctor -\begin_inset Formula $P^{\bullet}$ +\begin_inset Formula $P$ \end_inset is pointed @@ -25866,7 +25793,7 @@ pure \end_inset - method is a function that ignores its argument always returns the given + method is a function that ignores its argument and always returns the given value: \begin_inset Formula \[ @@ -25880,7 +25807,7 @@ But there are many more functions with the same type signature as \end_inset . - To see this, it is convenient swap the curried arguments of + To see this, we swap the curried arguments of \begin_inset Formula $\text{pu}_{P}$ \end_inset @@ -26020,7 +25947,7 @@ noprefix "false" \begin_layout Standard If -\begin_inset Formula $F^{\bullet}$ +\begin_inset Formula $F$ \end_inset is an applicative @@ -26028,7 +25955,7 @@ If functor \emph default and -\begin_inset Formula $G^{\bullet}$ +\begin_inset Formula $G$ \end_inset is an applicative profunctor then the profunctor @@ -26100,11 +26027,11 @@ noprefix "false" \begin_layout Standard If -\begin_inset Formula $G^{\bullet}$ +\begin_inset Formula $G$ \end_inset and -\begin_inset Formula $H^{\bullet}$ +\begin_inset Formula $H$ \end_inset are applicative profunctors then so is @@ -26338,7 +26265,7 @@ noprefix "false" \begin_layout Standard If -\begin_inset Formula $F^{\bullet}$ +\begin_inset Formula $F$ \end_inset is an applicative profunctor and @@ -26378,11 +26305,11 @@ The method \end_inset is a commutative monoid and -\begin_inset Formula $F^{\bullet}$ +\begin_inset Formula $F$ \end_inset is commutative then -\begin_inset Formula $P^{\bullet}$ +\begin_inset Formula $P$ \end_inset is also commutative. @@ -26526,8 +26453,8 @@ Since each of the arguments \begin_inset Formula $r$ \end_inset - may be in one of the two parts of the disjunction type -\begin_inset Formula $Z+F^{\bullet}$ + may be in one of the two parts of the disjunctive types +\begin_inset Formula $Z+F^{...}$ \end_inset , we have 8 cases. @@ -26610,8 +26537,8 @@ zip \end_inset . 
- In this case, any arguments of type -\begin_inset Formula $\bbnum 0+F^{\bullet}$ + In this case, any arguments of types +\begin_inset Formula $\bbnum 0+F^{...}$ \end_inset are ignored by @@ -26724,8 +26651,8 @@ After this replacement, we have three arguments ( \begin_inset Formula $r$ \end_inset - are of type -\begin_inset Formula $\bbnum 0+F^{\bullet}$ + are of types +\begin_inset Formula $\bbnum 0+F^{...}$ \end_inset . @@ -26830,11 +26757,11 @@ The difference between the sides disappears if \begin_inset Formula $Z$ \end_inset - is a commutative monoid ( + is a commutative monoid: in that case, we have \begin_inset Formula $z_{1}\oplus z_{2}=z_{2}\oplus z_{1}$ \end_inset -). +. \begin_inset Formula $\square$ \end_inset @@ -26865,15 +26792,15 @@ noprefix "false" \begin_layout Standard If -\begin_inset Formula $F^{\bullet}$ +\begin_inset Formula $F$ \end_inset and -\begin_inset Formula $H^{\bullet}$ +\begin_inset Formula $H$ \end_inset are applicative profunctors and -\begin_inset Formula $H^{\bullet}$ +\begin_inset Formula $H$ \end_inset is co-pointed with the method @@ -26923,15 +26850,15 @@ The method . If -\begin_inset Formula $F^{\bullet}$ +\begin_inset Formula $F$ \end_inset and -\begin_inset Formula $H^{\bullet}$ +\begin_inset Formula $H$ \end_inset are commutative then -\begin_inset Formula $P^{\bullet}$ +\begin_inset Formula $P$ \end_inset is also commutative. @@ -26942,7 +26869,7 @@ Proof \end_layout \begin_layout Standard -We follow the proof of Statement +Follow the proof of Statement \begin_inset space ~ \end_inset @@ -26958,7 +26885,7 @@ noprefix "false" . The lifting to -\begin_inset Formula $P^{\bullet}$ +\begin_inset Formula $P$ \end_inset is defined by: @@ -27106,7 +27033,7 @@ The associativity law is an equation between values of type \begin_inset Formula \begin{align*} & \text{zip}_{P}(p^{:H^{A}+F^{A}}\times\text{zip}_{P}(q^{:H^{B}+F^{B}}\times r^{:H^{C}+F^{C}}))\triangleright\varepsilon_{1,23}^{\uparrow P}\tilde{\varepsilon}_{1,23}^{\downarrow P}\\ - & \quad=\text{zip}_{P}(\text{zip}_{P}(p\times q)\times r)\triangleright\varepsilon_{12,3}^{\uparrow P}\tilde{\varepsilon}_{12,3}^{\downarrow P}\quad. + & \quad\overset{?}{=}\text{zip}_{P}(\text{zip}_{P}(p\times q)\times r)\triangleright\varepsilon_{12,3}^{\uparrow P}\tilde{\varepsilon}_{12,3}^{\downarrow P}\quad. \end{align*} \end_inset @@ -27213,16 +27140,20 @@ The two sides are equal due to the associativity law of \begin_inset Formula $\text{zip}_{F}$ \end_inset - after converting arguments of type -\begin_inset Formula $H^{\bullet}+0$ + after converting arguments of types +\begin_inset Formula $H^{...}+0$ \end_inset - to type -\begin_inset Formula $F^{\bullet}$ + to types +\begin_inset Formula $F^{...}$ \end_inset when needed. - We may define this conversion as a helper function + We may define this conversion as a function +\begin_inset Quotes eld +\end_inset + + \begin_inset listings inline true status open @@ -27234,7 +27165,11 @@ toF \end_inset - in the same way as in the proof of Statement + +\begin_inset Quotes erd +\end_inset + + as in the proof of Statement \begin_inset space ~ \end_inset @@ -27443,7 +27378,7 @@ It remains to verify the commutativity law in case that law holds for : \begin_inset Formula \[ -\text{swap}\bef\text{zip}_{F}\overset{!}{=}\text{zip}_{F}\bef\text{swap}^{\uparrow F}\text{swap}^{\downarrow F}\quad,\quad\text{swap}\bef\text{zip}_{H}\overset{!}{=}\text{zip}_{H}\bef\text{swap}^{\uparrow H}\text{swap}^{\downarrow H}\quad\quad. 
+\text{swap}\bef\text{zip}_{F}\overset{!}{=}\text{zip}_{F}\bef\text{swap}^{\uparrow F}\text{swap}^{\downarrow F}\quad,\quad\text{swap}\bef\text{zip}_{H}\overset{!}{=}\text{zip}_{H}\bef\text{swap}^{\uparrow H}\text{swap}^{\downarrow H}\quad. \] \end_inset @@ -27567,11 +27502,11 @@ noprefix "false" \begin_layout Standard If -\begin_inset Formula $G^{\bullet}$ +\begin_inset Formula $G$ \end_inset is an applicative profunctor and -\begin_inset Formula $H^{\bullet}$ +\begin_inset Formula $H$ \end_inset is @@ -27794,11 +27729,7 @@ covariant \begin_inset Formula $P^{A}\triangleq H^{A}\rightarrow A$ \end_inset - where -\begin_inset Formula $H$ -\end_inset - - is an arbitrary profunctor. +. (We omit the proof of that statement.) \end_layout @@ -28657,12 +28588,7 @@ def p[A]: A => A = { \begin_layout Plain Layout - case a: Int => (a + 123) -\end_layout - -\begin_layout Plain Layout - - .asInstanceOf[A] + case a: Int => (a + 123).asInstanceOf[A] \end_layout \begin_layout Plain Layout @@ -28920,8 +28846,8 @@ flatMap has two. For this reason, we have systematically derived the laws of all the equivalent typeclass methods. - In many cases, we found a formulation of the laws that was either conceptually - simpler or more straightforward to verify. + In many cases, we found formulations of the laws that are simpler and more + straightforward to verify. \end_layout @@ -28995,20 +28921,8 @@ map2 \end_inset methods defined in this way will have the right type signature and will - satisfy the laws of applicative functors. - This is due to the fact that the laws of -\begin_inset listings -inline true -status open - -\begin_layout Plain Layout - -map2 -\end_layout - -\end_inset - - are derived (as shown in Section + satisfy the applicative laws. + This is due to the fact that those laws are derived (as shown in Section \begin_inset space ~ \end_inset @@ -29140,10 +29054,10 @@ Applicative \end_inset - typeclass instance automatically for all monads. - In most cases, we need to define the applicative instance separately from + typeclass instance automatically for a given monad. + In many cases, we need to define the applicative instance separately from the monad instance. - (Automatic derivation of + Automatic derivation of \begin_inset listings inline true status open @@ -29155,8 +29069,8 @@ Applicative \end_inset - instances is made difficult also by the fact that many type constructors - will admit more than one lawful implementation of + instances is difficult also because many type constructors admit several + lawful implementations of \begin_inset listings inline true status open @@ -29168,7 +29082,7 @@ map2 \end_inset -.) +. \end_layout \begin_layout Standard @@ -29200,9 +29114,8 @@ Applicative \end_inset instance exists for all polynomial functors with monoidal fixed types. - (Accordingly, all our examples of non-applicative functors involve non-polynomi -al functors.) This does not hold for monads; not all polynomial functors - are monadic. + (All our examples of non-applicative functors involve non-polynomial functors.) + But we have seen that not all polynomial functors are monads. 
\begin_inset Foot status open @@ -29215,7 +29128,7 @@ It is unknown how to characterize or enumerate all polynomial functors that \begin_inset CommandInset ref LatexCommand ref -reference "par:Problem-monads" +reference "par:Problem-monads-1" plural "false" caps "false" noprefix "false" @@ -29225,7 +29138,7 @@ noprefix "false" – \begin_inset CommandInset ref LatexCommand ref -reference "par:Problem-monads-1" +reference "par:Problem-monads" plural "false" caps "false" noprefix "false" @@ -29255,9 +29168,9 @@ noprefix "false" \begin_inset Formula $L^{A}\triangleq\bbnum 1+A\times A$ \end_inset -) that +) that does \emph on -cannot +not \emph default have a lawful monad implementation (Exercise \begin_inset space ~ @@ -29412,7 +29325,7 @@ extract \end_inset ). - So, the compatibility law requires that the operation + The compatibility law requires that the operation \begin_inset Formula $\text{zip}_{H}$ \end_inset @@ -29578,8 +29491,8 @@ applicative morphism \end_inset . - The two laws make applicative morphisms fully analogous to monad morphisms - (Section + The two laws display the similarity between applicative morphisms and monad + morphisms (Section \begin_inset space ~ \end_inset @@ -29842,7 +29755,7 @@ wrapped functions \end_inset ) can play the role of morphisms in a suitably defined category. - To define that category, we need to produce objects, morphisms, the identity + To define that category, we need to define objects, morphisms, the identity morphism, and the composition operation, and prove their laws. Finally, we will need to prove that \begin_inset listings @@ -29861,8 +29774,7 @@ ap \end_layout \begin_layout Standard -This section will follow these considerations in order to derive and verify - the laws of +This section will f derive and verify the laws of \begin_inset listings inline true status open @@ -29874,7 +29786,7 @@ ap \end_inset -. + in that way. 
\end_layout \begin_layout Standard @@ -30116,7 +30028,7 @@ wrapped identity \begin_inset Quotes erd \end_inset - value, + value ( \begin_inset listings inline true status open @@ -30128,11 +30040,11 @@ wid \end_inset -, of type -\begin_inset Formula $L^{A\rightarrow A}$ + +\begin_inset Formula $:L^{A\rightarrow A}$ \end_inset -, is defined by +) is defined by \begin_inset Formula $\text{wid}_{L}^{A}\triangleq\text{pu}_{L}(\text{id}^{A})$ \end_inset @@ -30310,7 +30222,7 @@ noprefix "false" \end_inset -Using these equations, we show that the two sides of the associativity law +Using these equations, we find that the two sides of the associativity law of \begin_inset Formula $\odot$ \end_inset @@ -30447,7 +30359,7 @@ zip \end_inset -Using these equations, we show that the two sides of the associativity law +Using these equations, we find that the two sides of the associativity law of \begin_inset Formula $\odot$ \end_inset @@ -30965,8 +30877,9 @@ noprefix "false" : \begin_inset Formula \begin{align*} -\quad\text{left-hand side}:\quad & r\triangleright\text{ap}\big(p^{:L^{A\rightarrow B}}\odot q^{:L^{B\rightarrow C}}\big)=\text{ap}\,(p\odot q)(r)\\ - & =((p\odot q)\times r)\triangleright\text{zip}\bef\text{eval}^{\uparrow L} +\text{left-hand side}:\quad & r\triangleright\text{ap}\big(p^{:L^{A\rightarrow B}}\odot q^{:L^{B\rightarrow C}}\big)=\text{ap}\,(p\odot q)(r)\\ + & =((p\odot q)\times r)\triangleright\text{zip}\bef\text{eval}^{\uparrow L}\\ +\text{definition (b) of }\odot:\quad & =\big(\big(\text{zip}\left(q\times p\right)\triangleright\gunderline{(h\times g\rightarrow g\bef h)^{\uparrow L}}\big)\times r\big)\triangleright\gunderline{\text{zip}}\bef\text{eval}^{\uparrow L} \end{align*} \end_inset @@ -30974,8 +30887,6 @@ noprefix "false" \begin_inset Formula \begin{align*} - & \quad\text{definition (b) of }\odot:\quad\\ - & =\big(\big(\text{zip}\left(q\times p\right)\triangleright\gunderline{(h\times g\rightarrow g\bef h)^{\uparrow L}}\big)\times r\big)\triangleright\gunderline{\text{zip}}\bef\text{eval}^{\uparrow L}\\ & \quad\text{naturality law of }\text{zip}:\quad\\ & =\big(\text{zip}\left(q\times p\right)\times r\big)\triangleright\text{zip}\triangleright\gunderline{((h\times g)\times a\rightarrow(g\bef h)\times a)^{\uparrow L}\bef\text{eval}^{\uparrow L}}\\ & \quad\text{composition under }^{\uparrow L}:\quad\\ @@ -31801,6 +31712,16 @@ contrafilterable \size footnotesize comonad +\begin_inset Index idx +status open + +\begin_layout Plain Layout +comonad +\end_layout + +\end_inset + + \end_layout \end_inset @@ -32923,7 +32844,7 @@ Show that \end_inset is a fixed monoidal type and -\begin_inset Formula $P^{\bullet}$ +\begin_inset Formula $P$ \end_inset is any applicative functor, contrafunctor, or profunctor. @@ -33178,10 +33099,6 @@ Show that the recursive functor \end_inset is applicative if -\begin_inset Formula $G^{A}$ -\end_inset - - is applicative and \begin_inset Formula $\text{wu}_{F}$ \end_inset @@ -33189,8 +33106,12 @@ Show that the recursive functor \begin_inset Formula $\text{wu}_{F}\triangleq\bbnum 0+\text{pu}_{G}\left(1\times\text{wu}_{F}\right)$ \end_inset -. - Use applicative functor constructions. +, assuming +\begin_inset Formula $G$ +\end_inset + + is an applicative functor. + (Use applicative functor constructions.) \end_layout \begin_layout Subsubsection @@ -33280,7 +33201,7 @@ Show that via a profunctor. Is -\begin_inset Formula $Q^{A}$ +\begin_inset Formula $Q$ \end_inset applicative? 
@@ -33313,7 +33234,7 @@ noprefix "false" (a) \series default For any given profunctor -\begin_inset Formula $P^{A}$ +\begin_inset Formula $P$ \end_inset , implement a function of type @@ -33330,7 +33251,7 @@ noprefix "false" (b) \series default Show that, for some profunctors -\begin_inset Formula $P^{A}$ +\begin_inset Formula $P$ \end_inset , one @@ -33371,7 +33292,7 @@ Implement profunctor and applicative instances for \end_inset where -\begin_inset Formula $G^{A}$ +\begin_inset Formula $G$ \end_inset is a given applicative profunctor and @@ -33416,7 +33337,7 @@ For the profunctor (a) \series default Show that -\begin_inset Formula $P^{A}$ +\begin_inset Formula $P$ \end_inset is pointed: there exist a value diff --git a/sofp-src/lyx/sofp-curry-howard.lyx b/sofp-src/lyx/sofp-curry-howard.lyx index 780755c33..0d1219519 100644 --- a/sofp-src/lyx/sofp-curry-howard.lyx +++ b/sofp-src/lyx/sofp-curry-howard.lyx @@ -1868,7 +1868,10 @@ turnstile status open \begin_layout Plain Layout -0@ +\begin_inset Formula $\triangleright$ +\end_inset + +@ \begin_inset Formula $\vdash$ \end_inset @@ -8660,7 +8663,10 @@ turnstile status open \begin_layout Plain Layout -0@ +\begin_inset Formula $\triangleright$ +\end_inset + +@ \begin_inset Formula $\vdash$ \end_inset @@ -24675,7 +24681,6 @@ pipe notation status open \begin_layout Plain Layout -0@ \begin_inset Formula $\triangleright$ \end_inset diff --git a/sofp-src/lyx/sofp-disjunctions.lyx b/sofp-src/lyx/sofp-disjunctions.lyx index bc09c0fc8..c4df4c793 100644 --- a/sofp-src/lyx/sofp-disjunctions.lyx +++ b/sofp-src/lyx/sofp-disjunctions.lyx @@ -4335,7 +4335,7 @@ y \end_inset - as in the code at left. + in the code above. Each pattern variable is defined only within the \emph on local scope diff --git a/sofp-src/lyx/sofp-essay2.lyx b/sofp-src/lyx/sofp-essay2.lyx index 4c30d43aa..697bb71f8 100644 --- a/sofp-src/lyx/sofp-essay2.lyx +++ b/sofp-src/lyx/sofp-essay2.lyx @@ -1027,9 +1027,9 @@ C. \begin_inset space \space{} \end_inset -Martin never studied any formalisms and do not think in terms of formalisms. - Instead, they summarize their programming experience in vaguely formulated - heuristic “principles”. +Martin who do not explain their material in terms of formalisms. + Instead, they summarize their programming experience in heuristically formulate +d “principles”. \begin_inset Foot status open @@ -1088,7 +1088,7 @@ strive for good interfaces \begin_inset Quotes erd \end_inset -, etc. +, and so on. \end_layout @@ -1361,8 +1361,7 @@ The Art of Programming indeed treats programming as an art and not as a science. Knuth shows many algorithms and derives their mathematical properties but - gives almost no examples of realistic program code and does not provide - any theory that could guide programmers in actually + does not provide any theory that could guide programmers in actually \emph on writing \emph default @@ -1530,19 +1529,11 @@ shoe equations \begin_inset Quotes erd \end_inset - are mathematically rigorous and can be analyzed or -\begin_inset Quotes eld -\end_inset - -verified -\begin_inset Quotes erd -\end_inset - -. + are mathematically rigorous and could be analyzed or verified. But the equations are merely written after the fact, they do not guide the fashion designers in actually making shoes. It is understandable that fashion designers do not study the mathematical - theory of surfaces. + theory of geometric surfaces. \end_layout \begin_layout Subsection* @@ -1815,9 +1806,9 @@ It is now clear that we do not presently have true software engineering. 
\end_layout \begin_layout Standard -True software engineering means having a mathematical theory that guides - the process of writing programs, — not just theory that describes or analyzes - programs after they are +Software engineering in the proper sense would mean having a mathematical + theory that guides the process of writing programs, — not just theory that + describes or analyzes programs after they are \emph on somehow \emph default @@ -1825,8 +1816,8 @@ somehow \end_layout \begin_layout Standard -It is not enough that the numerical methods required for physics or the - matrix calculations required for data science are +It is true that numerical methods required for physics and matrix calculations + required for data science are \begin_inset Quotes eld \end_inset @@ -1835,13 +1826,14 @@ mathematical \end_inset . - These programming tasks are indeed formulated using mathematical theory. + Those programming tasks are indeed formulated using mathematical theory. However, mathematical \emph on subject matter \emph default - (aerospace control, physics simulations, or statistics) does not mean that - mathematics is used to guide the process of writing code. + (aerospace control, physics simulations, or statistics) corresponds to + a relatively small part of written code and does not by itself guide the + process of writing code. Data scientists, aerospace engineers, and physicists almost always work as artisans when converting their computations into program code. \end_layout @@ -2103,7 +2095,7 @@ free applicative functor \end_inset construction. - It was first described in a 2014 paper; + It was first described in a 2014 paper. \begin_inset Foot status open @@ -2122,7 +2114,7 @@ literal "false" \end_inset - a couple of years later, a combined free applicative / free monad data + A couple of years later, a combined free applicative / free monad data type was designed and its implementation proposed in Scala \begin_inset Foot status open @@ -2381,8 +2373,8 @@ literal "false" true sense of the word. Modern computer science courses do not actually train engineers in that sense. - Instead, they train academic researchers who can also work as software - artisans and write code. + Instead, they train academic researchers who will in most cases go on to + work as software artisans writing code. \end_layout \begin_layout Standard @@ -2430,8 +2422,8 @@ out of control \begin_inset Quotes erd \end_inset -, and operating systems have been notorious for constantly appearing new - security flaws +, and operating systems have been notorious for a steady stream of new security + flaws \begin_inset Foot status open @@ -2823,9 +2815,9 @@ Functional programming languages started with Standard ML, which was a language Haskell, were mostly used for writing compilers and verified code. However, basic features of Standard ML — immutable polynomial data types, pattern-matching, higher-order functions, and parametric polymorphism with - a static type inference — have become standard, so that many new languages, - such as F#, Scala, Swift, and Rust, include them, while older languages - (Java, C#, Python) have also added some of these features. + a static type inference — have become standard, so that many new languages + (such as F#, Scala, Swift, and Rust) include them by design, while older + languages (Java, C#, Python) have retrofitted some of these features. \end_layout @@ -2850,15 +2842,15 @@ declarative programming \begin_inset Quotes erd \end_inset - when I started studying Haskell and then Prolog. 
- Both languages are claimed up front to be declarative, as opposed to imperative + when I started studying Haskell and Prolog. + Both languages are claimed to be declarative, as opposed to imperative languages such as C++ or Java. It was confusing, however, that two languages that are so different can be both deemed declarative. It was also clear that Prolog would be quite awkward for, say, numerical calculations, while Haskell would require a lot of hard-to-read, imperative code for tasks such as downloading a file from a Web server. - The book + (The book \begin_inset Quotes eld \end_inset @@ -2906,11 +2898,11 @@ literal "false" \end_inset - +) \end_layout \begin_layout Standard -So I then tried to understand what people mean by declarative programming. +I tried to understand what people mean by declarative programming. The Wikipedia definition \begin_inset Foot status open @@ -2948,8 +2940,8 @@ declarative \begin_inset Quotes erd \end_inset - is understood as a feature of a programming language as a whole, and any - programming language could be argued to be either + is understood as a feature of a programming language as a whole, as if + any programming language could be argued to be either \begin_inset Quotes eld \end_inset @@ -2961,7 +2953,7 @@ declarative \end_layout \begin_layout Standard -I was never satisfied with this definition and kept thinking about this +I was never satisfied with that definition and kept thinking about this question until I found a better definition, which I will explain now. \end_layout @@ -2986,8 +2978,8 @@ silver bullet \begin_layout Standard An important consequence is that the same languages were not suitable for - other problem domains! Prolog was not easily suitable for matrix multiplication -, nor Fortran for expert systems, nor Haskell for GUI programs. + other problem domains! Prolog was not suitable for matrix multiplication, + nor Fortran for expert systems, nor Haskell for GUI programs. \end_layout \begin_layout Standard @@ -2999,7 +2991,7 @@ declarativeness \begin_inset Quotes erd \end_inset - is not really a property of a programming language, but a + is not a property of a programming language, but a \emph on relation \emph default @@ -3396,7 +3388,7 @@ silver bullet \begin_inset Quotes erd \end_inset - examples, we can now formulate the principle of declarative programming: + examples, we arrive at the following definition of declarative programming: \end_layout \begin_layout Quote @@ -3435,7 +3427,7 @@ declarative for a chosen problem domain \end_layout \begin_layout Addsec -The problem of choosing a specification language +Specification languages \end_layout \begin_layout Standard @@ -3524,11 +3516,11 @@ good \end_inset notation — i.e., an unambiguous, expressive, and yet readable specification - language — for a given problem domain is an extremely difficult task that - may take a long time for newer problem domains. + language — for a given problem domain is a difficult task that may take + a long time for newer problem domains. The main reason for the difficulty is that a successful specification language - must be somehow convenient for human practitioners (whose detailed behavior, - to date, has evaded a formal description). + must be convenient for human practitioners (whose detailed behavior, to + date, has evaded a formal description). A person reading a description of a task in a good specification language must be able to understand the task quickly and should have no further questions or ambiguities to clarify. 
@@ -3551,9 +3543,9 @@ executable . However, blind attempts to use the same language for other problem domains did not bring any advantages. - The widely expressed disappointment with structural programming, with OOP, - or with functional programming is probably due to the fact that people - expected a + The widely expressed disappointments with structural programming, natural-langu +age programming, OOP, or functional programming is probably due to the fact + that people expected a \begin_inset Quotes eld \end_inset @@ -3578,9 +3570,16 @@ Without an accepted specification language, there is no hope of reaping at the same time readily understandable to humans. When people design GUIs, they communicate their designs to each other informall y and in multiple stages, gradually resolving the inevitable ambiguities. - ("And what if I now press this button in that window while the old message - box is still visible?") As a result, GUI programming remains a difficult - and error-prone exercise. + ( +\begin_inset Quotes eld +\end_inset + +And what if I now press this button in that window while the old message + box is still visible? +\begin_inset Quotes erd +\end_inset + +) As a result, GUI programming remains a difficult and error-prone exercise. Established GUI environments (X Window, MS Windows, macOS, iOS, Android) predominantly use the object-oriented paradigm, which turned out to be not a silver bullet for complex GUI design. @@ -3821,7 +3820,7 @@ n is a telephone number rather than a matrix, only to discover the error much later when the code is running in production. - The host language must have a tight control over the abstractions behind + The host language must maintain strict control over the abstractions behind the DSL. \end_layout diff --git a/sofp-src/lyx/sofp-essay3.lyx b/sofp-src/lyx/sofp-essay3.lyx index 4e5e7af76..12dc299ad 100644 --- a/sofp-src/lyx/sofp-essay3.lyx +++ b/sofp-src/lyx/sofp-essay3.lyx @@ -3408,7 +3408,7 @@ X . Some programming languages support functions with type parameters. - In Scala, the syntax is + In Scala, the syntax is: \begin_inset listings inline false status open @@ -3526,7 +3526,7 @@ t \end_inset . - In Scala syntax, the law is written as + In Scala syntax, the law is written as: \begin_inset listings inline false status open diff --git a/sofp-src/lyx/sofp-filterable.lyx b/sofp-src/lyx/sofp-filterable.lyx index a6275e0e0..175734a69 100644 --- a/sofp-src/lyx/sofp-filterable.lyx +++ b/sofp-src/lyx/sofp-filterable.lyx @@ -448,7 +448,7 @@ filter is the following calculation: \begin_inset Formula \[ -\sum_{x\in\mathbb{Z};\,0\leq x\leq100;\,\cos x>0}\sqrt{\cos\left(x\right)}\approx38.71\quad. +\sum_{x\in\mathbb{Z};\,0\leq x\leq100;\,\cos x>0}\sqrt{\cos x}\approx38.71\quad. \] \end_inset @@ -482,13 +482,12 @@ filter \end_inset - in this computation is to select only the positive values of -\begin_inset Formula $\cos\left(x\right)$ + in this computation is to remove all non-positive values of +\begin_inset Formula $\cos x$ \end_inset . - It is safe to apply the square root function to positive values, so the - code will work correctly. + It is safe to apply the square root function to positive values. 
\end_layout \begin_layout Standard @@ -867,7 +866,7 @@ status open \begin_layout Plain Layout -F[_] +F \end_layout \end_inset @@ -933,7 +932,7 @@ status open \begin_layout Plain Layout -withFilter +filter \end_layout \end_inset @@ -1018,7 +1017,7 @@ res2: Option[Int] = None In an intuitive view, a functor wraps one or more data values, and the filtering operation may decrease the number of values wrapped. - e.g., the length of the sequence + E.g., the length of the sequence \begin_inset listings inline true status open @@ -1101,7 +1100,7 @@ None \end_inset ). - In all cases, the resulting collection will not contain values that fail + In all cases, the resulting data structure will not store values that fail the filtering predicate. \end_layout @@ -1197,7 +1196,7 @@ status open \begin_layout Plain Layout -.filter(p) +_.filter(p) \end_layout \end_inset @@ -1381,14 +1380,13 @@ final case class Orders[A](tue: Option[A], fri: Option[A]) { \begin_layout Plain Layout - def map[B](f: A => B): Orders[B] = Orders(tue.map(f), fri.map(f)) - // Functor. + def map[B](f: A => B): Orders[B] = Orders(tue.map(f), fri.map(f)) \end_layout \begin_layout Plain Layout def withFilter(p: A => Boolean): Orders[A] = Orders(tue.filter(p), fri.filter(p) -) // Filterable. +) \end_layout \begin_layout Plain Layout @@ -1454,7 +1452,7 @@ Source \begin_inset Quotes erd \end_inset - of type Orders[Int]. + has type Orders[Int]. \end_layout \begin_layout Plain Layout @@ -1464,8 +1462,7 @@ Source \begin_layout Plain Layout - if y < 500 // Orders are approved if the amount is below $500 after - discount. + if y < 500 // Orders are approved if amount < 500 after discount. \end_layout \begin_layout Plain Layout @@ -1485,7 +1482,7 @@ res1: Orders[Double] = Orders(Some(330.0), None) \end_layout \begin_layout Standard -Suppose we are considering an additional business rule, such as: +Suppose we are considering additional business rules, such as: \end_layout \begin_layout Standard @@ -1608,7 +1605,7 @@ val result = for { // Some computations in the context of the `List` functor. \begin_layout Plain Layout if p1(y) // ... - impose condition p1: discard all y for which p1(y) == false + impose condition p1; continue only if p1(y) == true \end_layout \begin_layout Plain Layout @@ -1626,19 +1623,17 @@ val result = for { // Some computations in the context of the `List` functor. \begin_layout Plain Layout if q(x, y, z) // ... - impose another condition + impose another condition q(x, y, z) \end_layout \begin_layout Plain Layout -} yield // For all x in the given list, such that all the conditions - hold, +} yield // For those x for which all the conditions hold, \end_layout \begin_layout Plain Layout - k(x, y, z) // compute the values k, put them into a list, and return - as the list `result`. + k(x, y, z) // compute the list of values k as the `result`. \end_layout \end_inset @@ -1649,7 +1644,7 @@ val result = for { // Some computations in the context of the `List` functor. \begin_layout Standard There are several properties that one intuitively expects such programs to have. - One property is that computing + For example, the code says \begin_inset listings inline true status open @@ -1665,7 +1660,8 @@ y = f(x) \begin_inset Formula $3$ \end_inset - and then checking a condition for +. 
+ Then we expect that checking a condition for \begin_inset listings inline true status open @@ -1702,71 +1698,28 @@ if p1(y) \end_inset , should be the same as checking the condition -\begin_inset listings -inline true -status open - -\begin_layout Plain Layout - -p1(f(x)) -\end_layout - -\end_inset - - and then computing -\begin_inset listings -inline true -status open - -\begin_layout Plain Layout - -y = f(x) -\end_layout - +\begin_inset Quotes eld \end_inset -: since the code says that -\begin_inset listings -inline true -status open - -\begin_layout Plain Layout - -y = f(x) -\end_layout - -\end_inset -, we expect the conditions \begin_inset listings inline true status open \begin_layout Plain Layout -p1(y) +if p1(f(x)) \end_layout \end_inset - and -\begin_inset listings -inline true -status open - -\begin_layout Plain Layout - -p1(f(x)) -\end_layout +\begin_inset Quotes erd \end_inset - to be equivalent. -\end_layout - -\begin_layout Standard -Translating this equivalence into code, we obtain the requirement that the - following two expressions ( +. + Translating this equivalence into code, we obtain the requirement that + the following two expressions ( \begin_inset listings inline true status open @@ -1790,7 +1743,7 @@ result2 \end_inset -) should be equal to each other: +) should be equal: \end_layout \begin_layout Standard @@ -2021,7 +1974,7 @@ p2 . So, we expect that applying these two filtering operations is equivalent - to filtering by the condition + to filtering by the single condition \begin_inset Quotes eld \end_inset @@ -2045,8 +1998,32 @@ if p1(y) && p2(y) \end_layout \begin_layout Standard -We can translate this expectation into equality of the following two code - expressions: +We translate this expectation into the requirement that the following values + +\begin_inset listings +inline true +status open + +\begin_layout Plain Layout + +result1 +\end_layout + +\end_inset + + and +\begin_inset listings +inline true +status open + +\begin_layout Plain Layout + +result2 +\end_layout + +\end_inset + + should be equal: \end_layout \begin_layout Standard @@ -2372,7 +2349,7 @@ if p(x) . 
In particular, we should be able to use a partial function safely as long - as that function is defined for + as that function is well-defined for \begin_inset listings inline true status open @@ -2712,12 +2689,12 @@ val result2 = for { \begin_layout Plain Layout - if p(x) + if p(x) // def fp = if_p(p)(f) \end_layout \begin_layout Plain Layout - y = fp(x) // Here fp = if_p(p)(f) + y = fp(x) \end_layout \begin_layout Plain Layout @@ -2753,11 +2730,7 @@ val result2 = xs.filter(p).map(fp) \end_layout \begin_layout Standard -We found -\begin_inset Formula $4$ -\end_inset - - requirements for the +We found four requirements for the \begin_inset listings inline true status open @@ -2914,7 +2887,7 @@ filter : \begin_inset Formula \[ -\xymatrix{\xyScaleY{1.4pc}\xyScaleX{7.0pc}F^{A}\ar[r]\sp(0.5){\text{filt}_{F}(f^{:A\rightarrow B}\bef q^{:B\rightarrow\bbnum 2})}\ar[d]\sb(0.45){(f^{:A\rightarrow B})^{\uparrow F}} & F^{A}\ar[d]\sp(0.45){(f^{:A\rightarrow B})^{\uparrow F}}\\ +\xymatrix{\xyScaleY{1.4pc}\xyScaleX{7.0pc}F^{A}\ar[r]\sp(0.5){\text{filt}_{F}(f^{:A\rightarrow B}\bef q^{:B\rightarrow\bbnum 2})}\ar[d]\sb(0.45){(f^{:A\rightarrow B})^{\uparrow F}} & F^{A}\ar[d]\sp(0.5){(f^{:A\rightarrow B})^{\uparrow F}}\\ F^{B}\ar[r]\sp(0.5){\text{filt}_{F}(q^{:B\rightarrow\bbnum 2})} & F^{B} } \] @@ -3048,7 +3021,7 @@ List \end_inset - obey the filtering laws: those types can be viewed as + obey the filtering laws: those types are \begin_inset Quotes eld \end_inset @@ -3153,7 +3126,7 @@ scalacheck \end_inset -2 to implement randomized tests for the four filtering laws: + to implement randomized tests for the four filtering laws: \begin_inset listings inline false status open @@ -3306,7 +3279,7 @@ name "subsec:Examples-of-non-filterable-functors" \end_layout \begin_layout Standard -As usual with typeclasses, the code of the +As usual with Scala typeclasses, the code of the \begin_inset listings inline true status open @@ -3528,7 +3501,7 @@ res2: Orders[String] = Orders(Some("Amount: 500"), Some("Amount: 2000")) \end_inset -This computation violates the partial function law because the value +This computation violates the partial function law: the value \begin_inset listings inline true status open @@ -3586,7 +3559,8 @@ x = 2000 \end_inset - from the data structure in that case. + from the data structure in case there is another value that passes the + predicate. \end_layout \begin_layout Standard @@ -3630,8 +3604,8 @@ Orders is not a filtering operation. For instance, applying two order approvals one after another will not give the intuitively expected results. - Nevertheless, this may be acceptable in applications where only one order - approval is ever applied. + Nevertheless, this may be acceptable in applications where one order approval + is never applied after another. 
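The contrast can be made concrete with a small sketch that uses only the standard library: for a lawful filtering operation such as Option's filter, applying two predicates one after another gives the same result as applying their conjunction once — exactly the composition property that an order-approval operation of the kind just described does not have.

// Composition law of Option's standard filter, checked on sample values.
def compositionLawHolds[A](fa: Option[A], p1: A => Boolean, p2: A => Boolean): Boolean =
  fa.filter(p1).filter(p2) == fa.filter(a => p1(a) && p2(a))

val samples = List(
  compositionLawHolds(Some(100), (_: Int) > 50, (_: Int) < 500),        // true
  compositionLawHolds(Some(1000), (_: Int) > 50, (_: Int) < 500),       // true
  compositionLawHolds(Option.empty[Int], (_: Int) > 50, (_: Int) < 500) // true
)
assert(samples.forall(identity))  // All sample checks pass for Option.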
\end_layout \begin_layout Standard @@ -3830,8 +3804,8 @@ implicit val wrongFilterableOption = new Filterable[Option] { \end_inset -This code discards information and violates the identity law: the result - of filtering with an identically +This code discards information and violates the identity law: the filtering + with an identically \begin_inset listings inline true status open @@ -3843,7 +3817,11 @@ true \end_inset - predicate is not the identity function of type + predicate is +\emph on +not +\emph default + the identity function of type \begin_inset listings inline true status open @@ -3900,7 +3878,7 @@ filter \end_inset - function must be fully parametric and must not use hard-coded values of + function must be fully parametric and may not use hard-coded values of specific types or make decisions based on specific types. \end_layout @@ -3921,7 +3899,8 @@ Boolean \begin_inset Formula $\bbnum 2\cong\bbnum 1+\bbnum 1$ \end_inset -; in other words, this type can be expressed via the basic type constructions +. + In other words, that type can be expressed via the basic type constructions (disjunction and the \begin_inset listings inline true @@ -3986,11 +3965,8 @@ noprefix "false" \end_layout \begin_layout Standard -A cluster has -\begin_inset Formula $2$ -\end_inset - - servers; each server needs to have valid credentials, which expire periodically. +A cluster has two servers; each server needs to have valid credentials, + which expire periodically. If credentials expire for one server, it may copy valid credentials from the other server. If no server has valid credentials, the cluster is down. @@ -4076,7 +4052,15 @@ In line \end_inset 4, we need to compute a value of type -\begin_inset Formula $F^{A}$ +\begin_inset listings +inline true +status open + +\begin_layout Plain Layout + +F[A] +\end_layout + \end_inset using the given values @@ -4216,7 +4200,20 @@ None \end_layout \begin_layout Standard -Looking at the business requirements, we see that +We may describe the validity of credentials by a predicate +\begin_inset listings +inline true +status open + +\begin_layout Plain Layout + +p: A => Boolean +\end_layout + +\end_inset + +. + If \begin_inset listings inline true status open @@ -4228,7 +4225,7 @@ p(a1) == false \end_inset - means the first server's credentials expired. + then the first server's credentials expired. In that case, if \begin_inset listings inline true @@ -4241,7 +4238,7 @@ p(a2) == true \end_inset -, the first server copies the second server's valid credentials, +, the first server will copy the second server's valid credentials ( \begin_inset listings inline true status open @@ -4253,7 +4250,7 @@ a2 \end_inset -. +). So, we must return \begin_inset listings inline true @@ -4280,8 +4277,7 @@ def filter[A](p: A => Boolean): F[A] => F[A] = { \begin_layout Plain Layout - case None => None // Cluster is down, - no valid credentials. + case None => None // No credentials to validate. \end_layout \begin_layout Plain Layout @@ -4477,7 +4473,7 @@ status open \begin_layout Plain Layout -filter(f andThen p) = { +filter(f andThen p) == { \end_layout \begin_layout Plain Layout @@ -4517,7 +4513,7 @@ filter(f andThen p) = { \begin_layout Plain Layout -} andThen fmap(f) = { +} andThen fmap(f) == { \end_layout \begin_layout Plain Layout @@ -4579,9 +4575,9 @@ filter \end_inset - obey the naturality law: such functions manipulate their arguments purely - as symbols of unknown types, without referring to any specific types or - values. + obey the naturality law. 
+ Such functions manipulate their arguments purely as symbols of unknown + types, without referring to any specific types or values. Applying a lifted function \begin_inset Formula $f^{\uparrow F}$ \end_inset @@ -4845,11 +4841,7 @@ It takes a bit more work to show that the composition law holds. \end_layout \begin_layout Standard -In the first case, the value of -\begin_inset Formula $F^{A}$ -\end_inset - - is +In the first case, the value is \begin_inset listings inline true status open @@ -4861,7 +4853,11 @@ None \end_inset - and remains + (denoted by +\begin_inset Formula $1+\bbnum 0^{:A\times A}$ +\end_inset + +) and remains \begin_inset listings inline true status open @@ -5583,7 +5579,7 @@ status open \begin_layout Plain Layout \size small -\begin_inset Formula $\bbnum 1+\bbnum 0^{:A\times A}$ +\begin_inset Formula $1+\bbnum 0^{:A\times A}$ \end_inset @@ -5597,7 +5593,7 @@ status open \begin_layout Plain Layout \size small -\begin_inset Formula $\bbnum 1+\bbnum 0^{:A\times A}$ +\begin_inset Formula $1+\bbnum 0^{:A\times A}$ \end_inset @@ -5759,7 +5755,7 @@ false \begin_layout Plain Layout \size small -\begin_inset Formula $\bbnum 1+\bbnum 0^{:A\times A}$ +\begin_inset Formula $1+\bbnum 0^{:A\times A}$ \end_inset @@ -5773,7 +5769,7 @@ false \begin_layout Plain Layout \size small -\begin_inset Formula $\bbnum 1+\bbnum 0^{:A\times A}$ +\begin_inset Formula $1+\bbnum 0^{:A\times A}$ \end_inset @@ -5921,7 +5917,7 @@ false \begin_layout Plain Layout \size small -\begin_inset Formula $\bbnum 1+\bbnum 0^{:A\times A}$ +\begin_inset Formula $1+\bbnum 0^{:A\times A}$ \end_inset @@ -5935,7 +5931,7 @@ false \begin_layout Plain Layout \size small -\begin_inset Formula $\bbnum 1+\bbnum 0^{:A\times A}$ +\begin_inset Formula $1+\bbnum 0^{:A\times A}$ \end_inset @@ -5949,7 +5945,7 @@ false \begin_layout Plain Layout \size small -\begin_inset Formula $\bbnum 1+\bbnum 0^{:A\times A}$ +\begin_inset Formula $1+\bbnum 0^{:A\times A}$ \end_inset @@ -6075,7 +6071,8 @@ filter \end_inset function will need to be changed. - For instance, suppose the first server is now the only source of credentials. + For instance, suppose the first server becomes the only source of valid + credentials. The second server may copy the first server's credentials if needed, but the cluster will go down whenever the first server's credentials expire. This corresponds to the code: @@ -6129,7 +6126,7 @@ def filter[A](p: A => Boolean): F[A] => F[A] = { \end_inset -Alternatively, we may have a requirement that credentials +Alternatively, we may get a new requirement that credentials \emph on cannot \emph default @@ -6628,7 +6625,20 @@ final case class Server[A](requests: Seq[A]) \end_inset -The filtering operation truncates the sequence when the predicate +Suppose a predicate +\begin_inset listings +inline true +status open + +\begin_layout Plain Layout + +p: A => Boolean +\end_layout + +\end_inset + + checks the authentication. + The filtering operation truncates the sequence when the predicate \begin_inset listings inline true status open @@ -6719,11 +6729,11 @@ p . The filtering function also removes other values that may or may not fail - the predicate, but the filtering laws allow removing + the predicate, but the filtering laws do not forbid removing \emph on more \emph default - values. + values than strictly necessary. 
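The truncating filter just described can be written with takeWhile (a sketch; the function name filterServer and the sample predicate are illustrative): it keeps the longest prefix of requests that passes the predicate, so the first failing request and everything after it are removed.

final case class Server[A](requests: Seq[A])

// Truncating filter for the Server functor: keep the longest passing prefix.
def filterServer[A](p: A => Boolean)(s: Server[A]): Server[A] =
  Server(s.requests.takeWhile(p))

// The third request fails the predicate; it and all later requests are dropped.
val truncated = filterServer((n: Int) => n > 0)(Server(Seq(1, 2, -3, 4)))  // Server(List(1, 2))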
\end_layout \begin_layout Subsubsection @@ -6769,7 +6779,7 @@ Filterable (a) \series default The functor -\begin_inset Formula $F^{T}$ +\begin_inset Formula $F$ \end_inset defined by the Scala code: @@ -7337,7 +7347,7 @@ NEList \end_inset . - So, we can implement + So, we may implement \begin_inset listings inline true status open @@ -7349,7 +7359,7 @@ filter \end_inset - e.g., like this: + like this: \begin_inset listings inline false status open @@ -7805,7 +7815,7 @@ Filterable \end_inset , where -\begin_inset Formula $Q^{\bullet,\bullet}$ +\begin_inset Formula $Q$ \end_inset is defined by this Scala code: @@ -7834,7 +7844,7 @@ final case class Q[A, Z](id: Long, user1: Option[(A, Z)], user2: Option[(A, (b) \series default The functor -\begin_inset Formula $R^{A}$ +\begin_inset Formula $R$ \end_inset defined by the Scala code: @@ -7920,7 +7930,7 @@ noprefix "false" (e) \series default The functor -\begin_inset Formula $\text{Tree22}^{A}$ +\begin_inset Formula $\text{Tree22}$ \end_inset defined recursively as: @@ -7956,8 +7966,8 @@ noprefix "false" \end_layout \begin_layout Standard -Is the simplest perfect-shaped tree -\begin_inset Formula $R^{A}$ +Is the perfect-shaped tree +\begin_inset Formula $R$ \end_inset defined by @@ -7987,7 +7997,7 @@ perfect-shaped tree \end_inset tree -\begin_inset Formula $R^{A}$ +\begin_inset Formula $R$ \end_inset defined by: @@ -8324,7 +8334,7 @@ filter \end_inset - method defined for the + method of \begin_inset listings inline true status open @@ -8332,12 +8342,12 @@ status open \begin_layout Plain Layout \noindent -Option[_] +Option \end_layout \end_inset - types. +. \end_layout @@ -8346,11 +8356,12 @@ We notice that both functions in the composition \begin_inset Formula $\text{inflate}\bef(\text{filt}_{\text{Opt}}(p))^{\uparrow F}$ \end_inset - are some lifted functions in the functor + are lifted to the functor \begin_inset Formula $F$ \end_inset -, and so we can simplify that composition to a single lifted function: +. + So, we can simplify that composition to a single lifted function: \begin_inset Formula \begin{align*} & \gunderline{\text{inflate}}\bef(\text{filt}_{\text{Opt}}(p))^{\uparrow F}\\ @@ -8611,7 +8622,7 @@ def deflate[F[_]: Filterable : Functor, A]: F[Option[A]] => F[A] = \begin_inset Formula \begin{align} - & \text{deflate}:\xymatrix{\xyScaleX{4.5pc}F^{\bbnum 1+A}\ar[r]\sp(0.5){\text{filt}_{F}(\text{nonEmpty)}} & F^{\bbnum 1+A}\ar[r]\sp(0.5){\text{get}^{\uparrow F}} & F^{A}} + & \text{deflate}:\xymatrix{\xyScaleX{7.0pc}F^{\bbnum 1+A}\ar[r]\sp(0.5){\text{filt}_{F}(\text{nonEmpty)}} & F^{\bbnum 1+A}\ar[r]\sp(0.5){\text{get}^{\uparrow F}} & F^{A}} \nonumber \\ & \text{deflate}^{:F^{\bbnum 1+A}\rightarrow F^{A}}=\text{filt}_{F}(\text{nonEmpty})\bef\text{get}^{\uparrow F}\quad.\label{eq:def-deflate-via-filter} \end{align} @@ -9290,7 +9301,7 @@ deflate An immediate problem is that we need to map all disjunctive cases, including -\begin_inset Formula $\bbnum 1+0+0$ +\begin_inset Formula $1+\bbnum 0+\bbnum 0$ \end_inset , into a value of type @@ -9318,11 +9329,7 @@ deflate \begin_inset Formula $A$ \end_inset - from scratch, -\begin_inset Formula $\forall A.\,\bbnum 1\rightarrow A$ -\end_inset - -, which is impossible in a fully parametric function. + from a unit value, which is impossible in a fully parametric function. Since \begin_inset listings inline true @@ -9340,6 +9347,11 @@ deflate \end_inset is not filterable. 
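A concrete instance of the deflate operation defined above can be sketched with F = List, which is filterable with the standard filter: deflate first removes the empty options and then extracts the remaining values, so the partial function get is applied only where it is defined; inflate goes in the opposite direction by wrapping each value in a non-empty Option (the function names here mirror the text but are written for List only).

// deflate for F = List: filter by nonEmpty, then map the (partial) get.
def deflate[A](fa: List[Option[A]]): List[A] =
  fa.filter(_.nonEmpty).map(_.get)

// inflate wraps each value into a non-empty Option.
def inflate[A](fa: List[A]): List[Option[A]] =
  fa.map(Some(_))

deflate(List(Some(1), None, Some(2)))  // List(1, 2)
deflate(inflate(List(1, 2, 3)))        // List(1, 2, 3)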
+ +\begin_inset Formula $\square$ +\end_inset + + \end_layout \begin_layout Standard @@ -9671,7 +9683,19 @@ We need to show that \end_inset is the same as -\begin_inset Formula $\text{filter}^{\prime}(p)$ +\begin_inset listings +inline true +status open + +\begin_layout Plain Layout + +filter +\end_layout + +\end_inset + + +\begin_inset Formula $^{\prime}(p)$ \end_inset for any predicate @@ -9688,8 +9712,7 @@ We need to show that \end_inset -To proceed with the calculation, we need to simplify the two expressions - +To proceed with the calculation, we need to simplify the expressions \begin_inset Formula $\psi_{p}\bef\text{get}$ \end_inset @@ -9698,7 +9721,7 @@ To proceed with the calculation, we need to simplify the two expressions \end_inset . - Begin with writing the code for the standard methods + Begin by writing the code for the standard methods \begin_inset listings inline true status open @@ -9762,7 +9785,7 @@ status open \begin_layout Plain Layout -//Use Option[Unit] instead of Boolean, as Option[Unit] $ +// Use Option[Unit] instead of Boolean, as Option[Unit] $ \backslash color{dkgreen} \backslash @@ -9826,7 +9849,7 @@ noprefix "false" \begin_inset Formula $\psi$ \end_inset - is also fully parametric because we can implement using the type + is also fully parametric because we can implement it using the type \begin_inset listings inline true status open @@ -10232,8 +10255,8 @@ deflate \end_inset -The derivation is stuck here: we cannot simplify the last expression unless - we can somehow switch the order of function compositions so that +The derivation is stuck here: we cannot prove the last equality unless we + somehow switch the order of function compositions, so that \begin_inset Formula $\psi_{\text{nonEmpty}}^{\uparrow F}$ \end_inset @@ -10874,7 +10897,7 @@ noprefix "false" \end_layout \begin_layout Standard -The partial function law always holds for the +The partial function law always holds for a \begin_inset listings inline true status open @@ -11012,7 +11035,7 @@ It remains to show that \begin_inset Formula $\psi_{p}\bef f^{\uparrow\text{Opt}}$ \end_inset - to an + to an arbitrary value \begin_inset Formula $x^{:A}$ \end_inset @@ -11375,7 +11398,31 @@ The two sides will be equal if we prove that \begin_inset Formula $f^{\uparrow\text{Opt}}\bef\text{get}=\text{get}\bef f$ \end_inset -, which can be viewed as the two naturality laws specific to these functions. +, which can be viewed as the naturality laws specific to the functions +\begin_inset listings +inline true +status open + +\begin_layout Plain Layout + +nonEmpty +\end_layout + +\end_inset + + and +\begin_inset listings +inline true +status open + +\begin_layout Plain Layout + +get +\end_layout + +\end_inset + +. 
Use the definitions \begin_inset space ~ \end_inset @@ -11383,7 +11430,7 @@ The two sides will be equal if we prove that ( \begin_inset CommandInset ref LatexCommand ref -reference "eq:def-of-get-option" +reference "eq:def-of-nonempty-option" plural "false" caps "false" noprefix "false" @@ -11397,7 +11444,7 @@ noprefix "false" ( \begin_inset CommandInset ref LatexCommand ref -reference "eq:def-of-nonempty-option" +reference "eq:def-of-get-option" plural "false" caps "false" noprefix "false" @@ -11411,7 +11458,7 @@ status open \begin_layout Plain Layout -get +nonEmpty \end_layout \end_inset @@ -11423,7 +11470,7 @@ status open \begin_layout Plain Layout -nonEmpty +get \end_layout \end_inset @@ -11583,7 +11630,7 @@ deflate \end_inset - and the function + and if \begin_inset listings inline true status open @@ -11655,7 +11702,7 @@ Proof \end_layout \begin_layout Standard -Begin by writing the two sides of the naturality law +Begin by writing the two sides of the law \begin_inset space ~ \end_inset @@ -11765,7 +11812,7 @@ noprefix "false" \end_inset -) by applying both sides to an +) by applying both sides to an arbitrary value \begin_inset Formula $x^{:A}$ \end_inset @@ -12223,7 +12270,19 @@ noprefix "false" \end_inset -), assuming that a naturality law (Eq. +), assuming that +\begin_inset listings +inline true +status open + +\begin_layout Plain Layout + +liftOpt +\end_layout + +\end_inset + + obeys a naturality law (Eq. \begin_inset space ~ \end_inset @@ -12237,19 +12296,7 @@ noprefix "false" \end_inset -) below) holds for -\begin_inset listings -inline true -status open - -\begin_layout Plain Layout - -liftOpt -\end_layout - -\end_inset - -. +) below). \end_layout \begin_layout Subparagraph @@ -12408,7 +12455,21 @@ liftOpt \end_inset - function, compute a + function that obeys Eq. +\begin_inset space ~ +\end_inset + +( +\begin_inset CommandInset ref +LatexCommand ref +reference "eq:left-naturality-law-of-liftOpt" +plural "false" +caps "false" +noprefix "false" + +\end_inset + +), compute a \begin_inset listings inline true status open @@ -12493,21 +12554,7 @@ liftOpt \end_inset -, assuming Eq. -\begin_inset space ~ -\end_inset - -( -\begin_inset CommandInset ref -LatexCommand ref -reference "eq:left-naturality-law-of-liftOpt" -plural "false" -caps "false" -noprefix "false" - -\end_inset - -). +. \end_layout \begin_layout Standard @@ -12808,6 +12855,10 @@ noprefix "false" \end_inset +\begin_inset Formula $\square$ +\end_inset + + \end_layout \begin_layout Standard @@ -13208,7 +13259,7 @@ The function \begin_inset Formula $x^{:A}\rightarrow\bbnum 0+x$ \end_inset - (i.e., + (in Scala, this is \begin_inset listings inline true status open @@ -13220,7 +13271,7 @@ x => Some(x) \end_inset - in Scala): +): \begin_inset Formula \begin{align} \text{use Eq.~(\ref{eq:def-of-psi})}:\quad & x^{:A}\triangleright\psi_{(\_\rightarrow\text{true})}\nonumber \\ @@ -13482,7 +13533,7 @@ Proof \series bold (a) \series default - Compute the identity law of + Verify the identity law of \begin_inset listings inline true status open @@ -13515,7 +13566,7 @@ liftOpt \series bold (b) \series default - Compute the identity law of + Verify the identity law of \begin_inset listings inline true status open @@ -13551,7 +13602,11 @@ noprefix "false" \end_inset -This completes the proof. 
+ +\begin_inset Formula $\square$ +\end_inset + + \end_layout \begin_layout Standard @@ -13590,7 +13645,7 @@ status open \begin_layout Plain Layout -Option[_] +Option \end_layout \end_inset @@ -14063,32 +14118,24 @@ status open \begin_layout Plain Layout -Option +flatten \end_layout \end_inset -'s method +: \begin_inset listings -inline true +inline false status open \begin_layout Plain Layout -flatten +psi(p) == x => Some(x).filter(p1).map { y => Some(y).filter(p2) }.flatten \end_layout -\end_inset - -: -\begin_inset listings -inline false -status open - \begin_layout Plain Layout -psi(p) == x => Some(x).filter(p1).map { y => Some(y).filter(p2) }.flatten // - Use flatMap instead. + // Use flatMap instead. \end_layout \begin_layout Plain Layout @@ -14098,8 +14145,8 @@ psi(p) == x => Some(x).filter(p1).map { y => Some(y).filter(p2) }.flatten // \begin_layout Plain Layout - == psi(p1) andThen (_.flatMap(psi(p2))) // Using standard flatten - and flatMap for Option. + == psi(p1) andThen (_.flatMap(psi(p2))) // Use the standard methods + (flatten and flatMap) for Option. \end_layout \end_inset @@ -14116,7 +14163,7 @@ Denote this combination of the functions \begin_inset Formula $\diamond_{_{\text{Opt}}}$ \end_inset -, so that we may write: +: \begin_inset Formula \[ \psi_{p}=\psi_{p_{1}}\diamond_{_{\text{Opt}}}\psi_{p_{2}}\triangleq x^{:A}\rightarrow x\triangleright\psi_{p_{1}}\triangleright\text{flm}_{\text{Opt}}(\psi_{p_{2}})=\psi_{p_{1}}\bef(y\rightarrow y\triangleright\text{flm}_{\text{Opt}}(\psi_{p_{2}}))\quad. @@ -14341,7 +14388,7 @@ twisted \end_inset . - (The + The \begin_inset Quotes eld \end_inset @@ -14349,11 +14396,11 @@ twisted \begin_inset Quotes erd \end_inset - functions cannot be composed as + functions cannot be composed via the ordinary composition operation ( \begin_inset Formula $f\bef g$ \end_inset - because their types do not match.) +) because the types would not match. \end_layout \begin_layout Standard @@ -14888,19 +14935,15 @@ f^{:A\rightarrow\bbnum 1+B}\diamond_{_{\text{Opt}}}g^{:B\rightarrow\bbnum 1+C}\t \end_inset -Then we compute +We use this definition to compute \begin_inset Formula $f\diamond_{_{\text{Opt}}}g$ \end_inset - using this definition of -\begin_inset Formula $\diamond_{_{\text{Opt}}}$ -\end_inset - - as: +: \begin_inset Formula \begin{align*} \text{definition of }f:\quad & \gunderline f\diamond_{_{\text{Opt}}}g=(h\bef\text{pu}_{\text{Opt}})\,\gunderline{\diamond_{_{\text{Opt}}}}\,g\\ - & =h\bef\gunderline{\text{pu}_{\text{Opt}}\bef\text{flm}_{\text{Opt}}}(g)\\ +\text{use Eq.~(\ref{eq:def-of-Kleisli-product})}:\quad & =h\bef\gunderline{\text{pu}_{\text{Opt}}\bef\text{flm}_{\text{Opt}}}(g)\\ \text{compute composition (see below)}:\quad & =h\bef g\quad. \end{align*} @@ -14942,7 +14985,7 @@ Option \end_inset - is not obvious, but it turns out that + is perhaps not obvious, but it turns out that \begin_inset Formula $\text{pu}_{\text{Opt}}$ \end_inset @@ -14992,7 +15035,11 @@ pure(x).flatMap(g) == Some(x).flatMap(g) == g(x) \end_inset -The same symbolic computation is written in the code notation like this: + +\end_layout + +\begin_layout Standard +Now we write the same symbolic computation in the code notation: \begin_inset Formula \begin{align} \text{pu}_{\text{Opt}}=\,\begin{array}{|c||cc|} @@ -15762,7 +15809,7 @@ name "subsec:Constructions-of-filterable-functors" \begin_layout Standard How can we recognize a filterable functor -\begin_inset Formula $F^{A}$ +\begin_inset Formula $F$ \end_inset by its type expression, without having to prove laws? 
One intuition is @@ -15801,7 +15848,7 @@ status open \begin_layout Plain Layout -Option[_] +Option \end_layout \end_inset @@ -15813,7 +15860,7 @@ status open \begin_layout Plain Layout -Either[L, _] +Either \end_layout \end_inset @@ -15825,7 +15872,7 @@ status open \begin_layout Plain Layout -Try[_] +Try \end_layout \end_inset @@ -15837,7 +15884,7 @@ status open \begin_layout Plain Layout -Seq[_] +Seq \end_layout \end_inset @@ -15849,7 +15896,7 @@ status open \begin_layout Plain Layout -Map[K, _] +Map \end_layout \end_inset @@ -15918,7 +15965,11 @@ There are three constructions that work solely by manipulating type parameters: \begin_inset Formula $F^{A}\triangleq G^{H^{A}}$ \end_inset -. + (or +\begin_inset Formula $F\triangleq G\circ H$ +\end_inset + +). \end_layout \begin_layout Standard @@ -15990,6 +16041,7 @@ wrapper \end_inset . + We write: \begin_inset Formula \begin{align*} \text{verify law~(\ref{eq:combined-naturality-identity-law-of-liftOpt})}:\quad & \text{liftOpt}_{\text{Const}}(f\bef\text{pu}_{\text{Opt}})=\text{id}=f^{\uparrow\text{Const}}\quad,\\ @@ -16007,7 +16059,11 @@ The functor composition \begin_inset Formula $F^{A}\triangleq G^{H^{A}}$ \end_inset - requires only + requires +\emph on +only +\emph default + \begin_inset Formula $H$ \end_inset @@ -16041,11 +16097,11 @@ The functor \end_inset is filterable when -\begin_inset Formula $H^{A}$ +\begin_inset Formula $H$ \end_inset is filterable and -\begin_inset Formula $G^{A}$ +\begin_inset Formula $G$ \end_inset is @@ -16151,7 +16207,7 @@ Products \begin_layout Standard To show that the product of two filterable functors is filterable, we will use a definition of -\begin_inset Formula $\text{liftOpt}_{G^{\bullet}\times H^{\bullet}}$ +\begin_inset Formula $\text{liftOpt}_{G\times H}$ \end_inset and a proof quite similar to what we did for the product of functors (Statement @@ -16208,11 +16264,11 @@ The functor \end_inset is filterable if -\begin_inset Formula $G^{\bullet}$ +\begin_inset Formula $G$ \end_inset and -\begin_inset Formula $H^{\bullet}$ +\begin_inset Formula $H$ \end_inset are filterable functors. @@ -16303,7 +16359,7 @@ noprefix "false" \end_inset -In this calculation, we used the composition property: +In this calculation, we used the distributive property: \begin_inset Formula \begin{equation} (f\boxtimes g)\bef(p\boxtimes q)=(f\bef p)\boxtimes(g\bef q)\quad,\label{eq:function-product-distributive-property-over-composition} @@ -16315,7 +16371,7 @@ which follows from the definition of the pair product operation \begin_inset Formula $\boxtimes$ \end_inset -, +: \begin_inset Formula \begin{align*} & (f\boxtimes g)\bef(p\boxtimes q)\\ @@ -16342,11 +16398,11 @@ There are two constructions that produce new filterable functors involving \end_inset , where -\begin_inset Formula $G^{\bullet}$ +\begin_inset Formula $G$ \end_inset and -\begin_inset Formula $H^{\bullet}$ +\begin_inset Formula $H$ \end_inset are filterable functors. @@ -16369,8 +16425,8 @@ noprefix "false" \begin_inset Formula $F^{A}\triangleq\bbnum 1+A\times G^{A}$ \end_inset - where -\begin_inset Formula $G^{\bullet}$ +, where +\begin_inset Formula $G$ \end_inset is a filterable functor. @@ -16378,7 +16434,7 @@ noprefix "false" \begin_inset Formula $A\times G^{A}$ \end_inset - is not filterable. + is never filterable. 
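The composition and product constructions above can be seen concretely in a small sketch with G = List and H = Option (the function names are illustrative): in a composition F[A] = G[H[A]] only the inner functor H needs to be filterable, while a product F[A] = G[A] × H[A] is filtered componentwise.

// Composition: filtering List[Option[A]] uses only Option's filter, lifted through List's map.
def filterComposed[A](p: A => Boolean)(fa: List[Option[A]]): List[Option[A]] =
  fa.map(_.filter(p))

// Product: filter each component with its own filtering operation.
def filterProduct[A](p: A => Boolean)(fa: (List[A], Option[A])): (List[A], Option[A]) =
  (fa._1.filter(p), fa._2.filter(p))

filterComposed((n: Int) => n > 0)(List(Some(1), Some(-2), None))  // List(Some(1), None, None)
filterProduct((n: Int) => n > 0)((List(1, -2, 3), Some(-4)))      // (List(1, 3), None)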
\end_layout \begin_layout Subsubsection @@ -16476,7 +16532,7 @@ H^{A} & \bbnum 0 & \text{liftOpt}_{H}(f) \end_inset Lifting to the functor -\begin_inset Formula $F^{A}$ +\begin_inset Formula $F$ \end_inset is defined as in Statement @@ -16608,15 +16664,15 @@ noprefix "false" \end_layout \begin_layout Standard -The functor -\begin_inset Formula $F^{A}\triangleq\bbnum 1+A\times G^{A}$ +If +\begin_inset Formula $G$ \end_inset - is filterable if -\begin_inset Formula $G$ + is a filterable functor then +\begin_inset Formula $F^{A}\triangleq\bbnum 1+A\times G^{A}$ \end_inset - is a filterable functor. + is filterable . \end_layout \begin_layout Subparagraph @@ -17099,7 +17155,7 @@ liftOpt 's composition law: \begin_inset Formula \begin{equation} -\text{liftOpt}_{F}(f)\bef\text{liftOpt}_{F}(f^{\prime})=\text{liftOpt}_{F}(f\diamond_{_{\text{Opt}}}f^{\prime})\quad.\label{eq:liftOpt-composition-law-derivation1} +\text{liftOpt}_{F}(f)\bef\text{liftOpt}_{F}(g)=\text{liftOpt}_{F}(f\diamond_{_{\text{Opt}}}g)\quad.\label{eq:liftOpt-composition-law-derivation1} \end{equation} \end_inset @@ -17278,7 +17334,7 @@ liftOpt 's composition law: \begin_inset Formula \begin{equation} -\text{flm}_{\text{Opt}}(f)\bef\text{flm}_{\text{Opt}}(f^{\prime})=\text{flm}_{\text{Opt}}(f\diamond_{_{\text{Opt}}}f^{\prime})=\text{flm}_{\text{Opt}}\big(f\bef\text{flm}_{\text{Opt}}(f^{\prime})\big)\quad.\label{eq:associativity-law-of-flatMap-for-Option} +\text{flm}_{\text{Opt}}(f)\bef\text{flm}_{\text{Opt}}(g)=\text{flm}_{\text{Opt}}(f\diamond_{_{\text{Opt}}}g)=\text{flm}_{\text{Opt}}\big(f\bef\text{flm}_{\text{Opt}}(g)\big)\quad.\label{eq:associativity-law-of-flatMap-for-Option} \end{equation} \end_inset @@ -17531,7 +17587,7 @@ flatMap \end_inset -The typed holes must be filled using the only available data (the functions +The typed holes must be filled using the only available data — the functions \begin_inset Formula $p$ \end_inset @@ -17540,7 +17596,7 @@ The typed holes must be filled using the only available data (the functions \begin_inset Formula $q$ \end_inset -): +: \begin_inset Formula \begin{align} & (p^{:A\rightarrow B})^{\uparrow\text{Opt}}\bef\text{flm}_{\text{Opt}}(q^{:B\rightarrow\text{Opt}^{C}})=\text{flm}_{\text{Opt}}(p\bef q)\quad,\label{eq:left-naturality-flatmap-option}\\ @@ -17549,7 +17605,7 @@ The typed holes must be filled using the only available data (the functions \end_inset -We omit the proofs for these +The last two equations are the \series bold naturality laws \series default @@ -17581,8 +17637,34 @@ flatMap \end_inset -. - With them, we transform Eqs. + for +\begin_inset listings +inline true +status open + +\begin_layout Plain Layout + +Option +\end_layout + +\end_inset + + (see Exercise +\begin_inset space ~ +\end_inset + + +\begin_inset CommandInset ref +LatexCommand ref +reference "subsec:Exercise-filterable-laws-2-1" +plural "false" +caps "false" +noprefix "false" + +\end_inset + +). + Using those laws, we transform Eqs. \begin_inset space ~ \end_inset @@ -17626,8 +17708,8 @@ and: \end_inset -The difference between sub-expressions has become smaller; it just remains - to show the following: +The difference between sub-expressions has become smaller. + It remains to show the following: \begin_inset Formula \[ r_{f^{\prime},\text{liftOpt}_{G}(f)(g)}\overset{?}{=}r_{f\bef\text{flm}_{\text{Opt}}(f^{\prime}),g}\quad. @@ -17717,7 +17799,7 @@ noprefix "false" needs to pass the filter for any data to remain in the functor after filtering. 
We can use the same construction repeatedly with -\begin_inset Formula $G^{\bullet}\triangleq\bbnum 1$ +\begin_inset Formula $G^{A}\triangleq\bbnum 1$ \end_inset and obtain the type: @@ -17734,14 +17816,14 @@ which is equivalent to a list of up to elements. The construction defines a filtering operation for -\begin_inset Formula $L_{n}^{\bullet}$ +\begin_inset Formula $L_{n}$ \end_inset that will delete any data beyond the first value of type \begin_inset Formula $A$ \end_inset - that does fails the predicate. + that fails the predicate. It is clear that this filtering operation implements the standard \begin_inset listings inline true @@ -17901,7 +17983,7 @@ noprefix "false" \end_inset , require -\begin_inset Formula $G^{\bullet}$ +\begin_inset Formula $G$ \end_inset to be a @@ -17914,7 +17996,7 @@ contrafunctor \end_inset is filterable only if the contrafunctor -\begin_inset Formula $G^{\bullet}$ +\begin_inset Formula $G$ \end_inset has certain properties (Eqs. @@ -18003,11 +18085,16 @@ Assume that \begin_inset Formula $H^{A}$ \end_inset -; that is only possible if we apply +. + That is only possible if we apply \begin_inset Formula $p^{:G^{A}\rightarrow H^{A}}$ \end_inset -, + to something of type +\begin_inset Formula $G^{A}$ +\end_inset + +: \begin_inset Formula \[ \text{liftOpt}_{F}(f)=p^{:G^{A}\rightarrow H^{A}}\rightarrow g^{:G^{B}}\rightarrow\text{liftOpt}_{H}(f)(p(\text{???}^{:G^{A}}))\quad. @@ -18033,7 +18120,11 @@ The only way to proceed is to have a function \end_inset . - So, we need to require having a function: + We need to have a function +\begin_inset Formula $\text{liftOpt}_{G}$ +\end_inset + + with this type signature: \begin_inset Formula \begin{equation} \text{liftOpt}_{G}(f^{:A\rightarrow\bbnum 1+B}):G^{B}\rightarrow G^{A}\quad.\label{eq:type-signature-liftOpt-contrafunctors} @@ -18062,7 +18153,11 @@ liftOpt \end_inset ). - We can now complete the implementation of + Assuming that +\begin_inset Formula $\text{liftOpt}_{G}$ +\end_inset + + is available, we can now complete the implementation of \begin_inset Formula $\text{liftOpt}_{F}$ \end_inset @@ -18070,10 +18165,10 @@ liftOpt \begin_inset Formula \begin{align} & \text{liftOpt}_{F}(f^{:A\rightarrow\bbnum 1+B})\triangleq p^{:G^{A}\rightarrow H^{A}}\rightarrow g^{:G^{B}}\rightarrow\gunderline{\text{liftOpt}_{H}(f)\big(p(\text{\text{liftOpt}}_{G}(f)(g))\big)}\nonumber \\ - & \quad\triangleright\text{-notation}:\quad\nonumber \\ - & =p^{:G^{A}\rightarrow H^{A}}\rightarrow\gunderline{g^{:G^{B}}\rightarrow g\,\triangleright}\,\text{\text{liftOpt}}_{G}(f)\triangleright p\triangleright\text{liftOpt}_{H}(f)\\ - & \quad\text{omit }(g\rightarrow g\,\triangleright):\quad\label{eq:def-of-liftopt-function-type}\\ - & =p\rightarrow\text{\text{liftOpt}}_{G}(f)\bef p\bef\text{liftOpt}_{H}(f)\quad. 
+ & \quad\text{pipe notation}:\quad\nonumber \\ + & =p^{:G^{A}\rightarrow H^{A}}\rightarrow\gunderline{g^{:G^{B}}\rightarrow g\,\triangleright}\,\text{\text{liftOpt}}_{G}(f)\triangleright p\triangleright\text{liftOpt}_{H}(f)\nonumber \\ + & \quad\text{omit }(g\rightarrow g\,\triangleright):\quad\nonumber \\ + & =p\rightarrow\text{\text{liftOpt}}_{G}(f)\bef p\bef\text{liftOpt}_{H}(f)\quad.\label{eq:def-of-liftopt-function-type} \end{align} \end_inset @@ -18128,20 +18223,19 @@ map \end_layout \begin_layout Standard -The laws for filterable contrafunctors are chosen such that +The laws for filterable contrafunctors ensure that \begin_inset Formula $F^{A}\triangleq G^{A}\rightarrow H^{A}$ \end_inset - can be shown to obey filtering laws when -\begin_inset Formula $H^{\bullet}$ + obeys filtering laws when +\begin_inset Formula $H$ \end_inset is a filterable functor and -\begin_inset Formula $G^{\bullet}$ +\begin_inset Formula $G$ \end_inset - is a filterable contrafunctor. - + is a filterable contrafunctor: \end_layout \begin_layout Subsubsection @@ -18167,11 +18261,11 @@ noprefix "false" \begin_layout Standard Assume that -\begin_inset Formula $H^{\bullet}$ +\begin_inset Formula $H$ \end_inset is a lawful filterable functor and -\begin_inset Formula $G^{\bullet}$ +\begin_inset Formula $G$ \end_inset is a contrafunctor with a function @@ -18229,7 +18323,7 @@ Proof \end_layout \begin_layout Standard -We will arrive at the required laws for +We will find the required laws for \begin_inset Formula $G$ \end_inset @@ -18242,11 +18336,11 @@ We will arrive at the required laws for \begin_layout Standard Because -\begin_inset Formula $F^{\bullet}$ +\begin_inset Formula $F$ \end_inset - has a function type, it is convenient to apply both sides of the laws to - an arbitrary value + contains a function type, it is convenient to apply both sides of the laws + to an arbitrary value \begin_inset Formula $p^{:G^{A}\rightarrow H^{A}}$ \end_inset @@ -18258,7 +18352,7 @@ Because : \begin_inset Formula \begin{align*} -\text{expect to equal }p\triangleright f^{\uparrow F}=f^{\downarrow G}\bef p\bef f^{\uparrow H}:\quad & p\triangleright\text{liftOpt}_{F}(f\bef\text{pu}_{\text{Opt}})\\ +\text{expect to equal }f^{\downarrow G}\bef p\bef f^{\uparrow H}:\quad & p\triangleright\text{liftOpt}_{F}(f\bef\text{pu}_{\text{Opt}})\\ \text{definition~(\ref{eq:def-of-liftopt-function-type}) of }\text{liftOpt}_{F}:\quad & =\text{\text{liftOpt}}_{G}(f\bef\text{pu}_{\text{Opt}})\bef p\bef\gunderline{\text{liftOpt}_{H}(f\bef\text{pu}_{\text{Opt}})}\\ \text{naturality-identity law of }\text{liftOpt}_{H}:\quad & =\text{\text{liftOpt}}_{G}(f\bef\text{pu}_{\text{Opt}})\bef p\bef\gunderline{f^{\uparrow H}}\quad. 
\end{align*} @@ -18310,8 +18404,8 @@ The composition law of & \quad\text{left-hand side of Eq.~(\ref{eq:composition-law-of-liftOpt}) for }F:\quad\\ & p\triangleright\text{liftOpt}_{F}(f)\bef\text{liftOpt}_{F}(g)=\gunderline{p\triangleright\text{liftOpt}_{F}(f)}\triangleright\text{liftOpt}_{F}(g)\\ & \quad\text{definition~(\ref{eq:def-of-liftopt-function-type}) of }\text{liftOpt}_{F}:\quad\\ - & =\big(\text{\text{liftOpt}}_{G}(f)\bef p\bef\text{liftOpt}_{H}(f)\big)\,\gunderline{\triangleright\text{liftOpt}_{F}(g)}\\ - & \quad\text{definition~(\ref{eq:def-of-liftopt-function-type})}:\quad\\ + & =\big(\text{\text{liftOpt}}_{G}(f)\bef p\bef\text{liftOpt}_{H}(f)\big)\,\gunderline{\triangleright\,\text{liftOpt}_{F}(g)}\\ + & \quad\text{again definition~(\ref{eq:def-of-liftopt-function-type})}:\quad\\ & =\text{\text{liftOpt}}_{G}(g)\bef\big(\text{\text{liftOpt}}_{G}(f)\bef p\bef\gunderline{\text{liftOpt}_{H}(f)\big)\bef\text{liftOpt}_{H}(g)}\\ & \quad\text{composition law~(\ref{eq:composition-law-of-liftOpt}) of }\text{liftOpt}_{H}:\quad\\ & =\text{\text{liftOpt}}_{G}(g)\bef\text{\text{liftOpt}}_{G}(f)\bef p\bef\text{liftOpt}_{H}(f\diamond_{_{\text{Opt}}}g)\quad. @@ -18437,7 +18531,7 @@ noprefix "false" \end_inset ) that requires a bifunctor -\begin_inset Formula $S^{\bullet,\bullet}$ +\begin_inset Formula $S$ \end_inset . @@ -18466,11 +18560,11 @@ noprefix "false" \begin_layout Standard If -\begin_inset Formula $G^{\bullet}$ +\begin_inset Formula $G$ \end_inset is a filterable functor, the recursive functor -\begin_inset Formula $F^{\bullet}$ +\begin_inset Formula $F$ \end_inset defined by: @@ -18482,7 +18576,7 @@ F^{A}\triangleq G^{A}+A\times F^{A} \end_inset is filterable. - With + When \begin_inset Formula $G^{A}\triangleq\bbnum 1$ \end_inset @@ -18521,8 +18615,7 @@ status open \begin_layout Plain Layout -sealed trait F[A] // Assume that the functor - G was defined previously. +sealed trait F[A] // Assume that the functor G was defined previously. \end_layout \begin_layout Plain Layout @@ -18537,8 +18630,7 @@ final case class FAF[A](a: A, rf: F[A]) extends F[A] \begin_layout Plain Layout - // Assume that liftOpt_G is available - and define liftOpt_F: + // Assume that liftOpt_G is available and define liftOpt_F: \end_layout \begin_layout Plain Layout @@ -18553,13 +18645,12 @@ def liftOpt_F[A, B](f: A => Option[B]): F[A] => F[B] = { \begin_layout Plain Layout - case FAF(a, rf) => f(a) match { // Does `a` pass - the filtering predicate? + case FAF(a, rf) => f(a) match { // Does `a` pass the filtering predicate? \end_layout \begin_layout Plain Layout - case None => liftOpt_F(f)(rf) // No. + case None => liftOpt_F(f)(rf) // No. Drop `a` and filter `rf` recursively. \end_layout @@ -18676,7 +18767,7 @@ The disjunctive type \end_inset , so both laws are satisfied since -\begin_inset Formula $G^{\bullet}$ +\begin_inset Formula $G$ \end_inset is a lawful filterable functor. 
@@ -18723,11 +18814,7 @@ noprefix "false" \end_inset -) of -\begin_inset Formula $\text{liftOpt}_{F}$ -\end_inset - -, begin with the left-hand side: +), begin with the left-hand side: \begin_inset Formula \begin{align*} & (\bbnum 0+a\times r)\triangleright\text{liftOpt}_{F}(\gunderline{f\bef\text{pu}_{\text{Opt}}})\\ @@ -18801,7 +18888,7 @@ noprefix "false" \text{use Eq.~(\ref{eq:expression-liftOpt-derivation2})}:\quad & =f(a)\triangleright\,\begin{array}{||c|} 1\rightarrow r\triangleright\overline{\text{liftOpt}_{F}}(f)\\ b^{:B}\rightarrow\bbnum 0+\left(b\times r\right)\triangleright\overline{\text{liftOpt}_{F}}(f) -\end{array}\,\,\gunderline{\triangleright\,\text{liftOpt}_{F}(g)} +\end{array}\,\,\,\gunderline{\triangleright\,\text{liftOpt}_{F}(g)} \end{align*} \end_inset @@ -18822,7 +18909,7 @@ b^{:B}\rightarrow g(b)\triangleright\,\begin{array}{||c|} c^{:C}\rightarrow\bbnum 0+\left(c\times r\right)\triangleright\overline{\text{liftOpt}_{F}}(f)\bef\overline{\text{liftOpt}_{F}}(g) \end{array} \end{array}\nonumber \\ - & \quad\text{inductive assumption}:\quad\\ + & \quad\text{inductive assumption}:\quad\nonumber \\ & =a\triangleright f\bef\,\,\begin{array}{||c|} 1\rightarrow r\triangleright\overline{\text{liftOpt}_{F}}(f\diamond_{_{\text{Opt}}}g)\\ b^{:B}\rightarrow g(b)\triangleright\,\begin{array}{||c|} @@ -18834,7 +18921,7 @@ c^{:C}\rightarrow\bbnum 0+\left(c\times r\right)\triangleright\overline{\text{li \end_inset -We are justified to use the inductive assumption for +We are justified in using the inductive assumption for \begin_inset Formula $\overline{\text{liftOpt}_{F}}(f)\bef\text{liftOpt}_{F}(g)$ \end_inset @@ -18842,9 +18929,21 @@ We are justified to use the inductive assumption for \begin_inset Formula $\text{liftOpt}_{F}(g)$ \end_inset -, is not a recursive call. - It is sufficient that at least one term in the function composition is - a recursive call to +, is not marked as a recursive call. + This is because the symbol +\begin_inset Formula $\overline{\text{liftOpt}_{F}}$ +\end_inset + + denotes the +\emph on +same +\emph default + function as +\begin_inset Formula $\text{liftOpt}_{F}$ +\end_inset + +. + By the inductive assumption, the laws already hold for \begin_inset Formula $\overline{\text{liftOpt}_{F}}$ \end_inset @@ -18944,7 +19043,7 @@ noprefix "false" \begin_inset Formula $q$ \end_inset -. + having suitable types. Start from the right-hand side: \begin_inset Formula \begin{align*} @@ -19049,7 +19148,7 @@ filter \begin_layout Standard The next construction is for a functor defined via a filterable recursion scheme. - The filtering logic is then different from that used in Statement + Then the filtering logic is different from that used in Statement \begin_inset space ~ \end_inset @@ -19097,18 +19196,14 @@ If \end_inset , the recursive functor -\begin_inset Formula $F^{A}$ -\end_inset - - defined by the recursion scheme -\begin_inset Formula $S^{A,R}$ +\begin_inset Formula $F$ \end_inset - (type equation + defined by the type equation \begin_inset Formula $F^{A}\triangleq S^{A,F^{A}}$ \end_inset -) is filterable. + is filterable. \end_layout \begin_layout Subparagraph @@ -19312,7 +19407,7 @@ wrappers \begin_inset Quotes erd \end_inset - of data, while contrafunctors + that store data, while contrafunctors \begin_inset Quotes eld \end_inset @@ -19321,8 +19416,8 @@ consume \end_inset data. - Filterable functors permit us to exclude certain data from a wrapper; filterabl -e contrafunctors permit us to exclude certain data from being consumed. 
+ Filterable functors permit us to exclude certain data from storage; filterable + contrafunctors permit us to exclude certain data from being consumed. Let us now make this intuition precise. \end_layout @@ -19348,7 +19443,7 @@ extractor \begin_inset Formula $Z$ \end_inset - from data of an arbitrary type + from data of various types \begin_inset Formula $A$ \end_inset @@ -19404,13 +19499,12 @@ def filter[A](p: A => Boolean): C[A] => C[A] \begin_layout Plain Layout -val extractor: C[Payload] = ??? // Original code for extracting - metadata from payloads. +val extractor: C[Payload] = ??? // Code that extracts metadata from payloads. \end_layout \begin_layout Plain Layout -val noPrivateData: Payload => Boolean = ??? // Returns true only if payload +val noPrivateData: Payload => Boolean = ??? // Returns true only if payload has no private data. \end_layout @@ -19598,15 +19692,15 @@ val c: A => Option[Z] = ??? \end_inset -we need somehow to impose a filter predicate -\begin_inset Formula $p^{:A\rightarrow\bbnum 2}$ +we need somehow to ensure that the function +\begin_inset Formula $c$ \end_inset - ensuring that the function -\begin_inset Formula $c$ + is applied only to values that pass a given filter predicate +\begin_inset Formula $p^{:A\rightarrow\bbnum 2}$ \end_inset - is applied only to values that pass the predicate. +. The result will be a new function \begin_inset Formula $d^{:A\rightarrow\bbnum 1+Z}$ \end_inset @@ -19732,7 +19826,7 @@ d \end_inset is a filtering operation for the contrafunctor -\begin_inset Formula $C^{A}$ +\begin_inset Formula $C$ \end_inset , implemented as: @@ -19753,8 +19847,7 @@ def filter[A](p: A => Boolean)(c: A => Option[Z]): A => Option[Z] = { a \begin_layout Plain Layout -} // Equivalent code is { a => Some(a).filter(p).flatMap(c) - } +} // Equivalent code is { a => Some(a).filter(p).flatMap(c) } \end_layout \end_inset @@ -19912,19 +20005,18 @@ def filter[A](p: A => Boolean)(c: Option[A] => Z): Option[A] => Z = { \begin_layout Plain Layout case Some(a) if p(a) => c(Some(a)) // Only apply `c` to `a` - if `p(a) == true`. + if p(a) == true. \end_layout \begin_layout Plain Layout - case _ => c(None) // Return c(None) otherwise, or - for empty Option. + case _ => c(None) // Return c(None) if p(a) == false, + or for empty Option. \end_layout \begin_layout Plain Layout -} // Equivalent code is _.filter(p).pipe(c) - (Scala 2.13). +} // Equivalent code is: filter(p)(c) = _.filter(p).pipe(c) \end_layout \end_inset @@ -19962,11 +20054,11 @@ noprefix "false" \end_inset ): In order to assure the properties of a filterable functor for -\begin_inset Formula $F^{\bullet}$ +\begin_inset Formula $F$ \end_inset , the contrafunctor -\begin_inset Formula $G^{\bullet}$ +\begin_inset Formula $G$ \end_inset must have the @@ -20155,8 +20247,7 @@ These functions have different but equivalent laws: \begin_inset Formula $\text{inflate}_{C}$ \end_inset - is the easiest to implement in code (and to check whether a given contrafunctor - is filterable). + is the easiest to implement in code. The laws \begin_inset Index idx status open @@ -20279,7 +20370,7 @@ liftOpt \begin_inset Formula $C^{A}\triangleq A\rightarrow\bbnum 1+Z$ \end_inset -, and verify Eq. + and verify Eq. 
\begin_inset space ~ \end_inset @@ -20325,7 +20416,7 @@ liftOpt \end_inset -, preserving information: +, trying to preserve information as much as possible: \begin_inset listings inline false status open @@ -20338,7 +20429,12 @@ def inflate[A](c: A => Option[Z]): Option[A] => Option[Z] = _.flatMap(c) \begin_layout Plain Layout def liftOpt[A, B](f: A => Option[B])(c: B => Option[Z]): A => Option[Z] - = { a => f(a).flatMap(c) } + = +\end_layout + +\begin_layout Plain Layout + + { a => f(a).flatMap(c) } \end_layout \end_inset @@ -20545,10 +20641,10 @@ noprefix "false" \end_inset ) for an arbitrary filterable contrafunctor -\begin_inset Formula $C^{\bullet}$ +\begin_inset Formula $C$ \end_inset -, assuming needed laws. +, assuming naturality laws as needed. \end_layout \begin_layout Subparagraph @@ -20602,7 +20698,7 @@ noprefix "false" \end_inset -The computation gets stuck here: We could simplify the composition +The computation gets stuck here: We would simplify the composition \begin_inset Formula $\psi_{p}\bef\text{get}$ \end_inset @@ -20621,7 +20717,7 @@ noprefix "false" \end_inset ), if only we could move these functions next to each other. - It is clear that we need a law that exchanges the order of compositions + It is clear that we need a law that exchanges the order of composition of \begin_inset Formula $\text{filt}_{C}$ \end_inset @@ -20642,7 +20738,7 @@ noprefix "false" \end_inset -) and making sure types match, we write a +) and making sure types match, we obtain a \series bold naturality law \series default @@ -20904,7 +21000,21 @@ noprefix "false" \end_layout \begin_layout Standard -Verify Eq. +Proceeding similarly to Example +\begin_inset space ~ +\end_inset + + +\begin_inset CommandInset ref +LatexCommand ref +reference "subsec:filt-solved-example-5-1" +plural "false" +caps "false" +noprefix "false" + +\end_inset + +, verify Eq. \begin_inset space ~ \end_inset @@ -20919,10 +21029,10 @@ noprefix "false" \end_inset ) for an arbitrary filterable contrafunctor -\begin_inset Formula $C^{\bullet}$ +\begin_inset Formula $C$ \end_inset -, assuming needed laws. +, assuming naturality laws as needed. \end_layout \begin_layout Subsection @@ -21029,7 +21139,11 @@ trivially \begin_layout Standard Further constructions that work with type parameters are functor compositions. The composition -\begin_inset Formula $P\circ Q\triangleq P^{Q^{\bullet}}$ +\begin_inset Formula $P\circ Q$ +\end_inset + + defined as +\begin_inset Formula $(P\circ Q)^{A}\triangleq P^{Q^{A}}$ \end_inset is a contrafunctor when @@ -21041,12 +21155,12 @@ Further constructions that work with type parameters are functor compositions. \end_inset is a contrafunctor, or vice versa. - The contrafunctor -\begin_inset Formula $P^{Q^{\bullet}}$ + It turns out that the contrafunctor +\begin_inset Formula $P\circ Q$ \end_inset is filterable if -\begin_inset Formula $Q^{\bullet}$ +\begin_inset Formula $Q$ \end_inset (whether it is a functor or a contrafunctor) is filterable: @@ -21073,24 +21187,28 @@ noprefix "false" \end_layout +\begin_layout Standard +The type constructor +\begin_inset Formula $P\circ Q$ +\end_inset + + is filterable: +\end_layout + \begin_layout Standard \series bold (a) \series default If -\begin_inset Formula $P^{\bullet}$ +\begin_inset Formula $P$ \end_inset is any contrafunctor and -\begin_inset Formula $Q^{\bullet}$ -\end_inset - - is a filterable functor then -\begin_inset Formula $P^{Q^{\bullet}}$ +\begin_inset Formula $Q$ \end_inset - is filterable. + is a filterable functor. 
\end_layout \begin_layout Standard @@ -21099,18 +21217,14 @@ noprefix "false" (b) \series default If -\begin_inset Formula $P^{\bullet}$ +\begin_inset Formula $P$ \end_inset is any functor and -\begin_inset Formula $Q^{\bullet}$ -\end_inset - - is a filterable contrafunctor then -\begin_inset Formula $P^{Q^{\bullet}}$ +\begin_inset Formula $Q$ \end_inset - is filterable. + is a filterable contrafunctor. \end_layout \begin_layout Subparagraph @@ -21132,7 +21246,11 @@ noprefix "false" \end_inset - with necessary modifications. +, +\emph on +mutatis mutandis +\emph default +. \end_layout @@ -21141,7 +21259,7 @@ noprefix "false" \series bold (a) \series default - We define the + Define the \begin_inset listings inline true status open @@ -21246,7 +21364,7 @@ noprefix "false" \series bold (b) \series default - We define the + Define the \begin_inset listings inline true status open @@ -21374,22 +21492,22 @@ Products and co-products \begin_layout Standard If -\begin_inset Formula $G^{\bullet}$ +\begin_inset Formula $G$ \end_inset and -\begin_inset Formula $H^{\bullet}$ +\begin_inset Formula $H$ \end_inset - are filterable contrafunctors, the product -\begin_inset Formula $G^{A}\times H^{A}$ + are filterable contrafunctors, the product contrafunctor +\begin_inset Formula $G\times H$ \end_inset - and the co-product -\begin_inset Formula $G^{A}+H^{A}$ + and the co-product contrafunctor +\begin_inset Formula $G+H$ \end_inset - will also be filterable contrafunctors. + will also be filterable. Proofs are analogous to the case of filterable functors and are delegated to Exercise \begin_inset space ~ @@ -21427,7 +21545,7 @@ noprefix "false" \end_inset - for filterable functors: +: \end_layout \begin_layout Subsubsection @@ -21457,11 +21575,11 @@ The contrafunctor \end_inset is filterable for any filterable functor -\begin_inset Formula $G^{A}$ +\begin_inset Formula $G$ \end_inset and any filterable contrafunctor -\begin_inset Formula $H^{A}$ +\begin_inset Formula $H$ \end_inset . @@ -21514,7 +21632,7 @@ To obtain a clearer code formula, rewrite the Scala code using the \begin{align*} & \text{liftOpt}_{F}(f)\\ & \triangleq p^{:G^{B}\rightarrow H^{B}}\rightarrow\gunderline{g^{:G^{A}}\rightarrow g}\triangleright\text{liftOpt}_{G}(f)\triangleright p\triangleright\text{liftOpt}_{H}(f)\\ -\text{simplify }(x\rightarrow x\triangleright y)=y:\quad & \quad=p^{:G^{B}\rightarrow H^{B}}\rightarrow\text{liftOpt}_{G}(f)\bef p\bef\text{liftOpt}_{H}(f)\quad. +\text{simplify }(x\rightarrow x\triangleright y)=y:\quad & =p^{:G^{B}\rightarrow H^{B}}\rightarrow\text{liftOpt}_{G}(f)\bef p\bef\text{liftOpt}_{H}(f)\quad. \end{align*} \end_inset @@ -21637,7 +21755,7 @@ search functor \end_inset -\begin_inset Formula $S_{Z}^{\bullet}$ +\begin_inset Formula $S_{Z}$ \end_inset defined by @@ -21659,7 +21777,7 @@ search functor \end_inset . 
- The simplest case of the search functor is found by setting + A simple case of the search functor is found by setting \begin_inset Formula $Z\triangleq\bbnum 1$ \end_inset @@ -21756,10 +21874,10 @@ noprefix "false" \begin_layout Standard If a contrafunctor -\begin_inset Formula $H^{\bullet}$ +\begin_inset Formula $H$ \end_inset - is filterable, so is the contrafunctor + is filterable then so is the contrafunctor \begin_inset Formula $F^{A}\triangleq A\rightarrow\bbnum 1+H^{A}$ \end_inset @@ -22016,7 +22134,7 @@ Proof \begin_layout Standard The recursive contrafunctor -\begin_inset Formula $F^{\bullet}$ +\begin_inset Formula $F$ \end_inset is implemented by wrapping @@ -22053,7 +22171,7 @@ liftOpt \end_inset for -\begin_inset Formula $F^{\bullet}$ +\begin_inset Formula $F$ \end_inset is recursive and uses the @@ -22119,7 +22237,7 @@ Note that \end_inset . - As before, we use an overline to mark recursive calls to the same function: + We use an overline to mark recursive calls: \begin_inset Formula \begin{align*} & \text{liftOpt}_{F}(f^{:A\rightarrow\bbnum 1+B})\triangleq\text{liftOpt}_{S}(f)\bef\big(\overline{\text{liftOpt}_{F}}(f)\big)^{\uparrow S^{A,\bullet}}\\ @@ -22143,8 +22261,12 @@ status open \begin_layout Plain Layout -def cmap_F[A, B](f: A => B): F[B] => F[A] = { case F(sbfb) => F( sbfb.xmap_S(f)(c -map_F(f)) ) } +def cmap_F[A, B](f: A => B): F[B] => F[A] = { case F(sbfb) => +\end_layout + +\begin_layout Plain Layout + + F( sbfb.xmap_S(f)(cmap_F(f)) ) } \end_layout \end_inset @@ -22229,7 +22351,8 @@ In this derivation, we have used the naturality law of \begin_inset Formula $S^{A,R}$ \end_inset -: +. + That law is: \begin_inset Formula \[ \xymatrix{S^{B,R}\ar[r]\sp(0.5){\text{liftOpt}_{S}(f^{:A\rightarrow\bbnum 1+B})}\ar[d]\sp(0.4){(h^{:R\rightarrow R^{\prime}})^{\uparrow S^{B,\bullet}}} & S^{A,R}\ar[d]\sb(0.4){h^{\uparrow S^{A,\bullet}}}\\ @@ -22375,8 +22498,8 @@ liftOpt \end_layout \begin_layout Standard -This cannot be done because most non-trivial type constructors have many - lawful but +This cannot be done because many type constructors have several lawful but + \emph on inequivalent \emph default @@ -22512,7 +22635,7 @@ Show that the functor \end_inset is not filterable (for any contrafunctor -\begin_inset Formula $G^{A}$ +\begin_inset Formula $G$ \end_inset ). @@ -22556,7 +22679,7 @@ We cannot extract a value of type \end_inset since the contrafunctor -\begin_inset Formula $G^{A}$ +\begin_inset Formula $G$ \end_inset does not wrap any values of @@ -22582,7 +22705,7 @@ We cannot extract a value of type \end_inset (e.g., if -\begin_inset Formula $G^{\bullet}$ +\begin_inset Formula $G$ \end_inset is filterable), the result of applying @@ -22612,12 +22735,12 @@ deflate \end_inset for -\begin_inset Formula $F^{\bullet}$ +\begin_inset Formula $F$ \end_inset is not implementable. We conclude that -\begin_inset Formula $F^{\bullet}$ +\begin_inset Formula $F$ \end_inset is not filterable. @@ -22663,7 +22786,7 @@ Solution \begin_layout Standard We need to analyze the structure of the functor -\begin_inset Formula $F^{\bullet}$ +\begin_inset Formula $F$ \end_inset to decide which constructions we may use. @@ -22693,7 +22816,7 @@ F^{A}=R_{1}^{L^{A}}\quad,\quad\quad L^{A}\triangleq G^{A}+R_{2}^{H^{A}}\quad. 
\end_inset The type of -\begin_inset Formula $G^{\bullet}$ +\begin_inset Formula $G$ \end_inset is a co-product, so we need to check which of the two co-product constructions @@ -22777,7 +22900,7 @@ Int \end_inset ), -\begin_inset Formula $K^{\bullet}$ +\begin_inset Formula $K$ \end_inset is filterable by Statement @@ -22796,7 +22919,7 @@ noprefix "false" . So, -\begin_inset Formula $G^{A}$ +\begin_inset Formula $G$ \end_inset is filterable. @@ -22804,7 +22927,7 @@ noprefix "false" \begin_layout Standard Similarly, we find that -\begin_inset Formula $H^{A}$ +\begin_inset Formula $H$ \end_inset is filterable by Statement @@ -22834,10 +22957,10 @@ noprefix "false" \begin_layout Standard The functor -\begin_inset Formula $R_{2}^{H^{\bullet}}$ +\begin_inset Formula $R_{2}\circ H$ \end_inset - is filterable since it is a functor composition (Statement + is filterable: it is a functor composition (Statement \begin_inset space ~ \end_inset @@ -22852,7 +22975,7 @@ noprefix "false" \end_inset ) and -\begin_inset Formula $H^{\bullet}$ +\begin_inset Formula $H$ \end_inset is filterable. @@ -22875,7 +22998,7 @@ noprefix "false" \end_inset , and -\begin_inset Formula $R_{1}^{L^{A}}$ +\begin_inset Formula $R_{1}\circ L$ \end_inset by Statement @@ -22928,7 +23051,7 @@ liftOpt for this functor. For instance, the filtering operation for -\begin_inset Formula $H^{\bullet}$ +\begin_inset Formula $H$ \end_inset could be defined similarly to that for @@ -23408,12 +23531,12 @@ Assume that a given functor \end_inset is filterable (but -\begin_inset Formula $K^{\bullet}$ +\begin_inset Formula $K$ \end_inset is not necessarily filterable). The functor -\begin_inset Formula $H^{\bullet}$ +\begin_inset Formula $H$ \end_inset is a @@ -23429,16 +23552,16 @@ data wrapper \end_inset . - Show that an empty wrapper remains empty after any filtering: the function - -\begin_inset Formula $\text{filt}_{H}$ + Show that an empty wrapper must remain empty after any filtering. + In other words, for any +\begin_inset Formula $p^{:A\rightarrow\bbnum 2}$ \end_inset - satisfies, for any -\begin_inset Formula $p^{:A\rightarrow\bbnum 2}$ + the function +\begin_inset Formula $\text{filt}_{H}$ \end_inset -, + satisfies the equation: \begin_inset Formula \begin{equation} (1+\bbnum 0^{:K^{A}})\triangleright\text{filt}_{H}(p)=1+\bbnum 0^{:K^{A}}\quad.\label{eq:empty-filter-remains-empty-via-filt} @@ -23455,11 +23578,11 @@ Solution \begin_layout Standard We know nothing about -\begin_inset Formula $H^{\bullet}$ +\begin_inset Formula $H$ \end_inset and -\begin_inset Formula $K^{\bullet}$ +\begin_inset Formula $K$ \end_inset other than the fact that @@ -23506,7 +23629,7 @@ deflate \end_inset Any function lifted to -\begin_inset Formula $H^{\bullet}$ +\begin_inset Formula $H$ \end_inset works separately for the two parts of the disjunctive type @@ -23707,15 +23830,15 @@ noprefix "false" \begin_layout Standard Assuming that -\begin_inset Formula $G^{\bullet}$ +\begin_inset Formula $G$ \end_inset and -\begin_inset Formula $H^{\bullet}$ +\begin_inset Formula $H$ \end_inset are filterable functors and -\begin_inset Formula $H^{\bullet}$ +\begin_inset Formula $H$ \end_inset is of the form @@ -23723,11 +23846,11 @@ Assuming that \end_inset (where -\begin_inset Formula $K^{\bullet}$ +\begin_inset Formula $K$ \end_inset is not necessarily filterable), prove that the functor -\begin_inset Formula $F^{\bullet}\triangleq G^{K^{\bullet}}$ +\begin_inset Formula $F\triangleq G\circ K$ \end_inset is filterable. 
@@ -23841,7 +23964,7 @@ We can map \begin_inset Formula $\text{liftOpt}_{F}$ \end_inset - as + as: \begin_inset Formula \[ \text{liftOpt}_{F}(f^{:A\rightarrow\bbnum 1+B})\triangleq\text{liftOpt}_{G}^{K^{A},K^{B}}\big(\text{pu}_{\text{Opt}}^{K^{A}}\bef\text{liftOpt}_{H}(f)\big)\quad. @@ -24183,7 +24306,7 @@ unroll \end_inset the recursive equation and to reduce -\begin_inset Formula $F^{A}$ +\begin_inset Formula $F$ \end_inset to the @@ -24235,19 +24358,19 @@ unrolling trick gives, for any recursive definition of the form \end_inset where -\begin_inset Formula $P^{\bullet}$ +\begin_inset Formula $P$ \end_inset and -\begin_inset Formula $Q^{\bullet}$ +\begin_inset Formula $Q$ \end_inset are arbitrary functors. The functor -\begin_inset Formula $F^{A}$ +\begin_inset Formula $F$ \end_inset - given in this example is of this form with the functors + given in this example is of that form with the functors \begin_inset Formula $P^{A}\triangleq\bbnum 1+A\times A$ \end_inset @@ -24349,11 +24472,11 @@ unrolled . We can now use the functor product construction for -\begin_inset Formula $F^{\bullet}$ +\begin_inset Formula $F$ \end_inset - if we show that -\begin_inset Formula $\text{List}^{Q^{\bullet}}$ + if we show that the functor +\begin_inset Formula $\text{List}\circ Q$ \end_inset is filterable. @@ -24421,8 +24544,8 @@ noprefix "false" \end_inset - and conclude that -\begin_inset Formula $\text{List}^{Q^{\bullet}}$ + and conclude that the functor +\begin_inset Formula $\text{List}\circ Q$ \end_inset is filterable. @@ -24466,7 +24589,7 @@ Solution \end_layout \begin_layout Standard -Try to implement the function +Try implementing the function \begin_inset Formula $\text{inflate}_{C}:C^{A}\rightarrow C^{\bbnum 1+A}$ \end_inset @@ -24524,7 +24647,8 @@ empty \begin_inset Formula $\text{inflate}_{C}$ \end_inset -, and we conclude that +. + We conclude that \begin_inset Formula $C$ \end_inset @@ -24553,11 +24677,7 @@ noprefix "false" \end_layout \begin_layout Standard -Given a filterable functor -\begin_inset Formula $F^{\bullet}$ -\end_inset - -, show that the type +Show that the type \begin_inset Formula $F^{\bbnum 1}\rightarrow F^{\bbnum 0}$ \end_inset @@ -24565,7 +24685,11 @@ Given a filterable functor \emph on not \emph default - void. + void when +\begin_inset Formula $F$ +\end_inset + + is a filterable functor. \end_layout \begin_layout Subparagraph @@ -24681,7 +24805,7 @@ status open \begin_layout Plain Layout -H[_] +H \end_layout \end_inset @@ -24705,7 +24829,7 @@ status open \begin_layout Plain Layout -G[_] +G \end_layout \end_inset @@ -24795,6 +24919,55 @@ Prove rigorously (not via tests) that . \end_layout +\begin_layout Subsubsection +Exercise +\begin_inset CommandInset label +LatexCommand label +name "subsec:Exercise-filterable-laws-2-1" + +\end_inset + + +\begin_inset CommandInset ref +LatexCommand ref +reference "subsec:Exercise-filterable-laws-2-1" +plural "false" +caps "false" +noprefix "false" + +\end_inset + + +\end_layout + +\begin_layout Standard +Prove rigorously (not via tests) that Eqs. +\begin_inset space ~ +\end_inset + +( +\begin_inset CommandInset ref +LatexCommand ref +reference "eq:left-naturality-flatmap-option" +plural "false" +caps "false" +noprefix "false" + +\end_inset + +)–( +\begin_inset CommandInset ref +LatexCommand ref +reference "eq:right-naturality-flatmap-option" +plural "false" +caps "false" +noprefix "false" + +\end_inset + +) hold. 
+\end_layout + \begin_layout Subsubsection Exercise \begin_inset CommandInset label @@ -24834,7 +25007,7 @@ deflate \end_inset for any contrafunctor -\begin_inset Formula $C^{A}$ +\begin_inset Formula $C$ \end_inset (not necessarily filterable). @@ -24843,7 +25016,7 @@ deflate in case \emph default -\begin_inset Formula $C^{\bullet}$ +\begin_inset Formula $C$ \end_inset is filterable, the @@ -24900,7 +25073,7 @@ noprefix "false" \begin_layout Standard Assuming that -\begin_inset Formula $G^{\bullet}$ +\begin_inset Formula $G$ \end_inset is a filterable functor, prove rigorously that the recursive functor @@ -24921,7 +25094,7 @@ Filterable \end_inset instance for -\begin_inset Formula $F^{\bullet}$ +\begin_inset Formula $F$ \end_inset . @@ -24983,12 +25156,12 @@ Prove that \end_inset is in general not filterable if -\begin_inset Formula $G^{A}$ +\begin_inset Formula $G$ \end_inset is an arbitrary (non-filterable) functor; give an example of a suitable -\begin_inset Formula $G^{A}$ +\begin_inset Formula $G$ \end_inset . @@ -25011,7 +25184,7 @@ noprefix "false" \end_inset gives a necessary but not a sufficient condition for a functor -\begin_inset Formula $F^{\bullet}$ +\begin_inset Formula $F$ \end_inset to be filterable. @@ -25052,11 +25225,11 @@ Show that \end_inset are filterable (even when -\begin_inset Formula $G^{\bullet}$ +\begin_inset Formula $G$ \end_inset and -\begin_inset Formula $H^{\bullet}$ +\begin_inset Formula $H$ \end_inset are not filterable). @@ -25163,19 +25336,19 @@ noprefix "false" \begin_layout Standard If -\begin_inset Formula $G^{A}$ +\begin_inset Formula $G$ \end_inset and -\begin_inset Formula $H^{A}$ +\begin_inset Formula $H$ \end_inset are filterable contrafunctors, prove that the contrafunctors -\begin_inset Formula $G^{A}\times H^{A}$ +\begin_inset Formula $P^{A}\triangleq G^{A}\times H^{A}$ \end_inset and -\begin_inset Formula $G^{A}+H^{A}$ +\begin_inset Formula $Q^{A}\triangleq G^{A}+H^{A}$ \end_inset are also filterable. @@ -25213,7 +25386,7 @@ Show that the contrafunctor not \emph default filterable for any functor -\begin_inset Formula $F^{\bullet}$ +\begin_inset Formula $F$ \end_inset and any fixed type @@ -25250,7 +25423,7 @@ Show that a necessary \emph default condition for a contrafunctor -\begin_inset Formula $C^{\bullet}$ +\begin_inset Formula $C$ \end_inset to be filterable is that a function of type @@ -25286,28 +25459,25 @@ noprefix "false" \end_layout \begin_layout Standard -Show that a -\emph on -polynomial -\emph default - functor -\begin_inset Formula $F^{\bullet}$ +Give an example of a non-filterable polynomial functor +\begin_inset Formula $F$ \end_inset - is filterable (in some way) if and only if the type -\begin_inset Formula $F^{\bbnum 1}\rightarrow F^{\bbnum 0}$ + for which +\begin_inset Formula $F^{\bbnum 1}\rightarrow F^{\bbnum 0}\cong\bbnum 0$ \end_inset - is not void. - Find an example of a non-filterable polynomial functor -\begin_inset Formula $F^{\bullet}$ +. + Show that a polynomial functor +\begin_inset Formula $F$ \end_inset - that violates the condition -\begin_inset Formula $F^{\bbnum 1}\rightarrow F^{\bbnum 0}\not\cong\bbnum 0$ + is filterable (in some way) if and only if the type +\begin_inset Formula $F^{\bbnum 1}\rightarrow F^{\bbnum 0}$ \end_inset -. + is not void. 
+ \end_layout \begin_layout Section @@ -25769,7 +25939,7 @@ List \end_inset -We can write an equivalent Scala code for this function as +We can write an equivalent Scala code for this function as: \end_layout \begin_layout Standard @@ -25897,7 +26067,7 @@ headOption : \begin_inset Formula \[ -\xymatrix{\text{List}^{A}\ar[r]\sp(0.55){\text{headOpt}^{A}}\ar[d]\sp(0.4){(f^{:A\rightarrow B})^{\uparrow\text{List}}} & \text{Opt}^{A}\ar[d]\sb(0.4){f^{\uparrow\text{Opt}}}\\ +\xymatrix{\text{List}^{A}\ar[r]\sp(0.55){\text{headOpt}^{A}}\ar[d]\sb(0.5){(f^{:A\rightarrow B})^{\uparrow\text{List}}} & \text{Opt}^{A}\ar[d]\sb(0.4){f^{\uparrow\text{Opt}}}\\ \xyScaleY{1.6pc}\xyScaleX{4.5pc}\text{List}^{B}\ar[r]\sp(0.55){\text{headOpt}^{B}} & \text{Opt}^{B} } \] @@ -25930,12 +26100,11 @@ headOption first \emph default element of a list. - The same naturality law will hold for any fully parametric function of - type + The same law will hold for any fully parametric function of type \begin_inset Formula $\text{List}^{A}\rightarrow\text{Opt}^{A}$ \end_inset -, e.g., the functions +, e.g., for the functions \begin_inset listings inline true status open @@ -25960,7 +26129,10 @@ _.drop(2).headOption \end_inset . - The naturality law only expresses the property that the function + The naturality law only expresses the property that the function works + in the same way for all types; the function should not behave differently + for any specific types or for any specific values in the list. + This is true for \begin_inset listings inline true status open @@ -25972,13 +26144,23 @@ headOption \end_inset - works in the same way for all types and has no special code for a specific - type or any specific values in the list. - +, for +\begin_inset listings +inline true +status open + +\begin_layout Plain Layout + +lastOption +\end_layout + +\end_inset + +, and for many other functions. \end_layout \begin_layout Standard -Other examples of natural transformations are the functions +Further examples of natural transformations are the functions \begin_inset listings inline true status open @@ -26024,17 +26206,17 @@ natural transformation \end_inset pattern, which we can formulate for arbitrary functors -\begin_inset Formula $F^{\bullet}$ +\begin_inset Formula $F$ \end_inset and -\begin_inset Formula $G^{\bullet}$ +\begin_inset Formula $G$ \end_inset as the following law: \begin_inset Formula \[ -\xymatrix{F^{A}\ar[r]\sp(0.55){t^{A}}\ar[d]\sp(0.4){(f^{:A\rightarrow B})^{\uparrow F}} & G^{A}\ar[d]\sp(0.4){f^{\uparrow G}}\\ +\xymatrix{F^{A}\ar[r]\sp(0.55){t^{A}}\ar[d]\sb(0.5){(f^{:A\rightarrow B})^{\uparrow F}} & G^{A}\ar[d]\sp(0.4){f^{\uparrow G}}\\ \xyScaleY{1.7pc}\xyScaleX{3.5pc}F^{B}\ar[r]\sp(0.55){t^{B}} & G^{B} } \] @@ -26089,7 +26271,7 @@ Once we recognize that a given function \end_inset . 
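\begin_layout Standard
For example, here is a quick runnable check (not a proof, and with arbitrarily
 chosen test values) of the naturality law of headOption:
\end_layout

\begin_layout Standard
\begin_inset listings
inline false
status open

\begin_layout Plain Layout

val xs = List(10, 20, 30)
\end_layout

\begin_layout Plain Layout

val f: Int => String = x => "v" + x
\end_layout

\begin_layout Plain Layout

assert(xs.map(f).headOption == xs.headOption.map(f))        // Both sides are Some("v10").
\end_layout

\begin_layout Plain Layout

val empty = List.empty[Int]
\end_layout

\begin_layout Plain Layout

assert(empty.map(f).headOption == empty.headOption.map(f))  // Both sides are None.
\end_layout

\end_inset


\end_layout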
- So, the naturality law is + So, naturality laws have the form \begin_inset Formula $f^{\uparrow F}\bef t=t\bef f^{\uparrow G}$ \end_inset @@ -26097,11 +26279,15 @@ Once we recognize that a given function \end_layout \begin_layout Standard -The analogous naturality law for natural transformations -\begin_inset Formula $t:C^{A}\rightarrow D^{A}$ +The naturality law for natural transformations +\begin_inset Formula $t^{A}:C^{A}\rightarrow D^{A}$ \end_inset - between contrafunctors + between +\emph on +contrafunctors +\emph default + \begin_inset Formula $C$ \end_inset @@ -26112,7 +26298,7 @@ The analogous naturality law for natural transformations has exactly the same form, but the order of type parameters must be swapped: \begin_inset Formula \[ -\xymatrix{C^{A}\ar[r]\sp(0.55){t^{A}}\ar[d]\sp(0.4){(f^{:B\rightarrow A})^{\downarrow C}} & D^{A}\ar[d]\sp(0.4){f^{\downarrow D}}\\ +\xymatrix{C^{A}\ar[r]\sp(0.55){t^{A}}\ar[d]\sb(0.5){(f^{:B\rightarrow A})^{\downarrow C}} & D^{A}\ar[d]\sp(0.4){f^{\downarrow D}}\\ \xyScaleY{1.7pc}\xyScaleX{3.5pc}C^{B}\ar[r]\sp(0.55){t^{B}} & D^{B} } \] @@ -26357,11 +26543,12 @@ It is clear that we must choose an arbitrary value \end_inset ) for all types to match. - It remains to fill the typed hole, which must be of the form + It remains to fill the right-hand side, which must be of the form \begin_inset Formula $t(...)\bef f^{\uparrow G}$ \end_inset -, +. + We write: \begin_inset Formula \[ f^{\uparrow F}\bef t^{B}(c^{:C^{B}})=t^{A}(\text{???}^{:C^{A}})\bef f^{\uparrow G}\quad. @@ -26369,12 +26556,8 @@ f^{\uparrow F}\bef t^{B}(c^{:C^{B}})=t^{A}(\text{???}^{:C^{A}})\bef f^{\uparrow \end_inset -The argument of -\begin_inset Formula $t$ -\end_inset - - of type -\begin_inset Formula $C^{A}$ +The value +\begin_inset Formula $\text{???}^{:C^{A}}$ \end_inset is obtained by applying @@ -26395,11 +26578,15 @@ f^{\uparrow F}\bef t(c)=t(c\triangleright f^{\downarrow C})\bef f^{\uparrow G}\q \end_inset A similar law can be derived for the case when -\begin_inset Formula $C^{\bullet}$ +\begin_inset Formula $C$ \end_inset is a functor and -\begin_inset Formula $F^{\bullet},G^{\bullet}$ +\begin_inset Formula $F$ +\end_inset + +, +\begin_inset Formula $G$ \end_inset are contrafunctors. @@ -26426,7 +26613,7 @@ Functions , which is a type signature of a natural transformation. Denoting by -\begin_inset Formula $\tilde{t}:F^{A}\rightarrow H^{A}$ +\begin_inset Formula $\tilde{t}^{A}:F^{A}\rightarrow H^{A}$ \end_inset the function @@ -26440,7 +26627,7 @@ Functions as: \begin_inset Formula \[ -f^{\uparrow F}\bef\tilde{t}=\tilde{t}\bef f^{\uparrow H}\quad,\quad\text{where}\quad\tilde{t}\triangleq p^{:F^{A}}\rightarrow c^{:C^{A}}\rightarrow p\triangleright t(c)\quad. +f^{\uparrow F}\bef\tilde{t}=\tilde{t}\bef f^{\uparrow H}\quad,\quad\text{where}\quad\tilde{t}^{A}\triangleq p^{:F^{A}}\rightarrow c^{:C^{A}}\rightarrow p\triangleright t(c)\quad. \] \end_inset @@ -26532,16 +26719,20 @@ and considered as functions of \begin_layout Standard Reduction to natural transformations works similarly when -\begin_inset Formula $C^{\bullet}$ +\begin_inset Formula $C$ \end_inset is a functor and -\begin_inset Formula $F^{\bullet},G^{\bullet}$ +\begin_inset Formula $F$ +\end_inset + +, +\begin_inset Formula $G$ \end_inset are contrafunctors. 
A naturality law of -\begin_inset Formula $t:C^{A}\rightarrow F^{A}\rightarrow G^{A}$ +\begin_inset Formula $t^{A}:C^{A}\rightarrow F^{A}\rightarrow G^{A}$ \end_inset can then be derived from @@ -26651,9 +26842,17 @@ map \end_inset Replacing +\begin_inset Quotes eld +\end_inset + + \begin_inset Formula $\text{Opt}$ \end_inset + +\begin_inset Quotes erd +\end_inset + by an arbitrary functor \begin_inset Formula $G$ \end_inset @@ -26661,12 +26860,12 @@ Replacing , we obtain the type signature: \begin_inset Formula \[ -\text{lift}_{G,F}^{A,B}:(A\rightarrow G^{B})\rightarrow F^{A}\rightarrow F^{B}\quad, +\text{lift}_{G,F}^{A,B}:(A\rightarrow G^{B})\rightarrow F^{A}\rightarrow F^{B}\quad. \] \end_inset -which we can view as a generalized +We can view this as a generalized \begin_inset Quotes eld \end_inset @@ -26674,19 +26873,11 @@ lifting \begin_inset Quotes erd \end_inset - of functions with a -\begin_inset Quotes eld -\end_inset - -twisted -\begin_inset Quotes erd -\end_inset - - type + from functions of type \begin_inset Formula $A\rightarrow G^{B}$ \end_inset - (which we call + (called \begin_inset Index idx status open @@ -26861,7 +27052,7 @@ Parametricity theorem \begin_layout Standard It turns out that the naturality law of a natural transformation -\begin_inset Formula $t:F^{A}\rightarrow G^{A}$ +\begin_inset Formula $t^{A}:F^{A}\rightarrow G^{A}$ \end_inset will @@ -26944,7 +27135,7 @@ not This saves a significant amount of work, since every method of every typeclass will have one naturality law per type parameter. Until now, we have been systematically deriving and checking all naturality - laws; but we will not check those laws in the rest of the book. + laws; but we will not keep checking those laws in the rest of the book. \end_layout \begin_layout Standard @@ -26984,7 +27175,7 @@ noprefix "false" \end_inset at the left side of -\begin_inset Formula $t:F^{A}\rightarrow G^{A}$ +\begin_inset Formula $t^{A}:F^{A}\rightarrow G^{A}$ \end_inset and to the functor @@ -27117,7 +27308,7 @@ deflate \end_inset is the easiest type signature to implement and to reason about, especially - in order to demonstrate that a functor is not filterable; + in order to detect that a functor is not filterable; \begin_inset listings inline true status open @@ -27175,7 +27366,7 @@ noprefix "false" \end_inset ). - It is notable how these two laws are similar to the functor laws + It is notable that those two laws are similar to the functor laws \begin_inset space ~ \end_inset @@ -27208,7 +27399,7 @@ noprefix "false" \end_inset -The only difference between these laws is in replacing +The only difference between those sets of laws is in replacing \begin_inset Formula $\text{id}^{:A\rightarrow A}$ \end_inset @@ -27225,7 +27416,7 @@ The only difference between these laws is in replacing \end_inset . - We will now focus on the analogy between these laws, which goes far beyond + We will now focus on the analogy between those laws, which goes far beyond the superficial similarity of form. \end_layout @@ -27386,6 +27577,11 @@ noprefix "false" \end_inset ). + +\begin_inset Formula $\square$ +\end_inset + + \end_layout \begin_layout Standard @@ -27511,10 +27707,10 @@ noprefix "false" \end_layout \begin_layout Standard -The similarity between these proofs means, in the mathematical sense, that - we have been proving essentially the same statements twice but did not - look at the appropriate level of abstraction to see that. 
- While programmers may accept the work of writing these proofs twice, a +The similarity between those proofs means, in a mathematician's view, that + we have been proving essentially the same statement twice but did not use + an appropriate level of abstraction to see that. + While programmers may accept the work of writing those proofs twice, a mathematician would prefer to define a \begin_inset Quotes eld \end_inset @@ -27540,8 +27736,8 @@ generalized lifting \end_inset -and postulating the required properties as the set of identity, associativity, - and composition laws: +and postulating the required properties as the laws of identity, associativity, + and composition: \begin_inset Formula \begin{align*} & \text{lift}_{M,F}(\text{pu}_{M}^{:A\rightarrow M^{A}})=\text{id}^{:F^{A}\rightarrow F^{A}}\quad,\quad\quad\text{lift}_{M,F}(f)\bef\text{lift}_{M,F}(g)=\text{lift}_{M,F}(f\diamond_{_{M}}g)\quad,\\ @@ -27588,8 +27784,16 @@ status open \begin_inset Formula $M$ \end_inset - could be later set to the identity functor or the -\begin_inset Formula $\text{Opt}$ + could be later set either to the identity functor or to the +\begin_inset listings +inline true +status open + +\begin_layout Plain Layout + +Option +\end_layout + \end_inset functor. @@ -27609,7 +27813,7 @@ Not all functors \begin_inset Formula $M$ \end_inset -, which are known as +, known as \series bold monads \series default @@ -27708,7 +27912,8 @@ reversed \end_inset . - In turn, all these function types obey the laws of identity and composition. + In turn, all those function types obey their own versions of the laws of + identity and composition. (For the types to match, composition of reversed functions needs to be performed in the reverse order.) \end_layout @@ -27961,7 +28166,7 @@ plain \begin_inset Quotes eld \end_inset -reversed +reverse \begin_inset Quotes erd \end_inset @@ -28089,7 +28294,7 @@ reversed \begin_inset Quotes eld \end_inset -reversed +reverse \begin_inset Formula $M$ \end_inset @@ -28446,7 +28651,7 @@ functor \end_inset is a type constructor -\begin_inset Formula $F^{\bullet}$ +\begin_inset Formula $F$ \end_inset with a lawful lifting of functions @@ -28548,8 +28753,7 @@ endofunctor \begin_inset Quotes erd \end_inset - because almost all categorical functors used in functional programming - are endofunctors. + because almost all categorical functors used in programming are endofunctors. \end_layout \end_inset @@ -28779,11 +28983,6 @@ Now we require that the first category's identity morphism is mapped to \end_inset We derived these laws previously as the laws of contrafunctors. 
- -\begin_inset Formula $\square$ -\end_inset - - \end_layout \begin_layout Subsubsection @@ -28873,7 +29072,7 @@ To define that (categorical) functor, we need to specify how types and morphisms \end_layout \begin_layout Standard -Formulate the laws of identity and composition for the (categorical) functor +Write the laws of identity and composition for that categorical functor using the definitions of the identity morphisms and the composition operation in each category: \begin_inset Formula @@ -28884,10 +29083,10 @@ Formulate the laws of identity and composition for the (categorical) functor \end_inset -Now we require that the first category's identity morphism is mapped to - the second category's identity morphism, and that a composition of any - two morphisms (as defined in the first category) is mapped to a composition - as defined in the second category: +The functor laws require that the first category's identity morphism is + mapped to the second category's identity morphism, and that a composition + of any two morphisms (as defined in the first category) is mapped to a + composition as defined in the second category: \begin_inset Formula \begin{align*} \text{identity law}:\quad & \text{liftOpt}_{F}\big(\text{pu}_{\text{Opt}}\big)=\text{id}^{:F^{A}\rightarrow F^{A}}\quad,\\ @@ -29017,7 +29216,7 @@ filter and did not assume more laws than necessary. In contrast, the two laws of the categorical functor are general and appear time and again in different areas of mathematics. - This gives us confidence that these laws are correctly chosen and will + This gives us confidence that those laws are correctly chosen and will be useful in a wide range of contexts. Proving that the four laws of \begin_inset listings @@ -29093,7 +29292,7 @@ Kleisli!category contrafunctors, filterable functors, and filterable contrafunctors) by a single but more abstract theorem about the product of (categorical) functors being a functor between suitably defined categories. - We will not look at these proofs here; Chapters + We will not look at those proofs here; Chapters \begin_inset space ~ \end_inset @@ -29122,13 +29321,29 @@ noprefix "false" \end_layout \begin_layout Standard -The categorical view also shows us two directions for developing the theory +The categorical view also shows us some directions for developing the theory further, hoping to find useful applications. - First, we can look for functors + We have found a useful operation (Kleisli composition) and the properties + it must satisfy (the identity and the associativity laws). + The +\begin_inset listings +inline true +status open + +\begin_layout Plain Layout + +Option +\end_layout + +\end_inset + + functor obeys those properties, and so we may want to look for other functors + \begin_inset Formula $M$ \end_inset - (called + that also have these properties. + (Those functors are called \begin_inset Quotes eld \end_inset @@ -29136,17 +29351,11 @@ monads \begin_inset Quotes erd \end_inset -) that admit the Kleisli composition with the properties (the identity and - the associativity laws) required by an -\begin_inset Formula $M$ -\end_inset - --Kleisli category. - Second, having found some new monads +.) Having found a new monad \begin_inset Formula $M$ \end_inset -, we can look for +, we can then look for \begin_inset Quotes eld \end_inset @@ -29233,9 +29442,9 @@ To summarize, using the category theory's notion of functor brings the following \end_layout \begin_layout Itemize -We are assured that we found a correct set of laws of a typeclass. 
- We can derive the formulation of those laws from the standard laws of categorie -s and functors, without guessing. +We are assured that we have found a correct set of laws of a typeclass. + We can derive the formulation of those laws without guessing, by starting + from the standard laws of categories and functors. \end_layout \begin_layout Itemize @@ -29249,14 +29458,14 @@ Several proofs may be replaced by a single proof for properties of some \end_layout \begin_layout Itemize -We can formulate general constructions (e.g., functor product) that work in - the same way for many different typeclasses. +We may find general constructions (e.g., functor product) that work in the + same way for many different typeclasses. \end_layout \begin_layout Standard -In this way, we find that category theory is a useful tool for reasoning - about abstract constructions that work with different typeclasses (functor, - contrafunctor, filterable, etc.). +We see that category theory is a useful tool for reasoning about abstract + constructions that work with different typeclasses (functor, contrafunctor, + filterable, etc.). Category theory views many typeclasses in a similar way and gives a systematic guidance for deriving the typeclass laws. \end_layout @@ -29333,8 +29542,8 @@ type F[A] = Option[(A, A)] \end_inset -) will belong to a specific typeclass (e.g., a filterable functor, a pointed - functor, or a monad). +) belongs to a specific typeclass (e.g., a filterable functor, a pointed functor, + or a monad). \end_layout \begin_layout Itemize @@ -29372,8 +29581,8 @@ CT does not say whether there exists a natural transformation between two \end_layout \begin_layout Standard -Performing these tasks requires certain techniques of symbolic derivation - adapted to +Performing those tasks requires certain symbolic derivation techniques adapted + to \emph on applied \emph default diff --git a/sofp-src/lyx/sofp-functors.lyx b/sofp-src/lyx/sofp-functors.lyx index b574f2f56..ee8225710 100644 --- a/sofp-src/lyx/sofp-functors.lyx +++ b/sofp-src/lyx/sofp-functors.lyx @@ -317,7 +317,7 @@ name "chap:Functors,-contrafunctors,-and" {\underline{#1}} \end_inset -Type constructors such as +Types \begin_inset listings inline true status open @@ -341,7 +341,7 @@ Array[A] \end_inset - are data structures that hold or + represent data structures that hold or \begin_inset Quotes eld \end_inset @@ -362,7 +362,7 @@ A \end_inset . 
- These data structures are fully parametric: they work in the same way for + Those data structures are fully parametric: they work in the same way for every type \begin_inset listings inline true @@ -461,7 +461,7 @@ status open \begin_layout Plain Layout -Seq[A] +Seq \end_layout \end_inset @@ -473,7 +473,7 @@ status open \begin_layout Plain Layout -Try[A] +Try \end_layout \end_inset @@ -485,7 +485,7 @@ status open \begin_layout Plain Layout -Future[A] +Future \end_layout \end_inset @@ -599,14 +599,14 @@ List(15, 25, 35) \end_layout \begin_layout Standard -The data types +The types \begin_inset listings inline true status open \begin_layout Plain Layout -Seq[A] +Seq \end_layout \end_inset @@ -618,7 +618,7 @@ status open \begin_layout Plain Layout -Try[A] +Try \end_layout \end_inset @@ -630,7 +630,7 @@ status open \begin_layout Plain Layout -Future[A] +Future \end_layout \end_inset @@ -798,7 +798,7 @@ f: A => B \end_inset - to data of type + to all the data of type \begin_inset listings inline true status open @@ -810,7 +810,7 @@ A \end_inset - stored inside the wrapper, such that new data (of type + stored inside the wrapper, putting new data (of type \begin_inset listings inline true status open @@ -822,7 +822,7 @@ B \end_inset -) will remain within a wrapper of the same type: +) into a wrapper of the same type: \begin_inset listings inline false status open @@ -985,7 +985,7 @@ all \end_layout \begin_layout Subsection -Extended example: +Example: \family typewriter Option \family default @@ -1015,7 +1015,7 @@ status open \begin_layout Plain Layout -Option[A] +Option \end_layout \end_inset @@ -1082,7 +1082,7 @@ status open \begin_layout Plain Layout -def mapX[A, B](oa: Option[A])(f: A => B): Option[B] = None +def mapX[A, B](p: Option[A])(f: A => B): Option[B] = None \end_layout \begin_layout Plain Layout @@ -1091,12 +1091,12 @@ def mapX[A, B](oa: Option[A])(f: A => B): Option[B] = None \begin_layout Plain Layout -def mapY[A, B](oa: Option[A])(f: A => B): Option[B] = +def mapY[A, B](p: Option[A])(f: A => B): Option[B] = \end_layout \begin_layout Plain Layout - oa match { + p match { \end_layout \begin_layout Plain Layout @@ -1191,7 +1191,7 @@ status open \begin_layout Plain Layout -map(oa)(f) +map(p)(f) \end_layout \end_inset @@ -1217,7 +1217,7 @@ map \end_inset , so that the types match). - Applying an identity function to a value wrapped in an + Applying an identity function to a value stored in an \begin_inset listings inline true status open @@ -1261,12 +1261,12 @@ status open \begin_layout Plain Layout -mapY[A, A](x: Option[A])(identity[A]: A => A): Option[A] +mapY[A, A](p: Option[A])(identity[A]: A => A): Option[A] \end_layout \begin_layout Plain Layout - == x match { + == p match { \end_layout \begin_layout Plain Layout @@ -1281,7 +1281,7 @@ mapY[A, A](x: Option[A])(identity[A]: A => A): Option[A] \begin_layout Plain Layout - } == x + } == p \end_layout \end_inset @@ -1293,7 +1293,7 @@ status open \begin_layout Plain Layout -x +p \end_layout \end_inset @@ -1302,7 +1302,7 @@ x We can write that fact as an equation: \begin_inset Formula \[ -\forall x^{:\text{Opt}^{A}}.\,\,\text{map}\,(x)(\text{id})=x\quad. +\text{for all }p^{:\text{Opt}^{A}}:\,\,\text{map}\,(p)(\text{id})=p\quad. 
\] \end_inset @@ -1379,7 +1379,7 @@ status open \begin_layout Plain Layout -mapX(oa)(id) == None +mapX(p)(id) == None \end_layout \end_inset @@ -1391,19 +1391,19 @@ status open \begin_layout Plain Layout -oa +p \end_layout \end_inset - for arbitrary values of + for all \begin_inset listings inline true status open \begin_layout Plain Layout -oa +p \end_layout \end_inset @@ -1436,7 +1436,7 @@ mapY \end_inset . - The code notation for + The code notation for that implementation of \begin_inset listings inline true status open @@ -1937,14 +1937,14 @@ We can formulate this property more generally: Liftings should preserve \end_inset . - This is written as: + So, we must have: \begin_inset listings inline false status open \begin_layout Plain Layout -c.map(f).map(g) == c.map(f andThen g) == c.map(x => g(f(x))) +c.map(f).map(g) == c.map(f andThen g) \end_layout \end_inset @@ -1952,45 +1952,70 @@ c.map(f).map(g) == c.map(f andThen g) == c.map(x => g(f(x))) \begin_inset Formula \[ -c^{:F^{A}}\triangleright\text{fmap}\,(f)\triangleright\text{fmap}\,(g)=c\triangleright\text{fmap}\,(f)\bef\text{fmap}\,(g)=c\triangleright\text{fmap}\,(f\bef g)\quad. +c^{:F^{A}}\triangleright\text{fmap}\,(f)\triangleright\text{fmap}\,(g)=c\triangleright\text{fmap}\,(f\bef g)\quad. \] \end_inset -This equation has the form -\begin_inset Formula $c\triangleright p=c\triangleright q$ +The pipe notation +\begin_inset Index idx +status open + +\begin_layout Plain Layout +pipe notation +\end_layout + \end_inset - with some functions -\begin_inset Formula $p$ + allows us to write: +\begin_inset Formula +\[ +c\triangleright\text{fmap}\,(f)\triangleright\text{fmap}\,(g)=c\triangleright\text{fmap}\,(f)\bef\text{fmap}\,(g)\quad. +\] + \end_inset - and -\begin_inset Formula $q$ +Then we can express the property of +\begin_inset listings +inline true +status open + +\begin_layout Plain Layout + +map +\end_layout + \end_inset -, or equivalently -\begin_inset Formula $p(c)=q(c)$ + as: +\begin_inset Formula +\[ +c\triangleright\text{fmap}\,(f)\bef\text{fmap}\,(g)=c\triangleright\text{fmap}\,(f\bef g)\quad. +\] + \end_inset -. - When we have -\begin_inset Formula $p(c)=q(c)$ +When +\begin_inset Formula $c\triangleright p=c\triangleright q$ \end_inset for all \begin_inset Formula $c$ \end_inset -, it means +, where +\begin_inset Formula $p$ +\end_inset + + and +\begin_inset Formula $q$ +\end_inset + + are some functions, it means \emph on the functions themselves \emph default - are equal: -\begin_inset Formula $p=q$ -\end_inset - -. + are equal, \begin_inset Index idx status open @@ -2000,11 +2025,20 @@ equality between functions \end_inset - So, we may omit the argument + and we may simply write +\begin_inset Formula $p=q$ +\end_inset + + instead of +\begin_inset Formula $c\triangleright p=c\triangleright q$ +\end_inset + +. + So, we omit the argument \begin_inset Formula $c$ \end_inset - and rewrite the equation in a shorter form: + in the last equation and rewrite it in a shorter form: \begin_inset Formula \[ \text{fmap}\,(f)\bef\text{fmap}\,(g)=\text{fmap}\,(f\bef g)\quad. @@ -2012,7 +2046,7 @@ equality between functions \end_inset -This equation is called the +This is called the \series bold composition law \series default @@ -2072,60 +2106,59 @@ noprefix "false" . For clarity and practice, we will perform the derivations both in the code notation and in the Scala syntax. 
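\begin_layout Standard
Before the symbolic derivation, we may spot-check the composition law on
 a few arbitrarily chosen values (a quick illustration, not a proof):
\end_layout

\begin_layout Standard
\begin_inset listings
inline false
status open

\begin_layout Plain Layout

val f: Int => Int = _ + 1
\end_layout

\begin_layout Plain Layout

val g: Int => String = _.toString
\end_layout

\begin_layout Plain Layout

assert(Some(10).map(f).map(g) == Some(10).map(f andThen g))   // Both sides are Some("11").
\end_layout

\begin_layout Plain Layout

val none: Option[Int] = None
\end_layout

\begin_layout Plain Layout

assert(none.map(f).map(g) == none.map(f andThen g))           // Both sides are None.
\end_layout

\end_inset


\end_layout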
- To evaluate -\begin_inset Formula $\text{fmap}\,(f\bef g$ -\end_inset - -), we apply -\begin_inset listings -inline true -status open - -\begin_layout Plain Layout - -fmap(f andThen g) \end_layout +\begin_layout Standard +The composition law must hold for arbitrary functions +\begin_inset Formula $f^{:A\rightarrow B}$ \end_inset -, where + and +\begin_inset Formula $g^{:B\rightarrow C}$ +\end_inset + +. + Then both sides of the composition law are functions of type \begin_inset listings inline true status open \begin_layout Plain Layout -f: A => B +Option[A] => Option[C] \end_layout \end_inset - and +. + To show that both sides are equal, we apply both sides to an arbitrary + value \begin_inset listings inline true status open \begin_layout Plain Layout -g: B => C +p: Option[A] \end_layout \end_inset - are arbitrary functions, to an arbitrary value + and show that the results are equal. + Begin with the right-hand side, which is \begin_inset listings inline true status open \begin_layout Plain Layout -oa:Option[A] +fmap(f andThen g) \end_layout \end_inset -. - In Scala code, it is convenient to use the method + in Scala. + It is convenient to use the method \begin_inset listings inline true status open @@ -2144,7 +2177,7 @@ status open \begin_layout Plain Layout -oa.map(f) +p.map(f) \end_layout \end_inset @@ -2156,7 +2189,7 @@ status open \begin_layout Plain Layout -fmap(f)(oa) +fmap(f)(p) \end_layout \end_inset @@ -2168,7 +2201,7 @@ status open \begin_layout Plain Layout -fmap(f andThen g)(oa) == oa.map(f andThen g) == oa match { +fmap(f andThen g)(p) == p.map(f andThen g) == p match { \end_layout \begin_layout Plain Layout @@ -2207,7 +2240,7 @@ status open \begin_layout Plain Layout -oa.map(f andThen g) == oa match { +p.map(f andThen g) == p match { \end_layout \begin_layout Plain Layout @@ -2231,18 +2264,26 @@ oa.map(f andThen g) == oa match { \end_layout \begin_layout Standard -Now we consider the left-hand side of the law, -\begin_inset Formula $\text{fmap}\,(f)\bef\text{fmap}\,(g)$ +Now apply the left-hand side of the law to +\begin_inset listings +inline true +status open + +\begin_layout Plain Layout + +p +\end_layout + \end_inset -, and write the Scala expressions: +: \begin_inset listings inline false status open \begin_layout Plain Layout -oa.map(f).map(g) == (oa match { +p.map(f).map(g) == (p match { \end_layout \begin_layout Plain Layout @@ -2257,7 +2298,7 @@ oa.map(f).map(g) == (oa match { \begin_layout Plain Layout -}).map(g) == (oa match { +}).map(g) == (p match { \end_layout \begin_layout Plain Layout @@ -2287,7 +2328,7 @@ oa.map(f).map(g) == (oa match { \begin_layout Plain Layout -} == oa match { +} == p match { \end_layout \begin_layout Plain Layout @@ -2490,7 +2531,7 @@ None , our ordinary intuitions about data transformations would become incorrect. In other words, violations of the composition law prevent us from understanding - the code via mathematical reasoning about transformation of data values. + the code via reasoning about transformation of data values. \end_layout \begin_layout Standard @@ -2592,7 +2633,7 @@ data wrapper \begin_inset Quotes erd \end_inset - from any other features of a data type, we obtain: + from other features of a data type, we obtain: \end_layout \begin_layout Itemize @@ -2609,7 +2650,7 @@ L[A] \end_inset . - We will use the notation + We will sometimes use the notation \begin_inset Formula $L^{\bullet}$ \end_inset @@ -2625,7 +2666,11 @@ L[_] \end_inset -) for the type constructor itself. 
+) in order to emphasize that +\begin_inset Formula $L$ +\end_inset + + is a type constructor. \end_layout \begin_layout Itemize @@ -2651,7 +2696,7 @@ fmap \end_inset - with this type signature: + with the type signature: \begin_inset Formula \[ \text{fmap}_{L}:\left(A\rightarrow B\right)\rightarrow L^{A}\rightarrow L^{B}\quad. @@ -2692,7 +2737,7 @@ fmap \begin_layout Standard A type constructor -\begin_inset Formula $L^{\bullet}$ +\begin_inset Formula $L$ \end_inset with these properties is called a @@ -2775,7 +2820,8 @@ There are two paths from \begin_inset Formula $L^{C}$ \end_inset -; by Eq. +. + By Eq. \begin_inset space ~ \end_inset @@ -2790,11 +2836,19 @@ noprefix "false" \end_inset ), both paths must give the same result. - Mathematicians call such diagrams + Mathematicians say that the diagram is +\begin_inset Quotes eld +\end_inset + + \series bold commutative \series default +\begin_inset Quotes erd +\end_inset + + \begin_inset Index idx status open @@ -2869,7 +2923,7 @@ fmap \end_inset - and can be defined through + and can be expressed through \begin_inset listings inline true status open @@ -2967,7 +3021,7 @@ map \begin_inset Formula $\text{fmap}_{L}$ \end_inset -, in order to indicate clearly the type constructor those functions work + in order to indicate clearly the type constructor those functions work with. \end_layout @@ -3234,14 +3288,14 @@ V \end_inset . - The functor's + The \begin_inset listings inline true status open \begin_layout Plain Layout -fmap +map \end_layout \end_inset @@ -3311,80 +3365,80 @@ final case class Counted[A](n: Int, a: A) \end_inset -We may implement +The data type \begin_inset listings inline true status open \begin_layout Plain Layout -map +Counted[A] \end_layout \end_inset - for + may be used to describe \begin_inset listings inline true status open \begin_layout Plain Layout -Counted[A] +n \end_layout \end_inset - as a function: + repetitions of a given value \begin_inset listings -inline false +inline true status open \begin_layout Plain Layout -def map[A, B](c: Counted[A])(f: A => B): Counted[B] = c match { +a: A \end_layout -\begin_layout Plain Layout +\end_inset - case Counted(n, a) => Counted(n, f(a)) -\end_layout +. 
+ We may implement +\begin_inset listings +inline true +status open \begin_layout Plain Layout -} +map \end_layout \end_inset -But it is often more convenient to implement + for \begin_inset listings inline true status open \begin_layout Plain Layout -map +Counted[A] \end_layout \end_inset - as a class method: -\end_layout - -\begin_layout Standard + as a function: \begin_inset listings inline false status open \begin_layout Plain Layout -final case class Counted[A](n: Int, a: A) { +def map[A, B](c: Counted[A])(f: A => B): Counted[B] = c match { \end_layout \begin_layout Plain Layout - def map[B](f: A => B): Counted[B] = Counted(n, f(a)) + case Counted(n, a) => Counted(n, f(a)) \end_layout \begin_layout Plain Layout @@ -3394,49 +3448,39 @@ final case class Counted[A](n: Int, a: A) { \end_inset - -\begin_inset Note Note -status open - -\begin_layout Plain Layout -The data type +But it is often more convenient to implement \begin_inset listings inline true status open \begin_layout Plain Layout -Counted[A] +map \end_layout \end_inset - may be used to describe + as a class method: +\end_layout + +\begin_layout Standard \begin_inset listings -inline true +inline false status open \begin_layout Plain Layout -n +final case class Counted[A](n: Int, a: A) { \end_layout -\end_inset - - repetitions of a given value -\begin_inset listings -inline true -status open - \begin_layout Plain Layout -a: A + def map[B](f: A => B): Counted[B] = Counted(n, f(a)) \end_layout -\end_inset +\begin_layout Plain Layout -. - +} \end_layout \end_inset @@ -3465,7 +3509,8 @@ map \end_inset -, which can be used like this: +. + Here is a usage example: \begin_inset listings inline false status open @@ -3529,13 +3574,26 @@ status open \begin_layout Plain Layout -Counted[_] +Counted \end_layout \end_inset is a polynomial type constructor. - The existence of a + Let us verify that +\begin_inset listings +inline true +status open + +\begin_layout Plain Layout + +Counted +\end_layout + +\end_inset + + is a functor. + As we already have a \begin_inset listings inline true status open @@ -3547,20 +3605,19 @@ map \end_inset - method suggests that + method for \begin_inset listings inline true status open \begin_layout Plain Layout -Counted[_] +Counted \end_layout \end_inset - is a functor. - Let us now verify that the functor laws hold for it. +, it remains to check that the functor laws hold. \end_layout \begin_layout Subsubsection @@ -3648,7 +3705,7 @@ status open \begin_layout Plain Layout -n:Int +n: Int \end_layout \end_inset @@ -3853,7 +3910,7 @@ fmap method: \begin_inset Formula \[ -\text{fmap}_{\text{Counted}}(f^{:A\rightarrow B})\triangleq\big(n^{:\text{Int}}\times a^{:A}\rightarrow n\times f(a)\big)\quad. +\text{fmap}_{\text{Counted}}(f^{:A\rightarrow B})\triangleq n^{:\text{Int}}\times a^{:A}\rightarrow n\times f(a)\quad. \] \end_inset @@ -3862,7 +3919,7 @@ To verify the identity law, we write: \begin_inset Formula \begin{align*} \text{expect to equal }\text{id}:\quad & \text{fmap}_{\text{Counted}}(\text{id})\\ -\text{definition of }\text{fmap}_{\text{Counted}}:\quad & =\big(n\times a\rightarrow n\times\gunderline{\text{id}\,(a)}\big)\\ +\text{definition of }\text{fmap}_{\text{Counted}}:\quad & =n\times a\rightarrow n\times\gunderline{\text{id}\,(a)}\\ \text{definition of }\text{id}:\quad & =\left(n\times a\rightarrow n\times a\right)=\text{id}\quad. 
\end{align*} @@ -3890,7 +3947,7 @@ To verify the composition law (for brevity, denote \text{expect to equal }\text{fmap}(f\bef g):\quad & \text{fmap}\,(f)\bef\text{fmap}\,(g)\\ \text{definition of }\text{fmap}:\quad & =\left(n\times a\rightarrow n\times f(a)\right)\bef\left(n\times b\rightarrow n\times g(b)\right)\\ \text{compute composition}:\quad & =n\times a\rightarrow n\times\gunderline{g(f(a))}\\ -\text{definition of }\left(f\bef g\right):\quad & =\left(n\times a\rightarrow n\times(f\bef g)(a)\right)=\text{fmap}\,(f\bef g)\quad. +\text{definition of }\left(f\bef g\right):\quad & =n\times a\rightarrow n\times(f\bef g)(a)=\text{fmap}\,(f\bef g)\quad. \end{align*} \end_inset @@ -4047,8 +4104,19 @@ Counter(n, a) != map_bad(Counter(n, a))(identity) == Counter(n + 1, a) \end_inset -The failure of functor laws leads to surprising behavior because a code - refactoring changes the result: +The failure of functor laws means that code involving +\begin_inset listings +inline true +status open + +\begin_layout Plain Layout + +map +\end_layout + +\end_inset + + cannot be refactored in usual ways: \begin_inset listings inline false status open @@ -4290,7 +4358,7 @@ fmap \end_inset - shown first. + shown above. \end_layout \begin_layout Standard @@ -4301,7 +4369,7 @@ status open \begin_layout Plain Layout -Vec3[_] +Vec3 \end_layout \end_inset @@ -4565,8 +4633,9 @@ fmap , we will prefer shorter and more straightforward code even if it is not tail-recursive. - Once the laws are proved for that code, the programmer may use an equivalent - but more efficient and stack-safe implementation of the same function. + Once the laws are proved for that code, the programmer may look for an + equivalent but more efficient and stack-safe implementation of the same + function. \end_layout \begin_layout Standard @@ -5108,7 +5177,7 @@ fmap \end_inset . - So, the inductive assumption says that the law already holds for any recursive + The inductive assumption says that the law already holds for any recursive calls of the function \begin_inset listings inline true @@ -5122,7 +5191,8 @@ fmap \end_inset . - We will see this pattern in all proofs of laws for recursive functions. + We will use such inductive assumptions in all proofs of laws for recursive + functions. \end_layout \begin_layout Standard @@ -5462,7 +5532,11 @@ A diagonal code matrix whose elements are identity functions will never change any values in any of the disjunctive parts. So, that matrix is equal to the identity function applied to the entire disjunctive type. - This concludes the proof of the identity law. + +\end_layout + +\begin_layout Standard +This concludes the proof of the identity law. \end_layout \begin_layout Standard @@ -5487,14 +5561,14 @@ To verify the composition law, we write (omitting types for brevity): \end_inset By the inductive assumption, the law already holds for recursive calls, - which we denoted as + which we denoted by \begin_inset Formula $\overline{\text{fmap}}$ \end_inset : \begin_inset Formula \[ -\overline{\text{fmap}}\,(g)\big(\overline{\text{fmap}}\,(f)(t)\big)=t\triangleright\overline{\text{fmap}}\,(f)\bef\overline{\text{fmap}}\,(g)=t\triangleright\overline{\text{fmap}}\,(f\bef g)\quad. +\overline{\text{fmap}}\,(g)\big(\overline{\text{fmap}}\,(f)(t)\big)=t\triangleright\overline{\text{fmap}}\,(f)\bef\overline{\text{fmap}}\,(g)\overset{!}{=}t\triangleright\overline{\text{fmap}}\,(f\bef g)\quad. 
\] \end_inset @@ -5540,7 +5614,7 @@ Define a list of odd \emph default length as a recursive type constructor -\begin_inset Formula $\text{LO}^{\bullet}$ +\begin_inset Formula $\text{LO}$ \end_inset : @@ -5579,7 +5653,7 @@ status open \begin_layout Plain Layout -LO[_] +LO \end_layout \end_inset @@ -5698,7 +5772,7 @@ LO2 \begin_layout Standard The type constructor -\begin_inset Formula $\text{LO}^{\bullet}$ +\begin_inset Formula $\text{LO}$ \end_inset is a @@ -5768,12 +5842,12 @@ status open \begin_layout Plain Layout -LO[_] +LO \end_layout \end_inset - (as well as other list-like and tree-like data types) are lawful functors. + and other list-like and tree-like data types are lawful functors. \end_layout \begin_layout Subsection @@ -5910,17 +5984,17 @@ scala> val result = for { \begin_layout Plain Layout - x = i * i // define `x` by computing i * i... + x = i * i // define `x` by computing i * i... \end_layout \begin_layout Plain Layout - product = s"$name * $name" // define `product`... + product = s"$name * $name" // define `product`... \end_layout \begin_layout Plain Layout -} yield s"$product is $x" // and add this to the `result` sequence. +} yield s"$product is $x" // and add this to the `result` sequence. \end_layout \begin_layout Plain Layout @@ -6191,11 +6265,6 @@ val result: L[B] = for { z = g(x, y) // Another computation, uses x and y. \end_layout -\begin_layout Plain Layout - - ... -\end_layout - \begin_layout Plain Layout } yield q(x, y, z) // The `yield` may use any defined variables. @@ -6248,11 +6317,6 @@ val result: L[B] = p y). \end_layout -\begin_layout Plain Layout - - ... -\end_layout - \begin_layout Plain Layout .map { case (x, y, z) => q(x, y, z) } // Here we can use x, y, and z. @@ -6361,7 +6425,7 @@ yield \end_inset - syntax is that, at first sight, functor blocks (such as this code: + syntax is that, at first sight, this code: \begin_inset listings inline false status open @@ -6374,7 +6438,7 @@ for { x <- p; ... \end_inset -appear to compute the value +appears to compute the value \begin_inset listings inline true status open @@ -6416,16 +6480,40 @@ p \emph on sequence \emph default -. - In general, the result of a functor block is a -\begin_inset Quotes eld -\end_inset +; that will be a sequence of values of the form +\begin_inset listings +inline true +status open + +\begin_layout Plain Layout + +expr(x) +\end_layout -wrapped -\begin_inset Quotes erd \end_inset - value, where the type of the + for various +\begin_inset listings +inline true +status open + +\begin_layout Plain Layout + +x +\end_layout + +\end_inset + +. + In general, the result of a functor block is a +\begin_inset Quotes eld +\end_inset + +wrapped +\begin_inset Quotes erd +\end_inset + + value, where the type of the \begin_inset Quotes eld \end_inset @@ -6517,20 +6605,8 @@ yield \end_layout \begin_layout Standard -For instance, the first line of the following functor block contains an - -\begin_inset listings -inline true -status open - -\begin_layout Plain Layout - -Option -\end_layout - -\end_inset - - value, +As another example, the first line of the following functor block contains + the value \begin_inset listings inline true status open @@ -6542,7 +6618,7 @@ Some(123) \end_inset -, as the + as the \begin_inset Quotes eld \end_inset @@ -6559,7 +6635,7 @@ status open \begin_layout Plain Layout -Option +Option[...] \end_layout \end_inset @@ -6658,7 +6734,7 @@ Option[String] \end_inset . 
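\end_layout

\begin_layout Standard
To make this concrete, here is a minimal sketch of such a functor block in the Scala interpreter (the chosen string and the printed response line are only an illustration, not code quoted from this book):
\begin_inset listings
inline false
status open

\begin_layout Plain Layout

scala> for {
\end_layout

\begin_layout Plain Layout

  x <- Some(123)       // The wrapped value has type Option[Int].
\end_layout

\begin_layout Plain Layout

} yield s"$x apples"   // The transformed value is wrapped again into an Option.
\end_layout

\begin_layout Plain Layout

res0: Option[String] = Some(123 apples)
\end_layout

\end_inset


\end_layout

\begin_layout Standard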
- Note that the expression after the + The expression after the \begin_inset Quotes eld \end_inset @@ -6703,7 +6779,7 @@ def \end_inset -s, and/or other +s, and other \begin_inset listings inline true status open @@ -6791,7 +6867,7 @@ status open \begin_layout Plain Layout -QueryResult[_] +QueryResult \end_layout \end_inset @@ -7015,7 +7091,7 @@ status open \begin_layout Plain Layout -LO[_] +LO[...] \end_layout \end_inset @@ -7068,19 +7144,19 @@ Functor blocks and functor laws \begin_layout Standard There is an important connection between the functor laws and the properties of code in functor blocks. - Consider the following code: + Consider this code: \begin_inset listings inline false status open \begin_layout Plain Layout -def f(x: Int) = x * x // Some computations. +def f(x: Int) = x * x \end_layout \begin_layout Plain Layout -def g(x: Int) = x - 1 // More computations. +def g(x: Int) = x - 1 \end_layout \begin_layout Plain Layout @@ -7104,7 +7180,7 @@ scala> for { \begin_layout Plain Layout - z = f(y) // Perform computations. + z = f(y) \end_layout \begin_layout Plain Layout @@ -7160,7 +7236,7 @@ scala> for { \begin_layout Plain Layout - z = f(x) // Perform computations. + z = f(x) \end_layout \begin_layout Plain Layout @@ -7175,8 +7251,7 @@ res1: List[Int] = List(99, 399, 899) \end_inset -Another example of refactoring that appears reasonable is to combine transformat -ions: +Another example of a reasonable refactoring is to combine transformations: \begin_inset listings inline false status open @@ -7198,7 +7273,7 @@ scala> for { \begin_layout Plain Layout - z = f(y) // Perform computations. + z = f(y) \end_layout \begin_layout Plain Layout @@ -8228,7 +8303,7 @@ status open \end_inset . - This replacement is justified as long as the + This replacement is justified if the \begin_inset listings inline true status open @@ -8395,7 +8470,7 @@ map[A, B] \end_inset - function satisfies the laws for most types + function satisfies the laws for some types \begin_inset listings inline true status open @@ -8419,7 +8494,31 @@ B \end_inset -, but violates the laws for certain specially chosen types. + but violates the laws for other +\begin_inset listings +inline true +status open + +\begin_layout Plain Layout + +A +\end_layout + +\end_inset + + and +\begin_inset listings +inline true +status open + +\begin_layout Plain Layout + +B +\end_layout + +\end_inset + +. \end_layout \begin_layout Standard @@ -8436,7 +8535,7 @@ map \begin_layout Standard Consider the type constructor -\begin_inset Formula $H^{\bullet}$ +\begin_inset Formula $H$ \end_inset defined by: @@ -8514,21 +8613,8 @@ fully parametric!function \end_inset -fully parametric function needs to treat all types as type parameters, including - the primitive type -\begin_inset listings -inline true -status open - -\begin_layout Plain Layout - -Int -\end_layout - -\end_inset - -. - So, the code +fully parametric function needs to treat all types as type parameters. + The code: \begin_inset listings inline false status open @@ -8615,7 +8701,7 @@ noprefix "false" \end_inset - that this type signature is not implementable. + that this type signature cannot be implemented by fully parametric code. So, the type constructor \begin_inset Formula $H$ \end_inset @@ -8667,7 +8753,7 @@ GADT|textit ). In this book we call a GADT a type constructor whose definition sets type - parameters to certain specific types. + parameters to specific types. 
This makes it impossible to implement \begin_inset listings inline true @@ -8833,7 +8919,7 @@ status open \begin_layout Plain Layout -ServerAction[A] +ServerAction \end_layout \end_inset @@ -9008,7 +9094,7 @@ ServerAction[R] \end_inset - has only the two parts with the specified types. + has only two parts with the specified types. So, we cannot create a value of type \begin_inset listings inline true @@ -9081,7 +9167,7 @@ status open \begin_layout Plain Layout -ServerAction[R] +ServerAction \end_layout \end_inset @@ -9094,7 +9180,7 @@ status open \begin_layout Plain Layout -ServerAction[_] +ServerAction \end_layout \end_inset @@ -9117,21 +9203,7 @@ map \end_inset - does not lead to problems (see Chapter -\begin_inset space ~ -\end_inset - - -\begin_inset CommandInset ref -LatexCommand ref -reference "chap:Free-type-constructions" -plural "false" -caps "false" -noprefix "false" - -\end_inset - -). + does not lead to problems. \end_layout \begin_layout Paragraph @@ -9260,15 +9332,23 @@ The law must hold for arbitrary functions \begin_inset Formula $q(a)$ \end_inset - and thus is not equal to the original function + and is not equal to the original function \begin_inset Formula $q$ \end_inset . So, the result of evaluating the expression +\begin_inset Quotes eld +\end_inset + + \begin_inset Formula $\text{map}(q\times a)(\text{id})$ \end_inset + +\begin_inset Quotes erd +\end_inset + is not always equal to the original value \begin_inset Formula $q\times a$ \end_inset @@ -9290,8 +9370,8 @@ map \end_inset - function is the only available implementation of the required type signature, - we conclude that + function is the only available fully parametric implementation of the required + type signature, we conclude that \begin_inset Formula $Q$ \end_inset @@ -9380,8 +9460,7 @@ status open \end_inset -; we could say that it fails to preserve information about the order of - those values. +; it fails to preserve information about the order of those values. The functor identity law does not hold: \begin_inset Formula \begin{align*} @@ -9445,11 +9524,11 @@ map that reorders some parts of a tuple and duplicates other parts. The correct implementation preserves the order of parts in a tuple and - does not duplicate or omit any parts. + neither duplicates nor omits any data. \end_layout \begin_layout Standard -Another case of an an incorrect implementation is the following +Another case of an incorrect implementation is this \begin_inset listings inline true status open @@ -9468,7 +9547,7 @@ status open \begin_layout Plain Layout -Option[_] +Option \end_layout \end_inset @@ -9506,7 +9585,7 @@ status open \begin_layout Plain Layout -Option[_] +Option \end_layout \end_inset @@ -9546,7 +9625,7 @@ status open \begin_layout Plain Layout -List[_] +List \end_layout \end_inset @@ -9583,7 +9662,7 @@ status open \begin_layout Plain Layout -List[_] +List \end_layout \end_inset @@ -9716,8 +9795,8 @@ def equalTypes[A: TypeTag, B: TypeTag]: Boolean = getType[A] =:= getType[B] \begin_layout Plain Layout -def fmap_bad[A: TypeTag, B: TypeTag](f: A => B)(oa: Option[A]): Option[B] - = oa match { +def fmap_bad[A: TypeTag, B: TypeTag](f: A => B): Option[A] => Option[B] + = { \end_layout \begin_layout Plain Layout @@ -9818,6 +9897,47 @@ A = B \end_inset +. 
+ To see that, we use functions +\begin_inset listings +inline true +status open + +\begin_layout Plain Layout + +(_ + +\begin_inset Quotes eld +\end_inset + + a +\begin_inset Quotes erd +\end_inset + +) +\end_layout + +\end_inset + + and +\begin_inset listings +inline true +status open + +\begin_layout Plain Layout + +(_ + +\begin_inset Quotes eld +\end_inset + + b +\begin_inset Quotes erd +\end_inset + +) +\end_layout + +\end_inset + : \begin_inset listings inline false @@ -9885,24 +10005,33 @@ map \end_inset - function that would obey the laws. - It is not precise to say that, e.g., the type constructor + function that obeys the laws. + When we say that, e.g., the type constructor \begin_inset listings inline true status open \begin_layout Plain Layout -Vec3[_] +Vec3 \end_layout \end_inset - is -\emph on -by itself -\emph default - a functor: being a functor depends on having a lawful + +\begin_inset Quotes eld +\end_inset + +is a functor +\begin_inset Quotes erd +\end_inset + +, we will mean +\begin_inset Note Note +status collapsed + +\begin_layout Plain Layout +: being a functor depends on having a lawful \begin_inset listings inline true status open @@ -9915,14 +10044,14 @@ map \end_inset function. - Keeping that in mind, we will say that the type constructor + Keeping that in mind, we will still say that the type constructor \begin_inset listings inline true status open \begin_layout Plain Layout -Vec3[_] +Vec3 \end_layout \end_inset @@ -9935,7 +10064,12 @@ is \begin_inset Quotes erd \end_inset - a functor, meaning that a suitable lawful implementation of + a functor, meaning +\end_layout + +\end_inset + + that a lawful implementation of \begin_inset listings inline true status open @@ -10488,8 +10622,12 @@ status open \begin_layout Plain Layout -val f: OnlyA[Int, Int] => Int = { case OnlyA(Left(a)) => a; case OnlyA(Right(a)) - => a } +val f: OnlyA[Int, Int] => Int = { case OnlyA(Left(a)) => a +\end_layout + +\begin_layout Plain Layout + + case OnlyA(Right(a)) => a } \end_layout \begin_layout Plain Layout @@ -10680,23 +10818,51 @@ res7: Set[IgnoreB[Int,Int]] = Set(IgnoreB(0,0)) \end_layout \begin_layout Standard -The functor laws for a type constructor -\begin_inset Formula $L^{\bullet}$ +The functor laws +\begin_inset space ~ \end_inset - do not require that the types -\begin_inset Formula $A,B$ +( +\begin_inset CommandInset ref +LatexCommand ref +reference "eq:f-identity-law-functor-fmap" +plural "false" +caps "false" +noprefix "false" + +\end_inset + +)–( +\begin_inset CommandInset ref +LatexCommand ref +reference "eq:f-composition-law-functor-fmap" +plural "false" +caps "false" +noprefix "false" + +\end_inset + +) for a type constructor +\begin_inset Formula $L$ \end_inset - used in the function: + are imposed on the function: \begin_inset Formula \[ -\text{fmap}_{L}:\left(A\rightarrow B\right)\rightarrow L^{A}\rightarrow L^{B} +\text{fmap}_{L}:\left(A\rightarrow B\right)\rightarrow L^{A}\rightarrow L^{B}\quad. \] \end_inset -should have a mathematically lawful definition of the +The functor laws do not require that the types +\begin_inset Formula $A$ +\end_inset + + and +\begin_inset Formula $B$ +\end_inset + + should have an \begin_inset listings inline true status open @@ -10708,8 +10874,13 @@ equals \end_inset - method (or of any other operation). - The + method (or any other operation). 
+ +\begin_inset Note Note +status open + +\begin_layout Plain Layout +The \begin_inset listings inline true status open @@ -10722,67 +10893,27 @@ map \end_inset method of a functor -\begin_inset Formula $L^{\bullet}$ +\begin_inset Formula $L$ \end_inset must be \series bold lawful \series default - -\begin_inset Index idx -status open - -\begin_layout Plain Layout -lawful functor -\end_layout - +, i.e., must satisfy the functor laws for all types +\begin_inset Formula $A,B$ \end_inset - -\begin_inset Index idx -status open - -\begin_layout Plain Layout -functor!laws of +. \end_layout \end_inset -, i.e., must satisfy the functor laws -\begin_inset space ~ -\end_inset - -( -\begin_inset CommandInset ref -LatexCommand ref -reference "eq:f-identity-law-functor-fmap" -plural "false" -caps "false" -noprefix "false" - -\end_inset - -)–( -\begin_inset CommandInset ref -LatexCommand ref -reference "eq:f-composition-law-functor-fmap" -plural "false" -caps "false" -noprefix "false" - -\end_inset - -) for all types -\begin_inset Formula $A,B$ -\end_inset - -. The functor laws must hold even if a type \begin_inset Formula $A$ \end_inset -'s implementation of some operations violate some other laws. +'s implementations of some other operations violate some other laws. For this reason, \begin_inset listings inline true @@ -10790,7 +10921,7 @@ status open \begin_layout Plain Layout -Set[_] +Set \end_layout \end_inset @@ -10937,7 +11068,7 @@ K \end_inset - that have lawful + that have a lawful \begin_inset listings inline true status open @@ -10949,7 +11080,19 @@ equals \end_inset - operations, and the functor laws will hold. + operation, and the functor laws will hold for those +\begin_inset listings +inline true +status open + +\begin_layout Plain Layout + +K +\end_layout + +\end_inset + +. \end_layout \begin_layout Subsection @@ -10979,7 +11122,7 @@ noprefix "false" \end_inset , the type constructor -\begin_inset Formula $H^{\bullet}$ +\begin_inset Formula $H$ \end_inset defined by @@ -11007,6 +11150,11 @@ map \end_inset + +\begin_inset Note Note +status collapsed + +\begin_layout Plain Layout To see why, begin writing the code with a typed hole: \begin_inset Formula \[ @@ -11049,7 +11197,7 @@ Int \end_inset . - However, it would be possible to apply a function of type + However, it is possible to apply a function of type \begin_inset Formula $B\rightarrow A$ \end_inset @@ -11062,7 +11210,12 @@ Int \end_inset . 
- So, we can implement a function called + So, we can +\end_layout + +\end_inset + +However, it is possible to implement a function called \begin_inset listings inline true status open @@ -11074,7 +11227,7 @@ contramap \end_inset - with a different type signature where the function type is + with a different type signature ,where the function type is \begin_inset Formula $B\rightarrow A$ \end_inset @@ -11137,8 +11290,8 @@ cmap as: \begin_inset Formula \begin{align} -\text{cmap}^{A,B} & :\left(B\rightarrow A\right)\rightarrow H^{A}\rightarrow H^{B}\quad,\nonumber \\ -\text{cmap} & \triangleq f^{:B\rightarrow A}\rightarrow h^{:A\rightarrow\text{Int}}\rightarrow\left(f\bef h\right)^{:B\rightarrow\text{Int}}\quad.\label{eq:f-example-1-contrafmap} + & \text{cmap}^{A,B}:\left(B\rightarrow A\right)\rightarrow H^{A}\rightarrow H^{B}\quad,\nonumber \\ + & \text{cmap}\triangleq f^{:B\rightarrow A}\rightarrow h^{:A\rightarrow\text{Int}}\rightarrow\left(f\bef h\right)^{:B\rightarrow\text{Int}}\quad.\label{eq:f-example-1-contrafmap} \end{align} \end_inset @@ -11311,7 +11464,7 @@ cmap \end_inset - is called a + satisfying the identity law and the composition law is called a \series bold contrafunctor \series default @@ -11325,7 +11478,16 @@ contrafunctor \end_inset - if the identity and the composition laws are satisfied. + (short for +\begin_inset Quotes eld +\end_inset + +contravariant functor +\begin_inset Quotes erd +\end_inset + +). + \end_layout \begin_layout Subsubsection @@ -11444,7 +11606,7 @@ Int . To avoid information loss, we need to preserve the order of the curried arguments. - So, the resulting expression is: + The resulting expression is: \begin_inset Formula \[ \text{contramap}^{A,B}\triangleq d^{:A\rightarrow A\rightarrow\text{Int}}\rightarrow f^{:B\rightarrow A}\rightarrow b_{1}^{:B}\rightarrow b_{2}^{:B}\rightarrow d\left(f(b_{1})\right)\left(f(b_{2})\right)\quad. @@ -11465,7 +11627,7 @@ def contramap[A, B](d: A => A => Int)(f: B => A): B => B => Int = { b1 => \end_inset -To verify the laws, it is easier to use the equivalent +To verify the laws, it is easier to use the equivalent function \begin_inset listings inline true status open @@ -11489,8 +11651,8 @@ To verify the identity law: \begin_inset Formula \begin{align*} \text{expect to equal }\text{id}:\quad & \text{cmap}\left(\text{id}\right)\\ -\text{use Eq.~(\ref{eq:f-example-2-contrafmap})}:\quad & =d\rightarrow b_{1}\rightarrow b_{2}\rightarrow d\gunderline{\left(\text{id}\,(b_{1})\right)}\gunderline{\left(\text{id}\,(b_{2})\right)}\\ -\text{definition of }\text{id}:\quad & =d\rightarrow\gunderline{b_{1}\rightarrow b_{2}\rightarrow d(b_{1})(b_{2})}\\ +\text{use Eq.~(\ref{eq:f-example-2-contrafmap})}:\quad & =d\rightarrow b_{1}\rightarrow b_{2}\rightarrow d\,\gunderline{\left(\text{id}\,(b_{1})\right)}\gunderline{\left(\text{id}\,(b_{2})\right)}\\ +\text{definition of }\text{id}:\quad & =d\rightarrow\gunderline{b_{1}\rightarrow b_{2}\rightarrow d\left(b_{1}\right)\left(b_{2}\right)}\\ \text{simplify curried function}:\quad & =\left(d\rightarrow d\right)=\text{id}\quad. \end{align*} @@ -11659,7 +11821,7 @@ contramap \end_inset for -\begin_inset Formula $N^{\bullet}$ +\begin_inset Formula $N$ \end_inset . 
@@ -11718,7 +11880,7 @@ status open \begin_layout Plain Layout -ServerAction[_] +ServerAction \end_layout \end_inset @@ -11745,7 +11907,7 @@ status open \begin_layout Plain Layout -ServerAction[_] +ServerAction \end_layout \end_inset @@ -11769,7 +11931,7 @@ Ordinarily, applying a function of type \begin_inset Formula $Q\rightarrow R$ \end_inset - to a value of type + to a value of a different type \begin_inset Formula $P$ \end_inset @@ -11790,7 +11952,7 @@ val p: P = ??? \begin_layout Plain Layout -h(p) // Type error: need type Q but found P. +h(p) // Type error: expected type Q but found P. \end_layout \end_inset @@ -11840,7 +12002,7 @@ Programming languages that support subtyping allow us to use a value of \end_layout \begin_layout Standard -We may imagine that the language's compiler can automatically convert values +We may imagine that the language's compiler automatically converts values of type \begin_inset Formula $P$ \end_inset @@ -11849,7 +12011,7 @@ We may imagine that the language's compiler can automatically convert values \begin_inset Formula $Q$ \end_inset - using a fixed, designated + using a fixed, compiler-provided \begin_inset Quotes eld \end_inset @@ -11857,11 +12019,11 @@ conversion function \begin_inset Quotes erd \end_inset - (of type + of type \begin_inset Formula $P\rightarrow Q$ \end_inset -) that is somehow already available. +. It is convenient to \emph on define @@ -12230,44 +12392,41 @@ This is written in the code notation as: \end_inset -The code +Compare the code \begin_inset Formula $\text{p2q}\,(f)=t^{:\text{Two}}\rightarrow f(t)$ \end_inset - is almost the same as -\begin_inset Formula $\text{p2q}\,(f)=f$ -\end_inset - -, except that -\begin_inset Formula $f$ + with the code of an identity function, +\begin_inset Formula $\text{id}\left(f\right)=f=t\rightarrow f(t)$ \end_inset - is applied to values of type +. + We see that the code of \begin_inset listings inline true status open \begin_layout Plain Layout -Two +p2q \end_layout \end_inset -. - So, the code of + an identity function that just reassigns the type of its argument to be + \begin_inset listings inline true status open \begin_layout Plain Layout -p2q +Two \end_layout \end_inset - is the same as the code of an identity function. +. \end_layout \begin_layout Standard @@ -12277,7 +12436,7 @@ In these cases, it is easy for the compiler to insert the appropriate conversion \begin_inset Formula $Q$ \end_inset - will then automatically apply to arguments of type + will be then automatically compatible with arguments of type \begin_inset Formula $P$ \end_inset @@ -12312,13 +12471,19 @@ extends another class. Second, one may declare type parameters with a -\begin_inset Quotes eld -\end_inset +\begin_inset Index idx +status open +\begin_layout Plain Layout variance annotation -\begin_inset Quotes erd +\end_layout + \end_inset + +\series bold +variance annotation +\series default such as \begin_inset listings inline true @@ -12345,11 +12510,17 @@ L[-B] . Third, one may declare type parameters with a -\begin_inset Quotes eld -\end_inset +\series bold +subtyping annotation +\series default +\begin_inset Index idx +status open + +\begin_layout Plain Layout subtyping annotation -\begin_inset Quotes erd +\end_layout + \end_inset ( @@ -12570,7 +12741,7 @@ every \emph on no \emph default - values, so a conversion function + values, so the conversion function \begin_inset listings inline true status open @@ -12600,7 +12771,7 @@ noprefix "false" \end_inset -) can never be actually applied. 
+) exists but never actually needs to be applied. The Scala compiler recognizes this automatically. \end_layout @@ -12907,19 +13078,19 @@ F[A] is covariant or contravariant, but the features of subtype-co(ntra)variance are not activated automatically. - For that, the programmer needs to use a + To do that, the programmer needs to use a \begin_inset Index idx status open \begin_layout Plain Layout -subtyping variance annotation +variance annotation \end_layout \end_inset \series bold -subtyping variance annotation +variance annotation \series default , which looks like \begin_inset listings @@ -12933,7 +13104,7 @@ F[+A] \end_inset -, on the relevant type parameters. +. \end_layout \begin_layout Standard @@ -12944,7 +13115,7 @@ status open \begin_layout Plain Layout -Counted[A] +Counted \end_layout \end_inset @@ -12975,19 +13146,7 @@ map \end_inset - method), and so it is covariant in its type parameter -\begin_inset listings -inline true -status open - -\begin_layout Plain Layout - -A -\end_layout - -\end_inset - -. + method), and so it is covariant. If we write the variance annotation \begin_inset listings inline true @@ -13037,7 +13196,7 @@ Counted[Two] \end_inset - will be considered a subtype of + will be automatically recognized a subtype of \begin_inset listings inline true status open @@ -13149,12 +13308,9 @@ F[-A] \end_layout \begin_layout Standard -It is important that the covariance or contravariance of a type constructor - is determined by its type structure alone, and -\emph on -not -\emph default - by its subtyping properties. +It is important that the covariance/contravariance properties of a type + constructor are determined by its type structure alone, regardless of variance + annotations or subtyping. For instance, \begin_inset listings inline true @@ -13167,7 +13323,7 @@ Counted[A] \end_inset - is covariant because it can have a lawful + is covariant because we may implement a lawful \begin_inset listings inline true status open @@ -13179,8 +13335,21 @@ map \end_inset - method, whether or not we use subtyping. - The subtyping variance annotation + method for +\begin_inset listings +inline true +status open + +\begin_layout Plain Layout + +Counted +\end_layout + +\end_inset + +. + (This property holds also in programming languages that do not support + subtyping.) The variance annotation \begin_inset listings inline true status open @@ -13192,8 +13361,8 @@ Counted[+A] \end_inset - merely tells the Scala compiler to activate the subtyping features for - + merely tells the Scala compiler to activate the automatic subtyping features + for \begin_inset listings inline true status open @@ -13206,9 +13375,21 @@ Counted \end_inset . - It is a type error if the subtyping variance annotation specified by the - programmer does not match the actual covariance or contravariance of the - type constructor: + Before doing that, the Scala compiler will check that +\begin_inset listings +inline true +status open + +\begin_layout Plain Layout + +Counted +\end_layout + +\end_inset + + is in fact covariant. + It will be a type error if a variance annotation specified by the programmer + does not match the actual covariance or contravariance of the type constructor: \begin_inset listings inline false status open @@ -13229,7 +13410,7 @@ final case class C[+A](run: A => Unit) // Compile-time error. \end_inset -This type constructor +The type constructor \begin_inset listings inline true status open @@ -13255,7 +13436,7 @@ cmap \end_inset function for it. 
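\end_layout

\begin_layout Standard
As an illustration (this is not the book's own code), one possible lawful cmap for this type, written here for C declared without any variance annotation, is:
\begin_inset listings
inline false
status open

\begin_layout Plain Layout

final case class C[A](run: A => Unit)
\end_layout

\begin_layout Plain Layout

def cmap[A, B](f: B => A): C[A] => C[B] =
\end_layout

\begin_layout Plain Layout

  c => C(b => c.run(f(b)))  // Apply f: B => A first, then the original consumer.
\end_layout

\end_inset


\end_layout

\begin_layout Standard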
- The Scala compiler will accept a subtype contravariance annotation on + The Scala compiler will accept a contravariance annotation on \begin_inset listings inline true status open @@ -13422,7 +13603,7 @@ status open \begin_layout Plain Layout -Option[_] +Option \end_layout \end_inset @@ -13434,7 +13615,7 @@ status open \begin_layout Plain Layout -def map[A, B](oa: Option[A])(f: A => B): Option[B] = oa match { +def map[A, B](p: Option[A])(f: A => B): Option[B] = p match { \end_layout \begin_layout Plain Layout @@ -13496,7 +13677,7 @@ map \end_inset - violates the functor laws. + does not obey the functor laws. \end_layout \begin_layout Subparagraph @@ -13535,7 +13716,7 @@ status open \begin_layout Plain Layout -oa +p \end_layout \end_inset @@ -13585,20 +13766,20 @@ status open \begin_layout Plain Layout -oa == Some(123) +p == Some(123) \end_layout \end_inset . - Substitute this value of + Substitute this \begin_inset listings inline true status open \begin_layout Plain Layout -oa +p \end_layout \end_inset @@ -13610,7 +13791,7 @@ status open \begin_layout Plain Layout -map(oa)(identity) == oa +map(p)(identity) == p \end_layout \end_inset @@ -13622,13 +13803,25 @@ status open \begin_layout Plain Layout -map(oa)(identity) == Some(identity((123+1).asInstanceOf[Int])) == Some(124) - != oa +map(p)(identity) == Some(identity((123+1).asInstanceOf[Int])) == Some(124) + != p +\end_layout + +\end_inset + +This shows a violation of the functor identity law: +\begin_inset listings +inline true +status open + +\begin_layout Plain Layout + +Some(124) != Some(123) \end_layout \end_inset -This shows a violation of the functor identity law. +. \end_layout \begin_layout Subsubsection @@ -14233,11 +14426,11 @@ fmap \end_inset separately for -\begin_inset Formula $Q^{\bullet}$ +\begin_inset Formula $Q$ \end_inset and -\begin_inset Formula $\text{Data}^{\bullet}$ +\begin_inset Formula $\text{Data}$ \end_inset . @@ -14257,7 +14450,7 @@ fmap \end_inset for -\begin_inset Formula $Q^{\bullet}$ +\begin_inset Formula $Q$ \end_inset , we begin with the type signature: @@ -14502,7 +14695,7 @@ cmap (a) \series default -\begin_inset Formula $\text{Data}^{A}\triangleq\left(\bbnum 1+A\rightarrow\text{Int}\right)+(A\rightarrow A\times A\rightarrow\text{String})\quad.$ +\begin_inset Formula $\text{Data1}^{A}\triangleq\left(\bbnum 1+A\rightarrow\text{Int}\right)+(A\rightarrow A\times A\rightarrow\text{String})\quad.$ \end_inset @@ -14514,7 +14707,7 @@ cmap (b) \series default -\begin_inset Formula $\text{Data}^{A,B}\triangleq\left(A+B\right)\times\left(\left(A\rightarrow\text{Int}\right)\rightarrow B\right)\quad.$ +\begin_inset Formula $\text{Data2}^{A,B}\triangleq\left(A+B\right)\times\left(\left(A\rightarrow\text{Int}\right)\rightarrow B\right)\quad.$ \end_inset @@ -14530,7 +14723,15 @@ Solution (a) \series default The type constructor -\begin_inset Formula $\text{Data}^{A}$ +\begin_inset listings +inline true +status open + +\begin_layout Plain Layout + +Data1 +\end_layout + \end_inset is defined in Scala as: @@ -14540,7 +14741,7 @@ status open \begin_layout Plain Layout -type Data[A] = Either[Option[A] => Int, A => ((A, A)) => String] +type Data1[A] = Either[Option[A] => Int, A => ((A, A)) => String] \end_layout \end_inset @@ -14551,7 +14752,7 @@ The type parameter is always located to the left of function arrows. 
So, -\begin_inset Formula $\text{Data}^{A}$ +\begin_inset Formula $\text{Data1}^{A}$ \end_inset @@ -14563,7 +14764,15 @@ consumes \end_inset , and we expect that -\begin_inset Formula $\text{Data}^{A}$ +\begin_inset listings +inline true +status open + +\begin_layout Plain Layout + +Data1 +\end_layout + \end_inset is a contrafunctor. @@ -14586,7 +14795,7 @@ status open \begin_layout Plain Layout -def cmap[A, B](f: B => A): Data[A] => Data[B] = { +def cmap[A, B](f: B => A): Data1[A] => Data1[B] = { \end_layout \begin_layout Plain Layout @@ -14612,11 +14821,19 @@ def cmap[A, B](f: B => A): Data[A] => Data[B] = { \begin_layout Standard -\series bold -(b) -\series default - The type constructor -\begin_inset Formula $\text{Data}^{A,B}$ +\series bold +(b) +\series default + The type constructor +\begin_inset listings +inline true +status open + +\begin_layout Plain Layout + +Data2 +\end_layout + \end_inset has two type parameters, and so we need to answer the question separately @@ -14628,7 +14845,7 @@ status open \begin_layout Plain Layout -type Data[A, B] = (Either[A, B], (A => Int) => B) +type Data2[A, B] = (Either[A, B], (A => Int) => B) \end_layout \end_inset @@ -14642,7 +14859,7 @@ Begin with the type parameter \end_inset and notice that a value of type -\begin_inset Formula $\text{Data}^{A,B}$ +\begin_inset Formula $\text{Data2}^{A,B}$ \end_inset possibly contains a value of type @@ -14736,7 +14953,7 @@ fmap \end_inset of -\begin_inset Formula $\text{Data}^{A,B}$ +\begin_inset Formula $\text{Data2}^{A,B}$ \end_inset . @@ -14857,7 +15074,7 @@ status open \begin_layout Plain Layout -def fmapA[A, Z, C](f: A => C): Data[A, Z] => Data[C, Z] = { +def fmapA[A, Z, C](f: A => C): Data2[A, Z] => Data2[C, Z] = { \end_layout \begin_layout Plain Layout @@ -14892,7 +15109,7 @@ def fmapA[A, Z, C](f: A => C): Data[A, Z] => Data[C, Z] = { \begin_layout Plain Layout - (newE, newG) // This has type Data[C, Z]. + (newE, newG) // This has type Data2[C, Z]. \end_layout \begin_layout Plain Layout @@ -14903,7 +15120,7 @@ def fmapA[A, Z, C](f: A => C): Data[A, Z] => Data[C, Z] = { \end_inset This suggests that -\begin_inset Formula $\text{Data}^{A,Z}$ +\begin_inset Formula $\text{Data2}^{A,Z}$ \end_inset is covariant with respect to the type parameter @@ -14949,7 +15166,7 @@ The analysis is simpler for the type parameter because it is only used in covariant positions, never to the left of function arrows. So, we expect -\begin_inset Formula $\text{Data}^{A,B}$ +\begin_inset Formula $\text{Data2}^{A,B}$ \end_inset to be a functor with respect to @@ -14976,7 +15193,7 @@ status open \begin_layout Plain Layout -def fmapB[Z, B, C](f: B => C): Data[Z, A] => Data[Z, B] = { +def fmapB[Z, B, C](f: B => C): Data2[Z, A] => Data2[Z, B] = { \end_layout \begin_layout Plain Layout @@ -15011,7 +15228,7 @@ def fmapB[Z, B, C](f: B => C): Data[Z, A] => Data[Z, B] = { \begin_layout Plain Layout - (newE, newG) // This has type Data[C, Z]. + (newE, newG) // This has type Data2[C, Z]. 
\end_layout \begin_layout Plain Layout @@ -15026,7 +15243,7 @@ def fmapB[Z, B, C](f: B => C): Data[Z, A] => Data[Z, B] = { \begin_layout Standard The code indicates that -\begin_inset Formula $\text{Data}^{A,B}$ +\begin_inset Formula $\text{Data2}^{A,B}$ \end_inset is a functor with respect to both @@ -15698,7 +15915,7 @@ map \uwave off \noun off \color none -The laws are easier to read when using +But the laws become easier to read when using \begin_inset listings inline true status open @@ -15710,7 +15927,7 @@ map \end_inset - as a class method: + in Scala's method syntax: \family default \series default \shape default @@ -15797,7 +16014,7 @@ x.map(f) \end_inset . - Then the functor laws become: + The functor laws become: \begin_inset Formula \begin{align*} & x\triangleright\text{fmap}_{L}(\text{id})=x\quad,\\ @@ -15995,7 +16212,7 @@ x\triangleright\left(f\bef g\right)^{\uparrow L}=x\triangleright f^{\uparrow L}\ \end_inset -This equation directly represents the Scala code syntax: +This equation corresponds to the Scala code syntax: \begin_inset listings inline false status open @@ -16007,11 +16224,11 @@ x.map(f andThen g) == x.map(f).map(g) \end_inset -if we make the pipe symbol +because the pipe symbol \begin_inset Formula $\left(\triangleright\right)$ \end_inset - group weaker than the composition symbol + groups weaker than the composition symbol \begin_inset Formula $\left(\bef\right)$ \end_inset @@ -16039,8 +16256,8 @@ backward \end_layout \begin_layout Standard -The analogous notation for a contrafunctor -\begin_inset Formula $C^{\bullet}$ +The lifting notation for a contrafunctor +\begin_inset Formula $C$ \end_inset is: @@ -16071,7 +16288,7 @@ We will almost always use the forward composition translate between forward and backward notations: \begin_inset Formula \[ -f\bef g=g\circ f\quad,\quad\quad x\triangleright f=f(x)\quad,\quad\quad x\triangleright f\triangleright g=g(f(x))\quad. +f\bef g=g\circ f\quad,\quad\quad x\triangleright f\bef g=x\triangleright f\triangleright g=g(f(x))=(g\circ f)(x)\quad. \] \end_inset @@ -16091,8 +16308,7 @@ name "subsec:Bifunctors" \end_layout \begin_layout Standard -A type constructor can be a functor with respect to several type parameters. - A +A \series bold bifunctor \series default @@ -16110,7 +16326,19 @@ bifunctor \emph on two \emph default - type parameters that satisfies the functor laws with respect to both parameters. + type parameters that has lawful +\begin_inset listings +inline true +status open + +\begin_layout Plain Layout + +fmap +\end_layout + +\end_inset + + methods with respect to both parameters. \end_layout \begin_layout Standard @@ -16128,6 +16356,10 @@ F^{A,B}\triangleq A\times B\times B\quad. If we fix the type parameter \begin_inset Formula $B$ +\end_inset + + in +\begin_inset Formula $F^{A,B}$ \end_inset but let the parameter @@ -16143,7 +16375,7 @@ If we fix the type parameter \begin_inset Formula $F^{\bullet,B}$ \end_inset - is a functor with the + is a functor with the following \begin_inset listings inline true status open @@ -16167,7 +16399,7 @@ Instead of saying that \begin_inset Formula $F^{\bullet,B}$ \end_inset - is a functor, we can also say more verbosely that + is a functor, we will also say more verbosely that \begin_inset Formula $F^{A,B}$ \end_inset @@ -16180,7 +16412,11 @@ Instead of saying that \end_inset is held fixed. - + For brevity, we will prefer the notation +\begin_inset Formula $F^{\bullet,B}$ +\end_inset + +. 
\end_layout \begin_layout Standard @@ -16217,7 +16453,7 @@ fmap \begin_layout Standard Since the bifunctor -\begin_inset Formula $F^{\bullet,\bullet}$ +\begin_inset Formula $F$ \end_inset is a functor with respect to each type parameter separately, we can transform @@ -16242,7 +16478,7 @@ fmap \end_inset functions one after another. - It is convenient to denote this transformation by a single operation called + It is convenient to express that transformation by a single operation called \begin_inset listings inline true @@ -16272,30 +16508,15 @@ bimap \end_inset -In the condensed notation, this is written as: +In the lifting notation, this is written as: \begin_inset Formula \[ -\text{bimap}_{F}(f^{:A\rightarrow C})(g^{:B\rightarrow D})\triangleq f^{\uparrow F^{\bullet,B}}\bef g^{\uparrow F^{C,\bullet}}\quad, +\text{bimap}_{F}(f^{:A\rightarrow C})(g^{:B\rightarrow D})\triangleq f^{\uparrow F^{\bullet,B}}\bef g^{\uparrow F^{C,\bullet}}\quad. \] \end_inset -although in this case the longer notation in Eq. -\begin_inset space ~ -\end_inset - -( -\begin_inset CommandInset ref -LatexCommand ref -reference "eq:f-definition-of-bimap" -plural "false" -caps "false" -noprefix "false" - -\end_inset -) may be easier to reason about. - \end_layout \begin_layout Standard @@ -16418,13 +16639,20 @@ commutativity law!of bifunctors \begin{align*} & \quad\quad\text{left-hand side}:\quad\\ & \text{fmap}_{F^{\bullet,B}}(f^{:A\rightarrow C})\bef\text{fmap}_{F^{C,\bullet}}(g^{:B\rightarrow D})\\ - & \quad\quad\text{definitions of }\text{fmap}_{F^{\bullet,\bullet}}:\quad\\ + & \quad\quad\text{definitions of }\text{fmap}_{F^{\bullet,B}}\text{ and }\text{fmap}_{F^{C,\bullet}}:\quad\\ & \quad=(a^{:A}\times b_{1}^{:B}\times b_{2}^{:B}\rightarrow f(a)\times b_{1}\times b_{2})\bef(c^{:C}\times b_{1}^{:B}\times b_{2}^{:B}\rightarrow c\times g(b_{1})\times g(b_{2}))\\ & \quad\quad\text{compute composition}:\quad\\ - & \quad=a^{:A}\times b_{1}^{:B}\times b_{2}^{:B}\rightarrow f(a)\times g(b_{1})\times g(b_{2})\quad,\\ + & \quad=a^{:A}\times b_{1}^{:B}\times b_{2}^{:B}\rightarrow f(a)\times g(b_{1})\times g(b_{2})\quad. +\end{align*} + +\end_inset + + +\begin_inset Formula +\begin{align*} & \quad\quad\text{right-hand side}:\quad\\ & \text{fmap}_{F^{A,\bullet}}(g^{:B\rightarrow D})\bef\text{fmap}_{F^{\bullet,D}}(f^{:A\rightarrow C})\\ - & \quad\quad\text{definitions of }\text{fmap}_{F^{\bullet,\bullet}}:\quad\\ + & \quad\quad\text{definitions of }\text{fmap}_{F^{A,\bullet}}\text{ and }\text{fmap}_{F^{\bullet,D}}:\quad\\ & \quad=(a^{:A}\times b_{1}^{:B}\times b_{2}^{:B}\rightarrow a\times g(b_{1})\times g(b_{2}))\bef(a^{:A}\times d_{1}^{:D}\times d_{2}^{:D}\rightarrow f(a)\times d_{1}\times d_{2})\\ & \quad\quad\text{compute composition}:\quad\\ & \quad=a^{:A}\times b_{1}^{:B}\times b_{2}^{:B}\rightarrow f(a)\times g(b_{1})\times g(b_{2})\quad. @@ -16432,7 +16660,7 @@ commutativity law!of bifunctors \end_inset -Both sides of the law are equal. +Both sides of the law are now equal. 
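\end_layout

\begin_layout Standard
As an optional spot check (this snippet is only an illustration, and the names mapA and mapB are ad hoc), we may also test the commutativity law on a sample value:
\begin_inset listings
inline false
status open

\begin_layout Plain Layout

def mapA[A, B, C](f: A => C): ((A, B, B)) => ((C, B, B)) = { case (a, b1, b2) => (f(a), b1, b2) }
\end_layout

\begin_layout Plain Layout

def mapB[A, B, D](g: B => D): ((A, B, B)) => ((A, D, D)) = { case (a, b1, b2) => (a, g(b1), g(b2)) }
\end_layout

\begin_layout Plain Layout

val p = (10, "x", "yy")
\end_layout

\begin_layout Plain Layout

val path1 = mapB[Int, String, Int](_.length)(mapA[Int, String, Int](_ + 1)(p))
\end_layout

\begin_layout Plain Layout

val path2 = mapA[Int, Int, Int](_ + 1)(mapB[Int, String, Int](_.length)(p))
\end_layout

\begin_layout Plain Layout

path1 == path2   // true: both paths give (11, 1, 2).
\end_layout

\end_inset


\end_layout

\begin_layout Standard
Such a numerical check does not replace the symbolic proof, but it can catch mistakes in the code early.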
\end_layout \begin_layout Standard @@ -16497,18 +16725,14 @@ fmap functions: \begin_inset Formula \[ -\xymatrix{\xyScaleY{3.0pc}\xyScaleX{12.0pc}F^{A,B}\ar[rd]\sp(0.6){~~~\text{bimap}_{F}(f_{1})(g_{1})}\ar[r]\sp(0.4){\text{fmap}_{F^{\bullet,B}}(f_{1}^{:A\rightarrow C})}\ar[d]\sp(0.5){\text{fmap}_{F^{A,\bullet}}(g_{1}^{:B\rightarrow D})} & F^{C,B}\ar[rd]\sp(0.6){~~~\text{bimap}_{F}(f_{2})(g_{1})}\ar[r]\sp(0.4){~\text{fmap}_{F^{\bullet,B}}(f_{2}^{:C\rightarrow E})}\ar[d]\sp(0.5){\text{fmap}_{F^{C,\bullet}}(g_{1}^{:B\rightarrow D})~~~} & F^{E,B}\ar[d]\sp(0.5){\text{fmap}_{F^{E,\bullet}}(g_{1}^{:B\rightarrow D})}\\ -F^{A,D}\ar[rd]\sp(0.6){~~~\text{bimap}_{F}(f_{1})(g_{2})}\ar[r]\sp(0.4){\text{fmap}_{F^{\bullet,D}}(f_{1}^{:A\rightarrow C})}\ar[d]\sp(0.5){\text{fmap}_{F^{A,\bullet}}(g_{2}^{:D\rightarrow G})} & F^{C,D}\ar[rd]\sp(0.6){~~~\text{bimap}_{F}(f_{2})(g_{2})}\ar[r]\sp(0.4){~\text{fmap}_{F^{\bullet,D}}(f_{2}^{:C\rightarrow E})}\ar[d]\sp(0.5){\text{fmap}_{F^{C,\bullet}}(g_{2}^{:D\rightarrow G})~~~} & F^{E,D}\ar[d]\sp(0.5){\text{fmap}_{F^{E,\bullet}}(g_{2}^{:D\rightarrow G})}\\ -F^{A,G}\ar[r]\sp(0.4){~\text{fmap}_{F^{\bullet,G}}(f_{1}^{:A\rightarrow C})} & F^{C,G}\ar[r]\sp(0.4){~\text{fmap}_{F^{\bullet,G}}(f_{2}^{:C\rightarrow E})} & F^{E,G} +\xymatrix{\xyScaleY{3.0pc}\xyScaleX{11.0pc}F^{A,B}\ar[rd]\sp(0.6){~~~\text{bimap}_{F}(f_{1})(g_{1})}\ar[r]\sp(0.4){\text{fmap}_{F^{\bullet,B}}(f_{1}^{:A\rightarrow C})}\ar[d]\sp(0.5){\text{fmap}_{F^{A,\bullet}}(g_{1})} & F^{C,B}\ar[rd]\sp(0.6){~~~\text{bimap}_{F}(f_{2})(g_{1})}\ar[r]\sp(0.4){~\text{fmap}_{F^{\bullet,B}}(f_{2}^{:C\rightarrow E})}\ar[d]\sp(0.5){\text{fmap}_{F^{C,\bullet}}(g_{1})~~~} & F^{E,B}\ar[d]\sp(0.5){\text{fmap}_{F^{E,\bullet}}(g_{1}^{:B\rightarrow D})}\\ +F^{A,D}\ar[rd]\sp(0.6){~~~\text{bimap}_{F}(f_{1})(g_{2})}\ar[r]\sp(0.4){\text{fmap}_{F^{\bullet,D}}(f_{1})}\ar[d]\sp(0.5){\text{fmap}_{F^{A,\bullet}}(g_{2})} & F^{C,D}\ar[rd]\sp(0.6){~~~\text{bimap}_{F}(f_{2})(g_{2})}\ar[r]\sp(0.4){~\text{fmap}_{F^{\bullet,D}}(f_{2})}\ar[d]\sp(0.5){\text{fmap}_{F^{C,\bullet}}(g_{2})~~~} & F^{E,D}\ar[d]\sp(0.5){\text{fmap}_{F^{E,\bullet}}(g_{2}^{:D\rightarrow G})}\\ +F^{A,G}\ar[r]\sp(0.4){~\text{fmap}_{F^{\bullet,G}}(f_{1})} & F^{C,G}\ar[r]\sp(0.4){~\text{fmap}_{F^{\bullet,G}}(f_{2})} & F^{E,G} } \] \end_inset - -\end_layout - -\begin_layout Standard To derive the composition law from Eq. \begin_inset space ~ \end_inset @@ -16659,7 +16883,7 @@ noprefix "false" \end_inset -) always hold? The calculation for the example +) always hold? The calculation for \begin_inset Formula $F^{A,B}\triangleq A\times B\times B$ \end_inset @@ -17037,7 +17261,7 @@ function type \begin_inset Formula $C$ \end_inset - a contrafunctor + is a contrafunctor \end_layout \end_inset @@ -17194,7 +17418,7 @@ both \begin_layout Plain Layout Type constructions defining a functor -\begin_inset Formula $L^{A}$ +\begin_inset Formula $L$ \end_inset . 
@@ -17337,7 +17561,7 @@ fmap \end_inset - is the identity function, the laws are satisfied automatically: + is the identity function, the laws are satisfied: \begin_inset Formula \begin{align*} \text{identity law}:\quad & \text{fmap}_{\text{Id}}(\text{id})=\text{id}(\text{id})=\text{id}\quad,\\ @@ -17416,8 +17640,8 @@ fmap function is defined by: \begin_inset Formula \begin{align*} -\text{fmap}_{\text{Const}} & :\left(A\rightarrow B\right)\rightarrow\text{Const}^{Z,A}\rightarrow\text{Const}^{Z,B}\cong\left(A\rightarrow B\right)\rightarrow Z\rightarrow Z\quad,\\ -\text{fmap}_{\text{Const}}(f^{:A\rightarrow B}) & \triangleq(z^{:Z}\rightarrow z)=\text{id}^{:Z\rightarrow Z}\quad. + & \text{fmap}_{\text{Const}}:\left(A\rightarrow B\right)\rightarrow\text{Const}^{Z,A}\rightarrow\text{Const}^{Z,B}\cong\left(A\rightarrow B\right)\rightarrow Z\rightarrow Z\quad,\\ + & \text{fmap}_{\text{Const}}(f^{:A\rightarrow B})\triangleq(z^{:Z}\rightarrow z)=\text{id}^{:Z\rightarrow Z}\quad. \end{align*} \end_inset @@ -17461,15 +17685,8 @@ def fmap[A, B](f: A => B): Const[Z, A] => Const[Z, B] = identity[Z] \end_layout \begin_layout Standard -The identity functor -\begin_inset Formula $\text{Id}^{\bullet}$ -\end_inset - - and the constant functor -\begin_inset Formula $\text{Const}^{Z,\bullet}$ -\end_inset - - are not often used: their +The identity functor and the constant functor are not often used: as their + \begin_inset listings inline true status open @@ -17481,8 +17698,7 @@ fmap \end_inset - implementations are identity functions, and so they rarely provide useful - functionality. + methods are identity functions, they rarely provide useful functionality. \end_layout @@ -17532,11 +17748,11 @@ noprefix "false" \begin_layout Standard If -\begin_inset Formula $L^{\bullet}$ +\begin_inset Formula $L$ \end_inset and -\begin_inset Formula $M^{\bullet}$ +\begin_inset Formula $M$ \end_inset are two functors then the product @@ -17706,17 +17922,21 @@ Apply both sides of this equation to an arbitrary value of type \begin_inset Formula \begin{align*} \text{expect to equal }(l\times m)\triangleright(f\bef g)^{\uparrow P}:\quad & (l^{:L^{A}}\times m^{:M^{A}})\triangleright f^{\uparrow P}\gunderline{\,\bef\,}g^{\uparrow P}\\ -\triangleright\text{ notation}:\quad & =\gunderline (l^{:L^{A}}\times m^{:M^{A}}\gunderline{)\triangleright f^{\uparrow P}}\triangleright g^{\uparrow P}\\ +\triangleright\text{-notation}:\quad & =\gunderline (l^{:L^{A}}\times m^{:M^{A}}\gunderline{)\triangleright f^{\uparrow P}}\triangleright g^{\uparrow P}\\ \text{use Eq.~(\ref{eq:f-def-of-functor-product-lift})}:\quad & =\gunderline{\big(}(l\triangleright f^{\uparrow L})\times(m\triangleright f^{\uparrow M})\gunderline{\big)\triangleright g^{\uparrow P}}\\ \text{use Eq.~(\ref{eq:f-def-of-functor-product-lift})}:\quad & =(l\triangleright f^{\uparrow L}\gunderline{\,\triangleright\,}g^{\uparrow L})\times(m\triangleright f^{\uparrow M}\gunderline{\,\triangleright\,}g^{\uparrow M})\\ -\triangleright\text{ notation}:\quad & =(l\triangleright\gunderline{f^{\uparrow L}\bef g^{\uparrow L}})\times(m\triangleright\gunderline{f^{\uparrow M}\bef g^{\uparrow M}})\\ +\triangleright\text{-notation}:\quad & =(l\triangleright\gunderline{f^{\uparrow L}\bef g^{\uparrow L}})\times(m\triangleright\gunderline{f^{\uparrow M}\bef g^{\uparrow M}})\\ \text{composition laws of }L\text{ and }M:\quad & =(l\triangleright(f\bef g)^{\uparrow L})\times(m\triangleright(f\bef g)^{\uparrow M})\\ \text{use Eq.~(\ref{eq:f-def-of-functor-product-lift})}:\quad & =(l\times 
m)\triangleright(f\bef g)^{\uparrow P}\quad. \end{align*} \end_inset -The calculations are shorter if we use the pair product operation: +The calculations are shorter if we use the pair product operation ( +\begin_inset Formula $\boxtimes$ +\end_inset + +): \begin_inset Formula \begin{align*} \text{expect to equal }(f\bef g)^{\uparrow P}:\quad & f^{\uparrow P}\bef g^{\uparrow P}=(f^{\uparrow L}\boxtimes f^{\uparrow M})\bef(g^{\uparrow L}\boxtimes g^{\uparrow M})\\ @@ -17847,7 +18067,11 @@ m\triangleright f^{\uparrow M}\triangleright g^{\uparrow M}=m\triangleright f^{\ \end_inset -By the convention of the pipe notation, it groups to the left, so we have: +By convention, the pipe symbol ( +\begin_inset Formula $\triangleright$ +\end_inset + +) groups to the left, and so we can write: \begin_inset Formula \[ \left(x\triangleright f\right)\triangleright g=x\triangleright f\triangleright g=x\triangleright f\bef g=x\triangleright(f\bef g)=(f\bef g)(x)=g(f(x))\quad. @@ -17855,7 +18079,7 @@ By the convention of the pipe notation, it groups to the left, so we have: \end_inset -We will often use this notation in derivations. +We will often use the pipe notation in derivations. (Chapter \begin_inset space ~ \end_inset @@ -17870,8 +18094,7 @@ noprefix "false" \end_inset - gives an overview of the derivation techniques, including some more details - about the pipe notation.) + gives an overview of the derivation techniques.) \end_layout \begin_layout Subsubsection @@ -17897,18 +18120,22 @@ noprefix "false" \begin_layout Standard If -\begin_inset Formula $P^{A}$ +\begin_inset Formula $P$ \end_inset and -\begin_inset Formula $Q^{A}$ +\begin_inset Formula $Q$ \end_inset - are functors then + are functors then the type constructor +\begin_inset Formula $L$ +\end_inset + + defined by \begin_inset Formula $L^{A}\triangleq P^{A}+Q^{A}$ \end_inset - is a functor, with + is a functor whose \begin_inset listings inline true status open @@ -17920,7 +18147,7 @@ fmap \end_inset - defined by: + method is given by this code: \begin_inset listings inline false status open @@ -17948,7 +18175,7 @@ def fmap[A, B](f: A => B): Either[P[A], Q[A]] => Either[P[B], Q[B]] = { \end_inset The functor -\begin_inset Formula $L^{\bullet}$ +\begin_inset Formula $L$ \end_inset is the @@ -17966,15 +18193,15 @@ functor co-product \end_inset of -\begin_inset Formula $P^{\bullet}$ +\begin_inset Formula $P$ \end_inset and -\begin_inset Formula $Q^{\bullet}$ +\begin_inset Formula $Q$ \end_inset . 
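\end_layout

\begin_layout Standard
For instance (the choice of Option and List and the name fmapL are arbitrary and used only for this illustration), the co-product construction can be instantiated as:
\begin_inset listings
inline false
status open

\begin_layout Plain Layout

type L[A] = Either[Option[A], List[A]]
\end_layout

\begin_layout Plain Layout

def fmapL[A, B](f: A => B): L[A] => L[B] = {
\end_layout

\begin_layout Plain Layout

  case Left(pa)  => Left(pa.map(f))    // Use the map method of Option.
\end_layout

\begin_layout Plain Layout

  case Right(qa) => Right(qa.map(f))   // Use the map method of List.
\end_layout

\begin_layout Plain Layout

}
\end_layout

\begin_layout Plain Layout

fmapL[Int, Int](_ * 10)(Right(List(1, 2, 3)))   // Right(List(10, 20, 30))
\end_layout

\end_inset


\end_layout

\begin_layout Standard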
- The code notation for the + The code notation for \begin_inset listings inline true status open @@ -17986,7 +18213,7 @@ fmap \end_inset - function is: + is: \begin_inset Formula \[ \text{fmap}_{L}(f^{:A\rightarrow B})=f^{\uparrow L}\triangleq\,\begin{array}{|c||cc|} @@ -18167,7 +18394,7 @@ fmap \end_inset - replaces each occurrence of the a value of type + replaces each occurrence of a value of type \begin_inset Formula $A$ \end_inset @@ -18430,12 +18657,12 @@ To verify the composition law of \end_layout \begin_layout Standard -It is important for this proof that the order of function compositions is - reversed when lifting to a contrafunctor +This proof uses the fact that the order of function compositions is reversed + when lifting to a contrafunctor \begin_inset Formula $C$ \end_inset -: + as in \begin_inset Formula $(f\bef g)^{\downarrow C}=g^{\downarrow C}\bef f^{\downarrow C}$ \end_inset @@ -18486,11 +18713,11 @@ functor exponential \end_inset (with the contrafunctor -\begin_inset Formula $C^{A}$ +\begin_inset Formula $C$ \end_inset - chosen as the constant contrafunctor -\begin_inset Formula $Z$ + chosen as the constant contrafunctor, +\begin_inset Formula $C^{A}\triangleq Z$ \end_inset , where @@ -18521,10 +18748,10 @@ noprefix "false" \end_inset generalizes those examples to arbitrary contrafunctors -\begin_inset Formula $C^{A}$ +\begin_inset Formula $C$ \end_inset - used as arguments of function types. +. \end_layout \begin_layout Standard @@ -18547,7 +18774,10 @@ noprefix "false" \end_inset ). - Together with Statements +\end_layout + +\begin_layout Standard +Together with Statements \begin_inset space ~ \end_inset @@ -18571,8 +18801,8 @@ noprefix "false" \end_inset -, this gives us the rules of reasoning about covariance and contravariance - of type parameters in arbitrary type expressions. +, this gives us rules of reasoning about covariance and contravariance of + type parameters in arbitrary type expressions. Every function arrow ( \begin_inset Formula $\rightarrow$ \end_inset @@ -18594,7 +18824,8 @@ noprefix "false" \begin_inset Formula $A$ \end_inset -, and +. + But \begin_inset Formula $\left(A\rightarrow Z\right)\rightarrow Z$ \end_inset @@ -18603,7 +18834,11 @@ noprefix "false" \end_inset . - As we have seen, + +\end_layout + +\begin_layout Standard +As we have seen, \begin_inset Formula $A\rightarrow A\rightarrow Z$ \end_inset @@ -18617,7 +18852,7 @@ noprefix "false" \end_inset ). - Products and disjunctions do not change variance, so + Products and co-products preserve variance; for example, \begin_inset Formula $\left(A\rightarrow Z_{1}\right)\times\left(A\rightarrow Z_{2}\right)+\left(A\rightarrow Z_{3}\right)$ \end_inset @@ -18626,7 +18861,7 @@ noprefix "false" \end_inset . - This is shown in more detail in Section + More examples illustrating these techniques are given in Section \begin_inset space ~ \end_inset @@ -18640,12 +18875,11 @@ noprefix "false" \end_inset -. + below. \end_layout \begin_layout Standard -The remaining constructions set a type parameter to another type. - The +The remaining construction is the \series bold functor composition \series default @@ -18675,7 +18909,8 @@ P[Q[A]] \end_inset -, is analogous to a function composition such as +. + This is analogous to a function composition such as \begin_inset Formula $f(g(x))$ \end_inset @@ -18694,12 +18929,12 @@ type-level function \end_inset - (i.e., mappings of types). - So, functor composition may be denoted by + (i.e., mappings from types to types). 
+ So, we will denote functor composition by \begin_inset Formula $P\circ Q$ \end_inset -, like the function composition +, similarly to the function composition \begin_inset Formula $f\circ g$ \end_inset @@ -18831,7 +19066,7 @@ status open \begin_layout Plain Layout -List[Option[_]] +List[Option[A]] \end_layout \end_inset @@ -18890,11 +19125,11 @@ noprefix "false" \begin_layout Standard If -\begin_inset Formula $P^{A}$ +\begin_inset Formula $P$ \end_inset and -\begin_inset Formula $Q^{A}$ +\begin_inset Formula $Q$ \end_inset are functors then @@ -18941,7 +19176,7 @@ In the code notation, \begin_inset Formula $\text{fmap}_{L}$ \end_inset - is written equivalently as: + is written as: \begin_inset Formula \begin{align} \text{type signature}:\quad & \text{fmap}_{L}:f^{:A\rightarrow B}\rightarrow P^{Q^{A}}\rightarrow P^{Q^{B}}\quad,\nonumber \\ @@ -19001,6 +19236,10 @@ To verify the composition law of \end_inset +\begin_inset Formula $\square$ +\end_inset + + \end_layout \begin_layout Standard @@ -19072,7 +19311,7 @@ final case class Branch[A](x: Tree2[A], y: Tree2[A]) extends Tree2[A] \end_inset -is defined by +is denoted by \begin_inset Formula $\text{Tree}_{2}^{A}\triangleq A+\text{Tree}_{2}^{A}\times\text{Tree}_{2}^{A}$ \end_inset @@ -19118,11 +19357,11 @@ recursion scheme . If a recursion scheme -\begin_inset Formula $S^{\bullet,\bullet}$ +\begin_inset Formula $S$ \end_inset is given, the Scala code defining -\begin_inset Formula $L^{\bullet}$ +\begin_inset Formula $L$ \end_inset can be written as: @@ -19133,7 +19372,7 @@ status open \begin_layout Plain Layout type S[A, R] = ... - // Must be defined previously as type alias, class, or trait. + // Must be defined as type alias, class, or trait. \end_layout \begin_layout Plain Layout @@ -19155,7 +19394,7 @@ status open \begin_layout Plain Layout -L +L[A] \end_layout \end_inset @@ -19211,16 +19450,16 @@ noprefix "false" summarizes our previous examples of recursive disjunctive types and shows the relevant choices of -\begin_inset Formula $S^{A,R}$ +\begin_inset Formula $S$ \end_inset , which turns out to be always a bifunctor. For abstract syntax trees, the functors -\begin_inset Formula $P^{\bullet}$ +\begin_inset Formula $P$ \end_inset and -\begin_inset Formula $Q^{\bullet}$ +\begin_inset Formula $Q$ \end_inset must be given; they specify the available shapes of leaves and branches @@ -19615,7 +19854,7 @@ noprefix "false" \end_inset ) always defines a functor when -\begin_inset Formula $S^{\bullet,\bullet}$ +\begin_inset Formula $S$ \end_inset is a bifunctor. @@ -19685,19 +19924,11 @@ noprefix "false" \begin_layout Standard If -\begin_inset Formula $S^{A,B}$ -\end_inset - - is a bifunctor (a functor with respect to both type parameters -\begin_inset Formula $A$ -\end_inset - - and -\begin_inset Formula $B$ +\begin_inset Formula $S$ \end_inset -) then the recursively defined type constructor -\begin_inset Formula $L^{A}$ + is a bifunctor then the recursively defined type constructor +\begin_inset Formula $L$ \end_inset is a lawful functor: @@ -19747,7 +19978,7 @@ status open \begin_layout Plain Layout -final case class L[A](x: S[A, L[A]]) // Must define S[_, _] previously. +final case class L[A](x: S[A, L[A]]) // Must have defined S[_, _] before. \end_layout \begin_layout Plain Layout @@ -19809,13 +20040,13 @@ recursive function (such as \begin_inset Formula $\text{fmap}_{L}$ \end_inset -) must be proved by induction. - In the recursive implementation of +) are proved by induction. 
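 For concreteness, here is a self-contained sketch of ours that instantiates this construction with the binary-tree recursion scheme 
\begin_inset Formula $S^{A,R}\triangleq A+R\times R$
\end_inset

:
\begin_inset listings
inline false
status open

\begin_layout Plain Layout

// A sketch: the recursion scheme S[A, R] = A + R x R and the functor L[A] = S[A, L[A]].
\end_layout

\begin_layout Plain Layout

sealed trait S[A, R]
\end_layout

\begin_layout Plain Layout

final case class SLeaf[A, R](a: A) extends S[A, R]
\end_layout

\begin_layout Plain Layout

final case class SBranch[A, R](left: R, right: R) extends S[A, R]
\end_layout

\begin_layout Plain Layout

\end_layout

\begin_layout Plain Layout

final case class L[A](x: S[A, L[A]])
\end_layout

\begin_layout Plain Layout

\end_layout

\begin_layout Plain Layout

def bimapS[A, B, R, T](f: A => B)(g: R => T): S[A, R] => S[B, T] = {
\end_layout

\begin_layout Plain Layout

  case SLeaf(a)      => SLeaf(f(a))
\end_layout

\begin_layout Plain Layout

  case SBranch(l, r) => SBranch(g(l), g(r))
\end_layout

\begin_layout Plain Layout

}
\end_layout

\begin_layout Plain Layout

\end_layout

\begin_layout Plain Layout

def fmapL[A, B](f: A => B): L[A] => L[B] =
\end_layout

\begin_layout Plain Layout

  la => L(bimapS(f)(fmapL(f))(la.x))  // fmapL(f) is applied recursively only inside SBranch values.
\end_layout

\end_inset
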
+ The code of a recursive implementation of \begin_inset Formula $\text{fmap}_{L}$ \end_inset -, its code calls itself in some cases but returns without recursive calls - in other cases. + calls itself in some cases but returns without recursive calls in other + cases. So, the base case of induction corresponds to the non-recursive evaluations in the code of \begin_inset Formula $\text{fmap}_{L}$ @@ -19837,7 +20068,7 @@ recursive function (such as \end_layout \begin_layout Standard -For clarity, we add an overline to recursive calls in the code formula: +For clarity, we add an overline to recursive calls in the code formulas: \begin_inset Formula \[ \text{fmap}_{L}(f)\triangleq\text{bimap}_{S}(f)(\overline{\text{fmap}_{L}}(f))\quad. @@ -19876,7 +20107,7 @@ To prove the composition law: \end_layout \begin_layout Standard -For the perfect-shaped binary tree, the construction +For a perfect-shaped binary tree, the construction \begin_inset space ~ \end_inset @@ -19907,12 +20138,12 @@ noprefix "false" \end_inset . - To see that, consider that + Indeed, \begin_inset Formula $S^{A,L^{A}}$ \end_inset is an application of a type-level function -\begin_inset Formula $S^{\bullet,\bullet}$ +\begin_inset Formula $S$ \end_inset to its two type parameters, which are set to @@ -19935,7 +20166,7 @@ status open \begin_layout Plain Layout -S[A,L[A]] +S[A, L[A]] \end_layout \end_inset @@ -19952,7 +20183,7 @@ status open \begin_layout Plain Layout -S[A,L[A]] +S[A, L[A]] \end_layout \end_inset @@ -19988,7 +20219,7 @@ status open \begin_layout Plain Layout -L[(A,A)] +L[(A, A)] \end_layout \end_inset @@ -19998,16 +20229,25 @@ L[(A,A)] \end_layout \begin_layout Standard -To describe perfect-shaped trees, we need to modify the construction by - adding another arbitrary functor, -\begin_inset Formula $P^{\bullet}$ +To describe perfect-shaped trees, we need to modify Eq. +\begin_inset space ~ \end_inset -, in the type argument of -\begin_inset Formula $L^{\bullet}$ +( +\begin_inset CommandInset ref +LatexCommand ref +reference "eq:f-def-recursive-functor" +plural "false" +caps "false" +noprefix "false" + \end_inset -: +) by introducing another arbitrary functor ( +\begin_inset Formula $P$ +\end_inset + +) like this: \begin_inset Formula \begin{equation} L^{A}\triangleq S^{A,L^{P^{A}}}\quad.\label{eq:f-def-recursive-functor-2} @@ -20015,7 +20255,24 @@ L^{A}\triangleq S^{A,L^{P^{A}}}\quad.\label{eq:f-def-recursive-functor-2} \end_inset -Perfect-shaped binary trees are defined by Eq. +The Scala syntax for +\begin_inset Formula $S^{A,L^{P^{A}}}$ +\end_inset + + is +\begin_inset listings +inline true +status open + +\begin_layout Plain Layout + +S[A, L[P[A]]] +\end_layout + +\end_inset + +. + Perfect-shaped binary trees are defined by Eq. \begin_inset space ~ \end_inset @@ -20066,7 +20323,7 @@ final case class L[A](s: S[A, L[P[A]]]) \end_inset Different choices of -\begin_inset Formula $P^{\bullet}$ +\begin_inset Formula $P$ \end_inset will define perfect-shaped trees with different kinds of branching. @@ -20358,7 +20615,7 @@ function type \begin_inset Formula $H$ \end_inset - a contrafunctor + is a contrafunctor \end_layout \end_inset @@ -20520,7 +20777,7 @@ recursive type \begin_layout Plain Layout Type constructions defining a contrafunctor -\begin_inset Formula $C^{A}$ +\begin_inset Formula $C$ \end_inset . 
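 To make the simplest entry of this table concrete, here is a sketch of ours: the contrafunctor 
\begin_inset Formula $C^{A}\triangleq A\rightarrow\text{Int}$
\end_inset

 with its cmap:
\begin_inset listings
inline false
status open

\begin_layout Plain Layout

// A sketch: the contrafunctor C[A] = A => Int and its cmap.
\end_layout

\begin_layout Plain Layout

type C[A] = A => Int
\end_layout

\begin_layout Plain Layout

\end_layout

\begin_layout Plain Layout

def cmapC[A, B](f: B => A): C[A] => C[B] = c => f andThen c
\end_layout

\begin_layout Plain Layout

\end_layout

\begin_layout Plain Layout

val lengthC: C[String] = _.length
\end_layout

\begin_layout Plain Layout

\end_layout

\begin_layout Plain Layout

scala> cmapC((n: Int) => n.toString)(lengthC)(12345)  // Count the decimal digits.
\end_layout

\begin_layout Plain Layout

res0: Int = 5
\end_layout

\end_inset
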
@@ -20679,11 +20936,11 @@ noprefix "false" \begin_layout Standard If -\begin_inset Formula $P^{A}$ +\begin_inset Formula $P$ \end_inset is a functor and -\begin_inset Formula $Q^{A}$ +\begin_inset Formula $Q$ \end_inset is a contrafunctor then @@ -20702,7 +20959,7 @@ cmap \end_inset - defined by + defined by: \begin_inset listings inline false status open @@ -20778,7 +21035,8 @@ cmap \end_inset -It is easier to reason about this function if we rewrite it as: +It is easier to reason about this function if we rewrite the previous line + as: \begin_inset Formula \[ f^{\downarrow L}\triangleq\big(f^{\downarrow Q}\big)^{\uparrow P}\quad. @@ -20801,24 +21059,31 @@ The contrafunctor laws for \end_inset +\begin_inset Formula $\square$ +\end_inset + + \end_layout \begin_layout Standard -Finally, the recursive construction works for contrafunctors, except that - the type constructor +The recursive construction is the same for contrafunctors, except that \begin_inset Formula $S^{A,R}$ \end_inset - must be a contrafunctor in + must be a contravariant in \begin_inset Formula $A$ \end_inset - (but still a functor in + (but still covariant in \begin_inset Formula $R$ \end_inset ). - An example of such a type constructor is: + An example of such a type constructor +\begin_inset Formula $S$ +\end_inset + + is: \begin_inset Formula \begin{equation} S^{A,R}\triangleq\left(A\rightarrow\text{Int}\right)+R\times R\quad.\label{eq:f-example-contra-bifunctor} @@ -20826,8 +21091,8 @@ S^{A,R}\triangleq\left(A\rightarrow\text{Int}\right)+R\times R\quad.\label{eq:f- \end_inset -The type constructor -\begin_inset Formula $S^{\bullet,\bullet}$ +This +\begin_inset Formula $S$ \end_inset is not a bifunctor because it is contravariant in its first type parameter; @@ -20843,7 +21108,11 @@ bimap \end_inset - function for it. + function for +\begin_inset Formula $S$ +\end_inset + +. However, we can define an analogous function called \begin_inset listings inline true @@ -20969,7 +21238,7 @@ noprefix "false" \begin_layout Standard If we define a type constructor -\begin_inset Formula $L^{\bullet}$ +\begin_inset Formula $L$ \end_inset using the recursive @@ -21034,16 +21303,16 @@ If \begin_inset Formula $S^{A,R}$ \end_inset - is a contrafunctor with respect to + is contravariant with respect to \begin_inset Formula $A$ \end_inset - and a functor with respect to + and covariant with respect to \begin_inset Formula $R$ \end_inset then the recursively defined type constructor -\begin_inset Formula $C^{A}$ +\begin_inset Formula $C$ \end_inset is a contrafunctor: @@ -21106,7 +21375,7 @@ cmap \begin_inset Formula $_{C}$ \end_inset - as + as: \begin_inset Formula \begin{align*} \text{cmap}_{C}(f^{:B\rightarrow A}) & :C^{A}\rightarrow C^{B}\cong S^{A,C^{A}}\rightarrow S^{B,C^{B}}\quad,\\ @@ -21170,7 +21439,7 @@ Proof \end_layout \begin_layout Standard -The code of +We mark by an overline the recursive call in the code of \begin_inset listings inline true status open @@ -21182,7 +21451,7 @@ cmap \end_inset - is recursive, and the recursive call is marked by an overline: +: \begin_inset Formula \[ \text{cmap}_{C}(f)\triangleq f^{\downarrow C}\triangleq\text{xmap}_{S}(f)(\overline{\text{cmap}_{C}}(f))\quad. @@ -21254,8 +21523,9 @@ noprefix "false" \end_inset - describe how functors and contrafunctors are built from other type expressions. - We can see from Tables + describe how functors and contrafunctors are built from various type expression +s. 
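 As a small illustration of the recursive contrafunctor construction described above, here is a sketch of ours with 
\begin_inset Formula $S^{A,R}\triangleq\left(A\rightarrow\text{Int}\right)+R\times R$
\end_inset

:
\begin_inset listings
inline false
status open

\begin_layout Plain Layout

// A sketch: the recursive contrafunctor C[A] = S[A, C[A]] with S[A, R] = (A => Int) + R x R.
\end_layout

\begin_layout Plain Layout

sealed trait C[A]
\end_layout

\begin_layout Plain Layout

final case class Score[A](f: A => Int) extends C[A]
\end_layout

\begin_layout Plain Layout

final case class Fork[A](left: C[A], right: C[A]) extends C[A]
\end_layout

\begin_layout Plain Layout

\end_layout

\begin_layout Plain Layout

def cmapC[A, B](f: B => A): C[A] => C[B] = {
\end_layout

\begin_layout Plain Layout

  case Score(g)          => Score(f andThen g)
\end_layout

\begin_layout Plain Layout

  case Fork(left, right) => Fork(cmapC(f)(left), cmapC(f)(right))  // Recursive calls.
\end_layout

\begin_layout Plain Layout

}
\end_layout

\end_inset
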
+ Tables \begin_inset space ~ \end_inset @@ -21283,7 +21553,7 @@ noprefix "false" \end_inset - that + show that \emph on every \emph default @@ -21582,8 +21852,8 @@ noprefix "false" \end_layout \begin_layout Standard -To see how this works, consider any exponential-polynomial type expression, - such as Eq. +To see how this works in practice, consider any exponential-polynomial type + expression, such as Eq. \begin_inset space ~ \end_inset @@ -21600,7 +21870,7 @@ noprefix "false" ): \begin_inset Formula \[ -Z^{A,R}\triangleq\left(\left(A\rightarrow A\rightarrow R\right)\rightarrow R\right)\times A+\left(\bbnum 1+R\rightarrow A+\text{Int}\right)+A\times A\times\text{Int}\times\text{Int}\quad, +Z^{A,R}\triangleq\left(\left(A\rightarrow A\rightarrow R\right)\rightarrow R\right)\times A+\left(\bbnum 1+R\rightarrow A+\text{Int}\right)+A\times A\times\text{Int}\times\text{Int}\quad. \] \end_inset @@ -21672,7 +21942,7 @@ map method and verify that the functor laws hold. To do that from scratch, we could use the techniques explained in this - and the previous chapters: starting from the type signature + and the previous chapters: starting from the type signature, \begin_inset Formula \[ \text{map}_{Z}:Z^{A,R}\rightarrow\left(A\rightarrow B\right)\rightarrow Z^{B,R}\quad, @@ -21717,7 +21987,7 @@ map \end_layout \begin_layout Standard -However, that work can be avoided if we find a way of building up +All that work can be avoided if we build \begin_inset Formula $Z^{A,R}$ \end_inset @@ -21735,7 +22005,7 @@ map \end_inset and a proof that the functor laws hold up to that step. - In this way, we will avoid the need to look for an implementation of + In this way, we will avoid the need to guess an implementation of \begin_inset listings inline true status open @@ -21839,11 +22109,11 @@ The type notation for Mark the covariant and the contravariant positions in the type expressions for -\begin_inset Formula $F$ +\begin_inset Formula $F^{A,Z}$ \end_inset and -\begin_inset Formula $G$ +\begin_inset Formula $G^{A,Z}$ \end_inset : @@ -21980,13 +22250,7 @@ Solution \end_layout \begin_layout Standard -We need to build -\begin_inset Formula $G^{A,Z}$ -\end_inset - - via step-by-step constructions that start from primitive types and type - parameters. 
- At the top level of its type expression, +At the top level of its type expression, \begin_inset Formula $G^{A,Z}$ \end_inset @@ -22166,10 +22430,10 @@ product \begin_inset Formula \begin{align*} & G_{2}^{A,Z}\triangleq\bbnum 1+(Z\rightarrow\text{Int}\rightarrow Z\rightarrow\text{Int}\times A)\quad.\\ -\text{co-product}:\quad & G_{2}^{A,Z}\cong\bbnum 1+G_{3}^{A,Z}\quad\text{ where }G_{3}^{A,Z}\triangleq Z\rightarrow\text{Int}\rightarrow Z\rightarrow\text{Int}\times A\quad.\\ -\text{exponential}:\quad & G_{3}^{A,Z}\cong Z\rightarrow G_{4}^{A,Z}\quad\text{ where }G_{4}^{A,Z}\triangleq\text{Int}\rightarrow Z\rightarrow\text{Int}\times A\quad.\\ -\text{exponential}:\quad & G_{4}^{A,Z}\cong\text{Int}\rightarrow G_{5}^{A,Z}\quad\text{ where }G_{5}^{A,Z}\triangleq Z\rightarrow\text{Int}\times A\quad.\\ -\text{exponential}:\quad & G_{5}^{A,Z}\cong Z\rightarrow G_{6}^{A}\quad\text{ where }G_{6}^{A}\triangleq\text{Int}\times A\quad.\\ +\text{co-product}:\quad & G_{2}^{A,Z}\cong\bbnum 1+G_{3}^{A,Z}\quad\text{ where }\quad G_{3}^{A,Z}\triangleq Z\rightarrow\text{Int}\rightarrow Z\rightarrow\text{Int}\times A\quad.\\ +\text{exponential}:\quad & G_{3}^{A,Z}\cong Z\rightarrow G_{4}^{A,Z}\quad\text{ where \quad}G_{4}^{A,Z}\triangleq\text{Int}\rightarrow Z\rightarrow\text{Int}\times A\quad.\\ +\text{exponential}:\quad & G_{4}^{A,Z}\cong\text{Int}\rightarrow G_{5}^{A,Z}\quad\text{ where }\quad G_{5}^{A,Z}\triangleq Z\rightarrow\text{Int}\times A\quad.\\ +\text{exponential}:\quad & G_{5}^{A,Z}\cong Z\rightarrow G_{6}^{A}\quad\text{ where }\quad G_{6}^{A}\triangleq\text{Int}\times A\quad.\\ \text{product}:\quad & G_{6}^{A}\cong\text{Int}\times A\cong\text{Const}^{\text{Int},A}\times\text{Id}^{A}\quad. \end{align*} @@ -22187,7 +22451,7 @@ Each of the type constructors \begin_inset Formula $A$ \end_inset - because all of the functor constructions preserve the functor laws. + because all of the listed constructions preserve the functor laws. Therefore, \begin_inset Formula $G^{A,Z}$ \end_inset @@ -22197,7 +22461,6 @@ Each of the type constructors \end_inset . - \end_layout \begin_layout Standard @@ -22237,7 +22500,7 @@ status open \begin_layout Plain Layout -map +fmap \end_layout \end_inset @@ -22271,7 +22534,14 @@ x.map(f) \text{co-product}:\quad & G_{1}^{A}\triangleq\text{Int}+A\quad,\quad f^{\uparrow G_{1}}=\,\begin{array}{||cc|} \text{id} & \bbnum 0\\ \bbnum 0 & f -\end{array}\quad.\\ +\end{array}\quad. +\end{align*} + +\end_inset + + +\begin_inset Formula +\begin{align*} \text{co-product}:\quad & G_{2}^{A,Z}\triangleq\bbnum 1+G_{3}^{A,Z}\quad,\quad f^{\uparrow G_{2}}=\,\begin{array}{||cc|} \text{id} & \bbnum 0\\ \bbnum 0 & f^{\uparrow G_{3}} @@ -22354,7 +22624,7 @@ def fmap_G[A, B, Z](f: A => B): G[A, Z] => G[B, Z] = { case G(p, q) => \begin_layout Plain Layout val newQ: Option[Z => Int => Z => (Int, B)] = q.map { // Use the map method - for Option[_]. + for Option. \end_layout \begin_layout Plain Layout @@ -22406,21 +22676,17 @@ derived \begin_inset Formula $G$ \end_inset - given the type expression of -\begin_inset Formula $G^{A}$ -\end_inset - -, and similarly the code for + whose type expression is given, and similarly the code for \begin_inset Formula $\text{cmap}_{C}$ \end_inset - for any contrafunctor + for any given contrafunctor \begin_inset Formula $C$ \end_inset . - The corresponding algorithms could be implemented as a Scala library that - derives the code at compile time. + The corresponding algorithms could be implemented as a Scala macro library + that derives the code at compile time. 
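 As a usage sketch of ours (the declaration of G below is our assumption, chosen only to match the pattern G(p, q) used in fmap_G above):
\begin_inset listings
inline false
status open

\begin_layout Plain Layout

// Assumed declaration of G (our guess, consistent with the pattern match in fmap_G):
\end_layout

\begin_layout Plain Layout

final case class G[A, Z](p: Either[Int, A], q: Option[Z => Int => Z => (Int, A)])
\end_layout

\begin_layout Plain Layout

\end_layout

\begin_layout Plain Layout

val g: G[Int, String] = G(Right(10), Some(s => n => t => (n + s.length + t.length, 10)))
\end_layout

\begin_layout Plain Layout

\end_layout

\begin_layout Plain Layout

scala> fmap_G[Int, Boolean, String](_ > 0)(g).p  // The Int + A part should become Right(true).
\end_layout

\begin_layout Plain Layout

res0: Either[Int, Boolean] = Right(true)
\end_layout

\end_inset
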
\end_layout \begin_layout Section @@ -22489,12 +22755,12 @@ cmap \end_inset - without trial and error. + with no guessing. \end_layout \begin_layout Itemize -Use functor blocks to manipulate data wrapped in functors with more readable - code. +Write more readable code using functor blocks to manipulate data wrapped + in functors. \end_layout \begin_layout Subsection @@ -22810,19 +23076,19 @@ Show that \end_inset is, in general, neither a functor nor a contrafunctor if both -\begin_inset Formula $F^{A}$ +\begin_inset Formula $F$ \end_inset and -\begin_inset Formula $G^{A}$ +\begin_inset Formula $G$ \end_inset - are functors or both are contrafunctors (give an example of suitable -\begin_inset Formula $F^{A}$ + are functors or both are contrafunctors (give examples of suitable +\begin_inset Formula $F$ \end_inset and -\begin_inset Formula $G^{A}$ +\begin_inset Formula $G$ \end_inset ). @@ -22999,7 +23265,7 @@ noprefix "false" \begin_layout Standard Show that the recursive type constructor -\begin_inset Formula $L^{\bullet}$ +\begin_inset Formula $L$ \end_inset defined by: @@ -23064,7 +23330,7 @@ noprefix "false" \begin_layout Standard Show that the perfect-shaped tree -\begin_inset Formula $L^{\bullet}$ +\begin_inset Formula $L$ \end_inset defined by: @@ -23200,7 +23466,7 @@ noprefix "false" \begin_inset Formula $F^{A}$ \end_inset -and + and \begin_inset Formula $F^{B}$ \end_inset @@ -23545,7 +23811,7 @@ Consider an exponential-polynomial type constructor \begin_inset Formula $P^{A}$ \end_inset -, no matter how complicated, such as: + such as: \begin_inset Formula \[ P^{A}\triangleq\left(\bbnum 1+A\times A\rightarrow A\right)\times A\rightarrow\bbnum 1+\left(A\rightarrow A+\text{Int}\right)\quad. @@ -23568,7 +23834,7 @@ Each copy of the type parameter \begin_inset Formula $Z$ \end_inset - and so obtain a new type constructor + and obtain a new type constructor \begin_inset Formula $\tilde{P}^{Z,A}$ \end_inset @@ -23635,7 +23901,7 @@ xmap \end_inset a profunctor. - Thus, + So, \emph on every \emph default @@ -23679,7 +23945,7 @@ noprefix "false" \end_inset -, cannot be made into a profunctor. +, cannot be made into profunctors. The type signature of \begin_inset listings inline true @@ -24005,7 +24271,7 @@ status open \begin_layout Plain Layout -sealed class HasID(val id: Long) +class HasID(val id: Long) \end_layout \begin_layout Plain Layout @@ -24226,8 +24492,35 @@ right inverse \begin_layout Standard We see that subtyping does not always involve an injective conversion function. - (Sometimes, the conversion function is surjective, and sometimes neither - injective nor surjective.) + Sometimes, the conversion function is surjective, and sometimes neither + injective nor surjective. + As an example of the latter, consider the product type +\begin_inset listings +inline true +status open + +\begin_layout Plain Layout + +(Option[A], HasID) +\end_layout + +\end_inset + + and its subtype +\begin_inset listings +inline true +status open + +\begin_layout Plain Layout + +(Some[A], Person) +\end_layout + +\end_inset + +. + The corresponding type conversion function is neither injective nor surjective. + \begin_inset Note Note status collapsed @@ -24539,7 +24832,7 @@ If the function \end_inset It follows that -\begin_inset Formula $\text{fmap}_{L}(f)\bef\text{fmap}_{L}(f)=\text{id}$ +\begin_inset Formula $\text{fmap}_{L}(f)\bef\text{fmap}_{L}(g)=\text{id}$ \end_inset . 
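 A one-line check of this property with the List functor (a sketch of ours):
\begin_inset listings
inline false
status open

\begin_layout Plain Layout

// If f and g are mutually inverse, their lifted versions are also mutually inverse.
\end_layout

\begin_layout Plain Layout

val f: Int => String = _.toString
\end_layout

\begin_layout Plain Layout

val g: String => Int = _.toInt     // g(f(x)) == x for all x: Int.
\end_layout

\begin_layout Plain Layout

\end_layout

\begin_layout Plain Layout

scala> List(1, 2, 3).map(f).map(g) == List(1, 2, 3)
\end_layout

\begin_layout Plain Layout

res0: Boolean = true
\end_layout

\end_inset
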
diff --git a/sofp-src/lyx/sofp-induction.lyx b/sofp-src/lyx/sofp-induction.lyx index 0173b62c2..11d1a99e0 100644 --- a/sofp-src/lyx/sofp-induction.lyx +++ b/sofp-src/lyx/sofp-induction.lyx @@ -11580,7 +11580,31 @@ import scala.annotation.tailrec \end_inset -Let us trace the evaluation of this function on an example: +(The +\begin_inset listings +inline true +status open + +\begin_layout Plain Layout + +import +\end_layout + +\end_inset + + declaration is needed whenever the code uses the +\begin_inset listings +inline true +status open + +\begin_layout Plain Layout + +tailrec +\end_layout + +\end_inset + + annotation.) Let us trace the evaluation of this function on an example: \begin_inset listings inline false status open @@ -11997,15 +12021,6 @@ status open \begin_layout Plain Layout -import scala.annotation.tailrec -\end_layout - -\begin_layout Plain Layout - -\end_layout - -\begin_layout Plain Layout - def length[A](xs: Seq[A]): Int = { \end_layout @@ -12955,7 +12970,7 @@ name "subsec:implementing-general-aggregation-foldleft" \end_layout \begin_layout Standard -An +As a rule, an \series bold aggregation \series default @@ -12969,7 +12984,7 @@ aggregation \end_inset - converts a sequence of values into a single value. + computes a single value from a sequence of values. In general, the type of the result may be different from the type of sequence elements. To describe that general situation, we introduce type parameters @@ -13072,7 +13087,7 @@ status open \begin_layout Plain Layout -f(xs) = b +b = f(xs) \end_layout \end_inset @@ -13218,7 +13233,7 @@ def f[A, B](s: Seq[A], b: B, g: (A, B) => B): B = \begin_layout Plain Layout - else g(s.last, f(s.init, b, g) + else g(s.last, f(s.init, b, g)) \end_layout \end_inset @@ -17608,7 +17623,7 @@ res1: Map[String,Int] = Map(a -> 400, b -> 2) \end_layout \begin_layout Section -Converting a single value into a sequence +Generating a sequence from a single value \begin_inset CommandInset label LatexCommand label name "sec:ch2Converting-a-single" @@ -17635,9 +17650,8 @@ unfolding \begin_inset Quotes erd \end_inset -) converts a single value into a sequence. - An example of this task is to compute the sequence of decimal digits for - a given integer: +) builds a new sequence from a single value and other needed information. + An example is computing the decimal digits of a given integer: \begin_inset listings inline false status open diff --git a/sofp-src/lyx/sofp-monads.lyx b/sofp-src/lyx/sofp-monads.lyx index 4dc5f2a38..e1c4bd2f9 100644 --- a/sofp-src/lyx/sofp-monads.lyx +++ b/sofp-src/lyx/sofp-monads.lyx @@ -408,7 +408,7 @@ How can we translate into code a computation that contains nested iterations, such as: \begin_inset Formula \begin{equation} -\sum_{i=1}^{n}\sum_{j=1}^{n}\sum_{k=1}^{n}\frac{1}{1+i+j+k}=\,?\label{eq:semimonads-numerical-example-1} +\sum_{i=1}^{n}\sum_{j=1}^{i}\sum_{k=1}^{j}\frac{1}{1+i+j+k}=\,?\label{eq:semimonads-numerical-example-1} \end{equation} \end_inset @@ -531,8 +531,8 @@ sum \end_inset function is applied to the sequence. - This separation is useful because it gives us full flexibility to transform - or aggregate the sequence. + This separation is useful because it gives programmers full flexibility + to transform or to aggregate the sequence. \end_layout \begin_layout Standard @@ -554,25 +554,36 @@ map \end_inset - operations, we will obtain a nested data structure, e.g., a vector of vectors: + operations, we will obtain a nested data structure, e.g., a vector of vectors. 
+ As an example, consider this computation: +\begin_inset Formula +\[ +\sum_{i=1}^{4}\sum_{j=1}^{i}i*j\quad. +\] + +\end_inset + +Begin to implement this computation via this code: \begin_inset listings inline false status open \begin_layout Plain Layout -scala> (1 to 5).map(i => (1 to i).map(j => i * j)) +scala> (1 to 4).map(i => (1 to i).map(j => i * j)) \end_layout \begin_layout Plain Layout res0: IndexedSeq[IndexedSeq[Int]] = Vector(Vector(1), Vector(2, 4), Vector(3, - 6, 9), Vector(4, 8, 12, 16), Vector(5, 10, 15, 20, 25)) + 6, 9), Vector(4, 8, 12, 16)) \end_layout \end_inset -We need to +To compute the sum, we need to accumulate all the values from all the nested + lists. + So, we need to \begin_inset Quotes eld \end_inset @@ -646,8 +657,26 @@ scala> (1 to 4).flatMap(i => (1 to i).map(j => i * j)) // Same result as above. res2: IndexedSeq[Int] = Vector(1, 2, 4, 3, 6, 9, 4, 8, 12, 16) \end_layout +\begin_layout Plain Layout + +\end_layout + +\begin_layout Plain Layout + +scala> (1 to 4).flatMap(i => (1 to i).map(j => i * j)).sum +\end_layout + +\begin_layout Plain Layout + +res3: Int = 65 +\end_layout + \end_inset + +\end_layout + +\begin_layout Standard To represent more nesting, we use more \begin_inset listings inline true @@ -687,17 +716,17 @@ def example(n: Int): Double = (1 to n).flatMap { i => \begin_layout Plain Layout - (1 to n).flatMap { j => + (1 to i).flatMap { j => \end_layout \begin_layout Plain Layout - (1 to n).map { k => + (1 to j).map { k => \end_layout \begin_layout Plain Layout - 1.0 / (1.0 + i + j + k) } + 1.0 / (1 + i + j + k) } \end_layout \begin_layout Plain Layout @@ -721,7 +750,7 @@ scala> example(10) \begin_layout Plain Layout -res3: Double = 63.20950497687006 +res4: Double = 14.250481740989049 \end_layout \end_inset @@ -844,17 +873,17 @@ status open \begin_layout Plain Layout - j <- 1 to n + j <- 1 to i \end_layout \begin_layout Plain Layout - k <- 1 to n + k <- 1 to j \end_layout \begin_layout Plain Layout - } yield 1.0 / (1.0 + i + j + k) + } yield 1.0 / (1 + i + j + k) \end_layout \begin_layout Plain Layout @@ -908,17 +937,17 @@ status open \begin_layout Plain Layout - (1 to n).flatMap { j => + (1 to i).flatMap { j => \end_layout \begin_layout Plain Layout - (1 to n).map { k => + (1 to j).map { k => \end_layout \begin_layout Plain Layout - 1.0 / (1.0 + i + j + k) + 1.0 / (1 + i + j + k) \end_layout \begin_layout Plain Layout @@ -1053,7 +1082,7 @@ T=\left\{ \left.x+y+z~\right|~x\in P,\,y\in Q,\,z\in R,\,f(x,y,z)=0\,\right\} \q \end_inset -Here, +Here \begin_inset Formula $P$ \end_inset @@ -1098,8 +1127,7 @@ Here, \end_inset . - A direct implementation of this formula is the code shown at left. - Here, + An implementation of this formula is the code shown above where \begin_inset listings inline true status open @@ -1147,30 +1175,16 @@ t \end_inset - is again an array. - Just like the mathematical formula's result is a collection of some + is again an array that collects some \begin_inset Formula $x+y+z$ \end_inset - values, the functor block's result is a collection of values computed after - the -\begin_inset listings -inline true -status open - -\begin_layout Plain Layout - -yield -\end_layout - -\end_inset - - keyword. + values. \end_layout \begin_layout Standard To develop more intuition about using functor blocks with multiple left - arrows, look at this code: + arrows, look at this sample code: \end_layout \begin_layout Standard @@ -1361,7 +1375,7 @@ for all \begin_inset Quotes erd \end_inset -, etc.) 
will produce an intermediate sequence of the same type. +, etc.) produces an intermediate sequence of the same type. Each next line continues the calculation from the previous intermediate sequence. @@ -1370,7 +1384,7 @@ for all \begin_layout Standard If this intuition is correct, we should be able to refactor the code by cutting the calculation at any place and continuing in another functor - block, without changing the result value: + block: \end_layout \begin_layout Standard @@ -1423,7 +1437,7 @@ val result = for { \begin_layout Plain Layout -// We will cut the block here, making i and j available for further computation +// Cut the functor block here, making i and j available for further computation s. \end_layout @@ -1589,8 +1603,8 @@ the same \end_layout \begin_layout Itemize -The entire functor block's result is again a collection using the same type - constructor. +The entire functor block's result is again a collection that uses the same + type constructor. The result is \emph on not @@ -1607,13 +1621,13 @@ yield \end_inset -; instead, it is a collection of those expressions. +; instead, the result is a collection of those expressions. \end_layout \begin_layout Standard -So far, we have been using sequences as the main type constructor. - However, functor blocks with several left arrows will work with any other - type constructor that has +So far, we have been using sequences as the collection type. + However, functor blocks with several left arrows will also work with any + other type constructor that has \begin_inset listings inline true status open @@ -1640,7 +1654,6 @@ flatMap methods. In the next sections, we will see how to use functor blocks with different type constructors. - \end_layout \begin_layout Standard @@ -1779,8 +1792,7 @@ monads \end_inset This chapter will study semi-monads and monads in detail. - For now, we note that the functor block syntax does not require functors - to have a + For now, we note that the functor block syntax does not require a \begin_inset listings inline true status open @@ -1835,7 +1847,7 @@ noprefix "false" \end_inset ). - So, the full functionality of functor blocks can be used with + So, the full functionality of functor blocks is available for \emph on filterable semi-monads \emph default @@ -3073,7 +3085,7 @@ acb \end_inset -, which is the correct part of the final answer. +, which is part of a correct answer. So, we write a nested iteration and concatenate the results: \begin_inset listings inline false @@ -3153,27 +3165,31 @@ status open \begin_layout Plain Layout def permutations(xs: Seq[String]): Seq[String] = if (xs.length == 1) xs else - for { \end_layout \begin_layout Plain Layout - x <- xs + for { \end_layout \begin_layout Plain Layout - xsWithoutX = xs.filter(_ != x) + x <- xs \end_layout \begin_layout Plain Layout - rest <- permutations(xsWithoutX) + xsWithoutX = xs.filter(_ != x) \end_layout \begin_layout Plain Layout -} yield x + rest + rest <- permutations(xsWithoutX) +\end_layout + +\begin_layout Plain Layout + + } yield x + rest \end_layout \begin_layout Plain Layout @@ -3726,7 +3742,7 @@ The \begin_inset Formula $8\times8$ \end_inset - chess board so that no queen threatens any other queen. + chess board in such a way that no queen threatens any other queen. To make our work easier, we note that each queen must be placed in a different row. So, it is sufficient to find the column index for each queen. 
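 One possible way of coding that idea is sketched below (our own code; it need not coincide with the solution developed in the text): place queens row by row and keep only the non-threatening column choices.
\begin_inset listings
inline false
status open

\begin_layout Plain Layout

// A sketch of ours: place queens row by row, keeping only safe column choices.
\end_layout

\begin_layout Plain Layout

def noThreat(prev: Seq[Int], col: Int): Boolean =
\end_layout

\begin_layout Plain Layout

  prev.zipWithIndex.forall { case (c, i) =>
\end_layout

\begin_layout Plain Layout

    c != col && math.abs(c - col) != prev.length - i  // Not the same column or diagonal.
\end_layout

\begin_layout Plain Layout

  }
\end_layout

\begin_layout Plain Layout

\end_layout

\begin_layout Plain Layout

def queens(n: Int): Seq[Seq[Int]] =
\end_layout

\begin_layout Plain Layout

  (1 to n).foldLeft(Seq(Seq.empty[Int])) { (partial, _) =>
\end_layout

\begin_layout Plain Layout

    for {
\end_layout

\begin_layout Plain Layout

      p   <- partial
\end_layout

\begin_layout Plain Layout

      col <- 0 until n
\end_layout

\begin_layout Plain Layout

      if noThreat(p, col)
\end_layout

\begin_layout Plain Layout

    } yield p :+ col
\end_layout

\begin_layout Plain Layout

  }
\end_layout

\begin_layout Plain Layout

\end_layout

\begin_layout Plain Layout

scala> queens(8).length  // The number of solutions on the ordinary chess board.
\end_layout

\begin_layout Plain Layout

res0: Int = 92
\end_layout

\end_inset
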
@@ -4496,7 +4512,7 @@ status open \end_inset -, etc., i.e., Boolean formulas that contain no conjunctions or no disjunctions. +, ..., i.e., Boolean formulas that contain no conjunctions or no disjunctions. Then we may represent a conjunction as a \begin_inset listings inline true @@ -5960,8 +5976,7 @@ def cnf2dnf[A](cnf: CNF[A]): DNF[A] = DNF(dnf2cnf(DNF(cnf.s)).s) \begin_layout Plain Layout - // Verify that dnf2cnf - and cnf2dnf are inverses: + // Verify that dnf2cnf and cnf2dnf are inverses: \end_layout \begin_layout Plain Layout @@ -6578,7 +6593,7 @@ def vectorMatrixProduct[N: Numeric](vector: Seq[N], matrix: Seq[Seq[N]]): \begin_layout Plain Layout -scala> vectorMatrixProduct(Seq(3,4,5), matrix_T) +scala> vectorMatrixProduct(Seq(3, 4, 5), matrix_T) \end_layout \begin_layout Plain Layout @@ -6926,7 +6941,7 @@ iteration \end_inset will perform some computations using the wrapped values. - However, if even one of the + However, if even just one of the \begin_inset listings inline true status open @@ -6938,7 +6953,7 @@ Option \end_inset - values happens to be empty, the computed result will be an empty value: + values happens to be empty, the computed result will be empty: \begin_inset listings inline false status open @@ -7411,7 +7426,7 @@ noprefix "false" \end_inset - introduced custom data type with hand-coded methods such as + introduced a custom data type with hand-coded methods such as \begin_inset listings inline true status open @@ -7540,19 +7555,7 @@ result: Result = Left("error: sqrt(-50)") \end_inset -The concise and readable code of -\begin_inset listings -inline true -status open - -\begin_layout Plain Layout - -val result -\end_layout - -\end_inset - - replaces more verbose implementations such as: +This concise and readable code replaces verbose implementations such as: \begin_inset listings inline false status open @@ -7623,7 +7626,7 @@ noprefix "false" \end_inset -: chaining computations with +: chaining with \family typewriter Option \family default @@ -7931,7 +7934,7 @@ noprefix "false" \end_inset -: chaining computations with +: chaining with \family typewriter Try \end_layout @@ -8197,8 +8200,8 @@ ExecutionContext \end_inset - argument, which provides access to a JVM thread pool where computations - will be scheduled. + argument, which provides access to a JVM thread pool where parallel computation +s will be scheduled. \end_layout \begin_layout Standard @@ -8240,8 +8243,19 @@ flatMap \end_inset - (or in a functor block) will run sequentially if new values need to wait - for previous values: + (or in a functor block) will run sequentially if new +\begin_inset listings +inline true +status open + +\begin_layout Plain Layout + +Future +\end_layout + +\end_inset + + values depend on previous results: \begin_inset listings inline false status open @@ -8402,7 +8416,7 @@ longComputation(...) \end_layout \begin_layout Standard -Another possibility is that each +What if each \begin_inset listings inline true status open @@ -8414,8 +8428,8 @@ longComputation(...) \end_inset - is independent of the results of the other computations. - Then the three + were independent of the results of the other computations? 
Then the three + \begin_inset listings inline true status open @@ -8427,8 +8441,8 @@ Future \end_inset - values may be created up front, and the functor block code represents three - + values may be created up front, and the functor block code will represent + three \begin_inset Quotes eld \end_inset @@ -8532,7 +8546,7 @@ noprefix "false" \begin_layout Standard To show that -\begin_inset Formula $\text{BT}^{A}$ +\begin_inset Formula $\text{BT}$ \end_inset is a functor, Statement @@ -8576,7 +8590,7 @@ recursion scheme \end_inset and then shows that the recursive type constructor -\begin_inset Formula $L^{\bullet}$ +\begin_inset Formula $L$ \end_inset defined by @@ -8584,8 +8598,8 @@ recursion scheme \end_inset is a functor. - (The type -\begin_inset Formula $\text{BT}^{A}$ + (The type constructor +\begin_inset Formula $\text{BT}$ \end_inset is obtained with @@ -8593,40 +8607,42 @@ recursion scheme \end_inset .) As we will see, the type constructor -\begin_inset Formula $L^{A}$ +\begin_inset Formula $L$ \end_inset will be a semi-monad or a monad with certain choices of -\begin_inset Formula $S^{\bullet,\bullet}$ +\begin_inset Formula $S$ \end_inset . \end_layout \begin_layout Standard -For lists, nested iteration goes over inner lists contained in an outer - list. +For lists, nested iteration goes over inner lists stored in an outer list. How does nested iteration work for a tree-shaped collection? An iteration - over a tree enumerates the values at the + over a value of type +\begin_inset Formula $\text{BT}^{A}$ +\end_inset + + enumerates the values of type +\begin_inset Formula $A$ +\end_inset + + stored at the \emph on leaves \emph default of a tree. - So, a tree analog of nested iteration implies that each leaf of an outer - tree contains an inner tree. - A -\begin_inset listings -inline true -status open - -\begin_layout Plain Layout - -flatMap -\end_layout + So, a +\begin_inset Quotes eld +\end_inset +nested tree +\begin_inset Quotes erd \end_inset - function must concatenate all nested trees into a single + means that each leaf of an outer tree contains an inner tree. + To convert a nested tree into a single \begin_inset Quotes eld \end_inset @@ -8634,7 +8650,7 @@ flattened \begin_inset Quotes erd \end_inset - tree. + tree, we just need to graft a subtree in place of a leaf. \end_layout @@ -8652,7 +8668,7 @@ flatMap \end_inset method for the binary tree -\begin_inset Formula $BT^{\bullet}$ +\begin_inset Formula $\text{BT}$ \end_inset in that way. @@ -8771,12 +8787,12 @@ status open \begin_layout Plain Layout -tree1.flatMap(f) +flm(f)(tree1) \end_layout \end_inset -, where we take +, where \begin_inset listings inline true status open @@ -8889,7 +8905,7 @@ status open \begin_layout Plain Layout -flatMap +flm \end_layout \end_inset @@ -8901,7 +8917,7 @@ status open \begin_layout Plain Layout -tree1.flatMap(f) +flm(f)(tree1) \end_layout \end_inset @@ -8965,14 +8981,14 @@ status open \begin_layout Plain Layout -flatMap +flm \end_layout \end_inset . - That code can be generalized to the recursive type -\begin_inset Formula $\text{PT}^{A}$ + That code can be generalized to a recursive type constructor +\begin_inset Formula $\text{PT}$ \end_inset (representing a @@ -8995,13 +9011,13 @@ tree with \end_inset -for any given functor +where \begin_inset Formula $P$ \end_inset -. + is any given functor. 
The disjunctive part -\begin_inset Formula $A+\bbnum 0$ +\begin_inset Formula $A+\bbnum 0\,$ \end_inset is replaced by a new tree: @@ -9044,8 +9060,8 @@ def flm[P[_]: Functor, A, B](f: A => PT[P, B]): PT[P, A] => PT[P, B] = { \begin_layout Plain Layout - case Branch(p) => Branch(p.map(t => flm(f)(t)) // Conceptually, Branch(p.map -(flm(f))). + case Branch(p) => Branch(p.map(t => flm(f)(t)) // Equivalently, this + is Branch(p.map(flm(f))). \end_layout @@ -9097,7 +9113,7 @@ flatMap \end_inset . - Such + Values of type \begin_inset Formula $L^{A}$ \end_inset @@ -9297,6 +9313,7 @@ flatMap operation for a tree of configuration properties of the form: \begin_inset listings +lstparams "language=bash" inline false status open @@ -9372,16 +9389,11 @@ String \begin_inset Formula $P$ \end_inset --shaped branches, where the functor -\begin_inset Formula $P$ -\end_inset - - is defined as +-shaped branches, where we define \begin_inset Formula $P^{A}\triangleq\text{List}^{\text{String}\times A}$ \end_inset -. - Implement the tree type: +: \begin_inset listings inline false status open @@ -9950,8 +9962,12 @@ status open \begin_layout Plain Layout -def map[B](f: A => B): Term[B] = this match { // This code must be within - `trait Term[A]`. +// This code must be within `trait Term[A]`. +\end_layout + +\begin_layout Plain Layout + +def map[B](f: A => B): Term[B] = this match { \end_layout \begin_layout Plain Layout @@ -10000,8 +10016,12 @@ status open \begin_layout Plain Layout -def flatMap[B](f: A => Term[B]): Term[B] = this match { // This code must - be within `trait Term[A]`. +// This code must be within `trait Term[A]`. +\end_layout + +\begin_layout Plain Layout + +def flatMap[B](f: A => Term[B]): Term[B] = this match { \end_layout \begin_layout Plain Layout @@ -10350,8 +10370,19 @@ flatMap \end_inset method is motivated by the requirements of nested iteration. - We then looked at tree-like monads, which generalize nested list iterations - to tree grafting. + We then looked at tree-like monads whose +\begin_inset listings +inline true +status open + +\begin_layout Plain Layout + +flatMap +\end_layout + +\end_inset + + methods work via tree grafting. It turns out that the \begin_inset listings inline true @@ -10364,20 +10395,16 @@ flatMap \end_inset - method can be generalized to many other type constructors that are useful - for various programming tasks not limited to nested iteration. + method can be supported by many other type constructors useful for various + programming tasks not necessarily related to nested iteration. \end_layout \begin_layout Standard A general (semi)monad type constructor -\begin_inset Formula $L^{A}$ -\end_inset - - no longer represents a collection of data items of type -\begin_inset Formula $A$ +\begin_inset Formula $L$ \end_inset -. + no longer represents a collection of data items. Instead, we regard \begin_inset Formula $L^{A}$ \end_inset @@ -10460,7 +10487,7 @@ side effect \begin_inset Formula $A\rightarrow L^{B}$ \end_inset - can be referentially transparent + are referentially transparent \begin_inset Index idx status open @@ -10635,7 +10662,7 @@ reading \end_inset with a suitable choice of a type constructor -\begin_inset Formula $L^{\bullet}$ +\begin_inset Formula $L$ \end_inset . @@ -10687,7 +10714,7 @@ Reader \end_inset . 
- The Scala definition is + The Scala definition is: \begin_inset listings inline true status open @@ -10727,7 +10754,19 @@ flatMap \end_inset - directly follow from their type signatures: + for +\begin_inset listings +inline true +status open + +\begin_layout Plain Layout + +Reader +\end_layout + +\end_inset + + can be derived unambiguously from their type signatures: \begin_inset listings lstparams "mathescape=true" inline false @@ -11069,7 +11108,12 @@ n \begin_layout Plain Layout - .map { file => runSh( + .map { file => +\end_layout + +\begin_layout Plain Layout + + runSh( \begin_inset Quotes erd \end_inset @@ -11340,7 +11384,12 @@ type Reader[Z, A] = Z => A \begin_layout Plain Layout -def listFilesR(dir: String): Reader[RunSh, String] = runSh => runSh(s +def listFilesR(dir: String): Reader[RunSh, String] = runSh => +\end_layout + +\begin_layout Plain Layout + + runSh(s \begin_inset Quotes eld \end_inset @@ -11362,12 +11411,17 @@ find $dir -type f \begin_layout Plain Layout def filterFilesR(patterns: String): String => Reader[RunSh, String] = files - => runSh => + => \end_layout \begin_layout Plain Layout - runSh(s + runSh => +\end_layout + +\begin_layout Plain Layout + + runSh(s \begin_inset Quotes eld \end_inset @@ -11395,7 +11449,12 @@ n \begin_layout Plain Layout - .map { file => runSh( + .map { file => +\end_layout + +\begin_layout Plain Layout + + runSh( \begin_inset Quotes erd \end_inset @@ -12320,8 +12379,7 @@ n" + other.message) \begin_layout Plain Layout -} // For simplicity, we assume that timestamps will be monotonically - increasing. +} // We assume that timestamps will be monotonically increasing. \end_layout \end_inset @@ -12392,7 +12450,20 @@ Writer[A, Logs] \end_inset . - Here are some example computations: + Here is some example code where we simulate long-running computations via + +\begin_inset listings +inline true +status open + +\begin_layout Plain Layout + +Thread.sleep +\end_layout + +\end_inset + +: \begin_inset listings inline false status open @@ -12404,8 +12475,7 @@ type Logged[A] = Writer[A, Logs] \begin_layout Plain Layout -def log[A](message: String)(x: A): Logged[A] = { // Define this function - for convenience. +def log[A](message: String)(x: A): Logged[A] = { // A helper function. \end_layout \begin_layout Plain Layout @@ -12425,8 +12495,7 @@ def log[A](message: String)(x: A): Logged[A] = { // Define this function \begin_layout Plain Layout -def compute[A](x: => A): A = { Thread.sleep(100L); x } // Simulate - a long computation. +def compute[A](x: => A): A = { Thread.sleep(100L); x } \end_layout \begin_layout Plain Layout @@ -12440,7 +12509,7 @@ scala> val result: Logged[Double] = for { \begin_layout Plain Layout - x <- log("begin with 3")(compute(3)) // The initial source + x <- log("begin with 3")(compute(3)) // The initial source type is `Logged[Int]`. \end_layout @@ -12451,7 +12520,7 @@ scala> val result: Logged[Double] = for { \begin_layout Plain Layout - z <- log("multiply by 2.0")(compute(y * 2.0)) // The type of result becomes + z <- log("multiply by 2.0")(compute(y * 2.0)) // The type of result becomes `Logged[Double]`. 
\end_layout @@ -12550,23 +12619,11 @@ name "subsec:The-State-monad" \end_layout \begin_layout Standard -Heuristically, the -\begin_inset listings -inline true -status open - -\begin_layout Plain Layout - -Reader -\end_layout - -\end_inset - - monad -\begin_inset Formula $\text{Read}^{S,A}$ +Heuristically, the effect of +\begin_inset Formula $\text{Reader}^{S,A}$ \end_inset - is able to + is to be able to \begin_inset Quotes eld \end_inset @@ -12578,23 +12635,11 @@ read \begin_inset Formula $S$ \end_inset -, while the -\begin_inset listings -inline true -status open - -\begin_layout Plain Layout - -Writer -\end_layout - -\end_inset - - monad +, while the effect of \begin_inset Formula $\text{Writer}^{A,S}$ \end_inset - may + is to be able to \begin_inset Quotes eld \end_inset @@ -12665,7 +12710,7 @@ To derive the required type constructor, consider a computation of type \begin_inset Formula $A\rightarrow B$ \end_inset - that additionally needs to read and to write a value of type + that additionally needs to read and to update a value of type \begin_inset Formula $S$ \end_inset @@ -12803,20 +12848,18 @@ type State[S, A] = S => (A, S) \begin_layout Plain Layout -def flatMap[S, A, B](prev: State[S, A])(f: A => State[S, B]): State[S, B] - = { s => +def flatMap[S, A, B](p: State[S, A])(f: A => State[S, B]): State[S, B] = + { s => \end_layout \begin_layout Plain Layout - val (a, newState) = prev(s) // Compute result of type `A`, updating - the state. + val (a, newState) = p(s) // Compute result of type `A` and the new state. \end_layout \begin_layout Plain Layout - f(a)(newState) // Pass the updated state to the next computatio -n. + f(a)(newState) // Pass the new state to the next computation. \end_layout \begin_layout Plain Layout @@ -12842,8 +12885,8 @@ State \end_inset - monad is when implementing a random number generator. - A simple generator is the + monad is for implementing pseudo-random number generators. + A simple such generator is the \series bold Lehmer's algorithm \series default @@ -12901,7 +12944,7 @@ updating \begin_inset Formula $x_{n+1}=\text{lehmer}\,(x_{n})$ \end_inset -, can be implemented as: +, can be written as: \begin_inset listings inline false status open @@ -12913,30 +12956,8 @@ def lehmer(x: Long): Long = (x * 48271L) % ((1L << 31) - 1L) \end_inset -In many applications, one needs uniformly distributed floating-point numbers - in the interval -\begin_inset Formula $\left[0,1\right]$ -\end_inset - -. - To produce such numbers, let us define a helper function: -\begin_inset listings -inline false -status open - -\begin_layout Plain Layout - -def uniform(x: Long): Double = (x - 1L).toDouble / ((1L << 31) - 3L) // - Enforce the interval [0, 1]. -\end_layout - -\end_inset - - -\end_layout - -\begin_layout Standard -To use the uniform generator, we need to provide an initial value + To use the random number generator, we need to provide an initial value + \begin_inset Formula $x_{0}$ \end_inset @@ -12986,13 +13007,8 @@ val s1 = lehmer(s0) \begin_layout Plain Layout -val r1 = uniform(s1) -\end_layout - -\begin_layout Plain Layout - ... - // Use pseudo-random value r1. + // Use pseudo-random value s1. \end_layout \begin_layout Plain Layout @@ -13002,13 +13018,8 @@ val s2 = lehmer(s1) \begin_layout Plain Layout -val r2 = uniform(s2) -\end_layout - -\begin_layout Plain Layout - ... - // Use pseudo-random value r2. + // Use pseudo-random value s2. \end_layout \begin_layout Plain Layout @@ -13018,7 +13029,7 @@ val s3 = lehmer(s2) // And so on. 
\end_inset -We need to keep track of the generator's state values +We need to keep track of the generator's successive state values \begin_inset listings inline true status open @@ -13042,7 +13053,7 @@ s2 \end_inset -, ..., that are not directly needed for other computations. +, etc., and never reuse older values. This \begin_inset Quotes eld \end_inset @@ -13052,7 +13063,7 @@ keeping \begin_inset Quotes erd \end_inset - is error-prone since we might reuse a previous generator state by mistake. + is error-prone since we might by mistake reuse a previous generator state. The \begin_inset listings inline true @@ -13082,13 +13093,30 @@ State \end_layout \begin_layout Standard -As a simple example, consider the task of generating uniformly distributed - floating-point numbers in the interval +As an example, consider the task of generating uniformly distributed floating-po +int numbers in the interval \begin_inset Formula $\left[0,1\right]$ \end_inset . - We need to maintain the generator state while computing the result. + To produce such numbers, define a helper function: +\begin_inset listings +inline false +status open + +\begin_layout Plain Layout + +def uniform(x: Long): Double = (x - 1L).toDouble / ((1L << 31) - 3L) // + Enforce the interval [0, 1]. +\end_layout + +\end_inset + + +\end_layout + +\begin_layout Standard +We need to maintain the generator state while computing the result. The floating-point generator is implemented as a monadic value of type \begin_inset listings @@ -13405,7 +13433,7 @@ Eval[A] \end_inset can be eager (available now) or lazy (available later). - Values of these sub-types can be combined with correct logic: for instance, + Values of these sub-types should be combined with correct logic: for instance, a combination of eager and lazy values automatically becomes lazy. \end_layout @@ -13810,17 +13838,13 @@ pure with a callback argument: \begin_inset listings +lstparams "numbers=left" inline false status open \begin_layout Plain Layout -def pure(x: Int)(callback: Int => Unit): Unit = -\end_layout - -\begin_layout Plain Layout - - callback(x) +def pure(x: Int)(callback: Int => Unit): Unit = callback(x) \end_layout \begin_layout Plain Layout @@ -13918,7 +13942,7 @@ callback(z) \end_inset - is run. + is run (line 7 above). If we need to extend this program with some more calculation steps, we would have to add extra code within the deepest-nested scope of \begin_inset listings @@ -14192,7 +14216,7 @@ Unit For instance, that value could show error information or give access to processes that were scheduled concurrently. 
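 A tiny sketch of ours illustrating that idea:
\begin_inset listings
inline false
status open

\begin_layout Plain Layout

// A callback whose result reports whether the consumer accepted the value.
\end_layout

\begin_layout Plain Layout

def pureInt(x: Int)(callback: Int => Either[String, Unit]): Either[String, Unit] =
\end_layout

\begin_layout Plain Layout

  callback(x)
\end_layout

\begin_layout Plain Layout

\end_layout

\begin_layout Plain Layout

scala> pureInt(256)(n => if (n < 100) Right(()) else Left(s"value $n is too large"))
\end_layout

\begin_layout Plain Layout

res0: Either[String, Unit] = Left(value 256 is too large)
\end_layout

\end_inset
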
So, we generalize the type constructor -\begin_inset Formula $L^{A}$ +\begin_inset Formula $L$ \end_inset to @@ -14637,7 +14661,7 @@ mult4 \end_inset -, has a certain cost, which is a value of a monoid type +, has a certain cost represented by a value of a monoid type \begin_inset Formula $W$ \end_inset @@ -14893,8 +14917,12 @@ status open \begin_layout Plain Layout -def addCost[A](c: Cont[W, A], cost: W): Cont[W, A] = { callback => c(callback) - |+| cost } +def addCost[A](c: Cont[W, A], cost: W): Cont[W, A] = { callback => +\end_layout + +\begin_layout Plain Layout + + c(callback) |+| cost } \end_layout \end_inset @@ -15668,7 +15696,7 @@ Solve the \begin_inset Formula $n$ \end_inset --queens problem on an +-queens problem on a \begin_inset Formula $3\times3\times3$ \end_inset @@ -15981,19 +16009,19 @@ literal "false" \end_inset - Use the + Use the monad type \begin_inset listings inline true status open \begin_layout Plain Layout -State[S, Int] +State[S, A] \end_layout \end_inset - monad with + with \begin_inset listings inline true status open @@ -17684,8 +17712,7 @@ This chapter introduced semi-monads to encode nested iteration as functor \end_layout \begin_layout Standard -When functor blocks describe iterations over data collections, a source - line +In a functor block that iterates over data collections, a source line \begin_inset listings inline true status open @@ -17759,19 +17786,19 @@ f(x) \end_inset . - We expect to get the same result by iterating over a collection + We expect to get the same result if \begin_inset listings inline true status open \begin_layout Plain Layout -c +y \end_layout \end_inset - whose values + iterates over a collection whose values \begin_inset listings inline true status open @@ -18376,7 +18403,7 @@ status open \begin_layout Plain Layout -S[_] +S \end_layout \end_inset @@ -18473,7 +18500,7 @@ c \begin_layout Standard \begin_inset Formula \begin{equation} -\text{flm}\,(f^{:A\rightarrow B}\bef g^{:B\rightarrow S^{C}})=f^{\uparrow S}\bef\text{flm}\,(g)\quad.\label{eq:left-naturality-law-flatMap} +\text{flm}^{A,C}(f^{:A\rightarrow B}\bef g^{:B\rightarrow S^{C}})=f^{\uparrow S}\bef\text{flm}^{B,C}(g)\quad.\label{eq:left-naturality-law-flatMap} \end{equation} \end_inset @@ -18547,7 +18574,7 @@ flatMap \end_inset . - More precisely, we may call this equation the naturality law of + We also call this equation the naturality law of \begin_inset listings inline true status open @@ -18555,7 +18582,7 @@ status open \begin_layout Plain Layout \noindent -flatMap[A, B] +flatMap[A, C] \end_layout \end_inset @@ -18581,11 +18608,20 @@ A \begin_inset Quotes erd \end_inset - since -\begin_inset Formula $f^{\uparrow S}$ + since the law operates on the type parameter +\begin_inset listings +inline true +status open + +\begin_layout Plain Layout +\noindent + +A +\end_layout + \end_inset - acts on the type parameter + while \begin_inset listings inline true status open @@ -18593,12 +18629,12 @@ status open \begin_layout Plain Layout \noindent -A +C \end_layout \end_inset -. + is unchanged. 
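 A quick spot-check of this law for the Seq monad (a sketch of ours):
\begin_inset listings
inline false
status open

\begin_layout Plain Layout

// Left naturality: flatMap(f andThen g) == map(f) andThen flatMap(g), on a sample value.
\end_layout

\begin_layout Plain Layout

val f: Int => String = _.toString
\end_layout

\begin_layout Plain Layout

val g: String => Seq[Char] = _.toSeq
\end_layout

\begin_layout Plain Layout

\end_layout

\begin_layout Plain Layout

scala> Seq(10, 20, 30).flatMap(f andThen g) == Seq(10, 20, 30).map(f).flatMap(g)
\end_layout

\begin_layout Plain Layout

res0: Boolean = true
\end_layout

\end_inset
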
\end_layout \begin_layout Standard @@ -18638,7 +18674,7 @@ S^{A}\ar[ru]\sp(0.5){\text{flm}\,(f^{:A\rightarrow S^{B}})~~~}\ar[rr]\sb(0.5){\t \begin_layout Standard \begin_inset Formula \begin{equation} -\text{flm}\,(f^{:A\rightarrow S^{B}}\bef g^{\uparrow S})=\text{flm}\,(f)\bef g^{\uparrow S}\quad.\label{eq:right-naturality-law-flatMap} +\text{flm}^{A,C}(f^{:A\rightarrow S^{B}}\bef g^{\uparrow S})=\text{flm}^{A,B}(f)\bef g^{\uparrow S}\quad.\label{eq:right-naturality-law-flatMap} \end{equation} \end_inset @@ -18648,7 +18684,7 @@ S^{A}\ar[ru]\sp(0.5){\text{flm}\,(f^{:A\rightarrow S^{B}})~~~}\ar[rr]\sb(0.5){\t \begin_layout Standard \noindent -This is a +This is the \begin_inset Index idx status open @@ -18664,11 +18700,11 @@ flatMap \series bold right naturality \series default - law or + law or the \begin_inset Quotes eld \end_inset -naturality with respect to +naturality law with respect to \begin_inset listings inline true status open @@ -18769,7 +18805,7 @@ S^{A}\ar[ru]\sp(0.5){\text{flm}\,(f^{:A\rightarrow S^{B}})~~\ }\ar[rr]\sb(0.5){\ \begin_inset Formula \begin{equation} -\text{flm}\,\big(f^{:A\rightarrow S^{B}}\bef\text{flm}\,(g^{:B\rightarrow S^{C}})\big)=\text{flm}\left(f\right)\bef\text{flm}\left(g\right)\quad.\label{eq:associativity-law-flatMap} +\text{flm}^{A,C}\big(f^{:A\rightarrow S^{B}}\bef\text{flm}^{B,C}(g^{:B\rightarrow S^{C}})\big)=\text{flm}^{A,B}(f)\bef\text{flm}^{B,C}(g)\quad.\label{eq:associativity-law-flatMap} \end{equation} \end_inset @@ -18840,7 +18876,7 @@ status open \begin_layout Plain Layout -Semi-monad +Semimonad \end_layout \end_inset @@ -18902,7 +18938,7 @@ status open \begin_layout Plain Layout -abstract class Semi-monad[F[_]: Functor] { +abstract class Semimonad[F[_]: Functor] { \end_layout \begin_layout Plain Layout @@ -18917,13 +18953,13 @@ abstract class Semi-monad[F[_]: Functor] { \begin_layout Plain Layout -implicit class Semi-monadOps[F[_]: Semi-monad, A](fa: F[A]) { // Define - flatMap as an extension method. +implicit class SemimonadOps[F[_]: Semimonad, A](fa: F[A]) { // Define flatMap + as an extension method. \end_layout \begin_layout Plain Layout - def flatMap[B](f: A => F[B]): F[B] = implicitly[Semi-monad[F]].flatMap(fa)(f) + def flatMap[B](f: A => F[B]): F[B] = implicitly[Semimonad[F]].flatMap(fa)(f) \end_layout \begin_layout Plain Layout @@ -18933,20 +18969,20 @@ implicit class Semi-monadOps[F[_]: Semi-monad, A](fa: F[A]) { // Define \begin_layout Plain Layout -def checkSemi-monadLaws[F[_], A, B, C]()(implicit ff: Semi-monad[F], // - Use the `Arbitrary` typeclass +def checkSemimonadLaws[F[_], A, B, C]()(implicit ff: Semimonad[F], // + Use the `Arbitrary` typeclass from `scalacheck`: \end_layout \begin_layout Plain Layout fa: Arbitrary[F[A]], ab: Arbitrary[A => F[B]], bc: Arbitrary[B => F[C]]) - = { // from `scalacheck`. + = { \end_layout \begin_layout Plain Layout - forAll { (f: A => F[B], g: B => F[C], fa: F[A]) => // - Associativity law of flatMap. + forAll { (f: A => F[B], g: B => F[C], fa: F[A]) => // Associativity + law of flatMap. \end_layout \begin_layout Plain Layout @@ -18961,13 +18997,13 @@ def checkSemi-monadLaws[F[_], A, B, C]()(implicit ff: Semi-monad[F], // \begin_layout Plain Layout -} // Assuming that a Semi-monad instance was defined for Seq[_], check the - laws with specific A, B, C. +} // Assuming that a Semimonad instance was defined for Seq, check the laws \end_layout \begin_layout Plain Layout -checkSemi-monadLaws[Seq, Int, String, Double]() +checkSemimonadLaws[Seq, Int, String, Double]() // with specific A, B, + C. 
\end_layout \end_inset @@ -19021,8 +19057,31 @@ deflate \end_inset function. - We then showed that these two functions are equivalent if certain laws - are assumed to hold for + We then showed that +\begin_inset listings +inline true +status open + +\begin_layout Plain Layout + +filter +\end_layout + +\end_inset + + and +\begin_inset listings +inline true +status open + +\begin_layout Plain Layout + +deflate +\end_layout + +\end_inset + + are equivalent if certain laws are assumed to hold for \begin_inset listings inline true status open @@ -19072,7 +19131,19 @@ flatten \end_inset - has fewer laws, and that its laws are simpler to verify. + (denoted by +\begin_inset Quotes eld +\end_inset + + +\begin_inset Formula $\text{ftn}$ +\end_inset + + +\begin_inset Quotes erd +\end_inset + +) has fewer laws, and that its laws are simpler to verify. \end_layout \begin_layout Standard @@ -19369,11 +19440,11 @@ flatten \end_inset . - Prove that + To verify that \begin_inset Formula $p^{\prime}=p$ \end_inset -: +, write: \begin_inset Formula \[ p^{\prime}=q\left(\text{id}\right)=\gunderline{\text{id}^{\uparrow S}}\bef p=\text{id}\bef p=p\quad. @@ -19446,11 +19517,11 @@ noprefix "false" . Then define a new function -\begin_inset Formula $q^{\prime}(f)\triangleq f^{\uparrow S}\bef\text{ftn}$ +\begin_inset Formula $q^{\prime}(f)\triangleq f^{\uparrow S}\bef p$ \end_inset . - Prove that + To verify that \begin_inset Formula $q^{\prime}(f)=q(f)$ \end_inset @@ -19566,24 +19637,24 @@ flatten \begin_inset Formula $\text{ftn}\triangleq\text{flm}\left(\text{id}\right)$ \end_inset - satisfies its + satisfies the following \emph on two \emph default - laws, with an arbitrary + laws (for an arbitrary \begin_inset Formula $f^{:A\rightarrow B}$ \end_inset -: +): \begin_inset Formula \begin{align} -\text{naturality law of }\text{ftn}:\quad & f^{\uparrow S\uparrow S}\bef\text{ftn}=\text{ftn}\bef f^{\uparrow S}\quad,\label{eq:naturality-law-of-flatten}\\ -\text{associativity law of }\text{ftn}:\quad & \text{ftn}^{\uparrow S}\bef\text{ftn}=\text{ftn}\bef\text{ftn}\quad.\label{eq:associativity-law-of-flatten} +\text{naturality law of \texttt{flatten}}:\quad & f^{\uparrow S\uparrow S}\bef\text{ftn}=\text{ftn}\bef f^{\uparrow S}\quad,\label{eq:naturality-law-of-flatten}\\ +\text{associativity law of \texttt{flatten}}:\quad & \text{ftn}^{\uparrow S}\bef\text{ftn}=\text{ftn}\bef\text{ftn}\quad.\label{eq:associativity-law-of-flatten} \end{align} \end_inset -The following type diagrams illustrate these laws: +The following type diagrams show the type parameters used in these laws: \end_layout \begin_layout Standard @@ -20193,7 +20264,8 @@ Try \end_inset is a fixed type. - Show that this monad satisfies the associativity law. + Show that this monad satisfies the associativity law, assuming a fully + parametric implementation. 
\end_layout \begin_layout Subparagraph @@ -20214,10 +20286,10 @@ flatten \end_inset is -\begin_inset Formula $\text{ftn}:Z+\left(Z+A\right)\rightarrow Z+A$ +\begin_inset Formula $Z+\left(Z+A\right)\rightarrow Z+A$ \end_inset -, and its code is: +; the code is: \begin_inset listings inline false status open @@ -20262,24 +20334,8 @@ A & \bbnum 0 & \text{id} \end_inset - -\end_layout - -\begin_layout Standard -Since -\begin_inset listings -inline true -status open - -\begin_layout Plain Layout - -flatten -\end_layout - -\end_inset - - is fully parametric, both sides of the associativity law are fully parametric - functions with the type signature +Both sides of the associativity law are fully parametric functions with + the type signature \begin_inset Formula $Z+Z+Z+A\rightarrow Z+A$ \end_inset @@ -20319,7 +20375,7 @@ only one \end_layout \begin_layout Standard -To make this argument rigorous, we may use the Curry-Howard correspondence +To make this argument rigorous, we could use the Curry-Howard correspondence and the LJT algorithm (see Section \begin_inset space ~ \end_inset @@ -20375,7 +20431,7 @@ For comparison, the Scala code for \begin_inset Formula $\text{ftn}^{\uparrow F}$ \end_inset - (had we needed to write it) would look like this: + looks like this: \begin_inset listings inline false status open @@ -20743,7 +20799,7 @@ Flattening that result gives a list of all values : \begin_inset Formula \[ -p\triangleright\text{ftn}^{\uparrow\text{List}}\triangleright\text{ftn}=\left[x_{11},x_{12},...,x_{21},x_{22},...,y_{11},y_{12},...,y_{21},y_{22},...,...\right]\quad. +p\triangleright\text{ftn}^{\uparrow\text{List}}\triangleright\text{ftn}^{A}=\left[x_{11},x_{12},...,x_{21},x_{22},...,y_{11},y_{12},...,y_{21},y_{22},...,...\right]\quad. \] \end_inset @@ -20756,7 +20812,7 @@ Applying \begin_inset Formula $p$ \end_inset - will flatten the outer lists: + will first flatten the outer lists: \begin_inset Formula \[ p\triangleright\text{ftn}^{\text{List}^{A}}=\left[\left[x_{11},x_{12},...\right],\left[x_{21},x_{22},...\right],...,\left[y_{11},y_{12},...\right],\left[y_{21},y_{22},...\right],...\right]\quad. @@ -20764,10 +20820,10 @@ p\triangleright\text{ftn}^{\text{List}^{A}}=\left[\left[x_{11},x_{12},...\right] \end_inset -Flattening that value results in: +Flattening that value again will give: \begin_inset Formula \[ -p\triangleright\text{ftn}^{\text{List}^{A}}\triangleright\text{ftn}=\left[x_{11},x_{12},...,x_{21},x_{22},...,y_{11},y_{12},...,y_{21},y_{22},...,...\right]\quad. +p\triangleright\text{ftn}^{\text{List}^{A}}\triangleright\text{ftn}^{A}=\left[x_{11},x_{12},...,x_{21},x_{22},...,y_{11},y_{12},...,y_{21},y_{22},...,...\right]\quad. \] \end_inset @@ -21001,7 +21057,7 @@ We cannot split that column because the expression \end_inset -Substituting this value +Substituting the same value \begin_inset Formula $h$ \end_inset @@ -21029,11 +21085,11 @@ This holds by the inductive assumption. 
\end_layout \begin_layout Standard -It remains to examine the second possibility, +It remains to examine the second possibility ( \begin_inset Formula $h=\bbnum 0+g\times k$ \end_inset -: +): \begin_inset Formula \begin{align*} \text{with }h=\bbnum 0+g\times k:\quad & \left(\bbnum 0+\left(\bbnum 0+g\times k\right)\times t\right)\triangleright\text{ftn}^{\text{List}^{A}}\bef\text{ftn}^{A}\\ @@ -21041,7 +21097,7 @@ It remains to examine the second possibility, \text{code of }\pplus:\quad & =\big(\bbnum 0+g\times\big(k\pplus\big(t\triangleright\overline{\text{ftn}^{\text{List}^{A}}}\big)\big)\big)\triangleright\text{ftn}^{A}\\ \text{code of }\text{ftn}^{A}:\quad & =g\pplus\big(k\pplus\big(t\triangleright\overline{\text{ftn}^{\text{List}^{A}}}\big)\big)\triangleright\overline{\text{ftn}}\\ \text{Exercise~\ref{subsec:Exercise-flatten-concat-distributive-law}}:\quad & =\gunderline{g\pplus\big(k\triangleright\overline{\text{ftn}}\big)}\pplus\big(t\triangleright\overline{\text{ftn}^{\text{List}^{A}}}\triangleright\overline{\text{ftn}}\big)\\ - & =\left(\bbnum 0+g\times k\right)\triangleright\overline{\text{ftn}}\pplus\big(\gunderline{t\triangleright\overline{\text{ftn}^{\text{List}^{A}}}\triangleright\overline{\text{ftn}}}\big)\\ + & =\left(\bbnum 0+g\times k\right)\triangleright\overline{\text{ftn}}\,\pplus\big(\gunderline{t\triangleright\overline{\text{ftn}^{\text{List}^{A}}}\triangleright\overline{\text{ftn}}}\big)\\ \text{inductive assumption}:\quad & =\text{ftn}\,(h)\pplus\big(t\triangleright\overline{\text{ftn}^{\uparrow\text{List}}}\triangleright\overline{\text{ftn}}\big)\quad. \end{align*} @@ -21255,8 +21311,7 @@ where \end_inset -To verify the associativity law, it is convenient to substitute a value - +To verify the associativity law, substitute a value \begin_inset Formula $\left(\left(a\times w_{1}\right)\times w_{2}\right)\times w_{3}$ \end_inset @@ -21279,11 +21334,11 @@ The operation \begin_inset Formula $\oplus$ \end_inset - is associative since + is associative because, by assumption, \begin_inset Formula $W$ \end_inset - is a semigroup. + is a lawful semigroup. So, both sides of the law are equal. \end_layout @@ -21380,7 +21435,7 @@ Had the code not exchanged the order of \end_layout \begin_layout Subsection -From semi-monads to monads: Motivating the identity laws +From semi-monads to monads: The identity laws \end_layout \begin_layout Standard @@ -21413,7 +21468,7 @@ merge \begin_inset Quotes erd \end_inset - the effects associatively. + the effects. It is generally useful to be able to create values with an \begin_inset Quotes eld \end_inset @@ -21532,8 +21587,8 @@ empty effect \emph on one \emph default - element, because an iteration of such an array goes over a single value, - which is equivalent to no iteration. + element. + An iteration over such an array will just need to process that single value. In a functor block, this intuition says that a source line with an \begin_inset Quotes eld \end_inset @@ -21566,8 +21621,8 @@ y = x \end_inset -. - This line must occur either before or after another source line. + with no iteration. + This line may occur either before or after another source line. 
So, we need to examine two situations: first, when an empty effect comes before another source line: \end_layout @@ -21623,12 +21678,12 @@ result1 = for { \begin_layout Plain Layout - z <- p(y) // p: A => M[B] + z <- g(y) // g: A => M[B] \end_layout \begin_layout Plain Layout - // Same as z <- pure(x).flatMap(p) + // Same as z <- pure(x).flatMap(g) \end_layout \end_inset @@ -21688,12 +21743,12 @@ result2 = for { \begin_layout Plain Layout - z <- p(y) // p: A => M[B] + z <- g(y) // g: A => M[B] \end_layout \begin_layout Plain Layout - // Same as z <- p(x) + // Same as z <- g(x) \end_layout \end_inset @@ -21928,7 +21983,7 @@ status open \begin_layout Plain Layout -g.flatMap(pure) == g +p.flatMap(pure) == p \end_layout \end_inset @@ -22006,12 +22061,12 @@ def checkMonadIdentityLaws[F[_], A, B]()(implicit mf: Monad[F], sf: Semi-monad[F \begin_layout Plain Layout - forAll { (fa: F[A]) => + forAll { (p: F[A]) => \end_layout \begin_layout Plain Layout - fa.flatMap(mf.pure[A]) shouldEqual fa // Right identity law. + p.flatMap(mf.pure[A]) shouldEqual p // Right identity law. \end_layout \begin_layout Plain Layout @@ -22141,7 +22196,7 @@ wrapped unit \begin_inset Formula $\text{wu}_{M}$ \end_inset -), derivations turn out to be easier when using +), derivations are easier when using \begin_inset listings inline true status open @@ -22263,7 +22318,7 @@ flatten \end_layout \begin_layout Standard -Since the laws of semi-monads are simpler when formulated via the +The laws of semi-monads are simpler when formulated via \begin_inset listings inline true status open @@ -22275,7 +22330,7 @@ flatten \end_inset - method, let us convert the identity laws to that form. +, so let us convert the identity laws to that form. We use the code for \begin_inset listings inline true @@ -22384,7 +22439,7 @@ flatten \end_inset . - Conversely, if Eq. + If Eq. \begin_inset space ~ \end_inset @@ -22486,7 +22541,7 @@ M^{A}\ar[ru]\sp(0.5){(\text{pu}^{A})^{\uparrow M}\quad}\ar[rr]\sb(0.5){\text{id} \end_layout \begin_layout Standard -In the next section, we will see reasons why these laws have their names. +In the next section, we will give reasons for the names of these laws. \end_layout \begin_layout Subsection @@ -22565,7 +22620,7 @@ Option \begin_inset Formula $A\rightarrow\bbnum 1+B$ \end_inset - can be composed using the Kleisli composition denoted by + can be composed using the Kleisli composition operator denoted by \begin_inset Formula $\diamond_{_{\text{Opt}}}$ \end_inset @@ -22600,7 +22655,7 @@ noprefix "false" stated the general properties of Kleisli composition. We will now show that the Kleisli composition gives a useful way of formulating - the laws of a monad. + the monad laws. 
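 As a reminder, a minimal Scala sketch of this Kleisli composition for the Option monad looks like this (the name composeK is chosen only for this illustration; the code relies on the standard flatMap method of Option):
\begin_inset listings
inline false
status open

\begin_layout Plain Layout

// Kleisli composition for Option: combine A => Option[B] with B => Option[C].
\end_layout

\begin_layout Plain Layout

def composeK[A, B, C](f: A => Option[B], g: B => Option[C]): A => Option[C] =
\end_layout

\begin_layout Plain Layout

  a => f(a).flatMap(g)
\end_layout

\begin_layout Plain Layout

\end_layout

\begin_layout Plain Layout

// Example: parse a string to an integer, then take its reciprocal if nonzero.
\end_layout

\begin_layout Plain Layout

val parse: String => Option[Int] = _.toIntOption
\end_layout

\begin_layout Plain Layout

val reciprocal: Int => Option[Double] = n => if (n == 0) None else Some(1.0 / n)
\end_layout

\begin_layout Plain Layout

val safeReciprocalOfString: String => Option[Double] = composeK(parse, reciprocal)
\end_layout

\begin_layout Plain Layout

// safeReciprocalOfString("4") == Some(0.25); safeReciprocalOfString("abc") == None
\end_layout

\end_inset
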
\end_layout \begin_layout Standard @@ -22618,7 +22673,7 @@ Kleisli composition \begin_inset Formula $M$ \end_inset -, denoted +, denoted by \begin_inset Formula $\diamond_{_{M}}$ \end_inset @@ -22773,11 +22828,15 @@ For a lawful monad \begin_inset Formula $\diamond_{_{M}}$ \end_inset - satisfies the identity laws: + satisfies the identity laws: for any +\begin_inset Formula $f^{:A\rightarrow M^{B}}$ +\end_inset + +, \begin_inset Formula \begin{align} -\text{left identity law of }\diamond_{_{M}}:\quad & \text{pu}_{M}\diamond_{_{M}}f=f\quad,\quad\forall f^{:A\rightarrow M^{B}}\quad,\label{eq:kleisli-left-identity-law}\\ -\text{right identity law of }\diamond_{_{M}}:\quad & f\diamond_{_{M}}\text{pu}_{M}=f\quad,\quad\forall f^{:A\rightarrow M^{B}}\quad.\label{eq:kleisli-right-identity-law} +\text{left identity law of }\diamond_{_{M}}:\quad & \text{pu}_{M}\diamond_{_{M}}f=f\quad,\label{eq:kleisli-left-identity-law}\\ +\text{right identity law of }\diamond_{_{M}}:\quad & f\diamond_{_{M}}\text{pu}_{M}=f\quad.\label{eq:kleisli-right-identity-law} \end{align} \end_inset @@ -22815,7 +22874,7 @@ noprefix "false" \end_inset ) hold. - Using the definition + Using definition \begin_inset space ~ \end_inset @@ -22829,18 +22888,22 @@ noprefix "false" \end_inset -), we find: +), we directly verify the identity laws: \begin_inset Formula \begin{align*} -\text{left identity law of }\diamond_{_{M}},\text{ should equal }f:\quad & \text{pu}_{M}\diamond_{_{M}}f=\gunderline{\text{pu}_{M}\bef\text{flm}_{M}}(f)\\ +\text{left identity law of }\diamond_{_{M}}:\quad & \text{pu}_{M}\diamond_{_{M}}f=\gunderline{\text{pu}_{M}\bef\text{flm}_{M}}(f)\\ \text{use Eq.~(\ref{eq:monad-left-identity-law-for-flatMap})}:\quad & \quad=f\quad,\\ -\text{right identity law of }\diamond_{_{M}},\text{ should equal }f:\quad & f\diamond_{_{M}}\text{pu}_{M}=f\bef\gunderline{\text{flm}_{M}(\text{pu}_{M})}\\ +\text{right identity law of }\diamond_{_{M}}:\quad & f\diamond_{_{M}}\text{pu}_{M}=f\bef\gunderline{\text{flm}_{M}(\text{pu}_{M})}\\ \text{use Eq.~(\ref{eq:monad-right-identity-law-for-flatMap})}:\quad & \quad=f\bef\text{id}=f\quad. \end{align*} \end_inset +\begin_inset Formula $\square$ +\end_inset + + \end_layout \begin_layout Standard @@ -22881,16 +22944,16 @@ lifting : \begin_inset Formula \[ -\text{flm}_{M}:(A\rightarrow M^{B})\rightarrow(M^{A}\rightarrow M^{B})\quad, +\text{flm}_{M}:(A\rightarrow M^{B})\rightarrow(M^{A}\rightarrow M^{B})\quad. \] \end_inset -from Kleisli functions +It transforms Kleisli functions \begin_inset Formula $A\rightarrow M^{B}$ \end_inset - to + into \begin_inset Formula $M$ \end_inset @@ -22898,15 +22961,16 @@ from Kleisli functions \begin_inset Formula $M^{A}\rightarrow M^{B}$ \end_inset -, except that Kleisli functions must be composed using +. + Keep in mind that that Kleisli functions must be composed using \begin_inset Formula $\diamond_{_{M}}$ \end_inset -, while +, and that the function \begin_inset Formula $\text{pu}_{M}$ \end_inset - plays the role of the Kleisli-identity function. + plays the role of the identity functions among the Kleisli functions. 
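 As a small illustration, this lifting can be sketched in Scala for the Option monad (the name flm below is used only for this example):
\begin_inset listings
inline false
status open

\begin_layout Plain Layout

// flatMap viewed as a lifting: a Kleisli function A => Option[B] becomes Option[A] => Option[B].
\end_layout

\begin_layout Plain Layout

def flm[A, B](f: A => Option[B]): Option[A] => Option[B] = _.flatMap(f)
\end_layout

\begin_layout Plain Layout

\end_layout

\begin_layout Plain Layout

val halveIfEven: Int => Option[Int] = n => if (n % 2 == 0) Some(n / 2) else None
\end_layout

\begin_layout Plain Layout

val lifted: Option[Int] => Option[Int] = flm(halveIfEven)
\end_layout

\begin_layout Plain Layout

// lifted(Some(10)) == Some(5); lifted(Some(3)) == None; lifted(None) == None
\end_layout

\begin_layout Plain Layout

// The Kleisli identity is pure: flm((a: A) => Some(a)) acts as identity[Option[A]].
\end_layout

\end_inset
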
\end_layout \begin_layout Subsubsection @@ -23153,7 +23217,7 @@ We find that the properties of the operation status open \begin_layout Plain Layout -It means that Kleisli functions satisfy the properties of morphisms of a +It means that Kleisli functions satisfy the properties of morphisms in a category; see Section \begin_inset space ~ \end_inset @@ -23213,7 +23277,11 @@ x => for { \end_inset -corresponds to this Kleisli composition: +corresponds to this Kleisli composition +\begin_inset Formula $f\diamond_{_{_{M}}}g\diamond_{_{_{M}}}h$ +\end_inset + +, or more verbosely: \begin_inset Formula \[ (x\rightarrow f(x))\diamond_{_{_{M}}}(y\rightarrow g(y))\diamond_{_{_{M}}}(z\rightarrow h(z))\quad. @@ -23245,8 +23313,19 @@ flatMap its \emph default laws. - In other words, we may equally well use the Kleisli composition when formulatin -g the requirements for a functor + One may equally well use the Kleisli composition instead of +\begin_inset listings +inline true +status open + +\begin_layout Plain Layout + +flatMap +\end_layout + +\end_inset + + when formulating the requirements for a functor \begin_inset Formula $M$ \end_inset @@ -23339,7 +23418,7 @@ noprefix "false" \end_inset -Note that this law makes parentheses unnecessary in the expression +This law makes parentheses unnecessary in the expression \begin_inset Formula $f\bef g\diamond_{_{M}}h$ \end_inset @@ -23477,7 +23556,7 @@ noprefix "false" \begin_inset Quotes erd \end_inset - is associative, + is associative: \begin_inset Formula \begin{align*} & (f\bef g)\diamond_{_{M}}h=(f\bef g)\bef\text{flm}_{M}(h)=f\bef(g\bef\text{flm}_{M}(h))=f\bef(g\diamond_{_{M}}h)\quad. @@ -23534,6 +23613,10 @@ noprefix "false" \end_inset +\begin_inset Formula $\square$ +\end_inset + + \end_layout \begin_layout Standard @@ -23730,7 +23813,7 @@ flatMap \begin_inset Formula $\text{flm}_{M}$ \end_inset - into both sides: + into both sides of that law: \begin_inset Formula \begin{align*} \text{left-hand side}:\quad & \text{flm}_{M}(f\bef\text{flm}_{M}(g))=\text{id}\diamond_{_{M}}(\gunderline{f\bef\text{id}}\diamond_{_{M}}g)=\text{id}\diamond_{_{M}}(f\diamond_{_{M}}g)\\ @@ -23742,6 +23825,11 @@ flatMap \end_inset Both sides of the law are now equal. + +\begin_inset Formula $\square$ +\end_inset + + \end_layout \begin_layout Standard @@ -23761,7 +23849,8 @@ flatMap \begin_inset Formula $\diamond_{_{M}}$ \end_inset -, but we omit those derivations. +. + We omit those derivations. \end_layout @@ -23841,7 +23930,7 @@ flipped Kleisli \series bold flipped Kleisli \series default - makes direct proofs of laws much shorter. + makes direct proofs of laws shorter. That trick applies to monads of a function type, such as the continuation and the state monads. \end_layout @@ -23894,7 +23983,7 @@ This function type has two curried arguments. We obtain: \begin_inset Formula \[ -\left(B\rightarrow R\right)\rightarrow A\rightarrow R\quad. +A\rightarrow\text{Cont}^{R,B}\cong\left(B\rightarrow R\right)\rightarrow A\rightarrow R\quad. \] \end_inset @@ -24180,6 +24269,11 @@ noprefix "false" \end_inset ), the proof is finished. + +\begin_inset Formula $\square$ +\end_inset + + \end_layout \begin_layout Standard @@ -24205,8 +24299,8 @@ flatten \end_inset These type signatures are complicated and confusing to read. 
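 For illustration only, here is a sketch of how these definitions could be written in Scala (the type alias and the names are chosen for this example and are not meant as a definitive implementation):
\begin_inset listings
inline false
status open

\begin_layout Plain Layout

// The continuation monad and its flatMap, written out directly in Scala:
\end_layout

\begin_layout Plain Layout

type Cont[R, A] = (A => R) => R
\end_layout

\begin_layout Plain Layout

def flatMap[R, A, B](ca: Cont[R, A])(f: A => Cont[R, B]): Cont[R, B] =
\end_layout

\begin_layout Plain Layout

  (k: B => R) => ca(a => f(a)(k))   // Pass a continuation that runs f on the intermediate value.
\end_layout

\end_inset
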
- Direct proofs of the monad laws for these functions are much longer than - the proofs of Statements + Direct proofs of the monad laws for these functions are more complicated + than the proofs shown in Statements \begin_inset space ~ \end_inset @@ -24295,7 +24389,11 @@ A constant functor \begin_inset Formula $F^{A}\triangleq Z$ \end_inset - is a lawful semi-monad because we can implement: + is a lawful semi-monad with +\begin_inset Formula $\text{ftn}_{F}$ +\end_inset + + defined by: \begin_inset Formula \[ \text{ftn}_{F}=\text{id}^{:Z\rightarrow Z}\quad. @@ -24484,7 +24582,7 @@ The functor \begin_inset Formula $Z$ \end_inset - are fixed but arbitrary types, cannot have a + are fixed but arbitrary types, cannot have a fully parametric \begin_inset listings inline true status open @@ -24568,7 +24666,7 @@ Products \end_layout \begin_layout Standard -The product construction works for semi-monads as well as for monads. +The product construction works for semi-monads and for monads. \end_layout \begin_layout Subsubsection @@ -24594,11 +24692,11 @@ noprefix "false" \begin_layout Standard Given two semi-monads -\begin_inset Formula $F^{A}$ +\begin_inset Formula $F$ \end_inset and -\begin_inset Formula $G^{A}$ +\begin_inset Formula $G$ \end_inset , the functor @@ -24607,15 +24705,15 @@ Given two semi-monads is a semi-monad. If both -\begin_inset Formula $F^{A}$ +\begin_inset Formula $F$ \end_inset and -\begin_inset Formula $G^{A}$ +\begin_inset Formula $G$ \end_inset are monads then -\begin_inset Formula $L^{A}$ +\begin_inset Formula $L$ \end_inset is also a monad. @@ -24827,7 +24925,7 @@ In order to use this law, we need to move the two functions \begin_inset Formula $\text{ftn}_{F}$ \end_inset - next to each other in the expressions + next to each other in the expressions: \begin_inset Formula \[ \big(\text{ftn}_{L}^{\uparrow F}\bef\pi_{1}^{\uparrow F}\bef\text{ftn}_{F}\big)\quad\text{ and }\quad\big(\pi_{1}^{\uparrow F}\bef\text{ftn}_{F}\bef\pi_{1}^{\uparrow F}\bef\text{ftn}_{F}\big)\quad, @@ -24868,7 +24966,7 @@ Now we assume that \begin_inset Formula $G$ \end_inset - are monads with given + are lawful monads with given \begin_inset listings inline true status open @@ -24897,15 +24995,7 @@ pure \end_inset -Assuming that identity laws hold for -\begin_inset Formula $F$ -\end_inset - - and -\begin_inset Formula $G$ -\end_inset - -, we can now verify the identity laws for +To verify the identity laws for \begin_inset Formula $L$ \end_inset @@ -24929,7 +25019,7 @@ Assuming that identity laws hold for & \quad=\big(\big(\Delta\bef\gunderline{(\text{pu}_{F}\boxtimes\text{pu}_{G})\big)^{\uparrow F}\bef\pi_{1}^{\uparrow F}}\bef\text{ftn}_{F}\big)\boxtimes\big(\big(\Delta\bef\gunderline{(\text{pu}_{F}\boxtimes\text{pu}_{G})\big)^{\uparrow G}\bef\pi_{2}^{\uparrow G}}\bef\text{ftn}_{G}\big)\\ & \quad\quad\text{projection laws~(\ref{eq:pair-product-left-projection-law}) and~(\ref{eq:pair-product-right-projection-law})}:\quad\\ & \quad=\big((\gunderline{\Delta\bef\pi_{1}}\bef\text{pu}_{F})^{\uparrow F}\bef\text{ftn}_{F}\big)\boxtimes\big(\big(\gunderline{\Delta\bef\pi_{2}}\bef\text{pu}_{G}\big)^{\uparrow G}\bef\text{ftn}_{G}\big)\\ -\quad & \quad\quad\text{identity laws~(\ref{eq:pair-identity-law-left})}:\\ +\quad & \quad\quad\text{identity laws~(\ref{eq:pair-identity-law-left})}:\quad\\ & \quad=\big(\gunderline{\text{pu}_{F}^{\uparrow F}\bef\text{ftn}_{F}}\big)\boxtimes\big(\gunderline{\text{pu}_{G}^{\uparrow G}\bef\text{ftn}_{G}}\big)=\text{id}\boxtimes\text{id}=\text{id}\quad. 
\end{align*} @@ -24954,7 +25044,7 @@ flatten \end_inset - function is defined by + function is defined by: \begin_inset listings inline false status open @@ -24972,15 +25062,15 @@ def flatten[A]: Pair[Pair[A]] => Pair[A] = { case ((a, b), (c, d)) => (a, \end_inset -A sample calculation shows that +A \begin_inset Quotes eld \end_inset -nested iterations +nested iteration \begin_inset Quotes erd \end_inset - apply functions element by element: + performs an element-by-element function application: \begin_inset listings inline false status open @@ -25611,13 +25701,12 @@ status open \begin_layout Plain Layout -def flatten2_L[A]: ((A, F[A]), F[(A, F[A])]) => (A, F[A]) = { case (afa, - fafa) => +def flatten2_L[A]: ((A, F[A]), F[(A, F[A])]) => (A, F[A]) = { \end_layout \begin_layout Plain Layout - (afa._1, fafa.map(_._1)) + case (afa, fafa) => (afa._1, fafa.map(_._1)) \end_layout \begin_layout Plain Layout @@ -25792,7 +25881,11 @@ As a rule, the co-product of two monads ( \end_inset ) is not a monad. - For simple examples, see Exercise + Examples are +\begin_inset Formula $\bbnum 1+A\times A$ +\end_inset + + (Exercise \begin_inset space ~ \end_inset @@ -25806,15 +25899,15 @@ noprefix "false" \end_inset - for -\begin_inset Formula $\bbnum 1+F^{A}$ +) and +\begin_inset Formula $M^{A}+M^{A}$ \end_inset - (where -\begin_inset Formula $F^{A}\triangleq A\times A$ + with an arbitrary monad +\begin_inset Formula $M$ \end_inset -) and Exercise + (Exercise \begin_inset space ~ \end_inset @@ -25828,15 +25921,7 @@ noprefix "false" \end_inset - for -\begin_inset Formula $M^{A}+M^{A}$ -\end_inset - - with an arbitrary monad -\begin_inset Formula $M$ -\end_inset - -. +). An exception to that rule is a co-product with the \emph on identity @@ -25870,11 +25955,11 @@ If \begin_inset Formula $F$ \end_inset - is any monad, the functor + is a monad, the functor \begin_inset Formula $L^{A}\triangleq A+F^{A}$ \end_inset - is a monad. + is also a monad. (The functor \begin_inset Formula $L$ \end_inset @@ -25988,19 +26073,23 @@ flatten \end_inset - method, which needs to have the type signature: + method of +\begin_inset Formula $F$ +\end_inset + +: \begin_inset Formula \[ -\text{ftn}_{L}:L^{L^{A}}\rightarrow L^{A}=A+F^{A}+F^{A+F^{A}}\rightarrow A+F^{A}\quad. +\text{ftn}_{L}:L^{L^{A}}\rightarrow L^{A}\quad,\quad\quad\text{or equivalently}:\quad\text{ftn}_{L}:A+F^{A}+F^{A+F^{A}}\rightarrow A+F^{A}\quad. \] \end_inset -Since we know nothing about the specific monad +We know nothing about the specific monad \begin_inset Formula $F$ \end_inset -, we cannot extract a value of type +, so we cannot extract a value of type \begin_inset Formula $A$ \end_inset @@ -26025,25 +26114,16 @@ pure \end_inset - method to create a value of type -\begin_inset Formula $F^{A}$ -\end_inset - - out of -\begin_inset Formula $A$ + method for implementing a function we will denote by +\begin_inset Formula $\gamma$ \end_inset -. 
- This allows us to convert + that converts \begin_inset Formula $A+F^{A}$ \end_inset into \begin_inset Formula $F^{A}$ -\end_inset - - using the function we will denote -\begin_inset Formula $\gamma$ \end_inset : @@ -26081,7 +26161,7 @@ def gamma[A]: L[A] => F[A] = { \begin_inset Formula \[ -\gamma^{A}\triangleq\,\begin{array}{|c||c|} +\gamma^{A}:L^{A}\rightarrow F^{A}\quad,\quad\quad\gamma^{A}\triangleq\,\begin{array}{|c||c|} & F^{A}\\ \hline A & \text{pu}_{F}\\ F^{A} & \text{id} @@ -26106,23 +26186,7 @@ Lifting this function to \begin_inset Formula $F^{A}$ \end_inset - via -\begin_inset Formula $F$ -\end_inset - -'s -\begin_inset listings -inline true -status open - -\begin_layout Plain Layout - -flatten -\end_layout - -\end_inset - - method: +: \begin_inset listings inline false status open @@ -26149,7 +26213,7 @@ def flatten_L[A]: L[L[A]] => L[A] = { \begin_layout Plain Layout -} // The last line equals `Right(g.flatMap(gamma))`. +} // The last expression is equal to `Right(g.flatMap(gamma))`. \end_layout \end_inset @@ -26171,7 +26235,7 @@ F^{L^{A}} & \bbnum 0 & \gamma^{\uparrow F}\bef\text{ftn}_{F} \end_layout \begin_layout Standard -Is there another implementation for +Is there another implementation of \begin_inset Formula $\text{ftn}_{L}$ \end_inset @@ -26192,7 +26256,7 @@ Is there another implementation for \begin_inset Formula $A+\bbnum 0$ \end_inset -, which makes it impossible to satisfy identity laws such as +, which makes it impossible to satisfy the identity law \begin_inset Formula $\text{pu}_{F}\bef\text{ftn}_{F}=\text{id}$ \end_inset @@ -26221,7 +26285,7 @@ pure \end_inset or -\begin_inset Formula $\text{pu}_{L}\triangleq a\rightarrow\bbnum 0+\text{pu}_{F}(a)$ +\begin_inset Formula $\text{pu}_{L}\triangleq a^{:A}\rightarrow\bbnum 0+\text{pu}_{F}(a)$ \end_inset . @@ -26506,7 +26570,7 @@ F^{L^{L^{A}}} & \bbnum 0 & \gamma^{\uparrow F}\bef\text{ftn}_{F}\bef\gamma^{\upa \begin_inset Note Note -status open +status collapsed \begin_layout Plain Layout \begin_inset Formula @@ -26601,7 +26665,7 @@ It remains to show the equality of the functions under \end_inset where in the last step we used the naturality law of -\begin_inset Formula $\gamma^{:L^{^{A}}\rightarrow F^{A}}$ +\begin_inset Formula $\gamma^{:L^{A}\rightarrow F^{A}}$ \end_inset , which is a natural transformation: @@ -27002,7 +27066,7 @@ To verify the laws, it is convenient to substitute an arbitrary by: \begin_inset Formula \[ -z\triangleright(f\tilde{\diamond}_{_{L}}g)\triangleq(z\triangleright f)\diamond_{_{F}}(z\triangleright g)\quad,\quad\quad z\triangleright\tilde{\text{pu}}_{L}\triangleq\text{pu}_{F}\quad. +z\triangleright(f\,\tilde{\diamond}_{_{L}}g)\triangleq(z\triangleright f)\diamond_{_{F}}(z\triangleright g)\quad,\quad\quad z\triangleright\tilde{\text{pu}}_{L}\triangleq\text{pu}_{F}\quad. \] \end_inset @@ -27108,8 +27172,8 @@ status open \begin_layout Plain Layout -type L[A] = Z => F[A] // The type Z and a semi-monad F must be - already defined. +type L[A] = Z => F[A] // A type Z and a semi-monad F must be already + defined. 
\end_layout \begin_layout Plain Layout @@ -27151,7 +27215,7 @@ noprefix "false" \begin_layout Standard For any contrafunctor -\begin_inset Formula $H^{A}$ +\begin_inset Formula $H$ \end_inset , the functor @@ -27204,7 +27268,7 @@ We use the flipped Kleisli formulation for To infer this function's code, begin with a typed hole: \begin_inset Formula \[ -f^{:H^{B}\rightarrow A\rightarrow B}\tilde{\diamond}_{_{L}}g^{:H^{C}\rightarrow B\rightarrow C}=k^{:H^{C}}\rightarrow\text{???}^{:A\rightarrow C}\quad. +f^{:H^{B}\rightarrow A\rightarrow B}\,\tilde{\diamond}_{_{L}}g^{:H^{C}\rightarrow B\rightarrow C}=k^{:H^{C}}\rightarrow\text{???}^{:A\rightarrow C}\quad. \] \end_inset @@ -27306,7 +27370,7 @@ Putting the entire code together and substituting an arbitrary value , we get: \begin_inset Formula \begin{equation} -k^{:H^{C}}\triangleright\big(f^{:H^{B}\rightarrow A\rightarrow B}\tilde{\diamond}_{_{L}}g^{:H^{C}\rightarrow B\rightarrow C}\big)\triangleq f\big(k\triangleright(g(k))^{\downarrow H}\big)\bef g(k)\quad.\label{eq:def-of-Kleisli-for-exp-construction-h-a-a} +k^{:H^{C}}\triangleright\big(f^{:H^{B}\rightarrow A\rightarrow B}\,\tilde{\diamond}_{_{L}}g^{:H^{C}\rightarrow B\rightarrow C}\big)\triangleq f\big(k\triangleright(g(k))^{\downarrow H}\big)\bef g(k)\quad.\label{eq:def-of-Kleisli-for-exp-construction-h-a-a} \end{equation} \end_inset @@ -27764,7 +27828,7 @@ In each case, we need to define the function \begin_inset Formula $\text{lift}_{G,H}$ \end_inset - assuming only the associativity law of + from the associativity law of \begin_inset Formula $G$ \end_inset @@ -27777,7 +27841,7 @@ In each case, we need to define the function \begin_inset Formula $\text{lift}_{G,H}$ \end_inset - by using the identity laws of + from the identity laws of \begin_inset Formula $G$ \end_inset @@ -30132,7 +30196,7 @@ Consider the functor \begin_inset Formula $D^{A}\triangleq\bbnum 1+A\times A$ \end_inset - (in Scala, + (in Scala, this is defined by \begin_inset listings inline true status open @@ -30174,16 +30238,44 @@ pure \end_inset in at least two different ways. - Show that some of the monad laws fail to hold for those implementations. + Show that some of the monad laws fail to hold for every implementation. \begin_inset Foot status open \begin_layout Plain Layout -One can prove that +There are several implementations of +\begin_inset listings +inline true +status open + +\begin_layout Plain Layout + +pure +\end_layout + +\end_inset + + and +\begin_inset listings +inline true +status open + +\begin_layout Plain Layout + +flatten +\end_layout + +\end_inset + + for \begin_inset Formula $D^{A}\triangleq\bbnum 1+A\times A$ \end_inset - cannot be a lawful monad. +, but +\emph on +none +\emph default + of them obey the monad laws. For details, see \family typewriter @@ -30461,7 +30553,7 @@ status open \begin_layout Plain Layout -M[_] +M \end_layout \end_inset @@ -31060,12 +31152,8 @@ Use monad constructions to show that the functors \begin_inset Formula $p(x)=x^{n_{1}}+x^{n_{2}}+...+x^{n_{k}}$ \end_inset - with some positive integers -\begin_inset Formula $n_{1}$ -\end_inset - -, ..., -\begin_inset Formula $n_{k}$ + with some distinct positive integers +\begin_inset Formula $n_{1}<... 
R) => R) \end_inset -In Scala syntax, this makes monadic programs appear to have a method called - +In Scala syntax, this makes monadic programs appear to have a \begin_inset listings inline true status open @@ -33527,7 +33613,7 @@ run \end_inset -: + method: \begin_inset listings inline false status open @@ -34178,7 +34264,7 @@ status open \begin_layout Plain Layout def runner: Option[Int] => Int = _.getOrElse(0) // For empty Option values, - return a default. + return some default Int value. \end_layout \end_inset @@ -34195,7 +34281,7 @@ Int \end_inset -, this runner will fail to obey the composition law: +, this runner will fail the composition law: \begin_inset listings inline false status open @@ -34375,7 +34461,7 @@ For any monad \begin_inset Formula $M$ \end_inset -, one defines a category, called the +, one defines a category, which we call the \begin_inset Formula $M$ \end_inset @@ -34389,7 +34475,7 @@ Kleisli!category \end_inset -Kleisli category where objects are all types ( +Kleisli category, where objects are all types ( \begin_inset listings inline true status open @@ -34430,15 +34516,17 @@ String \end_layout \begin_layout Standard -One axiom of a category requires us to have an identity morphism -\begin_inset Formula $A\rightarrow M^{A}$ +One axiom of a category requires us to have an identity morphism for every + object +\begin_inset Formula $A$ \end_inset - for every object -\begin_inset Formula $A$ +. + For the +\begin_inset Formula $M$ \end_inset -; this is the monad +-Kleisli category, this is the monad \begin_inset Formula $M$ \end_inset @@ -34459,8 +34547,13 @@ pure \end_inset . - Another axiom is the associativity of morphism composition operation, which - must combine functions of types + Another axiom is the associativity of morphism composition operation. + For the +\begin_inset Formula $M$ +\end_inset + +-Kleisli category, the composition operation must combine functions of types + \begin_inset Formula $A\rightarrow M^{B}$ \end_inset @@ -34530,7 +34623,7 @@ So, a functor \end_inset -Kleisli category is lawful. - This is an concise way of formulating the monad laws. + This is a concise way of formulating the monad laws. \end_layout \begin_layout Standard @@ -34832,7 +34925,7 @@ flatMap \end_layout \begin_layout Standard -In terms of the Kleisli composition operations +In terms of the operations \begin_inset Formula $\diamond_{_{M}}$ \end_inset @@ -35098,7 +35191,7 @@ Option \end_inset monads. - The implementation of + The code of \begin_inset Formula $\phi$ \end_inset @@ -35975,7 +36068,7 @@ noprefix "false" ): \begin_inset Formula \begin{align*} - & f^{\uparrow M}\bef\text{ftn}_{M}\bef\phi=\gunderline{\phi\bef(f\bef\phi)^{\uparrow N}}\bef\text{ftn}_{N}\quad.\\ + & f^{\uparrow M}\bef\text{ftn}_{M}\bef\phi=\gunderline{\phi\bef(f\bef\phi)^{\uparrow N}}\bef\text{ftn}_{N}\\ \text{use Eq.~(\ref{eq:monad-morphism-naturality-law})}:\quad & =\gunderline{(f\bef\phi)^{\uparrow M}}\bef\phi\bef\text{ftn}_{N}=f^{\uparrow M}\bef\phi^{\uparrow M}\bef\phi\bef\text{ftn}_{M}\quad. \end{align*} @@ -36126,7 +36219,11 @@ For any monad \begin_inset Formula $\text{pu}_{M}:\text{Id}\leadsto M$ \end_inset - between the identity monad and + between the identity monad ( +\begin_inset Formula $\text{Id}$ +\end_inset + +) and \begin_inset Formula $M$ \end_inset @@ -36268,7 +36365,7 @@ Reader \end_inset to the monad -\begin_inset Formula $M^{A}$ +\begin_inset Formula $M$ \end_inset , despite having the correct type signature. 
@@ -36304,7 +36401,7 @@ not \end_inset to -\begin_inset Formula $M^{A}$ +\begin_inset Formula $M$ \end_inset . @@ -36357,7 +36454,7 @@ headOption \end_inset method viewed as a function of type -\begin_inset Formula $\forall A.\:\text{List}^{A}\rightarrow\bbnum 1+A$ +\begin_inset Formula $\forall A.\,\text{List}^{A}\rightarrow\bbnum 1+A$ \end_inset is a natural transformation but @@ -36391,6 +36488,126 @@ Option . \end_layout +\begin_layout Subsubsection +Exercise +\begin_inset CommandInset label +LatexCommand label +name "subsec:Exercise-reasoning-1-4-1" + +\end_inset + + +\begin_inset CommandInset ref +LatexCommand ref +reference "subsec:Exercise-reasoning-1-4-1" +plural "false" +caps "false" +noprefix "false" + +\end_inset + + +\end_layout + +\begin_layout Standard +Assume given functors +\begin_inset Formula $F$ +\end_inset + +, +\begin_inset Formula $G$ +\end_inset + +, +\begin_inset Formula $K$ +\end_inset + +, +\begin_inset Formula $L$ +\end_inset + + and a natural transformation +\begin_inset Formula $\phi:F^{A}\rightarrow G^{A}$ +\end_inset + +. +\end_layout + +\begin_layout Standard + +\series bold +(a) +\series default + Prove that +\begin_inset Formula $\phi^{\uparrow K}:K^{F^{A}}\rightarrow K^{G^{A}}$ +\end_inset + + is also a natural transformation. +\end_layout + +\begin_layout Standard + +\series bold +(b) +\series default + Given another natural transformation +\begin_inset Formula $\psi:K^{A}\rightarrow L^{A}$ +\end_inset + +, prove that the pair product of +\begin_inset Formula $\phi$ +\end_inset + + and +\begin_inset Formula $\psi$ +\end_inset + +, that is, +\begin_inset Formula $\phi\boxtimes\psi:F^{A}\times K^{A}\rightarrow G^{A}\times L^{A}$ +\end_inset + +, as well as the pair co-product +\begin_inset Formula $\phi\boxplus\psi:F^{A}+K^{A}\rightarrow G^{A}+L^{A}$ +\end_inset + +, are also natural transformations. + The +\series bold +pair co-product +\series default + +\begin_inset Index idx +status open + +\begin_layout Plain Layout +pair co-product of functions|textit +\end_layout + +\end_inset + + of two functions +\begin_inset Formula $\phi$ +\end_inset + + and +\begin_inset Formula $\psi$ +\end_inset + + is defined by: +\begin_inset Formula +\[ +(\phi\boxplus\psi):F^{A}+K^{A}\rightarrow G^{A}+L^{A}\quad,\quad\quad\phi\boxplus\psi\triangleq\,\begin{array}{|c||cc|} + & G^{A} & L^{A}\\ +\hline F^{A} & \phi & \bbnum 0\\ +K^{A} & \bbnum 0 & \psi +\end{array}\quad. +\] + +\end_inset + + +\end_layout + \begin_layout Subsection Constructions of polynomial monads \begin_inset CommandInset label @@ -36429,7 +36646,7 @@ Start with \begin_layout Enumerate Given a polynomial monad -\begin_inset Formula $F^{A}$ +\begin_inset Formula $F$ \end_inset , create the monad @@ -36441,11 +36658,11 @@ Given a polynomial monad \begin_layout Enumerate Given two polynomial monads -\begin_inset Formula $F^{A}$ +\begin_inset Formula $F$ \end_inset and -\begin_inset Formula $G^{A}$ +\begin_inset Formula $G$ \end_inset , create the monad @@ -36457,7 +36674,7 @@ Given two polynomial monads \begin_layout Enumerate Given a polynomial monad -\begin_inset Formula $F^{A}$ +\begin_inset Formula $F$ \end_inset , create the monad @@ -36496,8 +36713,8 @@ noprefix "false" \end_inset -) that these are the only constructions available for polynomial monads. - If the conjecture is true, we can create an algorithm that recognizes whether +) whether these are the only constructions available for polynomial monads. 
+ If that is true, one could create an algorithm that recognizes whether a given polynomial functor can be made into a monad by suitable definitions of \begin_inset listings @@ -36548,7 +36765,7 @@ noprefix "false" ). One can also show that -\begin_inset Formula $F^{A}$ +\begin_inset Formula $F$ \end_inset cannot be obtained through the monad constructions listed above. @@ -36900,7 +37117,7 @@ noprefix "false" \end_layout \begin_layout Standard -In the following constructions, we always assume that +In the following constructions, we assume that \begin_inset Formula $M$ \end_inset @@ -37023,7 +37240,7 @@ noprefix "false" \begin_layout Standard The monad -\begin_inset Formula $M^{A}$ +\begin_inset Formula $M$ \end_inset itself is @@ -37138,11 +37355,11 @@ Products \begin_layout Standard If -\begin_inset Formula $F^{A}$ +\begin_inset Formula $F$ \end_inset and -\begin_inset Formula $G^{A}$ +\begin_inset Formula $G$ \end_inset are @@ -37180,11 +37397,11 @@ Co-products \begin_layout Standard If -\begin_inset Formula $F^{A}$ +\begin_inset Formula $F$ \end_inset and -\begin_inset Formula $G^{A}$ +\begin_inset Formula $G$ \end_inset are @@ -37222,7 +37439,7 @@ Function types \begin_layout Standard If -\begin_inset Formula $F^{A}$ +\begin_inset Formula $F$ \end_inset is an @@ -37230,7 +37447,7 @@ If \end_inset -filterable functor and -\begin_inset Formula $G^{A}$ +\begin_inset Formula $G$ \end_inset is an @@ -37336,7 +37553,7 @@ If \end_inset , the recursive functor -\begin_inset Formula $F^{A}$ +\begin_inset Formula $F$ \end_inset defined by the type equation @@ -38551,7 +38768,7 @@ ng you don't often use this method you use it sometimes but specific ones similar to function if ting laws as we will see so what are the properties of this closely operation so let's reformulate the laws of flat map in terms of the class the operation a class decomposition a diamond so the - formulation becomes a very elegant set of laws so that left and right identity + formulation becomes an elegant set of laws so that left and right identity laws are like this so pure composed with F is if F composed with pure is f now here F must be one of these functions in now it's obvious why they're called left and right identity loss pure is identity and this is exactly diff --git a/sofp-src/lyx/sofp-preface.lyx b/sofp-src/lyx/sofp-preface.lyx index 3142b07bf..4224313d1 100644 --- a/sofp-src/lyx/sofp-preface.lyx +++ b/sofp-src/lyx/sofp-preface.lyx @@ -326,8 +326,8 @@ Readers will need to learn some difficult concepts through prolonged mental \end_inset -The book assumes a certain amount of mathematical experience, at about the - level of undergraduate algebra or calculus, as well as some experience +The book assumes a certain amount of mathematical experience (at about the + level of undergraduate algebra or calculus) as well as some experience writing code in general-purpose programming languages. \end_layout @@ -404,11 +404,15 @@ noprefix "false" ). The presentation is self-contained, defining and explaining all required techniques, notations, and Scala features. + All code examples have been tested to work but are intended only for explanatio +n and illustration. + As a rule, the code is not optimized for performance. Although the code examples are in Scala, the material in this book also applies to many other functional programming languages. 
-\end_layout +\begin_inset Note Note +status open -\begin_layout Standard +\begin_layout Plain Layout All concepts and techniques are illustrated by examples and explained as simply as possible ( \begin_inset Quotes eld @@ -422,6 +426,11 @@ but not simpler Exercises should be attempted after absorbing the preceding material. \end_layout +\end_inset + + +\end_layout + \begin_layout Standard A software engineer needs to learn only those few fragments of mathematical theory that answer questions arising in the programming practice. @@ -516,99 +525,42 @@ noprefix "false" \end_inset . + \end_layout \begin_layout Standard -Chapters -\begin_inset space ~ -\end_inset - - -\begin_inset CommandInset ref -LatexCommand ref -reference "chap:5-Curry-Howard" -plural "false" -caps "false" -noprefix "false" - -\end_inset - -– -\begin_inset CommandInset ref -LatexCommand ref -reference "chap:Functors,-contrafunctors,-and" -plural "false" -caps "false" -noprefix "false" - -\end_inset - - begin using the code notation, such as Eq. -\begin_inset space ~ -\end_inset - -( -\begin_inset CommandInset ref -LatexCommand ref -reference "eq:f-functor-exponential-def-of-fmap" -plural "false" -caps "false" -noprefix "false" - +Participation in the meetup +\begin_inset Quotes eld \end_inset -). - If that notation still appears hard to follow after going through Chapters -\begin_inset space ~ +San Francisco Types, Theorems, and Programming Languages +\begin_inset Quotes erd \end_inset -\begin_inset CommandInset ref -LatexCommand ref -reference "chap:5-Curry-Howard" -plural "false" -caps "false" -noprefix "false" - -\end_inset +\begin_inset Foot +status open -– -\begin_inset CommandInset ref -LatexCommand ref -reference "chap:Functors,-contrafunctors,-and" -plural "false" -caps "false" -noprefix "false" +\begin_layout Plain Layout -\end_inset +\family typewriter +\begin_inset CommandInset href +LatexCommand href +target "https://www.meetup.com/sf-types-theorems-and-programming-languages/" +literal "false" -, readers will benefit from working through Chapter -\begin_inset space ~ \end_inset -\begin_inset CommandInset ref -LatexCommand ref -reference "chap:Reasoning-about-code" -plural "false" -caps "false" -noprefix "false" - -\end_inset - -, which summarizes the code notation more systematically and clarifies it - with additional examples. \end_layout -\begin_layout Standard -All code examples have been tested to work but are intended only for explanation - and illustration. - As a rule, the code is not optimized for performance. -\end_layout +\end_inset -\begin_layout Standard -The author thanks Joseph Kim and Jim Kleck for doing some of the exercises - and reporting some errors in earlier versions of this book. + initially motivated the author to begin working on this book. + Thanks are due to Adrian King, Hew Wolff, Peter Vanderbilt, and Young-il + Choo for inspiration and support in that meetup. + The author appreciates the work of Joseph Kim and Jim Kleck who did many + of the exercises and reported some errors in earlier versions of this book. The author also thanks Bill Venners for many helpful comments on the draft, and Harald Gliebe, Andreas R \begin_inset ERT @@ -790,8 +742,8 @@ res0: Int = 3628800 \begin_layout Itemize In the introductory chapters, type expressions and code examples are written - in the syntax of Scala. - Starting from Chapters + in the Scala syntax. 
+ In Chapters \begin_inset space ~ \end_inset @@ -815,8 +767,8 @@ noprefix "false" \end_inset -, the book introduces a mathematical notation for types: for example, the - Scala type expression +, the book introduces a mathematical notation for types: e.g., the Scala type + expression \begin_inset listings inline true status open @@ -957,6 +909,46 @@ andThen \end_inset ). + If the notation still appears hard to follow after going through Chapters +\begin_inset space ~ +\end_inset + + +\begin_inset CommandInset ref +LatexCommand ref +reference "chap:5-Curry-Howard" +plural "false" +caps "false" +noprefix "false" + +\end_inset + +– +\begin_inset CommandInset ref +LatexCommand ref +reference "chap:Functors,-contrafunctors,-and" +plural "false" +caps "false" +noprefix "false" + +\end_inset + +, readers will benefit from working through Chapter +\begin_inset space ~ +\end_inset + + +\begin_inset CommandInset ref +LatexCommand ref +reference "chap:Reasoning-about-code" +plural "false" +caps "false" +noprefix "false" + +\end_inset + +, which summarizes the code notation more systematically and clarifies it + with additional examples. Appendix \begin_inset space ~ \end_inset diff --git a/sofp-src/lyx/sofp-reasoning.lyx b/sofp-src/lyx/sofp-reasoning.lyx index 02b7e5db6..eef192700 100644 --- a/sofp-src/lyx/sofp-reasoning.lyx +++ b/sofp-src/lyx/sofp-reasoning.lyx @@ -1280,6 +1280,16 @@ status open def f[A](x: A): Int = 123 \end_layout +\begin_layout Plain Layout + + // Or equivalently: +\end_layout + +\begin_layout Plain Layout + +def f[A]: A => Int = { _ => 123 } +\end_layout + \end_inset Code notation: @@ -1421,7 +1431,7 @@ x \end_inset - as a free variable because + as a free variable: indeed, \begin_inset listings inline true status open @@ -1433,7 +1443,7 @@ status open \end_inset - only makes sense if + makes sense only if \begin_inset listings inline true status open @@ -1582,7 +1592,12 @@ status open \begin_layout Plain Layout -{ x: Int => { z: Int => z } } +{ x: Int => { z: Int => z } } // Or equivalently: +\end_layout + +\begin_layout Plain Layout + +(x: Int) => (z: Int) => z \end_layout \end_inset @@ -1604,7 +1619,7 @@ x^{:\text{Int}}\rightarrow z^{:\text{Int}}\rightarrow z\quad. \begin_layout Standard If a function is already defined, we can use it by applying it to an argument. - Scala example: + A Scala example: \begin_inset listings inline false status open @@ -1748,7 +1763,7 @@ p._2 \end_inset . - The auxiliary functions + The standard functions \begin_inset Formula $\pi_{i}$ \end_inset @@ -1783,15 +1798,15 @@ Code notation: \end_inset -The notation +We use the notation \begin_inset Formula $a\times b$ \end_inset - is used in an + in an \emph on argument \emph default - of a function to destructure a tuple. + of a function to destructure tuples. \end_layout \begin_layout Paragraph @@ -1908,7 +1923,7 @@ disjunctive functions \begin_inset Quotes erd \end_inset -, which we will explain next. +. \end_layout \begin_layout Paragraph @@ -1942,8 +1957,7 @@ case \end_inset syntax). - Recall that functions that take a disjunctive value as an argument (called - + Recall that functions that take a disjunctive value as an argument ( \begin_inset Quotes eld \end_inset @@ -1982,7 +1996,7 @@ match \end_inset keyword. 
- Scala example: + A Scala example: \begin_inset listings inline false status open @@ -2009,7 +2023,7 @@ val compute: Option[Int] => Option[Int] = { \end_inset -The code notation for this disjunctive function is modeled after that Scala +The code notation for this disjunctive function is modeled after the Scala code: \begin_inset Formula \[ @@ -2065,6 +2079,8 @@ case \end_inset expressions. + The column to the left of the double line shows the corresponding disjunctive + subtypes. In this example, the disjunctive type \begin_inset listings inline true @@ -2182,7 +2198,7 @@ case \end_inset - expressions line by line from the Scala code. + expressions line by line. Look at the first \begin_inset listings inline true @@ -2227,6 +2243,10 @@ None \begin_inset Formula $1\rightarrow\bbnum 0^{:\bbnum 1}+100^{:\text{Int}}$ \end_inset +, or more concisely as +\begin_inset Formula $\_\rightarrow\bbnum 0+100$ +\end_inset + . \end_layout @@ -2277,8 +2297,19 @@ Int \end_layout \begin_layout Standard -To obtain the matrix notation, we write the two partial functions in the - two rows: +To obtain the matrix code notation for +\begin_inset listings +inline true +status open + +\begin_layout Plain Layout + +compute +\end_layout + +\end_inset + +, we may begin by writing the two partial functions as two rows of a matrix: \begin_inset listings inline false status open @@ -2305,7 +2336,7 @@ val compute: Option[Int] => Option[Int] = { \end_inset -Code notation: +The code notation is: \begin_inset Formula \[ \text{compute}^{:\bbnum 1+\text{Int}\rightarrow\bbnum 1+\text{Int}}\triangleq\,\begin{array}{|c||c|} @@ -2318,26 +2349,70 @@ Code notation: \end_inset This is already a valid matrix notation for the function -\begin_inset Formula $f$ +\begin_inset listings +inline true +status open + +\begin_layout Plain Layout + +compute +\end_layout + \end_inset . So far, the matrix has two rows and one column. - However, we notice that each row's return value is + Then we notice that each row's return value is \emph on known \emph default - to be in a specific part of the disjunctive type + to be in a specific subtype of the disjunctive type \begin_inset Formula $\bbnum 1+\text{Int}$ \end_inset - (in this example, both rows return values of type +; in this example, both rows return values of the subtype \begin_inset Formula $\bbnum 0+\text{Int}$ \end_inset -). - So, we can split the column into two columns and obtain a clearer and more - useful notation for this function: +. + So, we split the column into two columns labeled +\begin_inset Quotes eld +\end_inset + + +\begin_inset Formula $\bbnum 1$ +\end_inset + + +\begin_inset Quotes erd +\end_inset + + and +\begin_inset Quotes eld +\end_inset + + +\begin_inset Formula $\text{Int}$ +\end_inset + + +\begin_inset Quotes erd +\end_inset + +. + This gives a more useful code notation for +\begin_inset listings +inline true +status open + +\begin_layout Plain Layout + +compute +\end_layout + +\end_inset + +: \begin_inset Formula \[ \text{compute}^{:\bbnum 1+\text{Int}\rightarrow\bbnum 1+\text{Int}}\triangleq\,\begin{array}{|c||cc|} @@ -2359,14 +2434,25 @@ void type!in matrix notation \end_inset - + ( +\begin_inset Formula $\bbnum 0$ +\end_inset + +) is written in the first column to indicate that the disjunctive part in + that column is not returned. + There is no confusion with other columns because the type \begin_inset Formula $\bbnum 0$ \end_inset - is written symbolically to indicate that the disjunctive part in that column - is not returned. 
- In this way, the matrix displays the parts of disjunctive types that are - being returned. + has no values. + In this way, the matrix clearly displays the parts of disjunctive types + that are being returned in each case. +\end_layout + +\begin_layout Standard +Because only one part of a disjunctive type can ever be returned, a row + can have at most one non-void value. + That value will be in the column corresponding to the part being returned. \end_layout @@ -2497,14 +2583,7 @@ p match {...} \end_layout \begin_layout Standard -Because only one part of a disjunctive type can ever be returned, a row - can have at most one non-void value. - That value will be in the column corresponding to the part being returned. - -\end_layout - -\begin_layout Standard -The matrix notation allows us to compute such function applications directly. +Let us see how to compute function applications in the matrix notation. We view the disjunctive value \begin_inset Formula $\bbnum 0+64^{:\text{Int}}$ \end_inset @@ -2526,7 +2605,9 @@ row vector \begin_inset space ~ \end_inset -, written with a single left line to distinguish it from a function matrix. +. + Vectors are written with a single line at left, to distinguish them from + function matrices. Calculations use the standard rules of a vector-matrix product: \begin_inset Formula \begin{align*} @@ -2540,13 +2621,13 @@ row vector \end{array}\\ & =\,\begin{array}{|cc|} \bbnum 0 & 64\triangleright(x\rightarrow\frac{x}{2})\end{array}\,=\,\begin{array}{|cc|} -\bbnum 0 & 32\end{array}\,=(\bbnum 0+32)\quad. +\bbnum 0 & 32\end{array}\,=\bbnum 0^{:\bbnum 1}+32^{:\text{Int}}\quad. \end{align*} \end_inset Instead of the multiplication of matrix elements as it would be done in - matrix algebra, we use the pipe ( + linear algebra, we use the pipe ( \begin_inset Formula $\triangleright$ \end_inset @@ -2555,8 +2636,8 @@ Instead of the multiplication of matrix elements as it would be done in \end_inset . - (We omitted type annotations here, because we already checked that the - types match.) + (Type annotations are omitted because we already checked that the types + match.) \end_layout \begin_layout Paragraph @@ -2581,16 +2662,14 @@ noprefix "false" \end_inset because the constructive propositional logic (which was the main focus - in that chapter) cannot represent a recursively defined value. + of that chapter) cannot represent recursively defined values. However, this limitation of propositional logic means only that we do not have an algorithm for \emph on automatic \emph default derivation of recursive code. - (Similarly, no algorithm can automatically derive code that involves type - constructors with known methods.) Nevertheless, those derivations can be - performed by hand. + Those derivations can be performed by hand. \end_layout @@ -2869,7 +2948,11 @@ pipe notation \begin_inset Formula $x\triangleright f$ \end_inset -, which places the argument ( +, which means just the same as +\begin_inset Formula $f(x)$ +\end_inset + + but places the argument ( \begin_inset Formula $x$ \end_inset @@ -2886,8 +2969,8 @@ left \emph on right \emph default -, for example -\begin_inset Formula $(x\triangleright f)\triangleright g$ +, for example, +\begin_inset Formula $x\triangleright f\triangleright g$ \end_inset meaning @@ -2943,11 +3026,11 @@ x\triangleright\text{fmap}\,(f)\triangleright\text{filt}\,(p)\quad. 
\end_layout \begin_layout Standard -To enable this common usage, the +To enable this common usage, the operation \begin_inset Formula $\triangleright$ \end_inset - operation is defined to group towards the left. + is defined to group towards the left. So, the parentheses in \begin_inset Formula $(x\triangleright f)\triangleright g=x\triangleright f\triangleright g$ \end_inset @@ -3001,7 +3084,7 @@ pipe notation!operator precedence \end_inset - We can then omit more parentheses: + We can then omit the parentheses: \begin_inset Formula $x\triangleright(f\bef g)=x\triangleright f\bef g$ \end_inset @@ -3108,7 +3191,7 @@ get[Int] \begin{align*} & \text{compute}\bef\text{get}=\,\begin{array}{|c||cc|} & \bbnum 1 & \text{Int}\\ -\hline \bbnum 1 & \bbnum 0 & 1\rightarrow100\\ +\hline \bbnum 1 & \bbnum 0 & \_\rightarrow100\\ \text{Int} & \bbnum 0 & x\rightarrow\frac{x}{2} \end{array}\,\bef\,\begin{array}{|c||c|} & \text{Int}\\ @@ -3117,35 +3200,28 @@ get[Int] \end{array}\\ & \quad=\,\begin{array}{|c||c|} & \text{Int}\\ -\hline \bbnum 1 & (1\rightarrow100)\bef\text{id}\\ +\hline \bbnum 1 & (\_\rightarrow100)\bef\text{id}\\ \text{Int} & (x\rightarrow\frac{x}{2})\bef\text{id} -\end{array}=\,\begin{array}{|c||c|} +\end{array}\,=\,\begin{array}{|c||c|} & \text{Int}\\ -\hline \bbnum 1 & 1\rightarrow100\\ +\hline \bbnum 1 & \_\rightarrow100\\ \text{Int} & x\rightarrow\frac{x}{2} \end{array}\quad. \end{align*} \end_inset -In this computation, we used the composition ( +In such computations, we use the standard rules of matrix multiplication + but apply the composition ( \begin_inset Formula $\bef$ \end_inset -) instead of the -\begin_inset Quotes eld -\end_inset - -multiplication -\begin_inset Quotes erd -\end_inset - - of matrix elements. +) instead of the multiplication of matrix elements. \end_layout \begin_layout Standard -Why does the rule for matrix multiplication work for function compositions? - The reason is the equivalence +Why do the matrix multiplication rules work for function compositions? The + reason is the equivalence \begin_inset Formula $x\triangleright f\triangleright g=x\triangleright f\bef g$ \end_inset @@ -3206,7 +3282,7 @@ row-vector \begin_inset Formula $g$ \end_inset - must yield the function + will yield the function \begin_inset Formula $f\bef g$ \end_inset @@ -3214,15 +3290,7 @@ row-vector \end_layout \begin_layout Standard -A -\begin_inset Quotes eld -\end_inset - -non-disjunctive -\begin_inset Quotes erd -\end_inset - - function (i.e., one not taking or returning disjunctive types) may be written +A function that does not take or return disjunctive types may be written as a \begin_inset Formula $1\times1$ \end_inset @@ -3325,8 +3393,8 @@ In these formulas, the labels \end_layout \begin_layout Standard -The lifting notation helps us recognize that these steps are possible just - by looking at the formula. +The lifting notation helps us recognize that those steps are possible more + easily, just by looking at the formula. Of course, we still need to find a useful sequence of steps in a given derivation or proof. 
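 For instance, the lifting of a function to the List functor corresponds to ordinary Scala code (a sketch; the name liftToList is chosen only for illustration):
\begin_inset listings
inline false
status open

\begin_layout Plain Layout

// The lifting of f: A => B to the List functor is just mapping over a list:
\end_layout

\begin_layout Plain Layout

def liftToList[A, B](f: A => B): List[A] => List[B] = _.map(f)
\end_layout

\begin_layout Plain Layout

\end_layout

\begin_layout Plain Layout

val double: Int => Int = _ * 2
\end_layout

\begin_layout Plain Layout

val doubled = liftToList(double)(List(1, 2, 3))   // List(2, 4, 6)
\end_layout

\end_inset
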
\end_layout @@ -3362,7 +3430,7 @@ The functions denoted by \end_layout \begin_layout Standard -We already saw the definition and the implementation of the functions +We already saw the definition and the code for the functions \begin_inset Formula $\pi_{1}$ \end_inset @@ -3383,19 +3451,11 @@ diagonal \begin_inset Quotes erd \end_inset - function + function ( \begin_inset Formula $\Delta$ \end_inset - is a right inverse for -\begin_inset Formula $\pi_{1}$ -\end_inset - - and -\begin_inset Formula $\pi_{2}$ -\end_inset - -: +) is defined by: \begin_inset listings inline false status open @@ -3444,11 +3504,11 @@ x \end_inset . - This property can be written as an equation or a + This property can be written via equations or \begin_inset Quotes eld \end_inset -law +laws \begin_inset Quotes erd \end_inset @@ -3459,7 +3519,7 @@ status open \begin_layout Plain Layout -delta(x)._1 == x +delta(x)._1 == x and delta(x)._2 == x \end_layout \end_inset @@ -3467,7 +3527,7 @@ delta(x)._1 == x Code notation: \begin_inset Formula \[ -\pi_{1}(\Delta(x))=x\quad. +\pi_{1}(\Delta(x))=x\quad,\quad\quad\pi_{2}(\Delta(x))=x\quad. \] \end_inset @@ -3476,8 +3536,8 @@ Code notation: \end_layout \begin_layout Standard -We can transform this law into a point-free equation by first using the - pipe notation: +We can transform these laws into point-free equations. + First, use the pipe notation: \begin_inset Formula \[ \pi_{1}(\Delta(x))=(\Delta(x))\triangleright\pi_{1}=x\triangleright\Delta\triangleright\pi_{1}=x\triangleright\Delta\bef\pi_{1}\quad, @@ -3490,7 +3550,7 @@ which gives the equation \end_inset . - Now we omit + Then we omit \begin_inset Quotes eld \end_inset @@ -3514,6 +3574,10 @@ The same property holds for \begin_inset Formula $\pi_{2}$ \end_inset +, namely: +\begin_inset Formula $\Delta\bef\pi_{2}=\text{id}$ +\end_inset + . \end_layout @@ -3551,7 +3615,8 @@ status open \begin_layout Plain Layout -def pairProduct[A,B,P,Q](f: A => P, g: B => Q): ((A, B)) => (P, Q) = { +def pairProduct[A, B, P, Q](f: A => P, g: B => Q): ((A, B)) => (P, Q) = + { \end_layout \begin_layout Plain Layout @@ -3616,7 +3681,7 @@ status open \begin_layout Plain Layout -def pairProduct[A,B,P,Q](f: A => P, g: B => Q)(p: (A, B)): (P, Q) = +def pairProduct[A, B, P, Q](f: A => P, g: B => Q)(p: (A, B)): (P, Q) = \end_layout \begin_layout Plain Layout @@ -3668,8 +3733,8 @@ name "subsec:Deriving-laws-for-functions-" \end_layout \begin_layout Standard -The task is to prove a given law (an equation) for a function whose code - is known. +We will often need to prove a given law (an equation) for a function whose + code is known. An example of such an equation is the \begin_inset Index idx status open @@ -3768,7 +3833,7 @@ noprefix "false" \begin_inset Formula $\Delta$ \end_inset - in the right-hand side must take arguments of type + in the right-hand side must take an argument of type \begin_inset Formula $A$ \end_inset @@ -3787,7 +3852,7 @@ noprefix "false" type annotations: \begin_inset Formula \[ -\xymatrix{\xyScaleY{1.6pc}\xyScaleX{4.0pc}A\ar[d]\sb(0.45){f}\ar[r]\sb(0.45){\Delta^{A}} & A\times A\ar[d]\sp(0.45){f\boxtimes f}\\ +\xymatrix{\xyScaleY{1.6pc}\xyScaleX{4.0pc}A\ar[d]\sb(0.45){f}\ar[r]\sp(0.45){\Delta^{A}} & A\times A\ar[d]\sp(0.45){f\boxtimes f}\\ B\ar[r]\sp(0.45){\Delta^{B}} & B\times B } \] @@ -3813,13 +3878,12 @@ To prove the law, we need to use the known code of the function . 
We substitute that code into the left-hand side of the law and into the - right-hand side of the law, hoping to transform these two expressions until + right-hand side of the law, hoping to transform those two expressions until they are the same. \end_layout \begin_layout Standard -We will now perform this computation in the Scala syntax and in the code - notation: +We perform this derivation in the Scala syntax and in the code notation: \begin_inset listings inline false status open @@ -3856,7 +3920,7 @@ x.pipe(delta andThen { case (a, b) => (f(a), f(b)) }) \end_inset -In the code notation: + \begin_inset Formula \begin{align*} & x\triangleright f\bef\Delta=f(x)\,\gunderline{\triangleright\,(b}\rightarrow b\times b)=f(x)\times f(x)\quad.\\ @@ -4179,7 +4243,7 @@ merge : \begin_inset Formula \[ -\xymatrix{\xyScaleY{1.6pc}\xyScaleX{4.0pc}A+A\ar[d]\sb(0.45){f^{\uparrow E}}\ar[r]\sb(0.55){\text{merge}^{A}} & A\ar[d]\sp(0.45){f}\\ +\xymatrix{\xyScaleY{1.6pc}\xyScaleX{4.0pc}A+A\ar[d]\sb(0.45){f^{\uparrow E}}\ar[r]\sp(0.55){\text{merge}^{A}} & A\ar[d]\sp(0.45){f}\\ B+B\ar[r]\sp(0.55){\text{merge}^{B}} & B } \] @@ -4282,7 +4346,7 @@ merge : \begin_inset Formula \[ -\xymatrix{\xyScaleY{1.5pc}\xyScaleX{4.5pc}E^{A+A}\ar[d]\sp(0.45){\text{merge}^{\uparrow E}}\ar[r]\sp(0.55){\text{merge}^{A+A}} & A+A\ar[d]\sb(0.5){\text{merge}^{A}}\\ +\xymatrix{\xyScaleY{1.5pc}\xyScaleX{4.5pc}E^{A+A}\ar[d]\sb(0.45){\text{merge}^{\uparrow E}}\ar[r]\sp(0.55){\text{merge}^{A+A}} & A+A\ar[d]\sp(0.45){\text{merge}^{A}}\\ E^{A}\ar[r]\sb(0.55){\text{merge}^{A}} & A } \] @@ -4393,8 +4457,8 @@ A & \text{id} We cannot proceed with matrix composition because the dimensions of the matrices do not match. - To compute further, we need to expand the rows and the columns of the first - matrix: + We need to expand the rows and the columns of the first matrix. + Then we can finish the proof of the law: \begin_inset Formula \[ \begin{array}{|c||c|} @@ -4426,8 +4490,8 @@ A & \text{id} \end_inset -This proves the law and also helps visualize how various types are transformed - by +The matrix notation helps visualize how various types are transformed by + \begin_inset listings inline true status open @@ -4500,7 +4564,7 @@ The single column of this matrix remains unsplit. \begin_inset Formula $h$ \end_inset - will allow us to split the column. + will allow us to split that column. \end_layout \begin_layout Standard @@ -4546,9 +4610,9 @@ Ignored arguments \end_layout \begin_layout Standard -If all rows of the disjunctive function ignore their arguments and always - return the same results, we may collapse all rows into one, as shown in - this example: +If all rows of the disjunctive function ignore their arguments and if all + rows return the same result, we may collapse all the rows into one, as + shown in this example: \begin_inset listings inline false status open @@ -4591,7 +4655,7 @@ A & \_\rightarrow1 & \bbnum 0 \end{array}=\,\begin{array}{|c||cc|} & \bbnum 1 & A\\ \hline A+\bbnum 1+A & \_\rightarrow1 & \bbnum 0 -\end{array}\quad. +\end{array}\,=\_\rightarrow1+\bbnum 0^{:A}\quad. \] \end_inset @@ -4612,8 +4676,7 @@ B & \_\rightarrow f(x) \end_inset -In this case, we can completely collapse the matrix, getting an ordinary - (non-disjunctive) function. +The code matrix is replaced by an ordinary (non-disjunctive) function. 
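\end_layout

\begin_layout Standard
A small Scala illustration of this row-collapsing step (added here for clarity;
 the type Either[A, Option[A]] plays the role of the disjunctive type A + 1 + A,
 and the function names are chosen only for this example):
\begin_inset listings
inline false
status open

\begin_layout Plain Layout

def f1[A]: Either[A, Option[A]] => Int = {
\end_layout

\begin_layout Plain Layout

  case Left(_)        => 1
\end_layout

\begin_layout Plain Layout

  case Right(None)    => 1
\end_layout

\begin_layout Plain Layout

  case Right(Some(_)) => 1
\end_layout

\begin_layout Plain Layout

}
\end_layout

\begin_layout Plain Layout

// All cases ignore their arguments and return the same value,
\end_layout

\begin_layout Plain Layout

// so the disjunctive code collapses to an ordinary constant function:
\end_layout

\begin_layout Plain Layout

def f2[A]: Either[A, Option[A]] => Int = _ => 1
\end_layout

\end_inset
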
\end_layout \begin_layout Paragraph @@ -4635,11 +4698,11 @@ Consider the pair product of two disjunctive functions such as \end_inset in the matrix notation requires, in general, to split the rows and the - columns of the matrices because the type of + columns of the matrices because the input type of \begin_inset Formula $f\boxtimes g$ \end_inset - is: + is disjunctive: \begin_inset Formula \begin{align*} f\boxtimes g & :(A+B)\times(P+Q)\rightarrow R\times S\\ @@ -4648,15 +4711,15 @@ f\boxtimes g & :(A+B)\times(P+Q)\rightarrow R\times S\\ \end_inset -So, the pair product of two -\begin_inset Formula $2\times1$ -\end_inset - - matrices must be written +So, \emph on in general \emph default - as a + the pair product +\begin_inset Formula $f\boxtimes g$ +\end_inset + + must be written as a \begin_inset Formula $4\times1$ \end_inset @@ -4682,12 +4745,11 @@ B\times Q & f_{2}\boxtimes g_{2} \end_inset +A simplification trick exists when +\begin_inset Formula $f\boxtimes g$ +\end_inset -\end_layout - -\begin_layout Standard -A simplification trick exists when the pair product is composed with the - diagonal function + is composed with the diagonal function \begin_inset Formula $\Delta$ \end_inset @@ -4744,7 +4806,7 @@ B & \Delta\bef(f_{2}\boxtimes g_{2}) \end_inset -The rules of matrix multiplication do not help in deriving this law. +The rules of matrix multiplication do not help in deriving this law directly. So, we use a more basic approach: show that both sides are equal when applied to arbitrary values \begin_inset Formula $p$ @@ -4850,7 +4912,7 @@ As an example, let us derive the property that \begin_inset Formula $\text{fmap}_{F}$ \end_inset - exists and satisfies the functor law, but we do not know the code of + exists and satisfies the functor laws, but we do not know the code of \begin_inset Formula $\text{fmap}_{F}$ \end_inset @@ -4868,11 +4930,7 @@ First, we need to define \begin_inset Formula $^{\uparrow F}$ \end_inset - and write, for any -\begin_inset Formula $f^{:A\rightarrow B}$ -\end_inset - -: + and write: \begin_inset listings inline false status open @@ -4999,7 +5057,7 @@ f^{\uparrow F}\bef g^{\uparrow F}=(f\bef g)^{\uparrow F}\quad.\label{eq:composit \end_inset -We could use this law only if we somehow bring +We could make use of this law only if we somehow brought \begin_inset Formula $f^{\uparrow F}$ \end_inset @@ -5129,126 +5187,6 @@ exercises \end_inset -\end_layout - -\begin_layout Subsubsection -Exercise -\begin_inset CommandInset label -LatexCommand label -name "subsec:Exercise-reasoning-1-4-1" - -\end_inset - - -\begin_inset CommandInset ref -LatexCommand ref -reference "subsec:Exercise-reasoning-1-4-1" -plural "false" -caps "false" -noprefix "false" - -\end_inset - - -\end_layout - -\begin_layout Standard -Assume given functors -\begin_inset Formula $F$ -\end_inset - -, -\begin_inset Formula $G$ -\end_inset - -, -\begin_inset Formula $K$ -\end_inset - -, -\begin_inset Formula $L$ -\end_inset - - and a natural transformation -\begin_inset Formula $\phi:F^{A}\rightarrow G^{A}$ -\end_inset - -. -\end_layout - -\begin_layout Standard - -\series bold -(a) -\series default - Prove that -\begin_inset Formula $\phi^{\uparrow K}:K^{F^{A}}\rightarrow K^{G^{A}}$ -\end_inset - - is also a natural transformation. 
-\end_layout - -\begin_layout Standard - -\series bold -(b) -\series default - Given another natural transformation -\begin_inset Formula $\psi:K^{A}\rightarrow L^{A}$ -\end_inset - -, prove that the pair product of -\begin_inset Formula $\phi$ -\end_inset - - and -\begin_inset Formula $\psi$ -\end_inset - -, that is, -\begin_inset Formula $\phi\boxtimes\psi:F^{A}\times K^{A}\rightarrow G^{A}\times L^{A}$ -\end_inset - -, as well as the pair co-product -\begin_inset Formula $\phi\boxplus\psi:F^{A}+K^{A}\rightarrow G^{A}+L^{A}$ -\end_inset - -, are also natural transformations. - The -\series bold -pair co-product -\series default - -\begin_inset Index idx -status open - -\begin_layout Plain Layout -pair co-product of functions|textit -\end_layout - -\end_inset - - of two functions -\begin_inset Formula $\phi$ -\end_inset - - and -\begin_inset Formula $\psi$ -\end_inset - - is defined by: -\begin_inset Formula -\[ -(\phi\boxplus\psi):F^{A}+K^{A}\rightarrow G^{A}+L^{A}\quad,\quad\quad\phi\boxplus\psi\triangleq\,\begin{array}{|c||cc|} - & G^{A} & L^{A}\\ -\hline F^{A} & \phi & \bbnum 0\\ -K^{A} & \bbnum 0 & \psi -\end{array}\quad. -\] - -\end_inset - - \end_layout \begin_layout Subsubsection @@ -5418,7 +5356,7 @@ f^{\uparrow F}\bef\Delta=\Delta\bef f^{\uparrow L}\quad. \end_inset -Write out all types in this law and draw a type diagram. +Write out all types in that law and draw a type diagram. \end_layout \begin_layout Subsubsection @@ -5499,20 +5437,20 @@ naturality law , \begin_inset Formula \[ -(\text{id}\boxtimes f)\bef\text{two}=\text{two}\bef f^{\uparrow E}\quad, +(\text{id}\boxtimes f)\bef\text{two}=\text{two}\bef f^{\uparrow E}\quad. \] \end_inset -where +Here \begin_inset Formula $E^{A}\triangleq A+A$ \end_inset - is the functor whose lifting -\begin_inset Formula $^{\uparrow E}$ + is the functor whose lifting ( +\begin_inset Formula $...^{\uparrow E}$ \end_inset - was defined in Section +) was defined in Section \begin_inset space ~ \end_inset @@ -5527,7 +5465,7 @@ noprefix "false" \end_inset . - Write out the types in this law and draw a type diagram. + Write out the types in that naturality law and draw a type diagram. \end_layout @@ -5646,7 +5584,7 @@ noprefix "false" \begin_layout Standard Consider the functor -\begin_inset Formula $L^{A}$ +\begin_inset Formula $L$ \end_inset defined as: diff --git a/sofp-src/lyx/sofp-summary.lyx b/sofp-src/lyx/sofp-summary.lyx index 0d267cb45..221bf7535 100644 --- a/sofp-src/lyx/sofp-summary.lyx +++ b/sofp-src/lyx/sofp-summary.lyx @@ -3023,7 +3023,7 @@ If \end_layout \begin_layout Subsubsection -Exercise** +Exercise \begin_inset CommandInset label LatexCommand label name "par:Exercise-additional-14" @@ -3276,7 +3276,7 @@ From that, prove the type equivalence \end_layout \begin_layout Subsubsection -Exercise** +Exercise \begin_inset CommandInset label LatexCommand label name "par:Problem-Peirce-law" @@ -3362,134 +3362,10 @@ noprefix "false" . -\end_layout - -\begin_layout Standard -\begin_inset Note Note -status open - -\begin_layout Plain Layout -Here are some known facts about -\begin_inset Formula $F^{R}$ -\end_inset - -. - The type expression -\begin_inset Formula $F^{R}$ -\end_inset - - is covariant in -\begin_inset Formula $R$ -\end_inset - -, so -\begin_inset Formula $F$ -\end_inset - - is a functor. - It follows from the covariant Yoneda identity that -\begin_inset Formula $F^{\bbnum 1}\cong\bbnum 1$ -\end_inset - -. 
- So, -\begin_inset Formula $F$ -\end_inset - - is a pointed functor (and, in particular, -\begin_inset Formula $F^{R}\not\cong\bbnum 0$ -\end_inset - - when -\begin_inset Formula $R\not\cong\bbnum 0$ -\end_inset - -). - The corresponding natural transformation -\begin_inset Formula $\forall R.\,R\rightarrow F^{R}$ -\end_inset - - is implemented as: -\begin_inset Formula -\[ -\text{pu}_{F}:R\rightarrow F^{R}\quad,\quad\quad\text{pu}_{F}\triangleq r^{:R}\rightarrow p^{:\left(A\rightarrow R\right)\rightarrow A}\rightarrow p(\_\rightarrow r)\quad. -\] - -\end_inset - -However, -\begin_inset Formula $F^{R}$ -\end_inset - - is not equivalent to -\begin_inset Formula $R$ -\end_inset - - via a fully parametric isomorphism. - Does a natural transformation -\begin_inset Formula $\forall R.\,F^{R}\rightarrow R$ -\end_inset - - exist? No. -\end_layout - -\begin_layout Plain Layout -The type -\begin_inset Formula $F^{\bbnum 0}$ -\end_inset - - is void. - This can be shown using relational parametricity. -\end_layout - -\begin_layout Plain Layout -A function with type signature -\begin_inset Formula $\forall R.\,F^{R}\rightarrow R\rightarrow R$ -\end_inset - - can be implemented: -\begin_inset Formula -\[ -\phi:F^{R}\rightarrow R\rightarrow R\quad,\quad\quad\phi\triangleq f^{:\forall A.\,((A\rightarrow R)\rightarrow A)\rightarrow A}\rightarrow r^{:R}\rightarrow f^{R}(k^{:R\rightarrow R}\rightarrow k(r))\quad. -\] - -\end_inset - -But there is no fully parametric function with type signature -\begin_inset Formula $\forall R.\,(R\rightarrow R)\rightarrow F^{R}$ -\end_inset - -. - (To show that, take -\begin_inset Formula $R=\bbnum 0$ -\end_inset - - and use the fact that -\begin_inset Formula $F^{\bbnum 0}\cong\bbnum 0$ -\end_inset - -.) So, -\begin_inset Formula $F^{R}$ -\end_inset - - is -\emph on -not -\emph default - equivalent to the function type -\begin_inset Formula $R\rightarrow R$ -\end_inset - -. 
-\end_layout - -\end_inset - - \end_layout \begin_layout Subsubsection -Exercise** +Exercise \begin_inset CommandInset label LatexCommand label name "par:Problem-Peirce-law-1" @@ -3738,7 +3614,7 @@ status collapsed \end_layout \begin_layout Subsubsection -Exercise* +Exercise \begin_inset CommandInset label LatexCommand label name "par:Problem-Peirce-law-2" @@ -4009,7 +3885,7 @@ By Yoneda, \end_layout \begin_layout Subsubsection -Exercise* +Exercise \begin_inset CommandInset label LatexCommand label name "par:Exercise-additional-16" @@ -4069,7 +3945,7 @@ noprefix "false" \end_layout \begin_layout Subsubsection -Exercise** +Exercise \begin_inset CommandInset label LatexCommand label name "par:Exercise-additional-16-1" @@ -4415,7 +4291,7 @@ List \end_layout \begin_layout Subsubsection -Exercise** +Exercise \begin_inset CommandInset label LatexCommand label name "par:Exercise-additional-17" @@ -5770,14 +5646,14 @@ R\times\text{List}^{R} & h\times t\rightarrow h\oplus_{R}\overline{\text{reduce} \end_inset -We can similarly implement a base runner ( +We can similarly implement a special base runner ( \begin_inset listings inline true status open \begin_layout Plain Layout -brun +brunE \end_layout \end_inset @@ -5802,7 +5678,7 @@ status open \begin_layout Plain Layout -brun +brunE \end_layout \end_inset @@ -5819,10 +5695,10 @@ brun (which is also a monoid type): \begin_inset Formula \[ -\text{brun}:M^{L^{R}}\rightarrow M^{R}\quad,\quad\quad\text{brun}\triangleq\text{flm}_{M}\bigg(\,\begin{array}{|c||c|} +\text{brunE}:M^{L^{R}}\rightarrow M^{R}\quad,\quad\quad\text{brunE}\triangleq\text{flm}_{M}\bigg(\,\begin{array}{|c||c|} & M^{R}\\ \hline \bbnum 1 & 1\rightarrow\text{pu}_{M}(e_{R})\\ -R\times M^{L^{R}} & h\times t\rightarrow\text{pu}_{M}(h)\oplus_{M}\overline{\text{brun}}\,(t) +R\times M^{L^{R}} & h\times t\rightarrow\text{pu}_{M}(h)\oplus_{M}\overline{\text{brunE}}\,(t) \end{array}\,\bigg)\quad. \] @@ -5859,7 +5735,7 @@ status open \begin_layout Plain Layout -brun +brunE \end_layout \end_inset @@ -5883,7 +5759,7 @@ status open \begin_layout Plain Layout -brun +brunE \end_layout \end_inset @@ -5904,20 +5780,20 @@ status open \begin_layout Plain Layout -brun +brunE \end_layout \end_inset - hold when restricted to a monoid type + hold when restricted to monoid types \begin_inset Formula $A$ \end_inset ? \begin_inset Formula \begin{align*} -\text{for all monoid types }A:\quad & a^{:A}\triangleright\text{pu}_{T}\bef\text{brun}=a^{:A}\triangleright\text{pu}_{M}\quad,\\ -\text{composition law}:\quad & p^{:T^{T^{A}}}\triangleright\text{ftn}_{T}\bef\text{brun }=p^{:T^{T^{A}}}\triangleright\text{brun}\bef\text{brun}^{\uparrow M}\bef\text{ftn}_{M}\quad. +\text{for all monoid types }A:\quad & a^{:A}\triangleright\text{pu}_{T}\bef\text{brunE}=a^{:A}\triangleright\text{pu}_{M}\quad,\\ +\text{composition law}:\quad & p^{:T^{T^{A}}}\triangleright\text{ftn}_{T}\bef\text{brunE }=p^{:T^{T^{A}}}\triangleright\text{brunE}\bef\text{brunE}^{\uparrow M}\bef\text{ftn}_{M}\quad. \end{align*} \end_inset @@ -5943,7 +5819,7 @@ status open \begin_layout Plain Layout -brun +brunE \end_layout \end_inset diff --git a/sofp-src/lyx/sofp-transformers.lyx b/sofp-src/lyx/sofp-transformers.lyx index 1195c40be..653111ebc 100644 --- a/sofp-src/lyx/sofp-transformers.lyx +++ b/sofp-src/lyx/sofp-transformers.lyx @@ -5770,8 +5770,8 @@ final case class ListT[M[_]: Monad : Functor, A](value: M[Option[(A, ListT[M, \begin_layout Plain Layout - case Some((head, tail)) => comb(f(head), tail).value // Type - is M[Option[(A, ListT)]]. 
+ case Some((head, tail)) => comb(f(head), tail).value // The `.value` + has type M[Option[(A, ListT)]]. \end_layout \begin_layout Plain Layout @@ -5838,8 +5838,17 @@ status open \begin_layout Plain Layout -def flift[M[_]: Monad, A]: M[A] => ListT[M, A] = ListT(a => m.flatMap(ListT.pure(a -).value)) +def flift[M[_]: Monad, A]: M[A] => ListT[M, A] = { m => +\end_layout + +\begin_layout Plain Layout + + ListT(m.map(a => Some((a, ListT(Monad[M].pure(None)))))) +\end_layout + +\begin_layout Plain Layout + +} \end_layout \begin_layout Plain Layout @@ -5849,8 +5858,17 @@ def blift[M[_]: Monad, A]: List[A] => ListT[M, A] = \begin_layout Plain Layout - _.foldRight(Monad[M].pure(None)){ case (a, tail) => Monad[M].pure(Some((a, - blift(tail)))) } + _.foldRight(Monad[M].pure(None)) { +\end_layout + +\begin_layout Plain Layout + + case (a, tail) => Monad[M].pure(Some((a, blift(tail)))) +\end_layout + +\begin_layout Plain Layout + + } \end_layout \end_inset @@ -5858,8 +5876,10 @@ def blift[M[_]: Monad, A]: List[A] => ListT[M, A] = \begin_inset Formula \begin{align*} -\text{foreign lift}:\quad & \text{flift}:M^{A}\rightarrow T_{\text{List}}^{M,A}\quad,\quad\quad m^{:M^{A}}\triangleright\text{flift}\triangleq m\triangleright(a\rightarrow\bbnum 0+a\times\text{pu}_{M}(1+\bbnum 0))^{\uparrow M}\quad,\\ -\text{base lift}:\quad & \text{blift}:\text{List}^{A}\rightarrow T_{\text{List}}^{M,A}\quad,\quad\quad\text{blift}\triangleq\,\begin{array}{|c||c|} +\text{foreign lift}:\quad & \text{flift}:M^{A}\rightarrow T_{\text{List}}^{M,A}\quad,\\ + & \text{flift}\triangleq(a\rightarrow\bbnum 0+a\times\text{pu}_{M}(1+\bbnum 0))^{\uparrow M}\quad,\\ +\text{base lift}:\quad & \text{blift}:\text{List}^{A}\rightarrow T_{\text{List}}^{M,A}\quad,\\ + & \text{blift}\triangleq\,\begin{array}{|c||c|} & M^{\bbnum 1+A\times T_{\text{List}}^{M,A}}\\ \hline \bbnum 1 & 1\rightarrow\text{pu}_{M}(1+\bbnum 0)\\ A\times\text{List}^{A} & a\times t\rightarrow\text{pu}_{M}(\bbnum 0+a\times\overline{\text{blift}}\,(t)) @@ -6147,8 +6167,12 @@ status open \begin_layout Plain Layout -def ascend(n: Int): ListT[Reader[Int, *], Int] = ListT(Lazy(k => Some((n, - ascend(n + k))))) +def ascend(n: Int): ListT[Reader[Int, *], Int] = +\end_layout + +\begin_layout Plain Layout + + ListT(Lazy(k => Some((n, ascend(n + k))))) \end_layout \end_inset @@ -6165,11 +6189,12 @@ ascend(n) \end_inset - gives a well-defined value representing the sequence + gives a well-defined, finite value, but that value contains functions evaluatin +g the infinite sequence \begin_inset Formula $\left[n,n+k,n+2k,...\right]$ \end_inset -. + step by step. The parameter \begin_inset Formula $k$ \end_inset @@ -6199,7 +6224,19 @@ runListT \end_inset - will not terminate when applied to such a sequence. + will not terminate when applied to +\begin_inset listings +inline true +status open + +\begin_layout Plain Layout + +ascend(n) +\end_layout + +\end_inset + +. \end_layout @@ -6221,8 +6258,8 @@ ListT \begin_inset Formula $M$ \end_inset - for an unbounded-length stream, the target should be a stream type (i.e., - an on-call or a lazy list) rather than an eagerly evaluated + for an unbounded-length stream, the target should be an iterator type (i.e., + an on-call list) rather than an eagerly evaluated \begin_inset listings inline true status open @@ -6262,7 +6299,7 @@ A \end_inset - is a monoid type. + is a monoid. 
For monoid types \begin_inset listings inline true @@ -6279,7 +6316,7 @@ A \begin_inset Formula $\text{List}^{A}\rightarrow A$ \end_inset - as a standard + via the standard \begin_inset listings inline true status open @@ -6325,7 +6362,7 @@ status open \begin_layout Plain Layout -brun +brunE \end_layout \end_inset @@ -6373,7 +6410,7 @@ status open \begin_layout Plain Layout -def brun[M[_]: Monad, A: Monoid](listT: ListT[M, A]): M[A] = listT.value.flatMap +def brunE[M[_]: Monad, A: Monoid](listT: ListT[M, A]): M[A] = listT.value.flatMap { \end_layout @@ -6384,8 +6421,8 @@ def brun[M[_]: Monad, A: Monoid](listT: ListT[M, A]): M[A] = listT.value.flatMap \begin_layout Plain Layout - case Some((head, tail)) => Monad[M].pure(head) |+| brun(tail) // Monoid - M[A]. + case Some((head, tail)) => Monad[M].pure(head) |+| brunE(tail) // Using + the Monoid instance for M[A]. \end_layout \begin_layout Plain Layout @@ -6451,14 +6488,14 @@ noprefix "false" \end_inset - whether this special version of + whether \begin_inset listings inline true status open \begin_layout Plain Layout -brun +brunE \end_layout \end_inset @@ -6702,11 +6739,12 @@ StateT \end_inset - does not have a general base runner of the form + does not have a general base runner that could be expressed in the form + \begin_inset Formula $\text{brun}\,(\theta_{\text{State}})$ \end_inset -: we cannot convert an +, because the type signatures do not allow us to convert an \emph on arbitrary \emph default @@ -6955,10 +6993,10 @@ base lift \begin_inset Quotes erd \end_inset - is impossible: the required type signature + is impossible: the required type signature, \begin_inset Formula \[ -\text{blift}:(\left(A\rightarrow R\right)\rightarrow R)\rightarrow(A\rightarrow M^{R})\rightarrow M^{R} +\text{blift}:(\left(A\rightarrow R\right)\rightarrow R)\rightarrow(A\rightarrow M^{R})\rightarrow M^{R}\quad, \] \end_inset @@ -8196,7 +8234,7 @@ Monad \series bold \size small Monad -\begin_inset Formula $T_{L}^{M}$ +\begin_inset Formula $T_{L}^{M,A}$ \end_inset @@ -8212,7 +8250,7 @@ Monad \series bold \size small Monad -\begin_inset Formula $T_{M}^{L}$ +\begin_inset Formula $T_{M}^{L,A}$ \end_inset @@ -8284,7 +8322,7 @@ Either[E, A] \begin_layout Plain Layout \size small -\begin_inset Formula $T_{L}^{M,A}=R\rightarrow E+A$ +\begin_inset Formula $R\rightarrow E+A$ \end_inset @@ -8298,7 +8336,7 @@ Either[E, A] \begin_layout Plain Layout \size small -\begin_inset Formula $T_{M}^{L,A}=R\rightarrow E+A$ +\begin_inset Formula $R\rightarrow E+A$ \end_inset @@ -8369,7 +8407,7 @@ Writer[W, A] \begin_layout Plain Layout \size small -\begin_inset Formula $T_{L}^{M,A}=R\rightarrow A\times W$ +\begin_inset Formula $R\rightarrow A\times W$ \end_inset @@ -8383,7 +8421,7 @@ Writer[W, A] \begin_layout Plain Layout \size small -\begin_inset Formula $T_{M}^{L,A}=R\rightarrow A\times W$ +\begin_inset Formula $R\rightarrow A\times W$ \end_inset @@ -8454,7 +8492,7 @@ List[A] \begin_layout Plain Layout \size small -\begin_inset Formula $T_{L}^{M,A}=R\rightarrow\text{List}^{A}$ +\begin_inset Formula $R\rightarrow\text{List}^{A}$ \end_inset @@ -8468,7 +8506,7 @@ List[A] \begin_layout Plain Layout \size small -\begin_inset Formula $T_{M}^{L,A}\triangleq R\rightarrow\bbnum 1+A\times T_{M}^{L,A}$ +\begin_inset Formula $R\rightarrow\bbnum 1+A\times T_{M}^{L,A}$ \end_inset @@ -8539,7 +8577,7 @@ State[S, A] \begin_layout Plain Layout \size small -\begin_inset Formula $T_{L}^{M,A}=R\rightarrow S\rightarrow A\times S$ +\begin_inset Formula $R\rightarrow S\rightarrow A\times S$ \end_inset 
@@ -8553,7 +8591,7 @@ State[S, A] \begin_layout Plain Layout \size small -\begin_inset Formula $T_{M}^{L,A}=S\rightarrow R\rightarrow A\times S$ +\begin_inset Formula $S\rightarrow R\rightarrow A\times S$ \end_inset @@ -8624,7 +8662,7 @@ State[S, A] \begin_layout Plain Layout \size small -\begin_inset Formula $T_{L}^{M,A}=S\rightarrow(E+A)\times S$ +\begin_inset Formula $S\rightarrow(E+A)\times S$ \end_inset @@ -8638,7 +8676,7 @@ State[S, A] \begin_layout Plain Layout \size small -\begin_inset Formula $T_{M}^{L,A}=S\rightarrow E+A\times S$ +\begin_inset Formula $S\rightarrow E+A\times S$ \end_inset @@ -8709,7 +8747,7 @@ Cont[R, A] \begin_layout Plain Layout \size small -\begin_inset Formula $T_{L}^{M,A}=\left(E+A\rightarrow R\right)\rightarrow R$ +\begin_inset Formula $\left(E+A\rightarrow R\right)\rightarrow R$ \end_inset @@ -8723,7 +8761,7 @@ Cont[R, A] \begin_layout Plain Layout \size small -\begin_inset Formula $T_{M}^{L,A}=\left(A\rightarrow E+R\right)\rightarrow E+R$ +\begin_inset Formula $\left(A\rightarrow E+R\right)\rightarrow E+R$ \end_inset @@ -10260,9 +10298,9 @@ literal "false" \end_layout \begin_layout Standard -Throughout this chapter, we will build transformers for every exponential-polyno -mial monad considered in this book (although some transformers will only - partially satisfy the required laws). +Throughout this chapter, we will build transformers for every monad considered + in this book (although some transformers will only partially satisfy the + required laws). \end_layout \begin_layout Subsection @@ -10613,12 +10651,12 @@ Any monad contained in a stack, such as relevant transformers, such as: \begin_inset Formula \[ -\text{flift}_{K}^{P}:P\leadsto(K\varangle P)\quad,\quad\text{blift}_{L}^{P}:L\leadsto(L\varangle P)\quad,\quad\text{frun}_{M}^{P,Q}(f^{:P\leadsto Q}):M\varangle P\leadsto M\varangle Q\quad,\quad\text{etc.,} +\text{flift}_{K}^{P}:P\leadsto(K\varangle P)\quad,\quad\text{blift}_{L}^{P}:L\leadsto(L\varangle P)\quad,\quad\text{frun}_{M}^{P,Q}(f^{:P\leadsto Q}):M\varangle P\leadsto M\varangle Q\quad, \] \end_inset -for any monad +and so on, for any monad \begin_inset Formula $P$ \end_inset @@ -10646,9 +10684,10 @@ for any monad : \begin_inset Formula -\[ -\text{flift}_{K\varangle L}^{P}:P\leadsto K\varangle L\varangle P=\text{flift}_{L}^{P}\bef\text{flift}_{K}^{L\varangle P}\quad,\quad\text{blift}_{L\varangle M}^{P}:L\varangle M\leadsto L\varangle M\varangle P=\text{frun}_{L}^{M,M\varangle P}(\text{blift}_{M}^{P})\quad. -\] +\begin{align*} + & \text{flift}_{K\varangle L}^{P}:P\leadsto K\varangle L\varangle P=\text{flift}_{L}^{P}\bef\text{flift}_{K}^{L\varangle P}\quad,\\ + & \text{blift}_{L\varangle M}^{P}:L\varangle M\leadsto L\varangle M\varangle P=\text{frun}_{L}^{M,M\varangle P}(\text{blift}_{M}^{P})\quad. +\end{align*} \end_inset @@ -12200,9 +12239,10 @@ set that work with the state value: \begin_inset Formula -\[ -\text{get}:\text{State}^{S,S}\quad,\quad\quad\text{get}\triangleq s^{:S}\rightarrow s\times s\quad,\quad\quad\text{set}:S\rightarrow\text{State}^{S,\bbnum 1}\quad,\quad\quad\text{set}\triangleq s^{:S}\rightarrow\_^{:S}\rightarrow1\times s\quad. -\] +\begin{align*} + & \text{get}:\text{State}^{S,S}\quad,\quad\quad\text{get}\triangleq s^{:S}\rightarrow s\times s\quad,\\ + & \text{set}:S\rightarrow\text{State}^{S,\bbnum 1}\quad,\quad\quad\text{set}\triangleq s^{:S}\rightarrow\_^{:S}\rightarrow1\times s\quad. 
+\end{align*} \end_inset @@ -12257,8 +12297,8 @@ State \end_inset --monadic program can then be written as shown at left, without referring - to the type signature +-monadic program can then be written as shown above, without referring to + the type signature \begin_inset Formula $S\rightarrow A\times S$ \end_inset @@ -12676,7 +12716,7 @@ Either \end_layout \begin_layout Standard -The second ingredient of the MTL-style programming involves +The second ingredient of the MTL-style programming is \begin_inset Quotes eld \end_inset @@ -12684,7 +12724,7 @@ lifting \begin_inset Quotes erd \end_inset - the monad operations to arbitrary monad stacks. + the monadic operations to arbitrary monad stacks. For example, consider the \begin_inset listings inline true @@ -12697,7 +12737,7 @@ State \end_inset - monad with the operations + monad and its operations \begin_inset Formula $\text{get}:\text{State}^{S,S}$ \end_inset @@ -14564,9 +14604,10 @@ Since the stack type is is: \begin_inset Formula -\[ -\text{clear}_{P}:\left(S\rightarrow W\times A\times S\right)\rightarrow S\rightarrow W\times A\times S\quad,\quad\quad\text{clear}_{P}\triangleq p^{:S\rightarrow W\times A\times S}\rightarrow p\bef(\text{clear}\boxtimes\text{id}^{S})\quad. -\] +\begin{align*} + & \text{clear}_{P}:\left(S\rightarrow W\times A\times S\right)\rightarrow S\rightarrow W\times A\times S\quad,\\ + & \text{clear}_{P}\triangleq p^{:S\rightarrow W\times A\times S}\rightarrow p\bef(\text{clear}\boxtimes\text{id}^{S})\quad. +\end{align*} \end_inset @@ -14971,17 +15012,17 @@ status open \begin_layout Plain Layout - // Anywhere inside a for/yield: +// Anywhere inside a for/yield: \end_layout \begin_layout Plain Layout - y <- Monad[L].pure(x).up // Assume x: A + y <- Monad[L].pure(x).up // x: A \end_layout \begin_layout Plain Layout - z <- f(y) // f: A => T[B] + z <- f(y) // f: A => T[B] \end_layout \end_inset @@ -15139,7 +15180,7 @@ status open \begin_layout Plain Layout - // Anywhere inside a for/yield: +// Anywhere inside a for/yield: \end_layout \begin_layout Plain Layout @@ -15305,7 +15346,7 @@ status open \begin_layout Plain Layout - // Anywhere inside a for/yield: +// Anywhere inside a for/yield: \end_layout \begin_layout Plain Layout @@ -16061,8 +16102,10 @@ frun : \begin_inset Formula \begin{align*} - & \text{brun}:T_{\text{State}}^{M,A}\rightarrow M^{A}=(S\rightarrow M^{A\times S})\rightarrow M^{A}\quad\quad\text{vs.}\quad\theta_{\text{State}}:\left(S\rightarrow A\times S\right)\rightarrow A\quad,\\ - & \text{frun}:T_{\text{State}}^{M,A}\rightarrow\text{State}^{S,A}=(S\rightarrow M^{A\times S})\rightarrow S\rightarrow A\times S\quad\quad\text{vs.}\quad\theta_{M}^{\uparrow\text{State}}:(S\rightarrow M^{A}\times S)\rightarrow S\rightarrow A\times S\quad. + & \text{brun}:T_{\text{State}}^{M,A}\rightarrow M^{A}=(S\rightarrow M^{A\times S})\rightarrow M^{A}\\ + & \quad\quad\text{vs.}\quad\theta_{\text{State}}:\left(S\rightarrow A\times S\right)\rightarrow A\quad,\\ + & \text{frun}:T_{\text{State}}^{M,A}\rightarrow\text{State}^{S,A}=(S\rightarrow M^{A\times S})\rightarrow S\rightarrow A\times S\\ + & \quad\quad\text{vs.}\quad\theta_{M}^{\uparrow\text{State}}:(S\rightarrow M^{A}\times S)\rightarrow S\rightarrow A\times S\quad. \end{align*} \end_inset @@ -16150,7 +16193,8 @@ is no longer automatic and needs to be verified. 
\begin{align*} \text{left-hand side}:\quad & t\triangleright\text{frun}_{\text{State}}(\theta_{M})\bef\theta_{\text{State}}(i)=t\triangleright(t\rightarrow t\bef\theta_{M})\bef(t\rightarrow i\triangleright t\triangleright\pi_{1})\\ \text{compute composition}:\quad & \quad=i\triangleright(t\bef\theta_{M})\triangleright\pi_{1}=i\triangleright t\bef\theta_{M}\bef\pi_{1}\quad,\\ -\text{left-hand side}:\quad & t\triangleright\text{brun}_{\text{State}}(i)\bef\theta_{M}=t\triangleright(t\rightarrow i\triangleright t\triangleright\pi_{1}^{\uparrow M})\triangleright\theta_{M}=i\triangleright t\bef\gunderline{\pi_{1}^{\uparrow M}\bef\theta_{M}}\\ +\text{left-hand side}:\quad & t\triangleright\text{brun}_{\text{State}}(i)\bef\theta_{M}=t\triangleright(t\rightarrow i\triangleright t\triangleright\pi_{1}^{\uparrow M})\triangleright\theta_{M}\\ + & \quad=i\triangleright t\bef\gunderline{\pi_{1}^{\uparrow M}\bef\theta_{M}}\\ \text{naturality law of }\theta_{M}:\quad & \quad=i\triangleright t\bef\theta_{M}\bef\pi_{1}\quad. \end{align*} @@ -16899,9 +16943,10 @@ purity law: \begin_inset Formula -\[ -m\triangleright\text{flift}_{L}\triangleright\text{frun}_{L}(\theta_{M})=m\triangleright\theta_{M}\triangleright\text{pu}_{L}\quad,\quad\text{or equivalently:}\quad\quad\text{flift}_{L}\bef\text{frun}_{L}(\theta_{M})=\theta_{M}\bef\text{pu}_{L}\quad. -\] +\begin{align*} + & m\triangleright\text{flift}_{L}\triangleright\text{frun}_{L}(\theta_{M})=m\triangleright\theta_{M}\triangleright\text{pu}_{L}\quad,\\ +\text{or equivalently}:\quad & \text{flift}_{L}\bef\text{frun}_{L}(\theta_{M})=\theta_{M}\bef\text{pu}_{L}\quad. +\end{align*} \end_inset @@ -18595,9 +18640,10 @@ brun as the natural transformations: \begin_inset Formula -\[ -\text{flift}_{T}^{M}:\text{Id}^{M}\leadsto T^{M}\quad,\quad\quad\text{brun}_{T}^{M}:T^{M}\leadsto\text{Id}^{M}\quad,\quad\quad\text{frun}_{T}^{M,N}:(M\leadsto N)\rightarrow T^{M}\leadsto T^{N}\quad. -\] +\begin{align*} + & \text{flift}_{T}^{M}:\text{Id}^{M}\leadsto T^{M}\quad,\quad\quad\text{brun}_{T}^{M}:T^{M}\leadsto\text{Id}^{M}\quad,\\ + & \text{frun}_{T}^{M,N}:(M\leadsto N)\rightarrow T^{M}\leadsto T^{N}\quad. +\end{align*} \end_inset @@ -19954,7 +20000,7 @@ literal "false" \end_inset - and always produces a functor since Eq. + and always produces a new functor out of two functors, since Eq. \begin_inset space ~ \end_inset @@ -19992,7 +20038,8 @@ noprefix "false" \begin{align*} & \left(L\star M\right)^{A}\\ \text{definitions of }L,M,\star:\quad & \cong\exists P.\,\exists Q.\,\gunderline{\left(P\times Q\rightarrow A\right)}\times\left(\bbnum 1+P\right)\times\left(R\rightarrow Q\right)\\ -\text{curry the arguments, move a quantifier}:\quad & \cong\exists P.\left(\bbnum 1+P\right)\times\gunderline{\exists Q.\left(Q\rightarrow P\rightarrow A\right)\times\left(R\rightarrow Q\right)}\\ + & \text{curry the arguments, move a quantifier}:\quad\\ + & \cong\exists P.\left(\bbnum 1+P\right)\times\gunderline{\exists Q.\left(Q\rightarrow P\rightarrow A\right)\times\left(R\rightarrow Q\right)}\\ \text{co-Yoneda identity with }\exists Q:\quad & \cong\exists P.\left(\bbnum 1+P\right)\times\left(\gunderline{R\rightarrow P}\rightarrow A\right)\\ \text{swap curried arguments}:\quad & \cong\exists P.\left(\bbnum 1+P\right)\times\left(P\rightarrow R\rightarrow A\right)\\ \text{co-Yoneda identity with }\exists P:\quad & \cong\bbnum 1+\left(R\rightarrow A\right)\quad. 
@@ -20758,7 +20805,7 @@ stacking \begin_inset Quotes erd \end_inset - construction is written as + construction is written as: \begin_inset listings inline false status open @@ -21423,7 +21470,8 @@ The monadic naturality of : \begin_inset Formula \begin{align*} -\text{expect }\text{brun}_{R}(\theta)\bef\phi:\quad & \text{frun}_{R}(\phi)\bef\text{brun}_{R}(\theta)=\gunderline{\text{frun}_{P}(\text{frun}_{Q}(\phi))\bef\text{brun}_{P}}\left(\text{blift}_{P}\bef\theta\right)\bef\text{brun}_{Q}\left(\text{flift}_{P}\bef\theta\right)\\ +\text{expect }\text{brun}_{R}(\theta)\bef\phi:\quad & \text{frun}_{R}(\phi)\bef\text{brun}_{R}(\theta)\\ + & =\gunderline{\text{frun}_{P}(\text{frun}_{Q}(\phi))\bef\text{brun}_{P}}\left(\text{blift}_{P}\bef\theta\right)\bef\text{brun}_{Q}\left(\text{flift}_{P}\bef\theta\right)\\ \text{same law for }\text{brun}_{P}:\quad & =\text{brun}_{P}\left(\text{blift}_{P}\bef\theta\right)\bef\gunderline{\text{frun}_{Q}(\phi)\bef\text{brun}_{Q}}\left(\text{flift}_{P}\bef\theta\right)\\ \text{same law for }\text{brun}_{Q}:\quad & =\text{brun}_{P}\left(\text{blift}_{P}\bef\theta\right)\bef\text{brun}_{Q}\left(\text{flift}_{P}\bef\theta\right)\bef\phi=\text{brun}_{R}(\theta)\bef\phi\quad. \end{align*} @@ -24428,7 +24476,7 @@ compatibility laws \begin_inset Quotes erd \end_inset - + given in Eqs. \begin_inset space ~ \end_inset @@ -26104,7 +26152,7 @@ The laws of runners require that must be monad morphisms, i.e., the identity and composition laws must hold: \begin_inset Formula \begin{align*} - & \text{pu}_{L\circ M}\bef\text{frun}\,(\phi)=\text{pu}_{L\circ N}\quad,\quad\quad\text{ftn}_{L\circ M}\bef\text{frun}\,(\phi)=\left(\text{frun}\,(\phi)\right)^{\uparrow M\uparrow L}\bef\text{frun}\,(\phi)\bef\text{ftn}_{L\circ N}\quad,\\ + & \text{pu}_{L\circ M}\bef\text{frun}\,(\phi)=\text{pu}_{L\circ N}\quad,\quad\text{ftn}_{L\circ M}\bef\text{frun}\,(\phi)=\left(\text{frun}\,(\phi)\right)^{\uparrow M\uparrow L}\bef\text{frun}\,(\phi)\bef\text{ftn}_{L\circ N}\quad,\\ & \text{pu}_{L\circ M}\bef\text{brun}\,(\theta)=\text{pu}_{M}\quad,\quad\quad\text{ftn}_{L\circ M}\bef\text{brun}\,(\theta)=\left(\text{brun}\,(\theta)\right)^{\uparrow M\uparrow L}\bef\text{brun}\,(\theta)\bef\text{ftn}_{M}\quad. \end{align*} @@ -26410,9 +26458,10 @@ brun : \begin_inset Formula -\[ -\phi^{:M\leadsto N}\bef\text{flift}_{L}^{N}=\text{flift}_{L}^{M}\bef(\phi^{:M\leadsto N})^{\uparrow L}\quad,\quad\quad(\phi^{:M\leadsto N})^{\uparrow L}\bef\text{brun}_{L}^{N}(\theta_{L})=\text{brun}_{L}^{M}(\theta_{L})\bef\phi^{:M\leadsto N}\quad. -\] +\begin{align*} + & \phi^{:M\leadsto N}\bef\text{flift}_{L}^{N}=\text{flift}_{L}^{M}\bef(\phi^{:M\leadsto N})^{\uparrow L}\quad,\\ + & (\phi^{:M\leadsto N})^{\uparrow L}\bef\text{brun}_{L}^{N}(\theta_{L})=\text{brun}_{L}^{M}(\theta_{L})\bef\phi^{:M\leadsto N}\quad. 
+\end{align*} \end_inset @@ -26465,7 +26514,7 @@ The laws of runners require that must be monad morphisms, i.e., the identity and composition laws must hold: \begin_inset Formula \begin{align*} - & \text{pu}_{L\circ M}\bef\text{frun}\,(\phi)=\text{pu}_{N\circ M}\quad,\quad\quad\text{ftn}_{L\circ M}\bef\text{frun}\,(\phi)=\left(\text{frun}\,(\phi)\right)^{\uparrow M\uparrow L}\bef\text{frun}\,(\phi)\bef\text{ftn}_{N\circ M}\quad,\\ + & \text{pu}_{L\circ M}\bef\text{frun}\,(\phi)=\text{pu}_{N\circ M}\quad,\quad\text{ftn}_{L\circ M}\bef\text{frun}\,(\phi)=\left(\text{frun}\,(\phi)\right)^{\uparrow M\uparrow L}\bef\text{frun}\,(\phi)\bef\text{ftn}_{N\circ M}\quad,\\ & \text{pu}_{L\circ M}\bef\text{brun}\,(\theta)=\text{pu}_{L}\quad,\quad\quad\text{ftn}_{L\circ M}\bef\text{brun}\,(\theta)=\left(\text{brun}\,(\theta)\right)^{\uparrow M\uparrow L}\bef\text{brun}\,(\theta)\bef\text{ftn}_{L}\quad. \end{align*} @@ -26604,7 +26653,7 @@ We now transform the left-hand side, aiming to obtain the same expression. \text{definitions of }\text{ftn}_{L\circ M}\text{ and }\text{brun}:\quad & =\text{sw}_{L,M}^{\uparrow L}\bef\text{ftn}_{L}\bef\gunderline{\text{ftn}_{M}^{\uparrow L}\bef\theta^{\uparrow L}}\\ \text{composition law of }\theta:\quad & =\text{sw}_{L,M}^{\uparrow L}\bef\gunderline{\text{ftn}_{L}\bef\left(\theta\bef\theta\right)^{\uparrow L}}=\gunderline{\text{sw}_{L,M}^{\uparrow L}\bef\left(\theta\bef\theta\right)^{\uparrow L\uparrow L}}\bef\text{ftn}_{L}\\ \text{functor composition}:\quad & =\gunderline{(\text{sw}_{L,M}\bef\theta^{\uparrow L})}^{\uparrow L}\bef\theta^{\uparrow L\uparrow L}\bef\text{ftn}_{L}\\ -\text{monadic naturality law of }\text{sw}_{L,M}:\quad & =\gunderline{\theta^{\uparrow L}}\bef\theta^{\uparrow L\uparrow L}\bef\text{ftn}_{L}\quad. +\text{monadic naturality of }\text{sw}_{L,M}:\quad & =\gunderline{\theta^{\uparrow L}}\bef\theta^{\uparrow L\uparrow L}\bef\text{ftn}_{L}\quad. \end{align*} \end_inset @@ -27244,7 +27293,7 @@ OptionT \end_inset - monad transformer can be written as + monad transformer can be written as: \begin_inset listings inline false status open @@ -27690,25 +27739,29 @@ We need to show that : \begin_inset Formula \begin{align*} - & \text{pu}_{L}^{\uparrow M}\bef\text{sw}=\,\left\Vert \begin{array}{cc} + & \text{pu}_{L}^{\uparrow M}\bef\text{sw}\\ + & =\,\left\Vert \begin{array}{cc} \text{id} & \bbnum 0\\ \bbnum 0 & q\times a\rightarrow q\times\text{pu}_{L}(a) \end{array}\right|\,\bef\,\left\Vert \begin{array}{c} (x^{:P}\rightarrow x+\bbnum 0^{:Q\times A})\bef\text{pu}_{L}\\ q\times l\rightarrow l\triangleright(a^{:A}\rightarrow\bbnum 0^{:P}+q\times a)^{\uparrow L} \end{array}\right|\\ -\text{composition}:\quad & =\,\,\left\Vert \begin{array}{c} + & \text{composition}:\quad\\ + & =\,\,\left\Vert \begin{array}{c} (x^{:P}\rightarrow x+\bbnum 0^{:Q\times A})\bef\text{pu}_{L}\\ q\times a\rightarrow a\triangleright\gunderline{\text{pu}_{L}\bef(a^{:A}\rightarrow\bbnum 0^{:P}+q\times a)^{\uparrow L}} \end{array}\right|\\ -\text{pu}_{L}\text{'s naturality}:\quad & =\,\left\Vert \begin{array}{c} + & \text{pu}_{L}\text{'s naturality}:\quad\\ + & =\,\left\Vert \begin{array}{c} (x^{:P}\rightarrow x+\bbnum 0^{:Q\times A})\bef\text{pu}_{L}\\ (q\times a\rightarrow\bbnum 0^{:P}+q\times a)\bef\text{pu}_{L} \end{array}\right|\,=\,\begin{array}{|c||c|} P & x^{:P}\rightarrow x+\bbnum 0^{:Q\times A}\\ Q\times A & q\times a\rightarrow\bbnum 0^{:P}+q\times a \end{array}\,\bef\text{pu}_{L}\\ -\text{matrix notation}:\quad & =\text{id}\bef\text{pu}_{L}=\text{pu}_{L}\quad. 
+ & \text{matrix notation}:\quad\\ + & =\text{id}\bef\text{pu}_{L}=\text{pu}_{L}\quad. \end{align*} \end_inset @@ -27728,7 +27781,8 @@ We need to show that : \begin_inset Formula \begin{align*} - & \text{pu}_{M}\bef\text{sw}=\,\left\Vert \begin{array}{cc} + & \text{pu}_{M}\bef\text{sw}\\ + & =\,\left\Vert \begin{array}{cc} \bbnum 0 & l^{:L^{A}}\rightarrow q_{0}\times l\end{array}\right|\,\bef\,\left\Vert \begin{array}{c} (x^{:P}\rightarrow x+\bbnum 0^{:Q\times A})\bef\text{pu}_{L}\\ q\times l\rightarrow l\triangleright(x^{:A}\rightarrow\bbnum 0^{:P}+q\times x)^{\uparrow L} @@ -27802,7 +27856,8 @@ noprefix "false" \text{definition of }\text{sw}:\quad & =x\rightarrow(x+\bbnum 0)\triangleright\,\left\Vert \begin{array}{c} (x^{:P}\rightarrow x+\bbnum 0^{:Q\times A})\bef\text{pu}_{L}\\ q\times l\rightarrow l\triangleright(a\rightarrow\bbnum 0^{:P}+q\times a)^{\uparrow L} -\end{array}\right|\,=(x^{:P}\rightarrow x+\bbnum 0^{:Q\times A})\bef\text{pu}_{L}\quad. +\end{array}\right|\\ + & =(x^{:P}\rightarrow x+\bbnum 0^{:Q\times A})\bef\text{pu}_{L}\quad. \end{align*} \end_inset @@ -27916,14 +27971,15 @@ We need to show that : \begin_inset Formula \begin{align} - & \text{ftn}_{M}\bef\text{sw}=\,\left\Vert \begin{array}{cc} + & \text{ftn}_{M}\bef\text{sw}\nonumber \\ + & =\,\left\Vert \begin{array}{cc} \text{id} & \bbnum 0\\ q\times p\rightarrow p & \bbnum 0\\ \bbnum 0 & q_{1}\times q_{2}\times a\rightarrow\left(q_{1}\oplus q_{2}\right)\times a \end{array}\right|\,\bef\,\left\Vert \begin{array}{c} (x^{:P}\rightarrow x+\bbnum 0)\bef\text{pu}_{L}\\ q\times l\rightarrow l\triangleright(x\rightarrow\bbnum 0+q\times x)^{\uparrow L} -\end{array}\right|\nonumber \\ +\end{array}\right|\\ & =\,\,\left\Vert \begin{array}{c} (x^{:P}\rightarrow x+\bbnum 0)\bef\text{pu}_{L}\\ \left(q\times p\rightarrow p+\bbnum 0\right)\bef\text{pu}_{L}\\ @@ -27972,7 +28028,8 @@ Then compute the composition : \begin_inset Formula \begin{align*} - & \text{sw}^{\uparrow M}\bef\text{sw}=\,\left\Vert \begin{array}{cc} + & \text{sw}^{\uparrow M}\bef\text{sw}\\ + & =\,\left\Vert \begin{array}{cc} \text{id} & \bbnum 0\\ \bbnum 0 & q\times p\rightarrow q\times\text{pu}_{L}\left(p+\bbnum 0\right)\\ \bbnum 0 & q_{1}\times q_{2}\times l\rightarrow q_{1}\times(l\triangleright(x\rightarrow\bbnum 0+q_{2}\times x)^{\uparrow L}) @@ -27984,14 +28041,7 @@ q\times l\rightarrow(l\triangleright(x\rightarrow\bbnum 0+q\times x)^{\uparrow L (x^{:P}\rightarrow x+\bbnum 0)\bef\text{pu}_{L}\\ q\times p\rightarrow\left(p+\bbnum 0\right)\triangleright\text{pu}_{L}\bef(x^{:M^{A}}\rightarrow\bbnum 0^{:P}+q\times x)^{\uparrow L}\\ q_{1}\times q_{2}\times l\rightarrow l\triangleright(x^{:M^{A}}\rightarrow\bbnum 0^{:P}+q_{1}\times x)^{\uparrow L}\bef(x\rightarrow\bbnum 0+q_{2}\times x)^{\uparrow L} -\end{array}\right| -\end{align*} - -\end_inset - - -\begin_inset Formula -\begin{align*} +\end{array}\right|\\ & =\,\left\Vert \begin{array}{c} (x^{:P}\rightarrow x+\bbnum 0)\bef\text{pu}_{L}\\ q\times p\rightarrow(\bbnum 0^{:P}+q\times\left(p+\bbnum 0\right))\triangleright\text{pu}_{L}\\ @@ -28047,17 +28097,20 @@ Now we need to post-compose (q\times p\rightarrow\bbnum 0^{:P}+q\times\left(p+\bbnum 0\right))\bef\gunderline{\text{pu}_{L}\bef\text{ftn}_{M}^{\uparrow L}}\\ q_{1}\times q_{2}\times l\rightarrow l\triangleright(x^{:M^{A}}\rightarrow\bbnum 0+q_{1}\times(\bbnum 0+q_{2}\times x))^{\uparrow L}\bef\text{ftn}_{M}^{\uparrow L} \end{array}\right|\\ -\text{naturality law of }\text{pu}_{L}:\quad & =\,\,\left\Vert \begin{array}{c} + & \text{naturality law of 
}\text{pu}_{L}:\quad\\ + & =\,\,\left\Vert \begin{array}{c} (x^{:P}\rightarrow x+\bbnum 0)\bef\text{ftn}_{M}\bef\text{pu}_{L}\\ (q\times p\rightarrow\bbnum 0^{:P}+q\times\left(p+\bbnum 0\right))\bef\text{ftn}_{M}\bef\text{pu}_{L}\\ q_{1}\times q_{2}\times l\rightarrow l\triangleright(x^{:M^{A}}\rightarrow\gunderline{\text{ftn}_{M}\left(\bbnum 0+q_{1}\times(\bbnum 0+q_{2}\times x)\right)})^{\uparrow L} \end{array}\right|\\ -\text{compute }\text{ftn}_{M}(...):\quad & =\,\left\Vert \begin{array}{c} + & \text{compute }\text{ftn}_{M}(...):\quad\\ + & =\,\left\Vert \begin{array}{c} (x^{:P}\rightarrow\text{ftn}_{M}(x+\bbnum 0))\bef\text{pu}_{L}\\ (q\times p\rightarrow\gunderline{\text{ftn}_{M}(\bbnum 0^{:P}+q\times\left(p+\bbnum 0\right))})\bef\text{pu}_{L}\\ q_{1}\times q_{2}\times l\rightarrow l\triangleright(x^{:M^{A}}\rightarrow\bbnum 0+\left(q_{1}\oplus q_{2}\right)\times x)^{\uparrow L} \end{array}\right|\\ -\text{compute }\text{ftn}_{M}(...):\quad & =\,\,\left\Vert \begin{array}{c} + & \text{compute }\text{ftn}_{M}(...):\quad\\ + & =\,\,\left\Vert \begin{array}{c} (x^{:P}\rightarrow x+\bbnum 0)\bef\text{pu}_{L}\\ (q\times p\rightarrow p+\bbnum 0)\bef\text{pu}_{L}\\ q_{1}\times q_{2}\times l\rightarrow l\triangleright(x^{:M^{A}}\rightarrow\bbnum 0+\left(q_{1}\oplus q_{2}\right)\times x)^{\uparrow L} @@ -28213,14 +28266,16 @@ q\times l\rightarrow l\triangleright\phi\bef(a\rightarrow\bbnum 0^{:P}+q\times a The right-hand side is: \begin_inset Formula \begin{align*} - & \phi^{\uparrow M}\bef\text{sw}_{N,M}=\,\left\Vert \begin{array}{cc} + & \phi^{\uparrow M}\bef\text{sw}_{N,M}\\ + & =\,\left\Vert \begin{array}{cc} \text{id} & \bbnum 0\\ \bbnum 0 & q\times l\rightarrow q\times\left(l\triangleright\phi\right) \end{array}\right|\,\bef\,\left\Vert \begin{array}{c} (x^{:P}\rightarrow x+\bbnum 0^{:Q\times A})\bef\text{pu}_{N}\\ q\times n\rightarrow n\triangleright(a\rightarrow\bbnum 0^{:P}+q\times a)^{\uparrow N} \end{array}\right|\\ -\text{composition}:\quad & =\,\,\left\Vert \begin{array}{c} + & \text{composition}:\quad\\ + & =\,\,\left\Vert \begin{array}{c} (x^{:P}\rightarrow x+\bbnum 0^{:Q\times A})\bef\text{pu}_{N}\\ q\times l\rightarrow l\triangleright\phi\bef(a\rightarrow\bbnum 0^{:P}+q\times a)^{\uparrow N} \end{array}\right|\quad. @@ -29332,7 +29387,7 @@ noprefix "false" \end_layout \begin_layout Standard -Consider the functor composition of the +Consider the \begin_inset listings inline true status open @@ -29364,10 +29419,19 @@ Reader \begin_inset Formula $R_{2}^{A}\triangleq Z\rightarrow A$ \end_inset -: +. + The functor composition of +\begin_inset Formula $R_{1}$ +\end_inset + + and +\begin_inset Formula $R_{2}$ +\end_inset + + is: \begin_inset Formula \[ -P^{A}\triangleq((Z\rightarrow A)\rightarrow Q)\rightarrow Z\rightarrow A\quad. +P^{A}\triangleq R_{1}\circ R_{2}=((Z\rightarrow A)\rightarrow Q)\rightarrow Z\rightarrow A\quad. 
\] \end_inset @@ -30036,7 +30100,8 @@ swap \begin{align} & \text{sw}_{R,M}:M^{R^{A}}\rightarrow R^{M^{A}}\quad,\quad\quad\text{sw}_{R,M}\triangleq m^{:M^{R^{A}}}\rightarrow q^{:H^{M^{A}}}\rightarrow m\triangleright(r\rightarrow q\triangleright\text{pu}_{M}^{\downarrow H}\bef r)^{\uparrow M}\quad,\label{eq:rigid-monad-short-formula-for-swap}\\ & \text{pu}_{T}:A\rightarrow H^{M^{A}}\rightarrow M^{A}\quad,\quad\quad\text{pu}_{T}\triangleq\text{pu}_{M}\bef\text{pu}_{R}=a^{:A}\rightarrow\_^{:H^{M^{A}}}\rightarrow\text{pu}_{M}(a)\quad,\nonumber \\ - & \text{ftn}_{T}\triangleq t^{:T^{T^{A}}}\rightarrow k^{:H^{M^{A}}}\rightarrow k\triangleright\big(t\triangleright(\text{flm}_{M}(r^{:R^{M^{A}}}\rightarrow r(k)))^{\uparrow R}\big)=\text{flm}_{R}\left(t\rightarrow q\rightarrow t\triangleright\text{flm}_{M}(r\rightarrow r(q))\right)\quad,\nonumber \\ + & \text{ftn}_{T}\triangleq t^{:T^{T^{A}}}\rightarrow k^{:H^{M^{A}}}\rightarrow k\triangleright\big(t\triangleright(\text{flm}_{M}(r^{:R^{M^{A}}}\rightarrow r(k)))^{\uparrow R}\big)\nonumber \\ + & \quad=\text{flm}_{R}\left(t\rightarrow q\rightarrow t\triangleright\text{flm}_{M}(r\rightarrow r(q))\right)\quad,\\ & \text{flm}_{T}(f)=\text{flm}_{R}\big(y\rightarrow q\rightarrow y\triangleright\text{flm}_{M}(x\rightarrow q\triangleright(x\triangleright f))\big)\label{eq:rigid-monad-def-flm-t-via-flm-r}\\ & \quad=t^{:R^{M^{A}}}\rightarrow q^{:H^{M^{B}}}\rightarrow q\triangleright\big(t\triangleright\big(\text{flm}_{M}(x^{:A}\rightarrow q\triangleright(x\triangleright f))\big)^{\uparrow R}\big)\quad.\label{eq:rigid-monad-flm-T-def} \end{align} @@ -30229,9 +30294,10 @@ noprefix "false" ): \begin_inset Formula -\[ -\text{sw}_{R,M}:M^{R^{A}}\rightarrow R^{M^{A}}\quad,\quad\quad\text{sw}_{R,M}\triangleq m^{:M^{R^{A}}}\rightarrow q^{:H^{M^{A}}}\rightarrow m\triangleright(r^{:H^{A}\rightarrow A}\rightarrow q\triangleright\text{pu}_{M}^{\downarrow H}\bef r)^{\uparrow M}\quad. -\] +\begin{align*} + & \text{sw}_{R,M}:M^{R^{A}}\rightarrow R^{M^{A}}\quad,\\ + & \text{sw}_{R,M}\triangleq m^{:M^{R^{A}}}\rightarrow q^{:H^{M^{A}}}\rightarrow m\triangleright(r^{:H^{A}\rightarrow A}\rightarrow q\triangleright\text{pu}_{M}^{\downarrow H}\bef r)^{\uparrow M}\quad. +\end{align*} \end_inset @@ -30373,7 +30439,8 @@ Substituting these definitions, we compute: \begin{align*} & f\diamond_{_{R}}g=a^{:A}\rightarrow k^{:H^{C}}\rightarrow a\triangleright\tilde{f}\big(k\triangleright(\tilde{g}(k))^{\downarrow H}\big)\,\gunderline{\bef}\,\tilde{g}(k)\\ \triangleright\text{-notation}:\quad & =a\rightarrow k\rightarrow a\triangleright\gunderline{\tilde{f}}\big(k\triangleright(\tilde{g}(k))^{\downarrow H}\big)\triangleright\tilde{g}(k)\\ - & =a\rightarrow k\rightarrow f(a)(k\,\gunderline{\triangleright\,(\tilde{g}(k)})^{\downarrow H})\,\gunderline{\triangleright\,\tilde{g}(k)}=a\rightarrow k\rightarrow g\big(f(a)(k\triangleright(b\rightarrow g(b)(k))^{\downarrow H})\big)(k)\quad. + & =a\rightarrow k\rightarrow f(a)(k\,\gunderline{\triangleright\,(\tilde{g}(k)})^{\downarrow H})\,\gunderline{\triangleright\,\tilde{g}(k)}\\ + & =a\rightarrow k\rightarrow g\big(f(a)(k\triangleright(b\rightarrow g(b)(k))^{\downarrow H})\big)(k)\quad. 
\end{align*} \end_inset @@ -30999,7 +31066,8 @@ noprefix "false" : \begin_inset Formula \begin{align*} - & (\text{ftn}_{R}^{\uparrow M}\bef\text{sw})(m)(q)=q\triangleright\big(m\triangleright\text{ftn}_{R}^{\uparrow M}\gunderline{\bef}\text{sw}\big)=q\triangleright\big(m\triangleright\text{ftn}_{R}^{\uparrow M}\triangleright\gunderline{\text{sw}}\big)\\ + & (\text{ftn}_{R}^{\uparrow M}\bef\text{sw})(m)(q)=q\triangleright\big(m\triangleright\text{ftn}_{R}^{\uparrow M}\gunderline{\bef}\text{sw}\big)\\ + & =q\triangleright\big(m\triangleright\text{ftn}_{R}^{\uparrow M}\triangleright\gunderline{\text{sw}}\big)\\ \text{use Eq.~(\ref{eq:rigid-monad-choice-swap-short})}:\quad & =m\triangleright\gunderline{\text{ftn}_{R}^{\uparrow M}\bef(r\rightarrow q\triangleright\text{pu}_{M}^{\downarrow H}\bef r)^{\uparrow M}}\\ \text{composition law of }M:\quad & =m\triangleright\big(\text{ftn}_{R}\bef(r\rightarrow q\triangleright\text{pu}_{M}^{\downarrow H}\bef r)\big)^{\uparrow M}\quad. \end{align*} @@ -31126,7 +31194,8 @@ noprefix "false" & q\triangleright\big(q\triangleright\big(\text{sw}\bef(x\rightarrow q\triangleright x)\big)^{\downarrow H}\triangleright\gunderline{\text{sw}(m)\bef\text{sw}}\big)\\ \text{use Eq.~(\ref{eq:swap-law-3-derivation-1})}:\quad & =m\triangleright\big(r\rightarrow q\triangleright\text{pu}_{M}^{\downarrow H}\bef(q\triangleright\gunderline{\big(\text{sw}\bef(x\rightarrow q\triangleright x)\big)^{\downarrow H}\triangleright\text{pu}_{M}^{\downarrow H}}\bef r)\big)^{\uparrow M}\\ \text{composition under }^{\downarrow H}:\quad & =m\triangleright\big(r\rightarrow q\triangleright\text{pu}_{M}^{\downarrow H}\bef(q\triangleright\big(\gunderline{\text{pu}_{M}\bef\text{sw}}\bef(x\rightarrow q\triangleright x)\big)^{\downarrow H}\bef r)\big)^{\uparrow M}\\ -\text{outer identity law of }\text{sw}:\quad & =m\triangleright\big(r\rightarrow q\triangleright\text{pu}_{M}^{\downarrow H}\bef(q\triangleright\big(\gunderline{\text{pu}_{M}^{\uparrow R}\bef(x\rightarrow q\triangleright x)}\big)^{\downarrow H}\bef r)\big)^{\uparrow M}\\ + & \text{outer identity law of }\text{sw}:\quad\\ + & =m\triangleright\big(r\rightarrow q\triangleright\text{pu}_{M}^{\downarrow H}\bef(q\triangleright\big(\gunderline{\text{pu}_{M}^{\uparrow R}\bef(x\rightarrow q\triangleright x)}\big)^{\downarrow H}\bef r)\big)^{\uparrow M}\\ \text{compute composition}:\quad & =m\triangleright\big(r\rightarrow q\triangleright\text{pu}_{M}^{\downarrow H}\bef(q\triangleright\big(x\rightarrow q\triangleright\text{pu}_{M}^{\uparrow R}(x)\big)^{\downarrow H}\bef r)\big)^{\uparrow M}\quad. 
\end{align*} @@ -31164,7 +31233,8 @@ We will apply both sides of the law to arbitrary \text{definition of }^{\uparrow R}:\quad & =q\triangleright\big(\text{ftn}_{M}^{\downarrow H}\gunderline{\bef}(m\triangleright\text{sw}^{\uparrow M}\triangleright\text{sw})\gunderline{\bef}\text{ftn}_{M}\big)\nonumber \\ \triangleright\text{-notation}:\quad & =\big(\gunderline{q\triangleright\text{ftn}_{M}^{\downarrow H}}\triangleright(\gunderline{m\triangleright\text{sw}^{\uparrow M}}\triangleright\gunderline{\text{sw}})\big)\triangleright\text{ftn}_{M}\nonumber \\ \text{use Eq.~(\ref{eq:rigid-monad-choice-swap-short})}:\quad & =\big(m\triangleright\gunderline{\text{sw}^{\uparrow M}}\triangleright\big(r\rightarrow q\triangleright\gunderline{\text{ftn}_{M}^{\downarrow H}\triangleright\text{pu}_{M}^{\downarrow H}}\bef r\gunderline{\big)^{\uparrow M}}\big)\triangleright\text{ftn}_{M}\nonumber \\ -\text{composition under }^{\downarrow H}\text{ and }^{\uparrow M}:\quad & =m\triangleright\big(\text{sw}\bef\big(r\rightarrow q\triangleright(\gunderline{\text{pu}_{M}\bef\text{ftn}_{M}})^{\downarrow H}\bef r\big)\big)^{\uparrow M}\bef\text{ftn}_{M}\nonumber \\ + & \text{composition under }^{\downarrow H}\text{ and }^{\uparrow M}:\quad\nonumber \\ + & =m\triangleright\big(\text{sw}\bef\big(r\rightarrow q\triangleright(\gunderline{\text{pu}_{M}\bef\text{ftn}_{M}})^{\downarrow H}\bef r\big)\big)^{\uparrow M}\bef\text{ftn}_{M}\\ \text{left identity law of }M:\quad & =m\triangleright\big(\text{sw}\bef(r\rightarrow q\triangleright r)\big)^{\uparrow M}\bef\text{ftn}_{M}\quad.\label{eq:rigid-monad-1-swap-law-4-derivation-5} \end{align} @@ -31200,7 +31270,8 @@ noprefix "false" ), we get: \begin_inset Formula \begin{align*} - & m\triangleright\big(\gunderline{\text{sw}\bef(r\rightarrow q\triangleright r)}\big)^{\uparrow M}\bef\text{ftn}_{M}=m\triangleright\big(r\rightarrow q\triangleright\text{pu}_{M}^{\downarrow H}\bef r\gunderline{\big)^{\uparrow M\uparrow M}\bef\text{ftn}_{M}}\\ + & m\triangleright\big(\gunderline{\text{sw}\bef(r\rightarrow q\triangleright r)}\big)^{\uparrow M}\bef\text{ftn}_{M}\\ + & =m\triangleright\big(r\rightarrow q\triangleright\text{pu}_{M}^{\downarrow H}\bef r\gunderline{\big)^{\uparrow M\uparrow M}\bef\text{ftn}_{M}}\\ \text{naturality law of }\text{ftn}_{M}:\quad & =m\triangleright\text{ftn}_{M}\bef\big(r\rightarrow q\triangleright\text{pu}_{M}^{\downarrow H}\bef r\big)^{\uparrow M}\quad. \end{align*} @@ -32028,7 +32099,8 @@ The functor laws hold due to the properties of the pair product: \begin_inset Formula \begin{align*} \text{expect to equal }\text{id}:\quad & \text{frun}_{L}(\text{id})=\text{frun}_{G}(\text{id})\boxtimes\text{frun}_{H}(\text{id})=\text{id}\boxtimes\text{id}=\text{id}\quad,\\ -\text{expect to equal }\text{frun}_{L}(\phi\bef\chi):\quad & \text{frun}_{L}(\phi)\bef\text{frun}_{L}(\chi)=\big(\gunderline{\text{frun}_{G}(\phi)\bef\text{frun}_{G}(\chi)}\big)\boxtimes\big(\gunderline{\text{frun}_{H}(\phi)\bef\text{frun}_{H}(\chi)}\big)\\ +\text{expect to equal }\text{frun}_{L}(\phi\bef\chi):\quad & \text{frun}_{L}(\phi)\bef\text{frun}_{L}(\chi)\\ + & \quad=\big(\gunderline{\text{frun}_{G}(\phi)\bef\text{frun}_{G}(\chi)}\big)\boxtimes\big(\gunderline{\text{frun}_{H}(\phi)\bef\text{frun}_{H}(\chi)}\big)\\ \text{functor laws of }\text{frun}_{G},\,\text{frun}_{H}:\quad & \quad=\text{frun}_{G}(\phi\bef\chi)\boxtimes\text{frun}_{H}(\phi\bef\chi)=\text{frun}_{L}(\phi\bef\chi)\quad. 
\end{align*} @@ -32053,7 +32125,16 @@ We may define a base runner in two different ways, by dropping either the \end_inset -It is a monad morphism because it is a composition of a projection (Statement +It is a monad morphism because it is a function composition of two monad + morphisms: +\begin_inset Formula $\text{brun}_{G}$ +\end_inset + + and a projection function +\begin_inset Formula $\pi_{1}$ +\end_inset + + (which is a monad morphism by Statement \begin_inset space ~ \end_inset @@ -32067,11 +32148,7 @@ noprefix "false" \end_inset -) and a monad morphism -\begin_inset Formula $\text{brun}_{G}$ -\end_inset - -. +). Function composition preserves monad morphisms (Statement \begin_inset space ~ \end_inset @@ -32221,10 +32298,6 @@ To verify the monadic naturality law of \end_inset - -\end_layout - -\begin_layout Standard To verify the monadic naturality law of an information-losing definition of \begin_inset Formula $\text{brun}_{L}$ @@ -32754,12 +32827,13 @@ N^{M^{A}} & (t\rightarrow\bbnum 0^{:A}+\text{merge}\,(t))\bef\text{pu}_{M} & M^{A+N^{A}}\\ \hline A & \text{pu}_{M}\bef(a\rightarrow a+\bbnum 0^{:N^{A}})^{\uparrow M}\\ N^{A} & \text{pu}_{M}^{\uparrow N}\bef(t\rightarrow\bbnum 0^{:A}+\text{merge}\,(t))\bef\text{pu}_{M} -\end{array}\,=\,\begin{array}{|c||c|} +\end{array}\\ + & =\,\,\begin{array}{|c||c|} & M^{A+N^{A}}\\ \hline A & (a\rightarrow a+\bbnum 0^{:N^{A}})\bef\text{pu}_{M}\\ N^{A} & (n\rightarrow\bbnum 0^{:A}+\text{merge}\,(n\triangleright\text{pu}_{M}^{\uparrow N}))\bef\text{pu}_{M} \end{array}\\ - & =\,\,\begin{array}{|c||c|} + & =\,\begin{array}{|c||c|} & A+N^{A}\\ \hline A & a\rightarrow a+\bbnum 0^{:N^{A}}\\ N^{A} & n\rightarrow\bbnum 0^{:A}+n\triangleright\text{pu}_{M}^{\uparrow N}\bef\text{merge} @@ -32873,7 +32947,8 @@ The first rows of the matrices for the two sides of the law are now equal \begin_inset Formula \begin{align*} & (t\rightarrow\bbnum 0+\text{merge}\,(t))\bef\text{sw}\overset{?}{=}\text{ftn}_{M}^{\uparrow N}\bef(t\rightarrow\bbnum 0+\text{merge}\,(t))\bef\text{pu}_{M}\quad,\\ -\text{apply both sides to }t:\quad & (\bbnum 0+t\triangleright\text{merge})\,\gunderline{\triangleright\,\text{sw}}\overset{?}{=}\big(\bbnum 0+t\triangleright\text{ftn}_{M}^{\uparrow N}\triangleright\text{merge}\big)\triangleright\text{pu}_{M}\quad,\\ + & \text{apply both sides to }t:\quad\\ + & (\bbnum 0+t\triangleright\text{merge})\,\gunderline{\triangleright\,\text{sw}}\overset{?}{=}\big(\bbnum 0+t\triangleright\text{ftn}_{M}^{\uparrow N}\triangleright\text{merge}\big)\triangleright\text{pu}_{M}\quad,\\ \text{apply }\text{sw}:\quad & (\bbnum 0+t\triangleright\text{merge}\triangleright\text{merge})\triangleright\text{pu}_{M}\overset{?}{=}\big(\bbnum 0+t\triangleright\text{ftn}_{M}^{\uparrow N}\triangleright\text{merge}\big)\triangleright\text{pu}_{M}\quad. \end{align*} @@ -32893,9 +32968,10 @@ merge has the following property: \begin_inset Formula -\[ -\text{merge}\bef\text{merge}=\text{ftn}_{M}^{\uparrow N}\bef\text{merge}\quad,\quad\text{or}:\quad\text{flift}_{K}^{\uparrow N}\bef\text{ftn}_{N}\bef\text{flift}_{K}^{\uparrow N}\bef\text{ftn}_{N}=\text{ftn}_{M}^{\uparrow N}\bef\text{flift}_{K}^{\uparrow N}\bef\text{ftn}_{N}\quad. -\] +\begin{align*} + & \text{merge}\bef\text{merge}=\text{ftn}_{M}^{\uparrow N}\bef\text{merge}\quad,\\ +\text{or}:\quad & \text{flift}_{K}^{\uparrow N}\bef\text{ftn}_{N}\bef\text{flift}_{K}^{\uparrow N}\bef\text{ftn}_{N}=\text{ftn}_{M}^{\uparrow N}\bef\text{flift}_{K}^{\uparrow N}\bef\text{ftn}_{N}\quad. 
+\end{align*} \end_inset @@ -32983,8 +33059,10 @@ flift \begin{align*} \text{expect to equal }\text{ftn}_{M}^{\uparrow N}\bef\text{merge}:\quad & \text{merge}\bef\text{merge}=\text{flift}_{K}^{\uparrow N}\bef\gunderline{\text{ftn}_{N}\bef\text{flift}_{K}^{\uparrow N}}\bef\text{ftn}_{N}\\ \text{naturality of }\text{ftn}_{N}:\quad & =\text{flift}_{K}^{\uparrow N}\bef\text{flift}_{K}^{\uparrow N\uparrow N}\bef\gunderline{\text{ftn}_{N}\bef\text{ftn}_{N}}\\ -\text{associativity law of }\text{ftn}_{N}:\quad & =\gunderline{\text{flift}_{K}^{\uparrow N}\bef\text{flift}_{K}^{\uparrow N\uparrow N}\bef\text{ftn}_{N}^{\uparrow N}}\bef\text{ftn}_{N}=(\gunderline{\text{flift}_{K}\bef\text{flift}_{K}^{\uparrow N}\bef\text{ftn}_{N}})^{\uparrow N}\bef\text{ftn}_{N}\\ -\text{lifting law~(\ref{eq:free-pointed-transformer-use-lifting-derivation1})}:\quad & =\gunderline{(\text{ftn}_{M}\bef\text{flift}_{K})^{\uparrow N}}\bef\text{ftn}_{N}=\text{ftn}_{M}^{\uparrow N}\bef\gunderline{\text{flift}_{K}^{\uparrow N}\bef\text{ftn}_{N}}=\text{ftn}_{M}^{\uparrow N}\bef\text{merge}\quad. +\text{associativity law of }\text{ftn}_{N}:\quad & =\gunderline{\text{flift}_{K}^{\uparrow N}\bef\text{flift}_{K}^{\uparrow N\uparrow N}\bef\text{ftn}_{N}^{\uparrow N}}\bef\text{ftn}_{N}\\ + & =(\gunderline{\text{flift}_{K}\bef\text{flift}_{K}^{\uparrow N}\bef\text{ftn}_{N}})^{\uparrow N}\bef\text{ftn}_{N}\\ +\text{lifting law~(\ref{eq:free-pointed-transformer-use-lifting-derivation1})}:\quad & =\gunderline{(\text{ftn}_{M}\bef\text{flift}_{K})^{\uparrow N}}\bef\text{ftn}_{N}=\text{ftn}_{M}^{\uparrow N}\bef\gunderline{\text{flift}_{K}^{\uparrow N}\bef\text{ftn}_{N}}\\ + & =\text{ftn}_{M}^{\uparrow N}\bef\text{merge}\quad. \end{align*} \end_inset @@ -43189,7 +43267,7 @@ flift \begin_inset Formula $g^{:H^{C}\rightarrow A\rightarrow T_{L}^{M,C}}$ \end_inset - ignore their first arguments, we may write + ignore their first arguments, we may write: \begin_inset Formula \[ f\,\tilde{\diamond}_{_{T}}g=k^{:H^{C}}\rightarrow f(...)\diamond_{_{T_{L}}}g(...)=(\_^{:H^{C}})\rightarrow f(...)\diamond_{_{T_{L}}}g(...)\quad. diff --git a/sofp-src/lyx/sofp-traversable.lyx b/sofp-src/lyx/sofp-traversable.lyx index 936e72981..96cb38fdc 100644 --- a/sofp-src/lyx/sofp-traversable.lyx +++ b/sofp-src/lyx/sofp-traversable.lyx @@ -437,7 +437,7 @@ reduce \end_inset method. - In this way, we will obtain a complete understanding of the + In this way, we will conclude the study of the \begin_inset listings inline true status open @@ -1007,7 +1007,7 @@ Omitting the argument \begin_inset Formula $b_{0}$ \end_inset -, we may visualize the computation performed by +, we visualize the computation performed by \begin_inset listings inline true status open @@ -1245,51 +1245,51 @@ status open \begin_layout Plain Layout -Seq[_] +Seq \end_layout \end_inset - by an arbitrary type constructor + by an arbitrary functor \begin_inset listings inline true status open \begin_layout Plain Layout -L[_] +L \end_layout \end_inset . 
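As a concrete reminder of the computation that foldLeft performs (a small illustration of our own, not one of the book's listings), the nested applications of the updater function can be unrolled by hand:

// The updater function f combines the current accumulator with the next element.
val f: (Int, Int) => Int = (b, a) => b * 10 + a

// foldLeft applies f left to right, threading the accumulator through the sequence:
val result = Seq(1, 2, 3).foldLeft(0)(f)   // Computes f(f(f(0, 1), 2), 3) == 123

The generalization below keeps the same idea: an accumulator updated once per stored value.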
- We call -\series bold -foldable -\series default - -\begin_inset Index idx + We call a functor +\begin_inset listings +inline true status open \begin_layout Plain Layout -foldable functor + +L \end_layout \end_inset - a type constructor -\begin_inset listings -inline true + +\series bold +foldable +\series default + +\begin_inset Index idx status open \begin_layout Plain Layout - -L[_] +foldable functor \end_layout \end_inset - for which the + if a \begin_inset listings inline true status open @@ -1848,7 +1848,8 @@ traverse \end_inset -, we will look at some examples of its practical use. +, we will look at some examples of implementing that function for various + type constructors and using it in practice. \end_layout \begin_layout Section @@ -1935,11 +1936,11 @@ traverse lines 0 placement l overhang 0in -width "50col%" +width "40col%" status open \begin_layout Plain Layout -\begin_inset VSpace -100baselineskip% +\begin_inset VSpace -50baselineskip% \end_inset @@ -2019,7 +2020,7 @@ def trav[A, B, F[_]: Applicative : Functor](f: A => F[B]): Option[(A, A)] \end_inset -In the short type notation, this type signature is: +In the type notation, this type signature is written as: \begin_inset Formula \[ \text{trav}_{L}:(A\rightarrow F^{B})\rightarrow(\bbnum 1+A\times A)\rightarrow F^{\bbnum 1+B\times B}\quad. @@ -2183,8 +2184,7 @@ def trav[A, B, F[_]: Applicative : Functor](f: A => F[B]): Option[(A, A)] \begin_layout Plain Layout - case None => Applicative[F].pure(None) // No other - choice here. + case None => Applicative[F].pure(None) \end_layout \begin_layout Plain Layout @@ -3663,7 +3663,7 @@ status open \begin_layout Plain Layout -L[_] +L \end_layout \end_inset @@ -4771,7 +4771,7 @@ val t2 = Branch(Leaf(8), Branch(Branch(Leaf(3), Leaf(5)), Leaf(4))) \begin_layout Plain Layout -scala> t2.map(x => x + 20) // Assuming a Functor instance for T2[_]. +scala> t2.map(x => x + 20) // Assuming a Functor instance for T2. \end_layout \begin_layout Plain Layout @@ -5095,13 +5095,12 @@ def zipWithIndexDF[A](tree: T2[A]): T2[(A, Int)] = { \begin_layout Plain Layout - val afterTraverse: St[T2[(A, Int)]] = trav[A, (A, Int), St](computeIndex)(tree -) + val result: St[T2[(A, Int)]] = trav[A, (A, Int), St](computeIndex)(tree) \end_layout \begin_layout Plain Layout - afterTraverse.run(0)._1 // Run the State monad and get the result value. + result.run(0)._1 // Run the State monad and get the result value. \end_layout \begin_layout Plain Layout @@ -5372,7 +5371,7 @@ status open \begin_layout Plain Layout -F[_] +F \end_layout \end_inset @@ -5426,9 +5425,22 @@ toListBFS \end_inset - shown in the previous section is not sufficient for that purpose, because - the tree structure cannot be reproduced if we only have a list of leaf - values. + shown in Section +\begin_inset space ~ +\end_inset + + +\begin_inset CommandInset ref +LatexCommand ref +reference "subsec:Aggregating-tree-like-data-bfs" +plural "false" +caps "false" +noprefix "false" + +\end_inset + + is not sufficient for that purpose, because the tree structure cannot be + reproduced if we only have a list of leaf values. 
Even the nested list computed by \begin_inset listings inline true @@ -6138,7 +6150,7 @@ t2ToTD \end_inset -, which (if implemented correctly) should yield the following: +, which (if implemented correctly) should give this: \begin_inset listings inline false status open @@ -7114,7 +7126,7 @@ status open \begin_layout Plain Layout -TD[_] +TD \end_layout \end_inset @@ -7126,7 +7138,7 @@ status open \begin_layout Plain Layout -TD[_] +TD \end_layout \end_inset @@ -7535,13 +7547,12 @@ def zipWithIndexBF[A](tree: T2[A]): T2[(A, Int)] = { \begin_layout Plain Layout - val afterTraverse: St[T2[(A, Int)]] = travBF[A, (A, Int), St](computeIndex)(tr -ee) + val result: St[T2[(A, Int)]] = travBF[A, (A, Int), St](computeIndex)(tree) \end_layout \begin_layout Plain Layout - afterTraverse.run(0)._1 // Run the State monad and get the result value. + result.run(0)._1 // Run the State monad and get the result value. \end_layout \begin_layout Plain Layout @@ -7906,7 +7917,7 @@ status open \begin_layout Plain Layout -L[_] +L \end_layout \end_inset @@ -8297,7 +8308,7 @@ status open \begin_layout Plain Layout -F[_] +F \end_layout \end_inset @@ -8332,8 +8343,15 @@ traverse \end_layout \begin_layout Standard -The first example is the depth labeling of a tree: each leaf gets a value - equal to its depth. +The first example is the +\begin_inset Quotes eld +\end_inset + +depth labeling +\begin_inset Quotes erd +\end_inset + + of a tree: each leaf gets a value equal to its depth. For instance, the tree \begin_inset Preview @@ -8496,7 +8514,7 @@ traverse \end_layout \begin_layout Standard -Depth labeling can be implemented as a special operation such as +Depth labeling can be implemented as a recursive function \begin_inset listings inline true status open @@ -8506,18 +8524,6 @@ status open zipWithDepth \end_layout -\end_inset - - for the tree type -\begin_inset listings -inline true -status open - -\begin_layout Plain Layout - -T2 -\end_layout - \end_inset : @@ -8774,10 +8780,10 @@ T2 This type refers recursively to itself in two places. To express that, define a bifunctor -\begin_inset Formula $S^{A,R}$ +\begin_inset Formula $S$ \end_inset -: + like this: \begin_inset Formula \[ S^{A,R}\triangleq A+R\times R\quad. @@ -8834,7 +8840,7 @@ bifunctor \end_inset -\begin_inset Formula $S^{\bullet,\bullet}$ +\begin_inset Formula $S$ \end_inset is called the @@ -8964,15 +8970,7 @@ printLaTeXSubtree \end_inset -\begin_inset listings -inline true -status open - -\begin_layout Plain Layout - -pls -\end_layout - +\begin_inset Formula $\text{pls}$ \end_inset @@ -9297,7 +9295,7 @@ It is important that the function \end_inset is parametric in the recursion scheme -\begin_inset Formula $S^{\bullet,\bullet}$ +\begin_inset Formula $S$ \end_inset and the result type @@ -9306,7 +9304,7 @@ It is important that the function (which is not required to be a monoid). Different recursion schemes -\begin_inset Formula $S^{\bullet,\bullet}$ +\begin_inset Formula $S$ \end_inset may be used to define lists, trees, and other recursive data types. 
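To make this concrete, here is a short sketch in Scala (the names S, foldT2, and countLeaves are our own; the chapter's code may differ in details). The recursion scheme S[A, R] = Either[A, (R, R)] describes one layer of the tree type T2, and a fold replaces each layer by a value of the chosen result type:

sealed trait T2[A]
final case class Leaf[A](a: A) extends T2[A]
final case class Branch[A](left: T2[A], right: T2[A]) extends T2[A]

type S[A, R] = Either[A, (R, R)]   // The recursion scheme of T2: S[A, R] = A + R x R.

// A fold replaces each layer S[A, R] by a result value of type R.
def foldT2[A, R](alg: S[A, R] => R)(t: T2[A]): R = t match {
  case Leaf(a)             => alg(Left(a))
  case Branch(left, right) => alg(Right((foldT2(alg)(left), foldT2(alg)(right))))
}

// Example: count the leaves of a tree; only the "algebra" of type S[A, Int] => Int is specific.
def countLeaves[A](t: T2[A]): Int = foldT2[A, Int] {
  case Left(_)       => 1
  case Right((x, y)) => x + y
} (t)
// countLeaves(Branch(Leaf("a"), Branch(Leaf("b"), Leaf("c")))) == 3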
@@ -9316,7 +9314,7 @@ It is important that the function will work for all those data types, as long as we have the recursion scheme -\begin_inset Formula $S^{\bullet,\bullet}$ +\begin_inset Formula $S$ \end_inset and the corresponding lifting function ( @@ -9386,7 +9384,7 @@ List \end_inset type and the corresponding recursion scheme -\begin_inset Formula $S^{A,R}$ +\begin_inset Formula $S$ \end_inset may be defined by: @@ -9454,13 +9452,13 @@ TreeN \end_inset For rose trees, the recursion scheme -\begin_inset Formula $S^{A,R}$ +\begin_inset Formula $S$ \end_inset is itself a recursively defined type because it uses the non-empty list (NEL). This is not a problem: -\begin_inset Formula $S^{A,R}$ +\begin_inset Formula $S$ \end_inset is still polynomial, which guarantees that any value of type @@ -9545,7 +9543,7 @@ Fix \end_inset that takes the recursion scheme -\begin_inset Formula $S^{\bullet,\bullet}$ +\begin_inset Formula $S$ \end_inset as a type parameter: @@ -10164,8 +10162,8 @@ It remains to assure that the recursion reaches the base case with every \end_inset . - Note that -\begin_inset Formula $S^{A,R}$ + This +\begin_inset Formula $S$ \end_inset is non-polynomial due to the function type @@ -10268,7 +10266,7 @@ f \begin_layout Standard It seems that we need to restrict recursion schemes -\begin_inset Formula $S^{A,R}$ +\begin_inset Formula $S$ \end_inset to @@ -10277,18 +10275,19 @@ polynomial \emph default bifunctors. Such -\begin_inset Formula $S^{A,R}$ +\begin_inset Formula $S$ \end_inset - will define recursive polynomial functors + will define recursive polynomial data types \begin_inset Formula $L^{A}$ \end_inset - that support no delayed evaluation of stored values of type + that are eager data structures (supporting no delayed evaluation of stored + values of type \begin_inset Formula $A$ \end_inset -. +). So, any value \begin_inset listings inline true @@ -10329,11 +10328,11 @@ fold(f)(x) \end_layout \begin_layout Standard -Rather than working with the general +Instead of working with the general function \begin_inset Formula $\text{fold}_{S}$ \end_inset - function and redefine all recursive types via + and defining all recursive types via \begin_inset listings inline true status open @@ -10404,7 +10403,7 @@ status open \begin_layout Plain Layout -TreeN[A] +TreeN \end_layout \end_inset @@ -10421,7 +10420,7 @@ status open \begin_layout Plain Layout -TreeN[A] +TreeN \end_layout \end_inset @@ -10514,7 +10513,7 @@ status open \begin_layout Plain Layout -TreeN[A] +TreeN \end_layout \end_inset @@ -10554,7 +10553,7 @@ foldTreeN \end_inset - now allows us to implement that computation: + now allows us to implement that: \begin_inset listings inline false status open @@ -10571,8 +10570,12 @@ def maxBranching[A]: TreeN[A] => Int = foldTreeN[A, Int] { \begin_layout Plain Layout - case Right(nel) => math.max(nel.max, nel.length) // NEL must have `max` - and `length` methods. + case Right(nel) => math.max(nel.max, nel.length) +\end_layout + +\begin_layout Plain Layout + +// Assuming that we implemented `max` and `length` methods for NEL. 
\end_layout \begin_layout Plain Layout @@ -10630,7 +10633,7 @@ unfolding \end_inset method that uses an arbitrary recursion scheme -\begin_inset Formula $S^{A,R}$ +\begin_inset Formula $S$ \end_inset and an arbitrary function of type @@ -10673,12 +10676,13 @@ unfold \end_inset - operation generalizes that computation to an arbitrary recursive type -\begin_inset Formula $L^{A}$ + operation generalizes that computation to an arbitrary recursive type construct +or +\begin_inset Formula $L$ \end_inset whose recursion scheme -\begin_inset Formula $S^{A,R}$ +\begin_inset Formula $S$ \end_inset is given. @@ -10829,7 +10833,7 @@ status open \begin_layout Plain Layout -List[A] +List \end_layout \end_inset @@ -11045,7 +11049,7 @@ None \end_inset and -\begin_inset Formula $z^{\prime}=2*z$ +\begin_inset Formula $z^{\prime}=z*2$ \end_inset . @@ -11060,8 +11064,12 @@ status open \begin_layout Plain Layout -def f(n: Long): Long => Option[(Long, Long)] = { z => if (z >= n) None else - Some((z, z * 2)) } +def f(n: Long): Long => Option[(Long, Long)] = +\end_layout + +\begin_layout Plain Layout + + { z => if (z >= n) None else Some((z, z * 2)) } \end_layout \end_inset @@ -11690,7 +11698,7 @@ fullBinaryTree \begin_inset Formula $2$ \end_inset -: +, which we expect to be \begin_inset space ~ \end_inset @@ -11717,7 +11725,7 @@ Tree[ [ 0 1 ] [ 2 3 ] ] \end_inset - +: \begin_inset listings inline false status open @@ -11869,7 +11877,7 @@ Tree[ 3 [ [ 1 0 ] 2 ] ] \end_inset - , while + and \begin_inset listings inline true status open @@ -12591,14 +12599,22 @@ val f: Z => Either[Int, (Z, Z)] = { \begin_layout Plain Layout - case Z(n, false) if n > 0 && n % 2 == 0 => Right((Z(n - 1, false), Z(n, - true))) + case Z(n, false) if n > 0 && n % 2 == 0 => +\end_layout + +\begin_layout Plain Layout + + Right((Z(n - 1, false), Z(n, true))) +\end_layout + +\begin_layout Plain Layout + + case Z(n, false) if n > 0 && n % 2 == 1 => \end_layout \begin_layout Plain Layout - case Z(n, false) if n > 0 && n % 2 == 1 => Right((Z(n, true), Z(n - 1, - false))) + Right((Z(n, true), Z(n - 1, false))) \end_layout \begin_layout Plain Layout @@ -12760,16 +12776,6 @@ co-induction It is related to mathematical induction but is significantly different from the reasoning required to write the code for a folding operation (which is directly modeled on induction). - In co-induction, the base cases are not at the beginning of the computation - but -\begin_inset Quotes eld -\end_inset - -in the future -\begin_inset Quotes erd -\end_inset - -. Note that \begin_inset listings inline true @@ -12829,6 +12835,17 @@ unfold(f)(z) \end_inset to stop the unfolding. + One could say that the base cases in co-induction are not at the beginning + of the computation but +\begin_inset Quotes eld +\end_inset + +in the future +\begin_inset Quotes erd +\end_inset + +. + \end_layout \begin_layout Standard @@ -12881,7 +12898,7 @@ unfold(f) \end_inset - will enter an infinite loop trying to constructing a tree of infinite size. + will enter an infinite loop trying to construct a tree of infinite size. This will, of course, fail since data structures in a computer cannot have infinite size. \end_layout @@ -12912,9 +12929,8 @@ infinite data types \end_inset - which is misleading since no infinite amount of data is involved. - A function may be called many times and produce any number of result values, - but it does not mean that a function stores an infinite amount of data. + which is misleading since only a finite amount of data is ever stored in + memory. 
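As a minimal self-contained sketch of this style of computation (the names unfoldList and doubling are our own; the chapter's general unfold is parameterized by a recursion scheme), we may specialize the unfolding to ordinary lists:

// The seed z is transformed step by step; returning None stops the unfolding,
// so the "base case" is reached during the computation rather than at its start.
def unfoldList[A, Z](f: Z => Option[(A, Z)])(z: Z): List[A] = f(z) match {
  case None          => Nil
  case Some((a, z1)) => a :: unfoldList(f)(z1)
}

// Example: the doubling sequence discussed above, stopping once the seed reaches n.
def doubling(n: Long): List[Long] =
  unfoldList[Long, Long] { z => if (z >= n) None else Some((z, z * 2)) } (1L)
// doubling(100L) == List(1L, 2L, 4L, 8L, 16L, 32L, 64L)

If the given function never returns None, unfoldList does not terminate, which is the same failure mode as the infinite tree described above.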
\end_layout \begin_layout Standard @@ -13045,8 +13061,8 @@ def unfoldUT[A, Z](f: Z => S[A, Z])(init: Z): UT[A] = f(init) match { \begin_layout Plain Layout - (unfoldUT(f)(z1), unfoldUT(f)(z2)) // `unfold` will delay the evaluation - of further branches. + (unfoldUT(f)(z1), unfoldUT(f)(z2)) // `unfoldUT` will delay the + evaluation of further branches. \end_layout \begin_layout Plain Layout @@ -13430,14 +13446,14 @@ zipWithDepth \end_inset ) parameterized by an arbitrary recursion scheme -\begin_inset Formula $S^{\bullet,\bullet}$ +\begin_inset Formula $S$ \end_inset and an arbitrary functor -\begin_inset Formula $F^{\bullet}$ +\begin_inset Formula $F$ \end_inset - (not necessarily applicative). + (not necessarily applicative!). \end_layout \begin_layout Standard @@ -14246,7 +14262,7 @@ status open \begin_layout Plain Layout -T2[A] +T2 \end_layout \end_inset @@ -14306,20 +14322,20 @@ zipWithIndex \end_inset - for the type + for \begin_inset listings inline true status open \begin_layout Plain Layout -T2[A] +T2 \end_layout \end_inset . - Verify that + Verify via tests that \begin_inset listings inline true status open @@ -14589,7 +14605,7 @@ status open \begin_layout Plain Layout -T2[A] +T2 \end_layout \end_inset @@ -14666,7 +14682,7 @@ status open \begin_layout Plain Layout -T3[A] +T3 \end_layout \end_inset @@ -14685,7 +14701,7 @@ noprefix "false" \end_inset -, define a recursion scheme and implement a specialized version of +, write a recursion scheme and implement a specialized version of \begin_inset listings inline true status open @@ -14812,7 +14828,8 @@ toList \end_inset . - With suitable naturality laws, these methods are equivalent. + It turns out that all those methods are equivalent when suitable naturality + laws hold. \end_layout \begin_layout Subsection @@ -15723,7 +15740,7 @@ reduceE \end_inset - satisfies the monoidal naturality law. + obeys the monoidal naturality law. \end_layout @@ -15771,7 +15788,7 @@ noprefix "false" \end_inset ) follow from parametricity. - So, these laws will hold automatically when the code of + So, those laws will hold automatically when the code of \begin_inset listings inline true status open @@ -15784,8 +15801,7 @@ foldFn \end_inset is fully parametric. - However, formulating these special laws allows us to prove the equivalence - of + Formulating those special laws allows us to prove the equivalence of \begin_inset listings inline true status open @@ -18091,7 +18107,7 @@ toList \end_inset - are equivalent if expressed through each other as: + are equivalent if expressed via each other as: \begin_inset Formula \begin{align} & \text{toList}:L^{A}\rightarrow\text{List}^{A}\quad,\quad\quad\text{toList}=\text{pu}_{\text{List}}^{\uparrow L}\bef\text{reduceE}^{\text{List}^{A}}\quad,\label{eq:toList-via-reduceE}\\ @@ -18322,7 +18338,7 @@ The bottom-row expression in the last matrix is then rewritten to: \end_inset -Using the monad morphism identity law ( +Using the monoid morphism identity law ( \begin_inset Formula $f(e_{M})=e_{N}$ \end_inset @@ -18711,7 +18727,7 @@ M \end_inset - to the monoid type + to the monoidal type \begin_inset listings inline true status open @@ -19032,7 +19048,7 @@ A . Certainly, programmers expect this property to hold. - But the main intent of + But the main purpose of \begin_inset listings inline true status open @@ -19053,7 +19069,7 @@ toList \end_inset and store them in a list. - Naturality laws do not express this intent. + Naturality laws do not express that purpose. 
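To show the intended behavior in code (a sketch using a minimal Monoid trait of our own as a stand-in for the typeclass used in this book), toList may be computed as a foldMap into the List monoid, which keeps every value and preserves their order:

trait Monoid[M] { def empty: M; def combine(x: M, y: M): M }

implicit def listMonoid[X]: Monoid[List[X]] = new Monoid[List[X]] {
  def empty: List[X] = Nil
  def combine(x: List[X], y: List[X]): List[X] = x ++ y
}

// foldMap written by hand for the simple functor L[A] = A x A:
def foldMapPair[A, M](p: (A, A))(f: A => M)(implicit m: Monoid[M]): M =
  m.combine(f(p._1), f(p._2))

// toList is foldMap with f = (a => List(a)) and the List monoid:
def toListPair[A](p: (A, A)): List[A] = foldMapPair(p)(a => List(a))
// toListPair((1, 2)) == List(1, 2)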
\end_layout \begin_layout Standard @@ -19258,7 +19274,7 @@ toList \end_inset -? Such a law could state that + stating that \begin_inset listings inline true status open @@ -19282,8 +19298,7 @@ p \end_inset -. - If +? If \begin_inset Formula $L$ \end_inset @@ -19389,12 +19404,12 @@ toList \end_inset to be pointed. - So, in general we cannot inject values into + So, in general we cannot inject values into a data structure of type \begin_inset Formula $L^{A}$ \end_inset in a way that is guaranteed to preserve information. - This prevents us from formulating an identity law for + This prevents us from formulating an identity law of \begin_inset listings inline true status open @@ -19452,8 +19467,8 @@ lifting \end_inset are the functor laws (identity and composition). - Could we apply this approach to the folding operations? The type signature - of the + Could we apply that approach to derive laws for folding operations? The + type signature of the \begin_inset listings inline true status open @@ -19526,7 +19541,7 @@ foldMap \begin_inset Formula $M$ \end_inset - is a fixed type) does not support composition since we cannot compose + is a fixed type) does not support composition: we cannot compose \begin_inset Formula $A\rightarrow M$ \end_inset @@ -19546,9 +19561,8 @@ lifting \begin_inset Quotes erd \end_inset - approach also fails to yield a suitable law for folding operations. - However, we will see below that lifting-like laws may be imposed on the - + approach fails to give us laws for folding operations. + However, we will see below that lifting-like laws may be imposed on \begin_inset listings inline true status open @@ -19560,7 +19574,7 @@ traverse \end_inset - operation, whose type signature is a generalization of that of +, whose type signature is a generalization of that of \begin_inset listings inline true status open @@ -19652,7 +19666,7 @@ status open \begin_layout Plain Layout -Option[A] +Option \end_layout \end_inset @@ -19664,7 +19678,7 @@ status open \begin_layout Plain Layout -List[A] +List \end_layout \end_inset @@ -19701,28 +19715,12 @@ polynomial functor!recursive ). Those functors represent data structures that store a finite number of - values of type -\begin_inset listings -inline true -status open - -\begin_layout Plain Layout - -A -\end_layout - -\end_inset - -. - It is clear what it means to extract + values. + It is clear what it means to \begin_inset Quotes eld \end_inset -all values of type -\begin_inset Formula $A$ -\end_inset - - +extract all values \begin_inset Quotes erd \end_inset @@ -20039,7 +20037,7 @@ noprefix "false" \end_layout \begin_layout Standard -Polynomial functors are built via the five standard type constructions (Table +Polynomial functors are built via five standard type constructions (Table \begin_inset space ~ \end_inset @@ -20123,7 +20121,15 @@ The identity functor . So, we simply define -\begin_inset Formula $\text{toList}\triangleq\text{id}$ +\begin_inset listings +inline true +status open + +\begin_layout Plain Layout + +toList(x) = List(x) +\end_layout + \end_inset . @@ -20174,7 +20180,7 @@ def toList_M[A]: ((K[A], L[A])) => List[A] = { case (p, q) => toList_K(p) \begin_inset Formula \[ -\text{toList}_{M}:K^{A}\times L^{A}\rightarrow\text{List}^{A}\quad,\quad\quad\text{toList}_{M}\triangleq p^{:K^{A}}\times q^{:L^{A}}\rightarrow\text{toList}_{K}(p)\,\pplus\,\text{toList}_{L}(q)\quad. 
+\text{toList}_{M}:K^{A}\times L^{A}\rightarrow\text{List}^{A}\quad,\quad\text{toList}_{M}\triangleq p^{:K^{A}}\times q^{:L^{A}}\rightarrow\text{toList}_{K}(p)\,\pplus\,\text{toList}_{L}(q)\quad. \] \end_inset @@ -20300,12 +20306,12 @@ toList \end_inset , where the recursion scheme -\begin_inset Formula $S^{\bullet,\bullet}$ +\begin_inset Formula $S$ \end_inset is a bifunctor that is itself foldable. A bifunctor -\begin_inset Formula $S^{\bullet,\bullet}$ +\begin_inset Formula $S$ \end_inset is @@ -20420,7 +20426,7 @@ toList \end_inset recursively to the second type parameter of -\begin_inset Formula $S^{\bullet,\bullet}$ +\begin_inset Formula $S$ \end_inset . @@ -20642,7 +20648,7 @@ status open \begin_layout Plain Layout -L[A] = List[A] +List \end_layout \end_inset @@ -20668,7 +20674,7 @@ status open \begin_layout Plain Layout -L[A] +L \end_layout \end_inset @@ -21390,15 +21396,7 @@ L^{A}\ar[r]\sb(0.5){\text{trav}_{L}^{F,A,B}(g)} & F^{L^{B}} & & F^{L^{C}} \begin_layout Standard We also need a naturality law with respect to the parameter -\begin_inset listings -inline true -status open - -\begin_layout Plain Layout - -F -\end_layout - +\begin_inset Formula $F$ \end_inset , which is a type constructor required to be an applicative functor. @@ -21415,15 +21413,7 @@ traverse \end_inset may not inspect the type of -\begin_inset listings -inline true -status open - -\begin_layout Plain Layout - -F -\end_layout - +\begin_inset Formula $F$ \end_inset directly and make decisions based on that type. @@ -21647,7 +21637,21 @@ applicative morphism \series bold applicative morphisms \series default - (compare to monoid morphisms defined in Section + (first defined in Section +\begin_inset space ~ +\end_inset + + +\begin_inset CommandInset ref +LatexCommand ref +reference "subsec:Applicative-morphisms" +plural "false" +caps "false" +noprefix "false" + +\end_inset + +; compare to monoid morphisms defined in Section \begin_inset space ~ \end_inset @@ -21889,7 +21893,7 @@ traverse \begin_inset Formula $L^{A}\triangleq A\times A$ \end_inset - using an applicative morphism between applicative functors + using an applicative morphism between \begin_inset Formula $G^{A}\triangleq\bbnum 1+A$ \end_inset @@ -22052,11 +22056,7 @@ Next, we verify that \end_inset is an applicative morphism. - To check the identity law of -\begin_inset Formula $g$ -\end_inset - -: + To check the identity law: \begin_inset Formula \[ \text{wu}_{G}\triangleright g=\,\begin{array}{|cc|} @@ -23291,15 +23291,7 @@ traverse \end_inset operation needs to be lifted to -\begin_inset listings -inline true -status open - -\begin_layout Plain Layout - -F -\end_layout - +\begin_inset Formula $F$ \end_inset for the types to match. @@ -23307,7 +23299,7 @@ F \begin_inset Formula $F^{G^{L^{C}}}$ \end_inset -) looks like a +) looks like a result of a \begin_inset listings inline true status open @@ -23904,17 +23896,17 @@ implicit val ZippableF: Zippable[F] = new Zippable[F] { \begin_layout Plain Layout -val f: Int => S[Int] = i => S(j => (i + j, i + j)) +val f1: Int => S[Int] = i => S(j => (i + j, i + j)) \end_layout \begin_layout Plain Layout -val g: Int => S[Int] = f +val f2: Int => S[Int] = f1 \end_layout \begin_layout Plain Layout -val ff: Int => F[Int] = i => F(f(i).map(g)) +val f1f2: Int => F[Int] = i => F(f1(i).map(f2)) \end_layout \begin_layout Plain Layout @@ -25280,7 +25272,7 @@ bifunctor \end_inset -\begin_inset Formula $S^{A,R}$ +\begin_inset Formula $S$ \end_inset . @@ -25302,8 +25294,8 @@ bitraversable \end_inset . 
- It is not enough if -\begin_inset Formula $S^{A,R}$ + It is not sufficient if +\begin_inset Formula $S$ \end_inset is traversable with respect to each type parameter separately. @@ -25311,7 +25303,7 @@ bitraversable \begin_layout Standard A bifunctor -\begin_inset Formula $S^{A,B}$ +\begin_inset Formula $S$ \end_inset is called @@ -25433,15 +25425,11 @@ noprefix "false" \end_inset will show that all polynomial bifunctors are bitraversable. - So, we are free to use any recursion scheme -\begin_inset Formula $S^{A,R}$ -\end_inset - - as long as + So, we are free to use any polynomial bifunctor \begin_inset Formula $S$ \end_inset - is a polynomial bifunctor. + as a recursion scheme. For now, we assume that a lawful \begin_inset Formula $\text{seq2}_{S}$ \end_inset @@ -27425,11 +27413,11 @@ Prove that \end_inset is a lawful traversable if -\begin_inset Formula $M^{\bullet}$ +\begin_inset Formula $M$ \end_inset and -\begin_inset Formula $N^{\bullet}$ +\begin_inset Formula $N$ \end_inset are traversable functors. @@ -27691,7 +27679,7 @@ Given a monad \emph default -\begin_inset Formula $M^{\bullet}$ +\begin_inset Formula $M$ \end_inset and a monoid morphism @@ -28517,7 +28505,7 @@ zipWithIndex \begin_inset Formula $\text{zwi}_{L}$ \end_inset - for brevity) for any traversable functor +) for any traversable functor \begin_inset Formula $L$ \end_inset @@ -30924,7 +30912,7 @@ For any traversable functor \begin_inset Formula $L$ \end_inset -, any type +, any non-void type \begin_inset Formula $A$ \end_inset @@ -31070,7 +31058,7 @@ Now we define \begin_inset Formula $\text{Int}\rightarrow A$ \end_inset - such that + such that: \begin_inset Formula \[ t_{p}(i)\triangleq a_{i}\quad,\quad i=1,2,...,n\quad. @@ -31212,7 +31200,7 @@ sequence \begin_layout Standard Suppose that -\begin_inset Formula $L^{A}$ +\begin_inset Formula $L$ \end_inset is a contrafunctor. @@ -31256,7 +31244,7 @@ is required for . We note that -\begin_inset Formula $\text{seq}_{L}$ +\begin_inset Formula $\text{seq}_{L}^{F,A}$ \end_inset is covariant in @@ -31264,7 +31252,7 @@ is required for \end_inset because -\begin_inset Formula $L^{\bullet}$ +\begin_inset Formula $L$ \end_inset is contravariant. @@ -31410,11 +31398,11 @@ The function \end_inset -effects. 
- So, the function -\begin_inset Formula $\text{seq}_{L}$ + So, +\begin_inset Formula $\text{seq}_{L}(f)$ \end_inset - will never obtain any information that + could never use any information that \begin_inset Formula $f$ \end_inset @@ -31541,7 +31529,7 @@ perfect-shaped tree \end_inset -\begin_inset Formula $\text{PT}^{A}$ +\begin_inset Formula $\text{PT}$ \end_inset is: @@ -31557,7 +31545,7 @@ This type equation is not of the form \end_inset because the recursive use of -\begin_inset Formula $\text{PT}^{\bullet}$ +\begin_inset Formula $\text{PT}$ \end_inset contains a nontrivial type expression ( @@ -31583,7 +31571,7 @@ noprefix "false" \end_inset -) via a recursion scheme, we may introduce an additional functor +) via a recursion scheme, we introduce an additional functor \begin_inset Formula $P$ \end_inset @@ -31727,15 +31715,15 @@ noprefix "false" \begin_layout Standard Given a bitraversable bifunctor -\begin_inset Formula $S^{\bullet,\bullet}$ +\begin_inset Formula $S$ \end_inset and a traversable functor -\begin_inset Formula $P^{\bullet}$ +\begin_inset Formula $P$ \end_inset , define a nested recursive type constructor -\begin_inset Formula $L^{\bullet}$ +\begin_inset Formula $L$ \end_inset by: @@ -31895,7 +31883,7 @@ sequence \begin_inset Formula \begin{align*} \text{left-hand side}:\quad & \big(\overline{\text{seq}}_{L}^{F,P^{G^{A}}}\big)^{\uparrow S}\bef\gunderline{\text{seq2}_{S}^{F,G^{A},L^{P^{G^{A}}}}\bef\big((\text{seq}_{P}^{G,A})^{\uparrow L}\bef\overline{\text{seq}}_{L}^{G,P^{A}}\big)^{\uparrow S\uparrow F}}\\ -\text{naturality law of }\text{seq2}_{S}:\quad & =\gunderline{\big(\overline{\text{seq}}_{L}^{F,P^{G^{A}}}\big)^{\uparrow S}\bef\big((\text{seq}_{P}^{G,A})^{\uparrow L}\bef\overline{\text{seq}}_{L}^{G,P^{A}}\big)^{\uparrow F\uparrow S}}\bef\text{seq2}_{S}^{F,G^{A},G^{L^{P^{A}}}}\\ +\text{naturality of }\text{seq2}_{S}:\quad & =\gunderline{\big(\overline{\text{seq}}_{L}^{F,P^{G^{A}}}\big)^{\uparrow S}\bef\big((\text{seq}_{P}^{G,A})^{\uparrow L}\bef\overline{\text{seq}}_{L}^{G,P^{A}}\big)^{\uparrow F\uparrow S}}\bef\text{seq2}_{S}^{F,G^{A},G^{L^{P^{A}}}}\\ \text{composition under }^{\uparrow S}:\quad & =\big(\gunderline{\overline{\text{seq}}_{L}^{F,P^{G^{A}}}\bef(\text{seq}_{P}^{G,A})^{\uparrow L\uparrow F}}\big)^{\uparrow S}\bef\big(\overline{\text{seq}}_{L}^{G,P^{A}}\big)^{\uparrow F\uparrow S}\bef\text{seq2}_{S}^{F,G^{A},G^{L^{P^{A}}}}\quad. \end{align*} @@ -31932,8 +31920,8 @@ As an advanced example of a nested traversable functor, we will derive a status open \begin_layout Plain Layout -This and other advanced examples of nested recursive types are explained - in the paper +This and other advanced examples of designing and using nested recursive + types are explained in the paper \begin_inset Quotes eld \end_inset @@ -32187,8 +32175,8 @@ Sq[A] \end_inset , and so on). - To define an infinite disjunctive type in a program of finite size, we - need to use recursion at type level. + To define an infinite disjunctive type, we normally use recursion at type + level. In a mathematical sense, this recursion will be induction on the size of the matrix. So, let us introduce the size of the matrix as an extra @@ -32602,7 +32590,8 @@ zip \begin_inset Formula $N$ \end_inset -.Note that the type +. 
+ Note that the type \begin_inset listings inline true status open @@ -33463,7 +33452,7 @@ status open \begin_layout Plain Layout -Sq[A] +Sq \end_layout \end_inset diff --git a/sofp-src/lyx/sofp-typeclasses.lyx b/sofp-src/lyx/sofp-typeclasses.lyx index 74f434871..5455ef7d8 100644 --- a/sofp-src/lyx/sofp-typeclasses.lyx +++ b/sofp-src/lyx/sofp-typeclasses.lyx @@ -563,7 +563,7 @@ status open \begin_layout Plain Layout -F[_] +F \end_layout \end_inset @@ -580,7 +580,7 @@ map \end_inset - method, i.e., to be a functor. + method. We can implement \begin_inset listings inline true @@ -609,8 +609,8 @@ F \end_layout \begin_layout Standard -What would that constraint be like? Consider an ordinary function with no - type parameters, e.g.: +What would that constraint look like? For motivation, consider an ordinary + function with no type parameters, e.g.: \begin_inset listings inline false status open @@ -664,7 +664,7 @@ f \end_layout \begin_layout Standard -Using a similar syntax for +Scala supports a similar syntax for \emph on type \emph default @@ -672,7 +672,8 @@ type \emph on parameters \emph default -, we write the type signatures for +. + We may write the type signatures for \begin_inset listings inline true status open @@ -713,7 +714,7 @@ def inject[F[_]: Functor, A, B](a: A, f: F[B]): F[(A, B)] \end_inset -Scala uses the syntax +The syntax \begin_inset listings inline true status open @@ -725,7 +726,7 @@ status open \end_inset - to constrain the type parameter + constrains the type parameter \begin_inset listings inline true status open @@ -746,7 +747,7 @@ fractional numeric \end_inset types. - Similarly, + The syntax \begin_inset listings inline true status open @@ -758,19 +759,19 @@ status open \end_inset - requires the type constructor + means that the type constructor \begin_inset listings inline true status open \begin_layout Plain Layout -F[_] +F \end_layout \end_inset - to be a functor. + must be a functor. Applying \begin_inset listings inline true @@ -797,14 +798,40 @@ inject to types that do not obey those constraints will be a type error detected at compile time. + (Here, +\begin_inset listings +inline true +status open + +\begin_layout Plain Layout + +Fractional +\end_layout + +\end_inset + + and +\begin_inset listings +inline true +status open + +\begin_layout Plain Layout + +Functor +\end_layout + +\end_inset + + are custom types that need to be implemented in a special way, as we will + show.) \end_layout \begin_layout Standard In these examples, we are restricting a type parameter to a subset of possible types, because only types from that subset have certain properties that we need. - A subset of types, together with the required properties that those types - must satisfy, is called a + A subset of types, together with the required properties those types must + satisfy, is called a \series bold typeclass \series default @@ -876,8 +903,8 @@ Fractional \begin_layout Standard This chapter focuses on defining and using typeclasses and on understanding their properties. - We will see in detail how to implement typeclasses in Scala and use the - syntax such as + We will see in detail how to implement typeclasses in Scala and to enable + the syntax \begin_inset listings inline true status open @@ -887,6 +914,18 @@ status open [T: Fractional] \end_layout +\end_inset + + and +\begin_inset listings +inline true +status open + +\begin_layout Plain Layout + +[F[_]: Functor] +\end_layout + \end_inset . 
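For orientation, here is a minimal sketch (the trait Functor below is a stand-in; the full typeclass is developed later in this chapter) showing that the constraint F[_]: Functor is shorthand for an implicit argument of type Functor[F]:

trait Functor[F[_]] { def map[A, B](fa: F[A])(f: A => B): F[B] }

implicit val seqFunctor: Functor[Seq] = new Functor[Seq] {
  def map[A, B](fa: Seq[A])(f: A => B): Seq[B] = fa.map(f)
}

// The constraint `F[_]: Functor` desugars into an implicit argument supplying `map`:
def inject[F[_]: Functor, A, B](a: A, f: F[B]): F[(A, B)] =
  implicitly[Functor[F]].map(f)(b => (a, b))

// inject("abc", Seq(1, 2, 3)) == Seq(("abc", 1), ("abc", 2), ("abc", 3))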
@@ -921,7 +960,7 @@ s \end_inset - is clear in this type signature, + is clear in this type signature: \begin_inset listings inline false status open @@ -955,7 +994,7 @@ frac.div(s.sum, frac.fromInt(s.length)) \end_inset -We can view +We may view \begin_inset listings inline true status open @@ -996,7 +1035,7 @@ s \end_inset ) and returns a value. - We can also view + We may also view \begin_inset listings inline true status open @@ -1041,7 +1080,7 @@ Seq[T] => T \end_inset . - We may call functions of this kind + We call functions of this kind \series bold type-to-value \series default @@ -1055,7 +1094,11 @@ type-to-value function \end_inset - functions (TVF). + +\series bold +functions +\series default + (TVF). The syntax for TVFs supported in Scala 3 shows this more clearly: \begin_inset listings inline false @@ -1121,12 +1164,12 @@ status open \begin_layout Plain Layout -Seq[_] +Seq \end_layout \end_inset - can be viewed as a + is a \begin_inset Index idx status open @@ -1140,7 +1183,11 @@ type-to-type function \series bold type-to-type \series default - function (TTF) because it can take any type + +\series bold +function +\series default + (TTF) because it takes any type \begin_inset listings inline true status open @@ -1152,7 +1199,7 @@ T \end_inset - and return a new type + and returns a new type \begin_inset listings inline true status open @@ -1248,7 +1295,7 @@ status open \begin_layout Plain Layout -def f(x:Int):Int +def f(x: Int): Int \end_layout \end_inset @@ -1378,7 +1425,7 @@ types that depend on values \end_inset . - An example in Scala: + An example in Scala is: \begin_inset listings inline false status open @@ -1465,8 +1512,8 @@ Int \end_layout \begin_layout Standard -We will not consider dependent types (VTFs) in this chapter because typeclasses - only require a combination of a TTF and a TVF. +We will not consider dependent types (VTFs) in this book. + Typeclasses only require a combination of a TTF and a TVF. \end_layout \begin_layout Subsection @@ -1534,7 +1581,7 @@ partial type-to-value function \end_layout \begin_layout Standard -In some situations, partial functions are safe to use. +In most situations, partial functions are unsafe to use. For instance, the following partial function \begin_inset listings inline true @@ -1880,12 +1927,12 @@ val xs: NonEmptyList[Int] = ... \begin_layout Plain Layout -val h = xs.head // .head is a total function for a NonEmptyList. +val h = xs.head // _.head is a total function for a NonEmptyList. \end_layout \end_inset -In these cases, we achieve safety by making types more strictly constrained. +In these cases, we achieve safety by making types more constrained. Similarly, partial type-to-value functions (PTVFs) become safe to use if we impose suitable typeclass constraints on the type parameters. Typeclasses can be viewed as a systematic way of using PTVFs safely. @@ -1920,7 +1967,7 @@ avg[T] \end_inset - is applied to an incorrectly chosen type parameter + is applied to an incorrectly chosen type \begin_inset listings inline true status open @@ -2146,7 +2193,15 @@ Frac[Boolean] \end_inset . 
- The Scala compiler will not detect any errors in the following code: + +\end_layout + +\begin_layout Standard +Note that the Scala compiler will +\emph on +not +\emph default + detect any errors in the following code: \begin_inset listings inline false status open @@ -2316,7 +2371,7 @@ Frac[A] \begin_inset Formula $A$ \end_inset - belonging to the set { + belonging to the set of types { \begin_inset listings inline true status open @@ -2340,8 +2395,8 @@ Double \end_inset -} of types. - We now need to define the function +}. + We need to define the function \begin_inset listings inline true status open @@ -2417,7 +2472,15 @@ Double \end_inset } is equivalent to the requirement that a value of type -\begin_inset Formula $\text{Frac}^{T}$ +\begin_inset listings +inline true +status open + +\begin_layout Plain Layout + +Frac[T] +\end_layout + \end_inset should exist. @@ -2426,7 +2489,15 @@ Double additional argument \emph default of type -\begin_inset Formula $\text{Frac}^{T}$ +\begin_inset listings +inline true +status open + +\begin_layout Plain Layout + +Frac[T] +\end_layout + \end_inset into the type signature of @@ -2521,7 +2592,7 @@ Frac[T] \end_layout \begin_layout Standard -In the Scala compiler, a typeclass instance argument such as +A typeclass instance argument such as \begin_inset listings inline true status open @@ -2578,28 +2649,19 @@ T \end_inset belongs to the type domain of the typeclass. - A typeclass instance value is the same as an + Such evidence values are called \begin_inset Quotes eld \end_inset -evidence value +typeclass instance values \begin_inset Quotes erd \end_inset - in this sense. - For brevity, we will say + or, for brevity, just \begin_inset Quotes eld \end_inset -instance value -\begin_inset Quotes erd -\end_inset - - or just -\begin_inset Quotes eld -\end_inset - -instance +typeclass instances \begin_inset Quotes erd \end_inset @@ -2631,7 +2693,7 @@ status open \begin_layout Plain Layout -Frac[_] +Frac \end_layout \end_inset @@ -2826,6 +2888,8 @@ T . In this way, we implemented the required type domain. + (However, this code does not prevent us from adding more types to that + type domain later.) \end_layout \begin_layout Standard @@ -2932,7 +2996,7 @@ Frac \end_inset - as a named tuple (case class) containing the functions + as a case class containing the functions \begin_inset listings inline true status open @@ -3028,19 +3092,17 @@ status open \begin_layout Plain Layout -def avg[T](s: Seq[T], frac: Frac[T]): T = { // Assuming `s` is a non-empty - sequence. +def avg[T](s: Seq[T], frac: Frac[T]): T = { // Assuming `s` is non-empty. \end_layout \begin_layout Plain Layout - val sum = s.reduce(frac.add) // Here, `reduce` would fail on an empty - sequence `s`. + val sum = s.reduce(frac.add) // `s.reduce` fails if `s` is empty! \end_layout \begin_layout Plain Layout - frac.intdiv(sum, s.length) // Compute `sum/length`. + frac.intdiv(sum, s.length) // Compute `sum/length`. 
\end_layout \begin_layout Plain Layout @@ -3050,8 +3112,8 @@ def avg[T](s: Seq[T], frac: Frac[T]): T = { // Assuming `s` is a non-empty \end_inset -To use this function, we need to pass an instance value corresponding to - the type +To use this function, we need to pass a typeclass instance for the type + \begin_inset listings inline true status open @@ -3204,7 +3266,7 @@ Frac \end_inset - typeclass via a + typeclass can be written by using a \begin_inset listings inline true status open @@ -3216,14 +3278,14 @@ trait \end_inset - with methods requires this code: + with methods: \begin_inset listings inline false status open \begin_layout Plain Layout -trait Frac[T] { // The trait is not `sealed`. +trait Frac[T] { // The trait should not be `sealed`. \end_layout \begin_layout Plain Layout @@ -3441,17 +3503,20 @@ implicit value \end_inset - declaration is a feature of Scala that makes values automatically available - to any function that declares an + declaration is a feature of Scala that makes arguments automatically available + to functions that declare \begin_inset Quotes eld \end_inset -implicit argument +implicit arguments \begin_inset Quotes erd \end_inset - of the same type. - Scala's syntax for implicit values is: +. +\end_layout + +\begin_layout Standard +Scala's syntax for implicit values is: \begin_inset listings inline false status open @@ -3656,7 +3721,7 @@ T \end_inset - is available, a compile-time error will occur. + is available in the current scope, a compile-time error will occur. If an implicit value of type \begin_inset listings inline true @@ -3669,7 +3734,7 @@ T \end_inset - is available in the current scope, + is available, \begin_inset listings inline true status open @@ -3720,10 +3785,11 @@ res1: String = qqq \begin_layout Standard It is an error to declare more than one implicit value of the same type - in the same scope, because implicit arguments are specified by type alone. + in the same scope, because implicit arguments are distinguished by type + alone. The Scala compiler will not be able to set implicit arguments of functions automatically when the function's outer scope contains more than one implicit - value of a required type, as in this code: + value of the same type, as in this code: \begin_inset listings inline false status open @@ -3779,11 +3845,11 @@ scala> implicitly[Int] \end_inset -But it is not an error to declare several implicit arguments of the +But it is not an error to declare several implicit \emph on -same +arguments \emph default - type, e.g.: + of the same type, e.g.: \begin_inset listings inline false status open @@ -3849,7 +3915,7 @@ y \end_inset - will be set to the same value, + will be set to the same value ( \begin_inset listings inline true status open @@ -3861,7 +3927,7 @@ z \end_inset -. +). A compile-time error will occur if no \begin_inset listings inline true @@ -3970,7 +4036,7 @@ Frac \end_inset - typeclass is implemented using implicit values like this: + typeclass is implemented using implicit values as: \begin_inset listings inline false status open @@ -4036,14 +4102,12 @@ status open \begin_layout Plain Layout -def avg[T](s: Seq[T])(implicit frac: Frac[T]): T = { // Assuming `s` is - a non-empty sequence. +def avg[T](s: Seq[T])(implicit frac: Frac[T]): T = { \end_layout \begin_layout Plain Layout - val sum = s.reduce(frac.add) // Here, `reduce` would fail on an empty - sequence `s`. 
+ val sum = s.reduce(frac.add) \end_layout \begin_layout Plain Layout @@ -4105,15 +4169,7 @@ res1: BigDecimal = 1.5 \end_layout \begin_layout Standard -Scala's -\begin_inset Quotes eld -\end_inset - -typeclass constraint -\begin_inset Quotes erd -\end_inset - - syntax is equivalent to implicit evidence arguments: the code +Scala's typeclass constraint syntax, such as this code: \begin_inset listings inline false status open @@ -4125,7 +4181,7 @@ def f[A: Typeclass1, B: Typeclass2](args...) \end_inset -is equivalent to the longer code: +is equivalent to this longer code: \begin_inset listings inline false status open @@ -4162,19 +4218,19 @@ t2 \end_inset ) of the evidence values. - Those values can be extracted via the standard function + Those values can be extracted using \begin_inset listings inline true status open \begin_layout Plain Layout -implicitly +implicitly[...] \end_layout \end_inset - because all +, because all \begin_inset listings inline true status open @@ -4187,7 +4243,7 @@ implicit \end_inset arguments are automatically made available as implicit values in the scope - of a function's body. + of the function's body. The code of \begin_inset listings inline true @@ -4200,7 +4256,7 @@ avg[T] \end_inset - can then be written as: + can then be rewritten as: \begin_inset listings inline false status open @@ -4280,7 +4336,7 @@ companion object \end_inset - of the typeclass (i.e., the Scala + of the typeclass's constructor (i.e., a Scala \begin_inset listings inline true status open @@ -4292,7 +4348,7 @@ object \end_inset - with the same name as the type constructor): + with the same name as the type constructor representing the typeclass): \begin_inset listings inline false status open @@ -4309,7 +4365,7 @@ final case class Frac[T](add: (T, T) => T, intdiv: (T, Int) => T) \begin_layout Plain Layout object Frac { // The companion object of `Frac[T]` creates some typeclass - instances as `implicit`. + instances as `implicit` values. \end_layout \begin_layout Plain Layout @@ -4367,7 +4423,7 @@ Frac \end_inset - (as well as within the companion object of the type + (as well as within the companion object of the given type \begin_inset listings inline true status open @@ -4430,7 +4486,8 @@ function \begin_inset Quotes erd \end_inset - syntax: arguments are to the right of the function as in + syntax: arguments are to the right of the function. + Examples are: \begin_inset listings inline true status open @@ -4442,7 +4499,7 @@ plus(x, y) \end_inset - or + and \begin_inset listings inline true status open @@ -4503,11 +4560,7 @@ infix method \begin_inset Quotes erd \end_inset - syntax (only applies to functions with two -\emph on -explicit -\emph default - arguments): no dot character is used. + syntax: no dot character is used. For example, \begin_inset listings inline true @@ -5478,7 +5531,7 @@ HasMetadata \end_inset - and declare instances only for + and declare instances for the types \begin_inset listings inline true status open @@ -5783,7 +5836,7 @@ Data4 \end_inset -, to the type domain, we will need to declare a new typeclass instance as +, to the type domain, we will need to create a new typeclass instance as an implicit value of type \begin_inset listings inline true @@ -6135,7 +6188,7 @@ status open \begin_layout Plain Layout -def bump[C]()(...) = ??? +def bump[C](...)(implicit ...) = ??? 
\end_layout \begin_layout Plain Layout @@ -6695,7 +6748,7 @@ implicit def defaultFunc[A]: HasDefault[A => A] = HasDefault[A => A](identity) \end_layout \begin_layout Standard -Types that have default values are also called +Types with default values are also called \series bold pointed \series default @@ -6720,7 +6773,7 @@ types!pointed \end_inset types. - This book defines the typeclass + But this book defines the typeclass \begin_inset listings inline true status open @@ -6750,7 +6803,7 @@ noprefix "false" \end_inset -) rather than for pointed types. +), not for pointed types. \end_layout \begin_layout Subsubsection @@ -6928,7 +6981,7 @@ List[A] \end_inset -. + by using a typeclass. \end_layout \begin_layout Subparagraph @@ -6958,7 +7011,7 @@ final case class Semigroup[T](combine: (T, T) => T) \end_inset The typeclass instances for the supported types are defined using a short - syntax as: + syntax: \begin_inset listings inline false status open @@ -7717,7 +7770,20 @@ Similarly, the definition \begin_inset Formula $x\oplus y\triangleq y$ \end_inset - gives an associative binary operation for a (different) semigroup. + gives an associative binary operation for a (different) semigroup, based + on the same type +\begin_inset listings +inline true +status open + +\begin_layout Plain Layout + +T +\end_layout + +\end_inset + +. \end_layout \begin_layout Standard @@ -8417,19 +8483,19 @@ def monoidOf[T: Semigroup : HasDefault]: Monoid[T] = \end_inset -We can also define this function as an +If this function is defined as \begin_inset listings inline true status open \begin_layout Plain Layout -implicit +implicit def \end_layout \end_inset -, so that every type +, every type \begin_inset listings inline true status open @@ -8729,6 +8795,10 @@ Monoid \end_layout \begin_layout Standard +\begin_inset Note Note +status open + +\begin_layout Plain Layout Are there alternative implementations of the \begin_inset listings inline true @@ -8867,6 +8937,11 @@ This implementation defines the monoid operation as , violating one of the identity laws. \end_layout +\end_inset + + +\end_layout + \begin_layout Subsection Typeclasses for type constructors \begin_inset CommandInset label @@ -8916,32 +8991,19 @@ def inject[F[_]: Functor, A, B](a: A, f: F[B]): F[(A, B)] = ??? \end_inset -The +The syntax \begin_inset listings inline true status open \begin_layout Plain Layout -\noindent -Functor -\end_layout - -\end_inset - - typeclass implementing this constraint will use the syntax -\begin_inset listings -inline true -status open - -\begin_layout Plain Layout - -Functor[F[_]] +F[_] \end_layout \end_inset - because the type parameter + indicates that the type parameter \begin_inset listings inline true status open @@ -9054,15 +9116,7 @@ itself \end_inset . - In Scala 2, we need to represent such -\begin_inset Quotes eld -\end_inset - -nested -\begin_inset Quotes erd -\end_inset - - type quantifiers by writing a + In Scala 2, we have to replace nested type quantifiers by a \begin_inset listings inline true status open @@ -9146,7 +9200,7 @@ F[_] \end_inset -, which must be itself a type constructor. +, which is a type constructor. For any type constructor \begin_inset listings inline true @@ -9353,7 +9407,7 @@ abc \begin_inset Quotes erd \end_inset -, Seq(1, 2, 3)) // An implicit Functor[Seq] must be in scope. +, Seq(1, 2, 3)) // Need an implicit Functor[Seq] here. 
\end_layout \begin_layout Plain Layout @@ -9363,7 +9417,7 @@ res0: Seq[(String, Int)] = List(("abc", 1), ("abc", 2), ("abc", 3)) \end_inset - Just like the + Similarly to the \begin_inset listings inline true status open @@ -9388,7 +9442,7 @@ Functor \end_inset typeclass does not enforce the functor laws on the implementation. - It is the programmer responsibility to verify that the laws hold. + It is the programmer's responsibility to verify that the laws hold. \end_layout \begin_layout Standard @@ -9467,7 +9521,7 @@ status open \begin_layout Plain Layout -F[_] +F \end_layout \end_inset @@ -9566,16 +9620,17 @@ The scalacheck \family default library will substitute a large number of random values into the given - assertions, Note that the laws are being tested only with a finite number - of values and with type parameters set to specific types. + assertions. + But the laws will be tested only with a finite number of values and with + type parameters set to specific types. While it is useful to test laws with \family typewriter scalacheck \family default (we might find a bug), only a symbolic derivation provides a rigorous proof that the laws hold. - One of the main themes of this book is to show how to perform symbolic - derivations efficiently. + One of the main themes of this book is to show how to perform such symbolic + derivations. \end_layout \begin_layout Section @@ -9661,7 +9716,7 @@ If so, derive the code for the new typeclass instance without guessing. \end_layout \begin_layout Itemize -Have assurance that the required typeclass laws will hold for newly constructed +Assure that the required typeclass laws will hold for newly constructed instances. \end_layout @@ -9716,7 +9771,7 @@ final case class HasMetadata[T](getName: T => String, getCount: T => Long) \end_inset -In the type notation, this type constructor is written as +In the type notation, this type constructor is written as: \begin_inset Formula \[ \text{HasMetadata}^{T}\triangleq(T\rightarrow\text{String})\times(T\rightarrow\text{Long})\quad. @@ -9803,7 +9858,7 @@ Extractor \begin_inset Formula $Z$ \end_inset -, we denote the typeclass by +, we denote the typeclass by: \begin_inset Formula \[ \text{Extractor}^{T}\triangleq T\rightarrow Z\quad. @@ -10305,7 +10360,7 @@ Extractor \end_inset type. - For example, the type expression + For example, the type expression: \begin_inset Formula \[ A\times Z+Z\times(P+Z\times Q)+B\times C\times Z @@ -11047,8 +11102,12 @@ f^{:T\rightarrow Z}\rightarrow c^{:C}\times g^{:C\rightarrow Z+P\times T}\righta \end_inset -The new typed hole has a function type. - We can write the code in matrix notation as: +The new typed hole has a function type and is filled using +\begin_inset Formula $f^{:T\rightarrow Z}$ +\end_inset + +. + Then: \begin_inset Formula \[ \text{extractorS}\triangleq f^{:T\rightarrow Z}\rightarrow c^{:C}\times g^{:C\rightarrow Z+P\times T}\rightarrow g(c)\triangleright\,\begin{array}{|c||c|} @@ -11212,8 +11271,7 @@ abc \begin_layout Plain Layout -scala> extractorT.extract(t) // The recursive definition of `extractorT` - terminates. +scala> extractorT.extract(t) // The recursion in extractorT terminates. \end_layout \begin_layout Plain Layout @@ -11271,12 +11329,12 @@ def x: Int = f(x) \begin_layout Plain Layout -scala> x // Infinite loop: f(f(f(f(...))) +scala> x // Infinite loop: f(f(f(f(...))) \end_layout \begin_layout Plain Layout -java.lang.StackOverflowError +java.lang.StackOverflowError: ... 
\end_layout \end_inset @@ -11454,7 +11512,7 @@ scala> f // Infinite loop: k(k(k(k(...))) \begin_layout Plain Layout -java.lang.StackOverflowError +java.lang.StackOverflowError: ... \end_layout \begin_layout Plain Layout @@ -11468,7 +11526,7 @@ scala> f(4) // Infinite loop: k(k(k(k(...)))(4) \begin_layout Plain Layout -java.lang.StackOverflowError +java.lang.StackOverflowError: ... \end_layout \end_inset @@ -11509,8 +11567,7 @@ geq 0$. \begin_layout Plain Layout -scala> f // We can compute f without an infinite - loop. +scala> f // We can compute f without an infinite loop. \end_layout \begin_layout Plain Layout @@ -11524,8 +11581,7 @@ res0: Int => Int = \begin_layout Plain Layout -scala> f(4) // We can compute f(4) without an - infinite loop. +scala> f(4) // We can compute f(4) without an infinite loop. \end_layout \begin_layout Plain Layout @@ -12514,8 +12570,21 @@ noprefix "false" \end_inset - below) will show which types can have a function that compares values for - equality. + on page +\begin_inset space ~ +\end_inset + + +\begin_inset CommandInset ref +LatexCommand pageref +reference "tab:Type-constructions-for-Eq" +plural "false" +caps "false" +noprefix "false" + +\end_inset + +) will show which types can have a function that compares values for equality. \end_layout \begin_layout Paragraph @@ -12615,7 +12684,7 @@ Eq \end_inset - instances and performs the comparisons + instances, which are used to perform the comparisons \begin_inset listings inline true status open @@ -13119,7 +13188,8 @@ def equals[R, A: Eq](f: R => A, g: R => A): Boolean = f(r1) === g(r1) // \end_inset The above code defines a comparison operation that violates the identity - law: there are many functions + law. + There are many unequal functions \begin_inset listings inline true status open @@ -13143,7 +13213,7 @@ g \end_inset - that will give different results for arguments not equal to + that will give the same results when applied to \begin_inset Formula $r_{1}$ \end_inset @@ -13196,7 +13266,7 @@ Eq \end_inset . - This process will end only if the type + This procedure will end only if the type \begin_inset Formula $R$ \end_inset @@ -13352,42 +13422,38 @@ By definition, two functions (say, \end_inset . - Functions have no -\begin_inset listings -inline true + As a rule, we need to use +\begin_inset Index idx status open \begin_layout Plain Layout - -Eq +symbolic calculations \end_layout \end_inset - typeclass instances, so we cannot write code that checks at run time whether - -\begin_inset Formula $f=g$ -\end_inset -. - As a rule, we need to use \emph on +symbolic calculations +\emph default + if we want to prove equality between functions. + We cannot write code that tests at run time whether +\begin_inset Formula $f=g$ +\end_inset -\begin_inset Index idx +, because functions have no +\begin_inset listings +inline true status open \begin_layout Plain Layout -\emph on -symbolic calculations +Eq \end_layout \end_inset -symbolic calculations -\emph default - if we want to prove equality between functions. - + typeclass instances. \end_layout \begin_layout Paragraph @@ -13442,7 +13508,7 @@ Consider a recursive polynomial type \end_inset defined using a polynomial functor -\begin_inset Formula $S^{\bullet}$ +\begin_inset Formula $S$ \end_inset : @@ -13454,7 +13520,7 @@ T\triangleq S^{T}\quad. 
\end_inset The functor -\begin_inset Formula $S^{\bullet}$ +\begin_inset Formula $S$ \end_inset may use other fixed types that have @@ -13494,7 +13560,7 @@ status open \begin_layout Plain Layout -eqS +eqS[A] \end_layout \end_inset @@ -13515,7 +13581,7 @@ eqS of type: \begin_inset Formula \[ -\text{eqS}:\forall A.\,\text{Eq}^{A}\rightarrow\text{Eq}^{S^{A}}\quad. +\text{eqS}^{A}:\text{Eq}^{A}\rightarrow\text{Eq}^{S^{A}}\quad. \] \end_inset @@ -13552,14 +13618,14 @@ Eq \begin_inset Formula $A$ \end_inset - and of all other types that + and (possibly) of some other types that \begin_inset Formula $S^{A}$ \end_inset depends on. The product and co-product constructions guarantee that it is always possible to implement this function for a polynomial functor -\begin_inset Formula $S^{\bullet}$ +\begin_inset Formula $S$ \end_inset . @@ -13698,7 +13764,7 @@ times A $. \begin_layout Plain Layout - eqEither[Either[Int, A], (Int, (A, A))] // Instance for $ + eqEither[Either[Int, A], (Int, (A, A))] // For $ \backslash color{dkgreen} \backslash @@ -13764,7 +13830,7 @@ bbnum 0)^{ \backslash scriptscriptstyle:T}+ \backslash -bbnum 0$. +bbnum 0$ \end_layout \begin_layout Plain Layout @@ -14588,6 +14654,20 @@ plural "false" caps "false" noprefix "false" +\end_inset + + on page +\begin_inset space ~ +\end_inset + + +\begin_inset CommandInset ref +LatexCommand pageref +reference "tab:Type-constructions-for-semigroup" +plural "false" +caps "false" +noprefix "false" + \end_inset . @@ -14814,7 +14894,7 @@ status open \begin_layout Plain Layout -semigroupPair +semigroupPair[A, B] \end_layout \end_inset @@ -14822,7 +14902,7 @@ semigroupPair : \begin_inset Formula \[ -\text{semigroupPair}:\forall(A,B).\,\text{Semigroup}^{A}\times\text{Semigroup}^{B}\rightarrow\text{Semigroup}^{A\times B}\quad. +\text{semigroupPair}^{A,B}:\text{Semigroup}^{A}\times\text{Semigroup}^{B}\rightarrow\text{Semigroup}^{A\times B}\quad. \] \end_inset @@ -14830,7 +14910,7 @@ semigroupPair Writing out the type expressions, we get the type signature: \begin_inset Formula \[ -\text{semigroupPair}:\forall(A,B).\,\left(A\times A\rightarrow A\right)\times\left(B\times B\rightarrow B\right)\rightarrow\left(A\times B\times A\times B\rightarrow A\times B\right)\quad. +\text{semigroupPair}^{A,B}:\left(A\times A\rightarrow A\right)\times\left(B\times B\rightarrow B\right)\rightarrow\left(A\times B\times A\times B\rightarrow A\times B\right)\quad. \] \end_inset @@ -16120,7 +16200,7 @@ Unit \end_inset - type, or other fixed type + type or other fixed type \begin_inset Formula $C$ \end_inset @@ -16370,6 +16450,20 @@ plural "false" caps "false" noprefix "false" +\end_inset + + on page +\begin_inset space ~ +\end_inset + + +\begin_inset CommandInset ref +LatexCommand pageref +reference "tab:Type-constructions-for-monoid" +plural "false" +caps "false" +noprefix "false" + \end_inset . @@ -16467,7 +16561,7 @@ trivial \end_inset are not compatible with monoid's identity laws. - (e.g., with the definition + (E.g., with the definition \begin_inset Formula $x\oplus y=x$ \end_inset @@ -16645,7 +16739,7 @@ action \end_inset . - The operation is defined as + The operation is defined as: \begin_inset Formula \[ (s_{1}\times p_{1})\oplus(s_{2}\times p_{2})=(s_{1}\oplus_{S}s_{2})\times\alpha(s_{2})(p_{1})\quad. 
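For illustration, here is a short Scala sketch of this pair operation (the function name is ours, not taken from the book's code; associativity additionally requires the law for the action stated below):
\begin{lstlisting}
// Combine pairs (S, P), given a binary operation on S and an action alpha: S => P => P.
def combinePair[S, P](combineS: (S, S) => S, alpha: S => P => P)(
  x: (S, P), y: (S, P)): (S, P) = (x, y) match {
  case ((s1, p1), (s2, p2)) => (combineS(s1, s2), alpha(s2)(p1)) // The value p2 is not used.
}
\end{lstlisting}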
@@ -16672,7 +16766,7 @@ Examples of this construction are \begin_inset Formula $S$ \end_inset -, and the second part of the pair is +, and the second part of the pair is: \begin_inset Formula \[ \alpha(s_{2}\oplus_{S}s_{3})(p_{1})=\alpha(s_{3})(\alpha(s_{2})(p_{1}))\quad. @@ -16983,11 +17077,15 @@ noprefix "false" \end_inset ). - We could also use the backward function composition ( + Alternatively, we could use the +\emph on +backward +\emph default + function composition ( \begin_inset Formula $f\circ g$ \end_inset -) to define +) to define a (different) operation \begin_inset Formula $f\oplus g$ \end_inset @@ -17020,7 +17118,7 @@ Monoid \end_inset , where -\begin_inset Formula $S^{\bullet}$ +\begin_inset Formula $S$ \end_inset is some type constructor? As we have seen, products, co-products, and function @@ -17039,7 +17137,7 @@ Monoid instance can be derived. These constructions cover all exponential-polynomial types. - So, let us consider an exponential-polynomial type constructor + So, let us consider an exponential-polynomial type expression \begin_inset Formula $S^{A}$ \end_inset @@ -17049,7 +17147,7 @@ Monoid , primitive types, and other known monoid types. For such type constructors -\begin_inset Formula $S^{\bullet}$ +\begin_inset Formula $S$ \end_inset , we will always be able to implement a function @@ -17087,7 +17185,7 @@ Monoid : \begin_inset Formula \[ -\text{monoidS}:\text{Monoid}^{A}\rightarrow\text{Monoid}^{S^{A}}\quad. +\text{monoidS}^{A}:\text{Monoid}^{A}\rightarrow\text{Monoid}^{S^{A}}\quad. \] \end_inset @@ -17208,12 +17306,11 @@ monoidFunc \end_layout \begin_layout Standard -To illustrate how that works, consider the exponential-polynomial type construct -or -\begin_inset Formula $S^{\bullet}$ +To illustrate how that works, consider the following recursion scheme +\begin_inset Formula $S$ \end_inset - defined as: +: \begin_inset Formula \[ S^{A}\triangleq\left(\text{Int}+A\right)\times\text{Int}+\text{String}\times\left(A\rightarrow\left(A\rightarrow\text{Int}\right)\rightarrow A\right)\quad. @@ -17238,7 +17335,7 @@ type S[A] = Either[(Either[Int, A], Int), (String, A => (A => Int) => A)] \begin_layout Standard It is clear that -\begin_inset Formula $S^{A}$ +\begin_inset Formula $S$ \end_inset is built up from type constructions that preserve monoids at each step. @@ -17998,7 +18095,7 @@ action \end_inset . - The monoidal operation is defined by + The monoidal operation is defined by: \begin_inset Formula \[ (p_{1}\times q_{1})\oplus(p_{2}\times q_{2})=(p_{1}\oplus p_{2})\times(\alpha(p_{2})(q_{1})\oplus q_{2})\quad. @@ -18081,7 +18178,7 @@ Unit \end_inset - type, or primitive types + type or other primitive types \end_layout \end_inset @@ -18530,8 +18627,7 @@ status open \begin_layout Plain Layout -val x: Option[Int] = Some(10) // A non-empty option - that holds a value. +val x: Option[Int] = Some(10) // A non-empty option that holds a value. \end_layout \begin_layout Plain Layout @@ -18544,13 +18640,12 @@ abc \begin_inset Quotes erd \end_inset -) // A list that holds a single value. +) // A list that holds a single value. \end_layout \begin_layout Plain Layout -val z: Try[Int] = Success(200) // A value computed without - errors. +val z: Try[Int] = Success(200) // A value computed without errors. \end_layout \begin_layout Plain Layout @@ -18563,7 +18658,7 @@ OK \begin_inset Quotes erd \end_inset -) // A `Future` value that is already available. +) // A ready `Future` value. 
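\end_layout

\begin_layout Plain Layout

// In each example, a single value is wrapped into a container of the corresponding type.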
\end_layout \end_inset @@ -18687,7 +18782,7 @@ status open \begin_layout Plain Layout -trait PointedF[F[_]] { def pure[A]: A => F[A] } +trait Pointed[F[_]] { def pure[A]: A => F[A] } \end_layout \end_inset @@ -18711,19 +18806,19 @@ status open \begin_layout Plain Layout -implicit val pointedOption = new PointedF[Option] { def pure[A]: A => Option[A] +implicit val pointedOption = new Pointed[Option] { def pure[A]: A => Option[A] = x => Some(x) } \end_layout \begin_layout Plain Layout -implicit val pointedList = new PointedF[List] { def pure[A]: A => List[A] +implicit val pointedList = new Pointed[List] { def pure[A]: A => List[A] = x => List(x) } \end_layout \begin_layout Plain Layout -implicit val pointedTry = new PointedF[Try] { def pure[A]: A => Try[A] +implicit val pointedTry = new Pointed[Try] { def pure[A]: A => Try[A] = x => Success(x) } \end_layout @@ -18748,12 +18843,7 @@ status open \begin_layout Plain Layout -def pure[F[_]: PointedF, A](x: A): F[A] = -\end_layout - -\begin_layout Plain Layout - - implicitly[PointedF[F]].pure(x) +def pure[F[_]: Pointed, A](x: A): F[A] = implicitly[Pointed[F]].pure(x) \end_layout \begin_layout Plain Layout @@ -18995,11 +19085,11 @@ In the \begin_inset Formula $\triangleright$ \end_inset --notation, this law is +-notation, this law is written as \begin_inset Formula $x\triangleright\text{pu}_{F}\triangleright f^{\uparrow F}=x\triangleright f\triangleright\text{pu}_{F}$ \end_inset - or equivalently + or equivalently as \begin_inset Formula $x\triangleright\text{pu}_{F}\bef f^{\uparrow F}=x\triangleright f\bef\text{pu}_{F}$ \end_inset @@ -19016,7 +19106,7 @@ In the and write: \begin_inset Formula \begin{equation} -\text{pu}_{F}\bef f^{\uparrow F}=f\bef\text{pu}_{F}\quad.\label{eq:naturality-law-of-pure} +\text{for any }f^{:A\rightarrow B}:\quad\quad\text{pu}_{F}\bef f^{\uparrow F}=f\bef\text{pu}_{F}\quad.\label{eq:naturality-law-of-pure} \end{equation} \end_inset @@ -19067,11 +19157,7 @@ noprefix "false" \end_inset -) for any function -\begin_inset Formula $f^{:A\rightarrow B}$ -\end_inset - -. +). \end_layout \begin_layout Standard @@ -19137,11 +19223,11 @@ Unit . Both sides of the naturality law may then be applied to the unit value - + ( \begin_inset Formula $1$ \end_inset - and must evaluate to the same result: +) and must evaluate to the same result: \begin_inset Formula \[ 1\triangleright\text{pu}_{F}\triangleright(\_\rightarrow b)^{\uparrow F}=1\triangleright f\triangleright\text{pu}_{F}\quad. @@ -19211,7 +19297,7 @@ noprefix "false" \begin_inset Formula $\text{pu}_{F}$ \end_inset - through one value + through the value \begin_inset Formula $\text{pu}_{F}(1)$ \end_inset @@ -19220,7 +19306,7 @@ noprefix "false" \end_inset . - This value can be viewed as a + That value can be viewed as a \begin_inset Quotes eld \end_inset @@ -19251,7 +19337,7 @@ wrapped unit \end_layout \begin_layout Standard -To perform the same derivation in Scala syntax, we may write +To perform the same derivation in the Scala syntax, we may write: \begin_inset listings inline false status open @@ -19533,11 +19619,11 @@ type equivalence \end_inset : each one can be converted into the other and back without loss of information - (as long as we assume the naturality law of + (as long as the naturality law of \begin_inset Formula $\text{pu}_{F}$ \end_inset -). + holds). 
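 As a sketch in code (using the Pointed trait shown above and assuming a Functor typeclass that provides a map method; the helper names are ours):
\begin{lstlisting}
// From `pure` to a "wrapped unit" value, and back:
def wuFromPure[F[_]](p: Pointed[F]): F[Unit] = p.pure[Unit](())
def pureFromWu[F[_]](wu: F[Unit])(implicit ft: Functor[F]): Pointed[F] =
  new Pointed[F] { def pure[A]: A => F[A] = a => ft.map(wu)(_ => a) }
\end{lstlisting}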
We may define a pointed functor equivalently as a functor with a chosen value \begin_inset Formula $\text{wu}_{F}$ @@ -19632,7 +19718,7 @@ val wu2: F[Unit] = Some(((), ())) \begin_inset Formula \[ -\text{wu}_{1F}\triangleq\bbnum 1+\bbnum 0^{:\bbnum 1\times\bbnum 1}\quad,\quad\quad\text{wu}_{2F}\triangleq\bbnum 0+1\times1\quad. +\text{wu}_{1F}\triangleq1+\bbnum 0^{:\bbnum 1\times\bbnum 1}\quad,\quad\quad\text{wu}_{2F}\triangleq\bbnum 0+1\times1\quad. \] \end_inset @@ -19969,7 +20055,7 @@ Pointed \end_inset - instance that will be useful for the application at hand. + instance that will be useful in the application at hand. In the case of \begin_inset listings inline true @@ -19982,7 +20068,7 @@ List \end_inset -, the standard choice +, the standard definitions \begin_inset listings inline true status open @@ -20006,7 +20092,7 @@ pure(x) = List(x) \end_inset - is motivated by the usage of the + are motivated by the usage of the \begin_inset listings inline true status open @@ -20019,7 +20105,7 @@ List \end_inset functor to represent a choice of possibilities, e.g., in a search problem. - Then the + Then a \begin_inset Quotes eld \end_inset @@ -20155,8 +20241,8 @@ status open \begin_layout Plain Layout -def pointedPair[F[_]: Pointed, G[_]: Pointed]: Pointed[L] // Does not - work in Scala. +def pointedPair[F[_]: Pointed, G[_]: Pointed]: Pointed[L] // Does not + work. \end_layout \end_inset @@ -20198,7 +20284,11 @@ G \end_inset - that are defined only within the type signature of the function. + that are defined only +\emph on +within +\emph default + the type signature of the function. To achieve that, we would need somehow to insert a new type alias declaration within the type signature of \begin_inset listings @@ -20219,19 +20309,18 @@ status open \begin_layout Plain Layout -def pointedPair[F[_]: Pointed, G[_]: Pointed]: ( // Not a valid - Scala syntax. +def pointedPair[F[_]: Pointed, G[_]: Pointed]: ( // Not a valid syntax: \end_layout \begin_layout Plain Layout - type L[A] = (F[A], G[A]) // Temporarily define a type constructor L, - and now use it: + type L[A] = (F[A], G[A]) // Temporarily define a type constructor + L. \end_layout \begin_layout Plain Layout - Pointed[L] ) + Pointed[L] ) // Use L here. \end_layout \end_inset @@ -20402,7 +20491,7 @@ literal "false" adds syntax for nameless type constructors. The syntax is similar to defining a nameless function: for instance, the pair functor -\begin_inset Formula $F^{\bullet}\times G^{\bullet}$ +\begin_inset Formula $F\times G$ \end_inset is defined as @@ -20418,7 +20507,7 @@ Lambda[X => (F[X], G[X])] \end_inset . 
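 For reference, the plugin is typically enabled in an sbt build by a setting such as the following (the version number shown is only illustrative):
\begin{lstlisting}
addCompilerPlugin("org.typelevel" % "kind-projector" % "0.13.2" cross CrossVersion.full)
\end{lstlisting}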
- Such type expressions can be understood as nameless + Such type expressions represent nameless \begin_inset Index idx status open @@ -20704,14 +20793,18 @@ status open \begin_layout Plain Layout -def pointedFoG[F[_]: Pointed : Functor, G[_]: Pointed]: Pointed[Lambda[X - => F[G[X]]]] +def pointedFoG[F[_]: Pointed : Functor, G[_]: Pointed] +\end_layout + +\begin_layout Plain Layout + + : Pointed[Lambda[X => F[G[X]]]] \end_layout \begin_layout Plain Layout - = Pointed[Lambda[X => F[G[X]]]](pure[F, G[Unit]](implicitly[Pointed[G]].wu)) - + = Pointed[Lambda[X => F[G[X]]]](implicitly[Pointed[F]].pure[F, G[Unit]](implici +tly[Pointed[G]].wu)) \end_layout \end_inset @@ -20816,13 +20909,17 @@ status open \begin_layout Plain Layout -def pointedCFoG[F[_]: Pointed : Contrafunctor, G[_]]: Pointed[Lambda[X => - F[G[X]]]] = +def pointedCFoG[F[_]: Pointed : Contrafunctor, G[_]] +\end_layout + +\begin_layout Plain Layout + + : Pointed[Lambda[X => F[G[X]]]] \end_layout \begin_layout Plain Layout - Pointed[Lambda[X => F[G[X]]]](cpure[F, G[Unit]]) + = Pointed[Lambda[X => F[G[X]]]](cpure[F, G[Unit]]) \end_layout \end_inset @@ -20871,14 +20968,18 @@ status open \begin_layout Plain Layout -def pointedFxG[F[_]: Pointed, G[_]: Pointed]: Pointed[Lambda[X => (F[X],G[X])]] - = +def pointedFxG[F[_]: Pointed, G[_]: Pointed] \end_layout \begin_layout Plain Layout - Pointed[Lambda[X => (F[X],G[X])]]((implicitly[Pointed[F]].wu, implicitly[Pointe -d[G]].wu)) + : Pointed[Lambda[X => (F[X],G[X])]] +\end_layout + +\begin_layout Plain Layout + + = Pointed[Lambda[X => (F[X],G[X])]]((implicitly[Pointed[F]].wu, implicitly[Poin +ted[G]].wu)) \end_layout \end_inset @@ -20925,7 +21026,7 @@ If \end_inset , both making -\begin_inset Formula $L^{\bullet}$ +\begin_inset Formula $L$ \end_inset a pointed functor. @@ -20934,7 +21035,7 @@ If \begin_layout Standard It is sufficient if just -\begin_inset Formula $F^{\bullet}$ +\begin_inset Formula $F$ \end_inset is a pointed functor: @@ -20954,11 +21055,11 @@ Pointed \end_inset typeclass instance for -\begin_inset Formula $F^{\bullet}+G^{\bullet}$ +\begin_inset Formula $F+G$ \end_inset , even if -\begin_inset Formula $G^{\bullet}$ +\begin_inset Formula $G$ \end_inset is not pointed. @@ -20968,13 +21069,17 @@ status open \begin_layout Plain Layout -def pointedEitherFG[F[_]: Pointed, G[_]]: Pointed[Lambda[X => Either[F[X],G[X]]] -] = +def pointedEitherFG[F[_]: Pointed, G[_]] \end_layout \begin_layout Plain Layout - Pointed[Lambda[X => Either[F[X],G[X]]]](Left(implicitly[Pointed[F]].wu)) + : Pointed[Lambda[X => Either[F[X],G[X]]]] +\end_layout + +\begin_layout Plain Layout + + = Pointed[Lambda[X => Either[F[X],G[X]]]](Left(implicitly[Pointed[F]].wu)) \end_layout \end_inset @@ -21067,11 +21172,11 @@ noprefix "false" \end_inset ) assumes a bifunctor -\begin_inset Formula $S^{\bullet,\bullet}$ +\begin_inset Formula $S$ \end_inset and defines a recursive functor -\begin_inset Formula $F^{\bullet}$ +\begin_inset Formula $F$ \end_inset via the type equation @@ -21080,7 +21185,7 @@ noprefix "false" . The functor -\begin_inset Formula $F^{\bullet}$ +\begin_inset Formula $F$ \end_inset will be pointed if we can compute a value @@ -21564,7 +21669,7 @@ The type \end_inset from scratch. 
- The function type + Values of type \begin_inset Formula $(\bbnum 1+A\rightarrow\text{Int})\rightarrow A\times\bbnum 1$ \end_inset @@ -21712,7 +21817,7 @@ Composition of pointed (contra)functors \begin_layout Plain Layout \size footnotesize -\begin_inset Formula $\text{Pointed}^{F^{\bullet}}\times\text{Pointed}^{G^{\bullet}}\rightarrow\text{Pointed}^{F^{G^{\bullet}}}$ +\begin_inset Formula $\text{Pointed}^{F}\times\text{Pointed}^{G}\rightarrow\text{Pointed}^{F\circ G}$ \end_inset @@ -21747,7 +21852,7 @@ Product of pointed functors \begin_layout Plain Layout \size footnotesize -\begin_inset Formula $\text{Pointed}^{F^{\bullet}}\times\text{Pointed}^{G^{\bullet}}\rightarrow\text{Pointed}^{F^{\bullet}\times G^{\bullet}}$ +\begin_inset Formula $\text{Pointed}^{F}\times\text{Pointed}^{G}\rightarrow\text{Pointed}^{F\times G}$ \end_inset @@ -21782,7 +21887,7 @@ Co-product of a pointed functor \begin_layout Plain Layout \size footnotesize -\begin_inset Formula $\text{Pointed}^{F^{\bullet}}\times\text{Functor}^{G^{\bullet}}\rightarrow\text{Pointed}^{F^{\bullet}+G^{\bullet}}$ +\begin_inset Formula $\text{Pointed}^{F}\times\text{Functor}^{G}\rightarrow\text{Pointed}^{F+G}$ \end_inset @@ -21817,7 +21922,7 @@ Function from any \begin_layout Plain Layout \size footnotesize -\begin_inset Formula $\text{Pointed}^{F^{\bullet}}\times\text{Contrafunctor}^{C^{\bullet}}\rightarrow\text{Pointed}^{C^{\bullet}\rightarrow F^{\bullet}}$ +\begin_inset Formula $\text{Pointed}^{F}\times\text{Contrafunctor}^{C}\rightarrow\text{Pointed}^{C\rightarrow F}$ \end_inset @@ -21844,7 +21949,7 @@ Recursive type \begin_layout Plain Layout \size footnotesize -\begin_inset Formula $\text{Pointed}^{F^{\bullet}}\rightarrow\text{Pointed}^{S^{\bullet,F^{\bullet}}}$ +\begin_inset Formula $\text{Pointed}^{F}\rightarrow\text{Pointed}^{S^{\bullet,F}}$ \end_inset where @@ -21993,7 +22098,7 @@ Copointed \end_inset - as + as: \begin_inset listings inline false status open @@ -22038,7 +22143,7 @@ extract \series bold naturality law \series default - (compare to Eq. + (compare with Eq. \begin_inset space ~ \end_inset @@ -22213,7 +22318,7 @@ extract \end_inset and -\begin_inset Formula $f^{:\bbnum 1\rightarrow B}\triangleq(1\rightarrow b)$ +\begin_inset Formula $f^{:\bbnum 1\rightarrow B}\triangleq(\_\rightarrow b)$ \end_inset in the naturality law, both sides will become functions of type @@ -22545,7 +22650,7 @@ If functors is co-pointed). The functor product -\begin_inset Formula $F^{\bullet}\times G^{\bullet}$ +\begin_inset Formula $F\times G$ \end_inset is then made into a co-pointed functor. 
@@ -22930,7 +23035,7 @@ cpure Finally, we have: \begin_inset Formula \begin{align} - & \text{ex}_{L}\triangleq h^{:C^{A}\rightarrow P^{A}}\rightarrow\text{ex}_{P}(h(\text{cpu}_{C}))\label{eq:def-of-ex-for-C-mapsto-P}\\ + & \text{ex}_{L}\triangleq h^{:C^{A}\rightarrow P^{A}}\rightarrow\text{ex}_{P}(h(\text{cpu}_{C}))\quad,\label{eq:def-of-ex-for-C-mapsto-P}\\ \text{ or equivalently}:\quad & h^{:C^{A}\rightarrow P^{A}}\triangleright\text{ex}_{L}=\text{cpu}_{C}\triangleright h\triangleright\text{ex}_{P}\quad.\nonumber \end{align} @@ -22951,7 +23056,11 @@ To verify the naturality law, we apply both sides to an arbitrary \end_inset -We expect the last expression to equal this one: +We expect the last expression to equal +\begin_inset Formula $h\triangleright\text{ex}_{L}\bef f$ +\end_inset + +, which is rewritten as: \begin_inset Formula \[ h\triangleright\text{ex}_{L}\bef f=\text{cpu}_{C}\triangleright h\triangleright\text{ex}_{P}\triangleright f=\text{cpu}_{C}\triangleright h\bef\text{ex}_{P}\bef f\quad. @@ -22959,8 +23068,8 @@ h\triangleright\text{ex}_{L}\bef f=\text{cpu}_{C}\triangleright h\triangleright\ \end_inset -This is possible only if -\begin_inset Formula $\text{cpu}_{C}\triangleright f^{\downarrow C}=\text{pu}_{C}$ +The equality is possible only if +\begin_inset Formula $\text{cpu}_{C}\triangleright f^{\downarrow C}=\text{cpu}_{C}$ \end_inset for all @@ -23031,13 +23140,13 @@ color{dkgreen} h \backslash rightarrow \backslash -text{pu}_{C} +text{cpu}_{C} \backslash triangleright h \backslash triangleright \backslash -text{ex}_{P} }$ +text{ex}_{P} }$. \end_layout \end_inset @@ -23206,7 +23315,7 @@ Assuming that by recursion: \begin_inset Formula \begin{align*} - & \text{ex}_{F}\triangleq s^{:S^{A,F^{A}}}\rightarrow s\triangleright\big(\text{bimap}_{S}(\text{id})(\text{ex}_{F})\big)\triangleright\text{ex}_{S}\\ + & \text{ex}_{F}\triangleq s^{:S^{A,F^{A}}}\rightarrow s\triangleright\big(\text{bimap}_{S}(\text{id})(\text{ex}_{F})\big)\triangleright\text{ex}_{S}\quad,\\ \text{ or equivalently}:\quad & \text{ex}_{F}\triangleq\text{bimap}_{S}(\text{id})(\text{ex}_{F})\bef\text{ex}_{S}\quad. \end{align*} @@ -23519,7 +23628,7 @@ It remains to consider is pointed. 
A contrafunctor -\begin_inset Formula $C^{\bullet}$ +\begin_inset Formula $C$ \end_inset is pointed if values of type @@ -23673,7 +23782,7 @@ Composition of co-pointed functors \begin_layout Plain Layout \size footnotesize -\begin_inset Formula $\text{Copointed}^{F^{\bullet}}\times\text{Copointed}^{G^{\bullet}}\rightarrow\text{Copointed}^{F^{G^{\bullet}}}$ +\begin_inset Formula $\text{Copointed}^{F}\times\text{Copointed}^{G}\rightarrow\text{Copointed}^{F\circ G}$ \end_inset @@ -23708,7 +23817,7 @@ Product of co-pointed functor \begin_layout Plain Layout \size footnotesize -\begin_inset Formula $\text{Copointed}^{F^{\bullet}}\times\text{Functor}^{G^{\bullet}}\rightarrow\text{Copointed}^{F^{\bullet}\times G^{\bullet}}$ +\begin_inset Formula $\text{Copointed}^{F}\times\text{Functor}^{G}\rightarrow\text{Copointed}^{F\times G}$ \end_inset @@ -23743,7 +23852,7 @@ Co-product of co-pointed functors \begin_layout Plain Layout \size footnotesize -\begin_inset Formula $\text{Copointed}^{F^{\bullet}}\times\text{Copointed}^{G^{\bullet}}\rightarrow\text{Copointed}^{F^{\bullet}+G^{\bullet}}$ +\begin_inset Formula $\text{Copointed}^{F}\times\text{Copointed}^{G}\rightarrow\text{Copointed}^{F+G}$ \end_inset @@ -23778,7 +23887,7 @@ Function from pointed \begin_layout Plain Layout \size footnotesize -\begin_inset Formula $\text{\text{Pointed}^{C^{\bullet}}}\times\text{Copointed}^{F^{\bullet}}\rightarrow\text{Copointed}^{C^{\bullet}\rightarrow F^{\bullet}}$ +\begin_inset Formula $\text{\text{Pointed}^{C}}\times\text{Copointed}^{F}\rightarrow\text{Copointed}^{C\rightarrow F}$ \end_inset @@ -23805,7 +23914,7 @@ Recursive type \begin_layout Plain Layout \size footnotesize -\begin_inset Formula $\text{Copointed}^{F^{\bullet}}\rightarrow\text{Copointed}^{S^{\bullet,F^{\bullet}}}$ +\begin_inset Formula $\text{Copointed}^{F}\rightarrow\text{Copointed}^{S^{\bullet,F}}$ \end_inset where @@ -23875,7 +23984,7 @@ name "subsec:Pointed-contrafunctors" \begin_layout Standard In the previous section, the function-type construction required a contrafunctor -\begin_inset Formula $C^{\bullet}$ +\begin_inset Formula $C$ \end_inset to have a method @@ -24032,7 +24141,7 @@ noprefix "false" \end_inset So, a pointed contrafunctor instance for -\begin_inset Formula $C^{\bullet}$ +\begin_inset Formula $C$ \end_inset is just a chosen value of type @@ -24185,12 +24294,12 @@ Since the identity functor \begin_inset Formula $F^{C^{A}}$ \end_inset - where -\begin_inset Formula $C^{\bullet}$ +, where +\begin_inset Formula $C$ \end_inset is a contrafunctor and -\begin_inset Formula $F^{\bullet}$ +\begin_inset Formula $F$ \end_inset is a functor. @@ -24224,14 +24333,18 @@ If can be computed). So, -\begin_inset Formula $C^{F^{\bullet}}$ +\begin_inset Formula $C\circ F$ \end_inset is a pointed contrafunctor whenever -\begin_inset Formula $C^{\bullet}$ +\begin_inset Formula $C$ \end_inset - is one, for any (not necessarily pointed) functor + is one, for +\emph on +any +\emph default + (not necessarily pointed) functor \begin_inset Formula $F$ \end_inset @@ -24242,13 +24355,17 @@ status open \begin_layout Plain Layout -def pointedCoF[C[_]: Pointed: Contrafunctor, F[_]]: Pointed[Lambda[X => - C[F[X]]]] = +def pointedCoF[C[_]: Pointed: Contrafunctor, F[_]] +\end_layout + +\begin_layout Plain Layout + + : Pointed[Lambda[X => C[F[X]]]] \end_layout \begin_layout Plain Layout - Pointed[Lambda[X => C[F[X]]]](cpure[C, F[Unit]]) + = Pointed[Lambda[X => C[F[X]]]](cpure[C, F[Unit]]) \end_layout \end_inset @@ -24287,15 +24404,15 @@ pure . 
So, -\begin_inset Formula $F^{C^{\bullet}}$ +\begin_inset Formula $F\circ C$ \end_inset is pointed whenever both -\begin_inset Formula $C^{\bullet}$ +\begin_inset Formula $C$ \end_inset and -\begin_inset Formula $F^{\bullet}$ +\begin_inset Formula $F$ \end_inset are pointed. @@ -24305,13 +24422,17 @@ status open \begin_layout Plain Layout -def pointedFoC[C[_]: Pointed, F[_]: Pointed : Functor]: Pointed[Lambda[X - => F[C[X]]]] = +def pointedFoC[C[_]: Pointed, F[_]: Pointed : Functor] \end_layout \begin_layout Plain Layout - Pointed[Lambda[X => F[C[X]]]](pure[F, C[Unit]](implicitly[Pointed[C]].wu)) + : Pointed[Lambda[X => F[C[X]]]] +\end_layout + +\begin_layout Plain Layout + + = Pointed[Lambda[X => F[C[X]]]](pure[F, C[Unit]](implicitly[Pointed[C]].wu)) \end_layout \end_inset @@ -24343,11 +24464,11 @@ The construction is the same as for pointed functors: If we have values \end_inset pointed if both -\begin_inset Formula $C^{\bullet}$ +\begin_inset Formula $C$ \end_inset and -\begin_inset Formula $D^{\bullet}$ +\begin_inset Formula $D$ \end_inset are pointed contrafunctors. @@ -24360,11 +24481,11 @@ Co-products \begin_layout Standard The construction is the same as for pointed functors: If at least one of the contrafunctors -\begin_inset Formula $C^{\bullet}$ +\begin_inset Formula $C$ \end_inset and -\begin_inset Formula $D^{\bullet}$ +\begin_inset Formula $D$ \end_inset is pointed, we can create a @@ -24404,11 +24525,11 @@ The construction is \end_inset , where -\begin_inset Formula $C^{\bullet}$ +\begin_inset Formula $C$ \end_inset is a contrafunctor and -\begin_inset Formula $F^{\bullet}$ +\begin_inset Formula $F$ \end_inset is a functor. @@ -24669,7 +24790,7 @@ Composition of functors/contrafunctors \begin_layout Plain Layout \size footnotesize -\begin_inset Formula $\text{Pointed}^{F^{\bullet}}\times\text{Pointed}^{G^{\bullet}}\rightarrow\text{Pointed}^{F^{G^{\bullet}}}$ +\begin_inset Formula $\text{Pointed}^{F}\times\text{Pointed}^{G}\rightarrow\text{Pointed}^{F\circ G}$ \end_inset @@ -24704,7 +24825,7 @@ Product of contrafunctors \begin_layout Plain Layout \size footnotesize -\begin_inset Formula $\text{Pointed}^{F^{\bullet}}\times\text{Pointed}^{G^{\bullet}}\rightarrow\text{Pointed}^{F^{\bullet}\times G^{\bullet}}$ +\begin_inset Formula $\text{Pointed}^{F}\times\text{Pointed}^{G}\rightarrow\text{Pointed}^{F\times G}$ \end_inset @@ -24739,7 +24860,7 @@ Co-product of a pointed \begin_layout Plain Layout \size footnotesize -\begin_inset Formula $\text{Pointed}^{F^{\bullet}}\times\text{Contrafunctor}^{G^{\bullet}}\rightarrow\text{Pointed}^{F^{\bullet}+G^{\bullet}}$ +\begin_inset Formula $\text{Pointed}^{F}\times\text{Contrafunctor}^{G}\rightarrow\text{Pointed}^{F+G}$ \end_inset @@ -24770,7 +24891,7 @@ Function type, \begin_layout Plain Layout \size footnotesize -\begin_inset Formula $\text{Pointed}^{C^{\bullet}}\times\text{Functor}^{F^{\bullet}}\rightarrow\text{Pointed}^{F^{\bullet}\rightarrow C^{\bullet}}$ +\begin_inset Formula $\text{Pointed}^{C}\times\text{Functor}^{F}\rightarrow\text{Pointed}^{F\rightarrow C}$ \end_inset @@ -24797,7 +24918,7 @@ Recursive type \begin_layout Plain Layout \size footnotesize -\begin_inset Formula $\text{Pointed}^{C^{\bullet}}\rightarrow\text{Pointed}^{S^{\bullet,C^{\bullet}}}$ +\begin_inset Formula $\text{Pointed}^{C}\rightarrow\text{Pointed}^{S^{\bullet,C}}$ \end_inset , where @@ -24967,7 +25088,7 @@ type F[A] = (A => Int) => A // Define a type constructor. 
\begin_layout Plain Layout implicit val functorF: Functor[F] = implement // Automatically implement - typeclass instance for F. + a typeclass instance for F. \end_layout \begin_layout Plain Layout @@ -24980,8 +25101,8 @@ implicit val pointedF: Pointed[F] = implement // Automatically use the However, no currently available library provides such functionality. Also, typeclass instances are not always derived uniquely, as we have seen - in several cases (e.g., the co-product construction of monoids or pointed - functors). + in several cases (e.g., the co-product constructions for monoids and for + pointed functors). \end_layout \begin_layout Standard @@ -25521,8 +25642,8 @@ status open \begin_layout Plain Layout -trait HasBitsize[T] { def size: Int } // Declare the trait as `sealed` to - prohibit further instances. +sealed trait HasBitsize[T] { def size: Int } // Declare the trait as `sealed` + to prohibit further instances. \end_layout \begin_layout Plain Layout @@ -26042,7 +26163,7 @@ combine operation for routes. The combined route should respond to all paths that at least one of the - previous routes responds to: + routes responds to: \begin_inset listings inline false status open @@ -26424,8 +26545,7 @@ status open \begin_layout Plain Layout -import $ivy.`org.typelevel::cats-core:1.5.0`, cats.Monoid // Using `ammonite` - for convenience. +import $ivy.`org.typelevel::cats-core:1.5.0`, cats.Monoid // Using `ammonite`. \end_layout \begin_layout Plain Layout @@ -27644,7 +27764,11 @@ Functor \end_inset - instance for recursive type constructor + instance for the recursive type constructor +\begin_inset Formula $Q$ +\end_inset + + defined by \begin_inset Formula $Q^{A}\triangleq\left(\text{Int}\rightarrow A\right)+\text{Int}+Q^{A}$ \end_inset @@ -27946,7 +28070,7 @@ implicit val functorEither = new Functor[L] { \end_inset -We will now to rewrite this code by making +We now need to rewrite this code by making \begin_inset listings inline true status open @@ -27971,7 +28095,7 @@ G \end_inset into type parameters. - To achieve that, we need to use the + For that, we need to use the \begin_inset Quotes eld \end_inset @@ -28165,7 +28289,7 @@ Solution \series bold (a) \series default - We need to implement a function with type signature + We need to implement a function with type signature: \begin_inset Formula \[ \forall(A,B).\,C^{A}+C^{B}\rightarrow C^{A\times B}\quad. @@ -28174,7 +28298,7 @@ Solution \end_inset Begin by looking at the types involved. - We need to relate values + We need to relate values of types \begin_inset Formula $C^{A\times B}$ \end_inset @@ -28466,7 +28590,7 @@ def f[F[_]: Functor, A, B](p: F[(A, B)]): (F[A], F[B]) = \end_inset -A shorter code for +A shorter formula for \begin_inset Formula $f$ \end_inset @@ -28610,12 +28734,12 @@ Implementing the function The resulting type signature is: \begin_inset Formula \[ -(P\rightarrow A)\times(Q\rightarrow B)\rightarrow(P\rightarrow A\times B)+(Q\rightarrow A\times B) +(P\rightarrow A)\times(Q\rightarrow B)\rightarrow(P\rightarrow A\times B)+(Q\rightarrow A\times B)\quad. \] \end_inset -and cannot be implemented by fully parametric code. +This type cannot be implemented by fully parametric code. 
Indeed, to return a value of type \begin_inset Formula $P\rightarrow A\times B$ \end_inset @@ -28761,7 +28885,7 @@ Russell O'Connor \end_inset - + \begin_inset Foot status open @@ -28808,7 +28932,7 @@ p^{A,B}:A+F^{B}\rightarrow F^{A+B}\quad, \end_inset -additionally satisfying the special laws of identity and associativity: + satisfying the following special laws of identity and associativity: \begin_inset Formula \[ p^{\bbnum 0,B}=(b^{:B}\rightarrow\bbnum 0+b)^{\uparrow F}\quad,\quad\quad p^{A+B,C}=\,\begin{array}{|c||cc|} @@ -28952,12 +29076,8 @@ status open \begin_layout Plain Layout -def p[F[_]: Functor : Pointed, A, B] -\end_layout - -\begin_layout Plain Layout - - : Either[A, F[B]] => F[Either[A, B]] = { +def p[F[_]: Functor : Pointed, A, B]: Either[A, F[B]] => F[Either[A, B]] + = { \end_layout \begin_layout Plain Layout @@ -29015,11 +29135,11 @@ To verify the associativity law, we begin with its right-hand side since it is more complicated: \begin_inset Formula \begin{align*} - & \begin{array}{|c||cc|} +\begin{array}{|c||cc|} & A & F^{B+C}\\ \hline A & \text{id} & \bbnum 0\\ B+F^{C} & \bbnum 0 & p^{B,C} -\end{array}\,\bef p^{A,B+C}=\,\begin{array}{|c||cc|} +\end{array}\,\bef p^{A,B+C} & =\,\begin{array}{|c||cc|} & A & F^{B+C}\\ \hline A & \text{id} & \bbnum 0\\ B+F^{C} & \bbnum 0 & p^{B,C} @@ -29027,20 +29147,13 @@ B+F^{C} & \bbnum 0 & p^{B,C} & F^{A+B+C}\\ \hline A & (a^{:A}\rightarrow a+\bbnum 0^{:B+C})\bef\text{pu}_{F}\\ F^{B+C} & (x^{:B+C}\rightarrow\bbnum 0^{:A}+x)^{\uparrow F} -\end{array} -\end{align*} - -\end_inset - - -\begin_inset Formula -\begin{align*} - & =\,\begin{array}{|c||c|} +\end{array}\\ + & =\,\,\begin{array}{|c||c|} & F^{A+B+C}\\ \hline A & (a^{:A}\rightarrow a+\bbnum 0^{:B+C})\bef\text{pu}_{F}\\ B+F^{C} & p^{B,C}\bef(x^{:B+C}\rightarrow\bbnum 0^{:A}+x)^{\uparrow F} \end{array}\\ - & =\,\,\begin{array}{|c||c|} + & =\,\begin{array}{|c||c|} & F^{A+B+C}\\ \hline A & (a^{:A}\rightarrow a+\bbnum 0^{:B+C})\bef\text{pu}_{F}\\ B & (b^{:B}\rightarrow b+\bbnum 0^{:C})\bef\text{pu}_{F}\bef(x^{:B+C}\rightarrow\bbnum 0^{:A}+x)^{\uparrow F}\\ @@ -29060,11 +29173,12 @@ In the last line, we have expanded the type matrix to three rows corresponding \begin_inset Formula $p^{A+B,C}$ \end_inset -; so let us rewrite +. 
+ So, let us rewrite \begin_inset Formula $p^{A+B,C}$ \end_inset - as a similarly expanded type matrix, using the type isomorphisms such as + as an expanded type matrix with help of the type isomorphisms such as \begin_inset Formula $\bbnum 0^{:A}+\bbnum 0^{:B}\cong\bbnum 0^{:A+B}$ \end_inset @@ -29592,14 +29706,26 @@ Functor \end_inset - instance for + instance for the type constructor +\begin_inset listings +inline true +status open + +\begin_layout Plain Layout + +F +\end_layout + +\end_inset + + defined by \begin_inset listings inline true status open \begin_layout Plain Layout -type F[T] = Try[Seq[T]] +F[T] = Try[Seq[T]] \end_layout \end_inset @@ -29799,7 +29925,7 @@ Show explicitly that a value \begin_inset Formula $\text{wu}_{C}:C^{\bbnum 1}$ \end_inset - is computationally equivalent to a value + is equivalent to a value \begin_inset Formula $\text{pu}_{C}:\forall A.\,C^{A}$ \end_inset @@ -30115,10 +30241,20 @@ Implement a function with type signature \end_inset rigid +\begin_inset Index idx +status open + +\begin_layout Plain Layout +rigid functors +\end_layout + +\end_inset + + \begin_inset Quotes erd \end_inset -; see Section + in this book; see Section \begin_inset space ~ \end_inset @@ -30385,7 +30521,7 @@ recursive type equation \begin_inset Formula $T\triangleq S^{T}$ \end_inset - where the type constructor + where the recursion scheme \begin_inset Formula $S$ \end_inset @@ -30394,11 +30530,7 @@ recursive type equation functor \emph default . - Type equations with non-functor -\begin_inset Formula $S$ -\end_inset - - (e.g., the equation + Non-covariant recursion schemes (e.g., in the equation \begin_inset Formula $T\triangleq T\rightarrow\text{Int}$ \end_inset @@ -30446,7 +30578,7 @@ fixpoint type \end_inset are equivalent (isomorphic). - We must implement this type isomorphism as two functions, named e.g., + We must implement this type isomorphism as two functions, often named \begin_inset listings inline true status open @@ -30482,7 +30614,7 @@ unfix \end_layout \begin_layout Standard -Given a type constructor +Given a recursion scheme \begin_inset Formula $S$ \end_inset @@ -30490,14 +30622,14 @@ Given a type constructor \begin_inset Formula $T$ \end_inset - with this Scala code: + with this code: \begin_inset listings inline false status open \begin_layout Plain Layout -final case class T(s: S[T]) // Type constructor S[_] must be already defined. +final case class T(s: S[T]) // Type constructor S must be already defined. \end_layout \begin_layout Plain Layout @@ -30777,7 +30909,7 @@ T \end_inset - is void. + (as defined by this code) is void. \begin_inset Index idx status open @@ -30885,8 +31017,8 @@ T \end_layout \begin_layout Standard -For some disjunctive type constructors -\begin_inset Formula $S^{\bullet}$ +For some disjunctive recursion schemes +\begin_inset Formula $S$ \end_inset , values of type @@ -30978,7 +31110,7 @@ y \begin_inset Formula $\text{Int}+\bbnum 0^{:T\times T}$ \end_inset -, which is a disjunctive part of +, which is a disjunctive part of the type \begin_inset Formula $S^{A}$ \end_inset @@ -31025,8 +31157,18 @@ The examples we saw previously, \end_layout \begin_layout Standard -Given a functor -\begin_inset Formula $S^{A}$ +Given a +\begin_inset Index idx +status open + +\begin_layout Plain Layout +recursion scheme +\end_layout + +\end_inset + +recursion scheme +\begin_inset Formula $S$ \end_inset , how can we determine whether the type recursion @@ -31034,7 +31176,7 @@ Given a functor \end_inset terminates? 
If -\begin_inset Formula $S^{A}$ +\begin_inset Formula $S$ \end_inset is a @@ -31362,8 +31504,8 @@ next \end_inset . - For this reason, an infinite loop is avoided even though the structure - functor + For this reason, an infinite loop is avoided even though the recursion + scheme \begin_inset Formula $S$ \end_inset @@ -31427,11 +31569,11 @@ As another example, consider \begin_inset Formula $P\cong(\bbnum 1\rightarrow P)$ \end_inset -, we could transform +, we could transform the type expression \begin_inset Formula $S^{A}$ \end_inset - into an equivalent functor + into an equivalent type \begin_inset Formula $\tilde{S}^{A}$ \end_inset @@ -31465,7 +31607,7 @@ Although \begin_inset Formula $\tilde{S}^{A}$ \end_inset - are equivalent, the recursive types + are equivalent types, the recursive types \begin_inset Formula $\text{Fix}^{S}$ \end_inset @@ -31525,13 +31667,12 @@ final case class T(e: () => Either[String, (Int, T)]) \begin_layout Plain Layout -val t1: T = T(() => Right((1, t1))) // Stream [1, 1, 1, ...]. +val t1: T = T(() => Right((1, t1))) // [1, 1, 1, ...] \end_layout \begin_layout Plain Layout -def t2(n: Int): T = T(() => Right((n, t2(n+1)))) // Stream [n, n+1, n+2, - ...]. +def t2(n: Int): T = T(() => Right((n, t2(n+1)))) // [n, n+1, n+2, ...] \end_layout \end_inset @@ -31540,7 +31681,7 @@ The type \begin_inset Formula $T\triangleq\text{Fix}^{S}$ \end_inset - also admits bounded streams defined without recursion, for example: + also admits bounded streams, for example: \begin_inset listings inline false status open @@ -31555,7 +31696,7 @@ stop \begin_inset Quotes erd \end_inset -))))) // Stream [10, +))))) // [10, \begin_inset Quotes eld \end_inset @@ -31563,7 +31704,7 @@ stop \begin_inset Quotes erd \end_inset -]. +] \end_layout \end_inset @@ -31649,11 +31790,11 @@ noprefix "false" . This is precisely the condition for -\begin_inset Formula $P^{\bullet}$ +\begin_inset Formula $P$ \end_inset to be a pointed functor. - The contrafunctor + The type \begin_inset Formula $C^{A}$ \end_inset @@ -31679,7 +31820,7 @@ consume \end_inset ) as long as -\begin_inset Formula $P^{\bullet}$ +\begin_inset Formula $P$ \end_inset is a pointed functor. @@ -31698,7 +31839,7 @@ noprefix "false" \end_inset , a functor -\begin_inset Formula $P^{\bullet}$ +\begin_inset Formula $P$ \end_inset is pointed if we can compute a value of type @@ -31733,7 +31874,7 @@ This consideration applies to any sub-expression of the form \begin_inset Formula $C^{A}\rightarrow P^{A}$ \end_inset - within the type constructor + within the type \begin_inset Formula $S^{A}$ \end_inset @@ -31743,7 +31884,7 @@ This consideration applies to any sub-expression of the form \end_inset to exist is that every functor -\begin_inset Formula $P^{A}$ +\begin_inset Formula $P$ \end_inset involved in such sub-expressions should be pointed. 
@@ -31755,7 +31896,7 @@ This consideration applies to any sub-expression of the form \emph on functions \emph default - within + within the type \begin_inset Formula $S^{A}$ \end_inset @@ -31770,7 +31911,7 @@ functions \begin_layout Standard If the functor -\begin_inset Formula $S^{A}$ +\begin_inset Formula $S$ \end_inset has the property @@ -31862,8 +32003,8 @@ concat \end_inset - function is defined for both lists and arrays, and works similarly for - these data types: + function is defined for both lists and arrays and works similarly for those + data types: \begin_inset listings inline false status open @@ -31894,8 +32035,7 @@ res1: List[Int] = List(1, 2, 3, 4, 5, 6) \end_inset -In this section, we will show rigorously that concatenation is an associative - operation. +In this section, we will prove that concatenation is an associative operation. \end_layout \begin_layout Standard @@ -31991,8 +32131,7 @@ res2: String = c \begin_layout Plain Layout -scala> x(3) // Applying the partial function `x.apply` to the value 3 will - fail: +scala> x(3) // Applying the partial function `x.apply` to the value 3 fails: \end_layout \begin_layout Plain Layout @@ -32084,7 +32223,7 @@ concat \begin_inset Formula $\pplus$ \end_inset -) defined by: +) is defined by: \begin_inset Formula \[ a_{1}^{:\text{Array}_{n_{1}}^{A}}\pplus a_{2}^{:\text{Array}_{n_{2}}^{A}}\triangleq i^{:\text{Int}_{[0,n_{1}+n_{2}-1]}}\rightarrow\begin{cases} @@ -32095,7 +32234,7 @@ n_{1}\leq i Array[A] = +def f2[A: ClassTag]: List[A] => Array[A] = { \end_layout \begin_layout Plain Layout -{ case List() => Array() + case List() => Array() \end_layout \begin_layout Plain Layout - case x :: s => Array(x) ++ f2.apply(s) + case x :: s => Array(x) ++ f2.apply(s) \end_layout \begin_layout Plain Layout @@ -32604,12 +32776,16 @@ List . +\begin_inset Formula $\square$ +\end_inset + + \end_layout \begin_layout Standard -Since arrays and lists are isomorphic as types, the concatenation for lists - is associative as long as we show that the concatenation operation for - lists is isomorphic to that we defined for arrays. +Since arrays and lists are isomorphic as types, it will follow that the + concatenation for lists is associative if we show that the concatenation + operation for lists is isomorphic to that we defined for arrays. \end_layout \begin_layout Subsubsection @@ -32653,12 +32829,7 @@ status open \begin_layout Plain Layout -def concat[A](p: List[A], q: List[A]) -\end_layout - -\begin_layout Plain Layout - - : List[A] = p match { +def concat[A](p: List[A], q: List[A]): List[A] = p match { \end_layout \begin_layout Plain Layout @@ -33949,7 +34120,7 @@ res0: Double = 123.0 \begin_layout Plain Layout -scala> convertNumber(123:Short) +scala> convertNumber(123: Short) \end_layout \begin_layout Plain Layout @@ -34267,7 +34438,7 @@ constant functor!example of use \end_inset - but instead contained values, e.g., of type + but instead contained values of type \begin_inset listings inline true status open @@ -35785,7 +35956,7 @@ Type of instance values \series bold \size small -Inductive form +Symbolic form \end_layout \end_inset @@ -36370,7 +36541,7 @@ Pointed \end_inset , we need to use a higher-order type function denoted by -\begin_inset Formula $P^{A,B,F^{\bullet}}$ +\begin_inset Formula $P^{A,B,F}$ \end_inset in Table @@ -36389,11 +36560,11 @@ noprefix "false" . 
The type -\begin_inset Formula $P^{A,B,F^{\bullet}}$ +\begin_inset Formula $P^{A,B,F}$ \end_inset is parameterized by a type constructor -\begin_inset Formula $F^{\bullet}$ +\begin_inset Formula $F$ \end_inset as well as by extra type parameters @@ -36618,7 +36789,7 @@ noprefix "false" \end_inset - that develops some advanced techniques for reasoning about + after developing some advanced techniques for reasoning about \begin_inset Formula $P$ \end_inset @@ -36735,7 +36906,7 @@ Consider a recursive type \end_inset , where the functor -\begin_inset Formula $S^{\bullet}$ +\begin_inset Formula $S$ \end_inset @@ -36901,7 +37072,7 @@ triangleq \begin_layout Plain Layout def tcS: TC[A] => TC[S[A]] = ... - // Compute instances for S[A] from instances of A. + // Compute instances for S[A] given instances for A. \end_layout \begin_layout Plain Layout diff --git a/sofp-src/lyx/sofp.lyx b/sofp-src/lyx/sofp.lyx index 197d6b757..b6370a52a 100644 --- a/sofp-src/lyx/sofp.lyx +++ b/sofp-src/lyx/sofp.lyx @@ -602,7 +602,8 @@ The book's topics include working with FP-style collections; reasoning about recursive functions and types; the Curry-Howard correspondence; laws, structura l analysis, and code for functors, monads, and other typeclasses based on exponential-polynomial data types; techniques of symbolic derivation and - proof; free typeclass constructions; and parametricity theorems. + proof; free typeclass constructions; and practical applications of parametricit +y. \begin_inset Newline newline \end_inset diff --git a/sofp-src/scripts/make_pdflatex_sources.sh b/sofp-src/scripts/make_pdflatex_sources.sh index 5d94f547f..017b03fba 100644 --- a/sofp-src/scripts/make_pdflatex_sources.sh +++ b/sofp-src/scripts/make_pdflatex_sources.sh @@ -11,7 +11,7 @@ function add_color { local texsrc="$1" # Insert color comments into displayed equation arrays. Also, in some places the green color was inserted; replace by `greenunder`. # Example of inserted color: {\color{greenunder}\text{outer-interchange law for }M:}\quad & - LC_ALL=C sed -i.bak -e 's|\\color{green}|\\color{greenunder}|; s|^\(.*\\text{[^}]*}.*:\)\( *\\quad \& \)|{\\color{greenunder}\1}\2|; s|\(\& *\\quad\)\(.*\\text{[^}]*}.*: *\)\(\\quad\\\\\)$|\1{\\color{greenunder}\2}\3|' "$texsrc" + LC_ALL=C sed -i.bak -e 's|\\color{green}|\\color{greenunder}|; s|^\(.*\\text{[^}]*}.*:\)\( *\\quad \& \)|{\\color{greenunder}\1}\2|; s|\(\& *\\quad\)\(.*\\text{[^}]*}.*: *\)\(\\quad\(\\nonumber \)*\\\\\)$|\1{\\color{greenunder}\2}\3|' "$texsrc" # Insert color background into all displayed equations. This is disabled because it does not always produce visually good results. if false; then LC_ALL=C sed -i.bak -E -e ' s!\\begin\{(align.?|equation)\}!\\begin{empheq}[box=\\mymathbgbox]{\1}!; s!\\end\{(align.?|equation)\}!\\end{empheq}!; ' "$texsrc" diff --git a/sofp-src/tex/chapter3-picture.pdf b/sofp-src/tex/chapter3-picture.pdf index b12eb0bccc5ad8f981a3a3c6b067a701e1426234..caf3ffc2c23564fec3368778be68a58c0904394a 100644 GIT binary patch delta 197 zcmX?La=>Imod}n?uAym&p^24=<>U?#LuBq@ksrJ!7G}m~hGr?c=84Isn@@8yKk@7^rJ<>HFrVxFnXOYPeV#85o%uKviu1A^w^%*2vA!&CJxr!p+pw z($L(%(Ad<`*wNL|$-vpn#l*?bz|MxCidZf?JFeoA#G;alqSQ1lGb1x|E>%@me>W}w DH5W32 delta 197 zcmX?La=>Imod}nyu7OF2k)f5L`Q#1}LuBq@ksrJksg{Wr=B8%4CPrrFn@@3xvnwy$v8yKk@7^rJ<>HFrVxFnXOYPeV#85o%uKviu1A^w^%*1*Wc%*4sa+|0?$ z(!$i-$<55o)WXco($dAy#L~pY($0pUidZf?JFeoA#G;alqSQ1lGoVwrR8?L5-M9b? 
CcQQi& diff --git a/sofp-src/tex/sofp-applicative.tex b/sofp-src/tex/sofp-applicative.tex index 8f458a6db..5d512b67f 100644 --- a/sofp-src/tex/sofp-applicative.tex +++ b/sofp-src/tex/sofp-applicative.tex @@ -34,12 +34,9 @@ \subsection{Generalizing the \texttt{zip} method from sequences to other types} \[ \text{zip}:L^{A}\times L^{B}\rightarrow L^{A\times B}\quad. \] -Using this type signature, the \lstinline!zip! operation may be implemented -for many type constructors, not only for \lstinline!List!-like collections. In order to ensure that the implementation of \lstinline!zip! is useful and safe, we will establish and verify the laws of the \lstinline!zip! -operation later in this chapter. For now, let us look at some examples -of implementing a \lstinline!zip! operation. +operation later in this chapter. For now, let us look at some examples. \subsubsection{Example \label{subsec:Example-applicative-not-monad}\ref{subsec:Example-applicative-not-monad}\index{examples (with code)}} @@ -76,10 +73,10 @@ \subsubsection{Example \label{subsec:Example-applicative-tree}\ref{subsec:Exampl should evaluate to the tree {\tiny{}}{\tiny{} \Tree[ [ [ $a\times d$ ] [ $b\times e$ ] ] [ $c\times f$ ] ] }. If a subtree of \lstinline!ta! is a \lstinline!Leaf(x)! while the corresponding subtree of \lstinline!tb! is a \lstinline!Branch!, -the value \lstinline!x! must be replicated to match the subtree of -\lstinline!tb!. So, the result of evaluating {\tiny{}}zip({\tiny{} \Tree[ [ $b$ ] [ $c$ ] ] }, {\tiny{} \Tree[ [ [ $a$ ] [ [ $d$ ] [ $e$ ] ] ] [ $f$ ] ] }) +we should replicate the value \lstinline!x! as needed to match the +subtree of \lstinline!tb!. So, the result of evaluating {\tiny{}}zip({\tiny{} \Tree[ [ $b$ ] [ $c$ ] ] }, {\tiny{} \Tree[ [ [ $a$ ] [ [ $d$ ] [ $e$ ] ] ] [ $f$ ] ] }) should be {\tiny{}}{\tiny{} \Tree[ [ [ $b\times a$ ] [ [ $b\times d$ ] [ $b\times e$ ] ] ] [ $c\times f$ ] ] } -with replicated $b$. +with $b$ replicated 3 times. \subparagraph{Solution} @@ -178,7 +175,7 @@ \subsubsection{Example \label{subsec:Example-applicative-profunctor}\ref{subsec: \subsection{Gathering all errors during computations\label{subsec:Programs-that-accumulate-errors}} A monadic program using pass/fail monads must stop at the first failure: -the code \lstinline!flatMap(x => expr)! cannot start evaluating \lstinline!expr! +the code \lstinline!flatMap(x => expr)! cannot evaluate \lstinline!expr! if a previous computation failed to produce a value for \lstinline!x!. However, if some pass/fail computations are independent of each other\textsf{'}s results, we may wish to run all those computations and gather all @@ -207,7 +204,7 @@ \subsection{Gathering all errors during computations\label{subsec:Programs-that- a \lstinline!map2! function for the type constructor \lstinline!Result!: \begin{lstlisting} def map2[A, B, C](ra: Result[A], rb: Result[B])(f: (A, B) => C): Result[C] = (ra, rb) match { - case (Left(e1), Left(e2)) => Left(e1 + "\n" + e2) // Messages are separated by a newline. + case (Left(e1), Left(e2)) => Left(e1 + "; " + e2) // Messages are separated by a semicolon. 
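  // If only one of the two computations failed, keep its error message unchanged.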
case (Left(e1), _) => Left(e1) case (_, Left(e2)) => Left(e2) case (Right(a), Right(b)) => Right(f(a, b)) @@ -220,8 +217,7 @@ \subsection{Gathering all errors during computations\label{subsec:Programs-that- p <- map2(div(1,0), div(2,0)) { (x, y) => (x, y) } z <- add(p._1, p._2) } yield z -res1: Either[String, Int] = Left(error: 1 / 0 -error: 2 / 0) +res1: Either[String, Int] = Left(error: 1 / 0; error: 2 / 0) \end{lstlisting} The result of \lstinline!map2! is used in further monadic computations. In this way, we can combine code that gathers many errors with ordinary @@ -260,10 +256,10 @@ \subsection{Gathering all errors during computations\label{subsec:Programs-that- Instead, our code combines both error messages, preserving more information. Comparing the code of \lstinline!map2! and \lstinline!zip!, we find -only one difference: the code of \lstinline!zip! can be obtained -from the code of \lstinline!map2! if we replace \lstinline!f! by -an identity function. We will see later that this correspondence between -\lstinline!zip! and \lstinline!map2! works for all applicative functors. +that the code of \lstinline!zip! can be obtained from the code of +\lstinline!map2! if we replace \lstinline!f! by an identity function. +We will see later that this correspondence between \lstinline!zip! +and \lstinline!map2! works for all applicative functors. \subsection{Monadic programs with independent effects\label{subsec:Monadic-programs-with-independent-effects-future-applicative}} @@ -443,7 +439,7 @@ \subsection{Data validation with error reporting} For simplicity, assume that the required data structure is a case class, e.g.: \begin{lstlisting} -final case class MyData(userId: Long, userName: String, userEmails: List[String]) +final case class MyData(userId: Long, name: String, emails: List[String]) \end{lstlisting} More generally, consider the type $A\times B\times C$ containing values of some chosen types $A$, $B$, and $C$: @@ -531,8 +527,8 @@ \subsection{Implementing the functions \texttt{map2}, \texttt{map3}, etc. The \[ \text{zip}:L^{A}\times L^{A}\rightarrow L^{A\times A}\quad,\quad\quad\text{zip}_{3}:L^{A}\times L^{A}\times L^{A}\rightarrow L^{A\times A\times A}\quad. \] -We can now implement a general function ($\text{zip}_{n}$) that uses -a list of values of type $L^{A}$: +We can write a general function ($\text{zip}_{n}$) that uses a list +of values of type $L^{A}$: \begin{lstlisting} def zipN[A](xs: List[Either[String, A]]): Either[String, List[A]] = xs match { case Nil => Right(Nil) @@ -546,9 +542,9 @@ \subsection{Implementing the functions \texttt{map2}, \texttt{map3}, etc. The If we wanted to define general functions \lstinline!zipN! and \lstinline!mapN! that could take $N$ arguments of arbitrary types (and not all of -type $A$), we would need to use techniques of dependent-type programming, +type $A$), we would need to use techniques of type-level programming, which is beyond the scope of this book. We will now describe a simpler -solution that implements \lstinline!mapN! via a helper function (\lstinline!ap!) +solution implementing \lstinline!mapN! via a helper function (\lstinline!ap!) that performs an inductive step expressing $\text{map}_{N}$ through $\text{map}_{N-1}$. @@ -589,7 +585,7 @@ \subsection{Implementing the functions \texttt{map2}, \texttt{map3}, etc. 
The def fmap2[A, B, C](f: A => B => C): L[A] => L[B] => L[C] = { la: L[A] => ap[B, C](la.map(f)) } \end{lstlisting} Written in the point-free style using the code notation, this definition -looks like this: +becomes: \[ \text{fmap}_{2}(f)\triangleq f^{\uparrow L}\bef\text{ap}_{L}\quad. \] @@ -639,7 +635,7 @@ \subsection{Implementing the functions \texttt{map2}, \texttt{map3}, etc. The analogous to \lstinline!map2! must have a different type signature. For instance, consider the type constructor $L$ defined in Example~\ref{subsec:Example-applicative-profunctor}. We cannot define \lstinline!map2! as a composition of \lstinline!zip! -and \lstinline!map! because $L^{A}$ does not have a \lstinline!map! +and \lstinline!map! because $L$ does not have a \lstinline!map! method (as $L$ is not covariant in $A$). Instead, we note that $L$ is a profunctor\index{profunctor} and supports an \lstinline!xmap! method: @@ -662,11 +658,13 @@ \subsection{The applicative \texttt{Reader} functor\label{subsec:The-applicative is applicative and supports a \lstinline!zip! method as well: \begin{lstlisting} type Reader[A] = E => A // The fixed type E must be already defined. -def zip[A, B](ra: Reader[A], rb: Reader[B]): Reader[(A, B)] = { e => (ra(e), rb(e)) } +def zip[A, B](ra: Reader[A], rb: Reader[B]): Reader[(A, B)] = { + e => (ra(e), rb(e)) } \end{lstlisting} The \lstinline!map2! method is implemented similarly: \begin{lstlisting} -def map2[A, B, C](ra: Reader[A], rb: Reader[B])(f: A => B => C): Reader[C] = { e => f(ra(e))(rb(e)) } +def map2[A, B, C](ra: Reader[A], rb: Reader[B])(f: A => B => C): Reader[C] = { + e => f(ra(e))(rb(e)) } \end{lstlisting} These are the \emph{only} fully parametric implementations of the type signatures of \lstinline!zip! and \lstinline!map2! for \lstinline!Reader!. @@ -674,10 +672,11 @@ \subsection{The applicative \texttt{Reader} functor\label{subsec:The-applicative Since \lstinline!Reader! is also a monad (see Section~\ref{subsec:The-Reader-monad}), we may implement the type signature of \lstinline!map2! as: \begin{lstlisting} -def map2[A, B, C](ra: Reader[A], rb: Reader[B])(f: A => B => C): Reader[C] = for { - x <- ra - y <- rb -} yield f(x)(y) +def map2[A, B, C](ra: Reader[A], rb: Reader[B])(f: A => B => C): Reader[C] = + for { + x <- ra + y <- rb + } yield f(x)(y) \end{lstlisting} This code is fully parametric. So, this \lstinline!map2! must be equal to the direct implementation shown above. @@ -688,8 +687,9 @@ \subsection{The applicative \texttt{Reader} functor\label{subsec:The-applicative effect is a dependency on a constant \textsf{``}environment\textsf{''} (a value of type $E$). All \lstinline!Reader! computations in a given functor block will read the same value of the \textsf{``}environment\textsf{''}. So, the \lstinline!Reader! -effects are always independent, and a \lstinline!map2! function does -not need to be implemented separately (it can be expressed via \lstinline!flatMap!). +effects are always independent, and its \lstinline!map2! function +does not need to be implemented separately (it can be expressed via +\lstinline!flatMap!). \subsection{Single-traversal \texttt{fold} operations. I. Applicative \textquotedblleft fold fusion\textquotedblright\label{subsec:Single-traversal-fold-operations-applicative-fold-fusion}} @@ -794,7 +794,7 @@ \subsection{Single-traversal \texttt{fold} operations. I. 
Applicative \textquote library\footnote{See \texttt{\href{https://github.com/Chymyst/curryhoward}{https://github.com/Chymyst/curryhoward}}} to generate the code automatically: \begin{lstlisting} -import io.chymyst.ch._ // Import some symbols from the `curryhoward` library. +import io.chymyst.ch._ // Import from the `curryhoward` library. def zipFold[Z, R, S](op1: Fold[Z, R], op2: Fold[Z, S]): Fold[Z, (R, S)] = implement \end{lstlisting} @@ -858,7 +858,7 @@ \subsection{Single-traversal \texttt{fold} operations. I. Applicative \textquote def map2[S, B, C](other: FoldOp[Z, S, B])(f: (A, B) => C): FoldOp[Z, (R, S), C] = implement } // The type signatures unambiguously determine the implementations. \end{lstlisting} -The \lstinline!map! and \lstinline!map2! methods exist because \lstinline!FoldOp[Z, R, A]! +The \lstinline!map! method exists because \lstinline!FoldOp[Z, R, A]! is covariant with respect to \lstinline!A!. We can now implement extension methods allowing us to do arithmetic @@ -868,7 +868,7 @@ \subsection{Single-traversal \texttt{fold} operations. I. Applicative \textquote def binaryOp[S](other: FoldOp[Z, S, Double])(f: (Double, Double) => Double): FoldOp[Z, (R, S), Double] = op.map2(other) { case (x, y) => f(x, y) } def +[S](other: FoldOp[Z, S, Double]): FoldOp[Z, (R, S), Double] = op.binaryOp(other)(_ + _) def /[S](other: FoldOp[Z, S, Double]): FoldOp[Z, (R, S), Double] = op.binaryOp(other)(_ / _) -} // May need to define more operations here. +} // We may define more operations here. \end{lstlisting} After these definitions, the following code will work: @@ -1165,7 +1165,7 @@ \subsection{Parsing with applicative and monadic combinators\label{subsec:Parsin def map[B](f: A => B): P[B] = P { s => val (result, rest) = parserA.run(s) (result.map(f), rest) - } // + } def zipLeft [B](parserB: P[B]): P[A] = (parserA zip parserB).map(_._1) def zipRight[B](parserB: P[B]): P[B] = (parserA zip parserB).map(_._2) } @@ -1206,9 +1206,9 @@ \subsection{Parsing with applicative and monadic combinators\label{subsec:Parsin scala> p2.run("121")._1.right.get res9: Int = 11 \end{lstlisting} -The recursion stops only because the operation \textsf{``}\lstinline!or!\textsf{''} -does not evaluate its second parser when the first parser succeeds. -Otherwise the code would go into an infinite loop. +The recursion stops because the operation \textsf{``}\lstinline!or!\textsf{''} does +not evaluate its second parser when the first parser succeeds. Otherwise +the code would go into an infinite loop. We are able to parse the toy language using only applicative combinators. Let us now consider a more complicated language that \emph{requires} @@ -1315,7 +1315,7 @@ \subsection{Exercises\index{exercises}} \subsubsection{Exercise \label{subsec:Exercise-applicative-I}\ref{subsec:Exercise-applicative-I}} Implement \lstinline!map2! (or \lstinline!xmap2! if appropriate) -for the following type constructors $F^{A}$: +for the following type constructors $F$: \textbf{(a)} $F^{A}\triangleq\bbnum 1+A+A\times A$. @@ -1323,13 +1323,13 @@ \subsubsection{Exercise \label{subsec:Exercise-applicative-I}\ref{subsec:Exercis \textbf{(c)} $F^{A}\triangleq Z\times A\rightarrow A$. -\textbf{(d)} $F^{A}\triangleq A\rightarrow A\times Z$ where $Z$ -is a \lstinline!Monoid!. +\textbf{(d)} $F^{A}\triangleq A\rightarrow A\times Z$ when $Z$ has +a \lstinline!Monoid! instance. \subsubsection{Exercise \label{subsec:Exercise-applicative-I-1-1}\ref{subsec:Exercise-applicative-I-1-1}} -Implement a \lstinline!zip! 
method for a ternary tree \lstinline!T3[A]! -with extra data on branches: +Implement a \lstinline!zip! method for a \textbf{ternary tree}\index{ternary tree} +\lstinline!T3! with extra data on branches: \begin{lstlisting} sealed trait T3[A] case class Leaf[A](a: A) extends T3[A] @@ -1356,10 +1356,10 @@ \subsubsection{Exercise \label{subsec:Exercise-applicative-I-2}\ref{subsec:Exerc \subsubsection{Exercise \label{subsec:Exercise-applicative-I-3}\ref{subsec:Exercise-applicative-I-3}} -Define a \textsf{``}regexp extractor\textsf{''} as a type constructor $R^{A}$ describing -extraction of various data from strings; the extracted data has type -\lstinline!Option[A]!. Implement \lstinline!zip! and \lstinline!map2! -for $R^{A}$. +Define a \textsf{``}regexp extractor\textsf{''} as a type constructor $R$ describing +extraction of various data from a string, given a regular expression. +The extracted data has type \lstinline!Option[A]!. Implement \lstinline!zip! +and \lstinline!map2! for $R$. \subsubsection{Exercise \label{subsec:Exercise-applicative-I-5}\ref{subsec:Exercise-applicative-I-5}} @@ -1414,7 +1414,7 @@ \subsubsection{Statement \label{subsec:Statement-map2-zip-equivalence}\ref{subse to \lstinline!zip! and back). \textbf{(1)} Given any function $\text{map}_{2}:L^{A}\times L^{B}\rightarrow\left(A\times B\rightarrow C\right)\rightarrow L^{C}$ -satisfying the naturality law: +satisfying the following naturality law: \[ \text{map}_{2}\,(p^{:L^{A}}\times q^{:L^{B}})(f^{:A\times B\rightarrow C})\triangleright(g^{:C\rightarrow D})^{\uparrow L}=\text{map}_{2}\,(p\times q)(f\bef g)\quad, \] @@ -1427,8 +1427,8 @@ \subsubsection{Statement \label{subsec:Statement-map2-zip-equivalence}\ref{subse \[ \text{map}_{2}^{\prime}\,(p^{:L^{A}}\times q^{:L^{B}})(f^{:A\times B\rightarrow C})\triangleq(p\times q)\triangleright\text{zip}\triangleright f^{\uparrow L}\quad. \] -Then we need to show that $\text{map}_{2}^{\prime}=\text{map}_{2}$. -We apply $\text{map}_{2}^{\prime}$ to arbitrary arguments and write: +We need to show that $\text{map}_{2}^{\prime}=\text{map}_{2}$. Apply +$\text{map}_{2}^{\prime}$ to arbitrary arguments: \begin{align*} & \text{map}_{2}^{\prime}\,(p\times q)(f)=(p\times q)\triangleright\text{zip}\triangleright f^{\uparrow L}\\ & =\text{map}_{2}\,(p\times q)(\text{id})\,\gunderline{\triangleright\,f^{\uparrow L}}\\ @@ -1445,8 +1445,8 @@ \subsubsection{Statement \label{subsec:Statement-map2-zip-equivalence}\ref{subse \[ \text{zip}^{\prime}\,(p^{:L^{A}}\times q^{:L^{B}})\triangleq\text{map}_{2}\,(p\times q)(\text{id}^{:A\times B\rightarrow A\times B})\quad. \] -Then we need to show that $\text{zip}^{\prime}=\text{zip}$. We apply -$\text{zip}^{\prime}$ to arbitrary arguments $p$, $q$ and write: +We need to show that $\text{zip}^{\prime}=\text{zip}$. Apply $\text{zip}^{\prime}$ +to arbitrary arguments $p$, $q$: \begin{align*} & \text{zip}^{\prime}\,(p^{:L^{A}}\times q^{:L^{B}})=\text{map}_{2}\,(p\times q)(\text{id}^{:A\times B\rightarrow A\times B})=(p\times q)\triangleright\text{zip}\triangleright\text{id}^{\uparrow L}\\ & \quad=(p\times q)\triangleright\text{zip}=\text{zip}\,(p\times q)\quad. @@ -1464,7 +1464,7 @@ \subsubsection{Statement \label{subsec:Statement-map2-zip-equivalence}\ref{subse The equivalence of \lstinline!map2! and \lstinline!zip! 
follows a pattern similar to one shown in Section~\ref{subsec:Yoneda-identities}: a function with one type parameter (a natural transformation) is equivalent -to a function with two type parameters if a naturality law holds with +to a function with two type parameters obeying a naturality law with respect to one of those type parameters. Here \lstinline!zip! is playing the role of the natural transformation, and \lstinline!map2! is a function obeying a naturality law. The proof of Statement~\ref{subsec:Statement-map2-zip-equivalence} @@ -1473,13 +1473,12 @@ \subsubsection{Statement \label{subsec:Statement-map2-zip-equivalence}\ref{subse However, \lstinline!map2! is not a \textsf{``}lifting\textsf{''} in the sense of Section~\ref{subsec:Yoneda-identities}. The method \textsf{``}\lstinline!ap!\textsf{''} better fits the role of a lifting since its type signature ($\text{ap}:L^{A\rightarrow B}\rightarrow L^{A}\rightarrow L^{B}$) -transforms functions wrapped under $L$ (i.e., functions of type $L^{A\rightarrow B}$) +transforms functions wrapped under $L$ (i.e., values of type $L^{A\rightarrow B}$) to functions of type $L^{A}\rightarrow L^{B}$. So, let us prove that \lstinline!ap! is equivalent to \lstinline!map2!. Since the type signature of \lstinline!ap! is curried, it is more convenient to use the curried version (\lstinline!fmap2!) instead of \lstinline!map2!. -Let us write the relationship between \lstinline!ap! and \lstinline!fmap2! -in the code notation: +The relationship between \lstinline!ap! and \lstinline!fmap2! is: \[ \xymatrix{\xyScaleY{0.4pc}\xyScaleX{2pc} & F^{B\rightarrow C}\ar[rd]\sp(0.5){\text{ap}^{B,C}}\\ F^{A}\ar[ru]\sp(0.45){f^{\uparrow L}}\ar[rr]\sb(0.43){\text{fmap}_{2}\,(f^{:A\rightarrow B\rightarrow C})} & & \left(F^{B}\rightarrow F^{C}\right) @@ -1494,9 +1493,8 @@ \subsubsection{Statement \label{subsec:Statement-map2-zip-equivalence}\ref{subse \subsubsection{Statement \label{subsec:Statement-fmap2-equivalence-to-ap}\ref{subsec:Statement-fmap2-equivalence-to-ap} (equivalence of \lstinline!ap! and \lstinline!fmap2!)} -For any functor $L$ for which \lstinline!fmap2! or \lstinline!ap! -can be implemented, the type of functions \lstinline!ap! (type signature -$\text{ap}:L^{A\rightarrow B}\rightarrow L^{A}\rightarrow L^{B}$) +For any functor $L$, the type of functions \lstinline!ap! (type +signature $\text{ap}:L^{A\rightarrow B}\rightarrow L^{A}\rightarrow L^{B}$) is equivalent to the type of functions \lstinline!fmap2! (type signature $\left(A\rightarrow B\rightarrow C\right)\rightarrow L^{A}\rightarrow L^{B}\rightarrow L^{C}$), assuming that the functions \lstinline!fmap2! satisfy the naturality @@ -1508,7 +1506,7 @@ \subsubsection{Statement \label{subsec:Statement-fmap2-equivalence-to-ap}\ref{su to \lstinline!ap! and back). \textbf{(1)} Given any function $\text{fmap}_{2}:\left(A\rightarrow B\rightarrow C\right)\rightarrow L^{A}\rightarrow L^{B}\rightarrow L^{C}$ -satisfying the naturality law, +satisfying the following naturality law, \[ \text{fmap}_{2}\,(g^{:X\rightarrow A}\bef f^{:A\rightarrow B\rightarrow C})(p^{:L^{X}})=\text{fmap}_{2}\,(f)(p\triangleright g^{\uparrow L})\quad, \] @@ -1521,8 +1519,8 @@ \subsubsection{Statement \label{subsec:Statement-fmap2-equivalence-to-ap}\ref{su \[ \text{fmap}_{2}^{\prime}\,(f^{:A\rightarrow B\rightarrow C})(p^{:L^{A}})\triangleq p\triangleright f^{\uparrow L}\triangleright\text{ap}\quad. \] -Then we need to show that $\text{fmap}_{2}^{\prime}=\text{fmap}_{2}$. 
-We apply $\text{fmap}_{2}^{\prime}$ to arbitrary arguments and write: +We need to show that $\text{fmap}_{2}^{\prime}=\text{fmap}_{2}$. +Apply $\text{fmap}_{2}^{\prime}$ to arbitrary arguments: \begin{align*} & \text{fmap}_{2}^{\prime}\,(f^{:A\rightarrow B\rightarrow C})(p^{:L^{A}})=p\triangleright f^{\uparrow L}\triangleright\text{ap}\\ & =\text{fmap}_{2}\,(\text{id})(p\triangleright f^{\uparrow L})\\ @@ -1597,8 +1595,8 @@ \subsubsection{Statement \label{subsec:Statement-zip-ap-equivalence}\ref{subsec: \begin{equation} (\text{pair}^{:A\rightarrow B\rightarrow A\times B}\boxtimes\text{id}^{:B\rightarrow B})\bef\text{eval}^{:\left(B\rightarrow A\times B\right)\times B\rightarrow A\times B}=\text{id}^{:A\times B\rightarrow A\times B}\quad.\label{eq:pair-and-eval-property-derivation1} \end{equation} -To prove this property, apply both sides to arbitrary $a^{:A}$ and -$b^{:B}$: +To prove this property, apply both sides to a pair of arbitrary $a^{:A}$ +and $b^{:B}$: \[ (a^{:A}\times b^{:B})\triangleright(\text{pair}\boxtimes\text{id})\bef\text{eval}=\big((z^{:B}\rightarrow a\times z)\times b\big)\triangleright\text{eval}=(z^{:B}\rightarrow a\times z)(b)=a\times b\quad. \] @@ -1611,8 +1609,8 @@ \subsubsection{Statement \label{subsec:Statement-zip-ap-equivalence}\ref{subsec: \[ \text{zip}^{\prime}\,(p^{:L^{A}}\times q^{:L^{B}})\triangleq\text{ap}\,(p\triangleright\text{pair}^{\uparrow L})(q)\quad. \] -Then we need to show that $\text{zip}^{\prime}=\text{zip}$. We apply -$\text{zip}^{\prime}$ to arbitrary arguments and write: +We need to show that $\text{zip}^{\prime}=\text{zip}$. Apply $\text{zip}^{\prime}$ +to arbitrary arguments: \begin{align*} {\color{greenunder}\text{expect to equal }\text{zip}\,(p\times q):}\quad & \text{zip}^{\prime}\,(p^{:L^{A}}\times q^{:L^{B}})=\text{ap}\,(p\triangleright\text{pair}^{\uparrow L})(q)\\ {\color{greenunder}\text{definition of }\text{ap}:}\quad & =\big((p\triangleright\text{pair}^{\uparrow L})\times q\big)\triangleright\text{zip}\bef\text{eval}^{\uparrow L}\\ @@ -1652,7 +1650,7 @@ \subsubsection{Statement \label{subsec:Statement-zip-ap-equivalence}\ref{subsec: \[ \text{ap}^{\prime}\,(r^{:L^{A\rightarrow B}})(p^{:L^{A}})\triangleq(r\times p)\triangleright\text{zip}\triangleright\text{eval}^{\uparrow L}\quad. \] -Then we need to show that $\text{ap}^{\prime}=\text{ap}$. We write: +We need to show that $\text{ap}^{\prime}=\text{ap}$. We write: \begin{align*} & \text{ap}^{\prime}\,(r)(p)=(r\times p)\triangleright\text{zip}\triangleright\text{eval}^{\uparrow L}\\ & =\big(\text{ap}\,(r\triangleright\text{pair}^{\uparrow L})(p)\gunderline{\big)\triangleright\text{eval}^{\uparrow L}}\\ @@ -1714,28 +1712,28 @@ \subsection{The \texttt{Zippable} and \texttt{Applicative} typeclasses\label{sub def ap[A, B](lf: L[A => B], la: L[A]): L[B] = zip(lf, la).map { case (f, a) => f(a) } } \end{lstlisting} -Instead of using \lstinline!zip!, either \lstinline!map2! or \lstinline!ap! +Instead of \lstinline!zip!, either \lstinline!map2! or \lstinline!ap! could be used to implement the other methods. -In addition to these methods, it is helpful to require a \lstinline!pure! -method for the functor \lstinline!L!. The resulting typeclass\index{typeclass!Applicative@\texttt{Applicative}} +In addition to \lstinline!zip!, it is helpful to require that the +functor \lstinline!L! should have a \lstinline!pure! method. The +resulting typeclass\index{typeclass!Applicative@\texttt{Applicative}} is known as \lstinline!Applicative!. The \lstinline!pure! 
method is equivalent to a \textsf{``}wrapped unit\textsf{''} (denoted \lstinline!wu!, see Section~\ref{subsec:Pointed-functors-motivation-equivalence}). So, the simplest definition of the \lstinline!Applicative! typeclass -contains just these two methods: +contains just those two methods: \begin{lstlisting} trait Applicative[L[_]] { def zip[A, B](la: L[A], lb: L[B]): L[(A, B)] def wu: L[Unit] } \end{lstlisting} -Other methods (\lstinline!map2!, \lstinline!ap!, \lstinline!pure!) -can be defined separately (as extension methods) using the functor -instance for \lstinline!L!. However, this definition of the \lstinline!Applicative! -typeclass can be used also with type constructors \lstinline!L[A]! -that are not covariant in \lstinline!A!. So, we will use this definition -later in this chapter. +The functions \lstinline!map2!, \lstinline!ap!, and \lstinline!pure! +can be defined separately (as extension methods) using \lstinline!L!\textsf{'}s +functor instance. However, this definition of the \lstinline!Applicative! +typeclass can be used also with type constructors that are not covariant. +So, we will prefer this typeclass definition in this book. \subsection{Motivation for the laws of \texttt{map2}\label{subsec:Motivation-for-the-laws-of-map2}} @@ -1888,8 +1886,7 @@ \subsection{Motivation for the laws of \texttt{map2}\label{subsec:Motivation-for \end{align*} To simplify and analyze the laws of \lstinline!map2!, we will now -derive the laws of \lstinline!zip! that will follow once we express -\lstinline!map2! via \lstinline!zip!. +derive the laws of \lstinline!zip!. \subsection{Deriving the laws of \texttt{zip} from the laws of \texttt{map2}\label{subsec:Deriving-the-laws-of-zip}} @@ -1940,8 +1937,8 @@ \subsection{Deriving the laws of \texttt{zip} from the laws of \texttt{map2}\lab $A\times(B\times C)$ to $A\times B\times C$. Since $L$ is a functor, the lifted functions $\varepsilon_{1,23}^{\uparrow L}$ and $\varepsilon_{12,3}^{\uparrow L}$ produce equivalences between the types $L^{(A\times B)\times C}$, -$L^{A\times(B\times C)}$, and $L^{A\times B\times C}$. With these -equivalences in mind, we rewrite the associativity law in a simpler +$L^{A\times(B\times C)}$, and $L^{A\times B\times C}$. With those +equivalences implied, we rewrite the associativity law in a simpler form:\index{associativity law!of zip@of \texttt{zip}} \[ \text{zip}\big(p\times\text{zip}\left(q\times r\right)\big)\cong\text{zip}\big(\text{zip}\left(p\times q\right)\times r\big)\quad. @@ -1956,7 +1953,7 @@ \subsection{Deriving the laws of \texttt{zip} from the laws of \texttt{map2}\lab p\,\,\text{zip}\,\,(q\,\,\text{zip}\,\,r)\cong(p\,\,\text{zip}\,\,q)\,\,\text{zip}\,\,r\quad.\label{eq:zip-associativity-law} \end{equation} In Eq.~(\ref{eq:zip-associativity-law}), the symbol $\cong$ denotes -equality up to the type equivalence. To obtain a real equation, one +equality up to a type equivalence. To obtain a real equation, one would need to apply $\varepsilon_{1,23}^{\uparrow L}$ and $\varepsilon_{12,3}^{\uparrow L}$ at appropriate places. Apart from that, the law~(\ref{eq:zip-associativity-law}) has the usual form of an associativity law for a binary operation. @@ -1965,7 +1962,7 @@ \subsection{Deriving the laws of \texttt{zip} from the laws of \texttt{map2}\lab we help build the intuition about applicative laws. We also save time because we do not write out a number of tuple-swapping functions. To avoid errors, derivations using this technique must first check -that all types match up to tuple-swapping isomorphisms. 
+that all types match (up to tuple-swapping isomorphisms). Turn to the identity laws. Substitute Eq.~(\ref{eq:express-map2-via-zip}) into \lstinline!map2!\textsf{'}s left identity law: @@ -2019,10 +2016,10 @@ \subsection{Deriving the laws of \texttt{zip} from the laws of \texttt{map2}\lab At the same time, we may apply the function $(1\times b^{:B}\rightarrow a\times b)^{\uparrow L}$, this time with an arbitrary $a^{:A}$, to both sides of Eq.~(\ref{eq:zip-left-identity-law}) and recover Eq.~(\ref{eq:left-identity-zip-derivation1}). So, we -have justified the simplification of the left identity law to Eq.~(\ref{eq:zip-left-identity-law}). +have justified the equivalence of the left identity law and Eq.~(\ref{eq:zip-left-identity-law}). Since $L$ is a functor, the conversion function $\text{ilu}^{\uparrow L}$ -implements the type equivalence $L^{B}\cong L^{\bbnum 1\times B}$. +implements a type equivalence $L^{B}\cong L^{\bbnum 1\times B}$. Denoting this equivalence by $\cong$ and using the infix syntax for \lstinline!zip!, we rewrite the left identity law as: \[ @@ -2046,9 +2043,9 @@ \subsection{Deriving the laws of \texttt{zip} from the laws of \texttt{map2}\lab So, \lstinline!wu! is the \textsf{``}empty value\textsf{''} for the binary operation \lstinline!zip!. -We have shown that the laws of \lstinline!map2! can be simplified -when formulated via \lstinline!zip!. In that formulation, the laws -of applicative functors are similar to the laws of a \emph{monoid}\index{monoid} +We have shown that the laws of \lstinline!map2! are simpler when +formulated via \lstinline!zip!. In that formulation, the laws of +applicative functors are similar to the laws of a \emph{monoid}\index{monoid} (see Example~\ref{subsec:tc-Example-Monoids}): the binary operation is \lstinline!zip! and the empty value is \lstinline!wu!. @@ -2083,7 +2080,7 @@ \subsection{Commutative applicative functors and parallel computation\label{subs \subsubsection{Definition \label{subsec:Definition-commutative-applicative}\ref{subsec:Definition-commutative-applicative}} -An applicative functor $L^{\bullet}$ is \textbf{commutative}\index{applicative functor!commutative} +An applicative functor $L$ is \textbf{commutative}\index{applicative functor!commutative} if its \lstinline!zip! operation satisfies the commutativity law\index{commutativity law!of zip@of \texttt{zip}} (in addition to the standard applicative laws): \begin{align} @@ -2127,16 +2124,17 @@ \subsubsection{Definition \label{subsec:Definition-commutative-applicative}\ref{ from Section~\ref{subsec:Parsing-with-applicative-and-monadic-parsers}. We cannot expect to be able to parse different parts of a file in parallel, because correct parsing often depends on the success or -failure of parsing of previous portions of the data. +failure of parsing of previous text. An example of a data structure that can automatically parallelize computations is Apache Spark\textsf{'}s \lstinline!RDD! class.\index{Spark\textsf{'}s RDD data type@\texttt{Spark}\textsf{'}s \texttt{RDD} data type} -It is important that \lstinline!RDD[_]! is a commutative applicative -functor but \emph{not} a monad.\footnote{The \texttt{Spark} library does not support values of type \lstinline!RDD[RDD[A]]!. -The \lstinline!RDD! class\textsf{'}s \lstinline!cartesian! method has the -type signature corresponding to \lstinline!zip!. A method called -\textsf{``}\lstinline!flatMap!\textsf{''} exists but does not have the type signature -of a monad\textsf{'}s \lstinline!flatMap!. 
See \texttt{\href{https://spark.apache.org/docs/3.2.1/rdd-programming-guide.html}{https://spark.apache.org/docs/3.2.1/rdd-programming-guide.html}}} This agrees with the intuition that monadic programs are not automatically +It is important that \lstinline!RDD! is a commutative applicative +functor but \emph{not} a monad.\footnote{The \texttt{Spark} library does not support working with values of +type \lstinline!RDD[RDD[A]]!. The class \lstinline!RDD! has a method +called \lstinline!cartesian! with the type signature of \lstinline!zip!. +A method called \textsf{``}\lstinline!flatMap!\textsf{''} exists but does not have +the type signature required for a monad\textsf{'}s \lstinline!flatMap! for +\lstinline!RDD!. See \texttt{\href{https://spark.apache.org/docs/3.2.1/rdd-programming-guide.html}{https://spark.apache.org/docs/3.2.1/rdd-programming-guide.html}}} This agrees with the intuition that monadic programs are not automatically parallelizable. In many cases, a commutative applicative functor will also be a \emph{non-commutative} @@ -2212,8 +2210,8 @@ \subsubsection{Statement \label{subsec:Statement-associativity-law-of-zip-with-c \end{align*} This is the right-hand side of Eq.~(\ref{eq:associativity-law-of-zip-commutative-short}) -\textbf{(2)} Begin with the right-hand side of Eq.~(\ref{eq:associativity-law-of-zip-commutative-short}) -and apply the commutativity law: +\textbf{(2)} Apply the commutativity law to the right-hand side of +Eq.~(\ref{eq:associativity-law-of-zip-commutative-short}): \begin{align*} & r\,\,\text{zip}\,\,(\gunderline q\,\,\text{zip}\,\,\gunderline p)=\gunderline r\,\,\text{zip}\,\,\gunderline{(p\,\,\text{zip}\,\,q)}=(p\,\,\text{zip}\,\,q)\,\,\text{zip}\,\,r\\ {\color{greenunder}\text{use Eq.~(\ref{eq:associativity-law-of-zip-commutative-short})}:}\quad & \overset{!}{=}p\,\,\text{zip}\,\,(q\,\,\text{zip}\,\,r)\quad. @@ -2277,7 +2275,7 @@ \subsection{Constructions of applicative functors\label{subsec:Constructions-of- a constant functor, $L^{A}\triangleq Z$, the identity functor $L^{A}\triangleq A$, and the functor composition, $L^{A}\triangleq F^{G^{A}}$. -Given a fixed monoid type $Z$, the constant functor $L^{A}\triangleq Z$ +Given a fixed \emph{monoid} type $Z$, the constant functor $L^{A}\triangleq Z$ is applicative. To see this, consider the operation \lstinline!zip! of type $L^{A}\times L^{B}\rightarrow L^{A\times B}$, which in this case becomes just $Z\times Z\rightarrow Z$, the monoid $Z$\textsf{'}s \lstinline!combine! @@ -2288,9 +2286,9 @@ \subsection{Constructions of applicative functors\label{subsec:Constructions-of- is commutative. Comparing this with the corresponding monad construction, we note -that $L^{A}\triangleq Z$ is not a monad unless $Z=\bbnum 1$. For -an arbitrary monoid type $Z$, the functor $L^{A}\triangleq Z$ is -only a semimonad. +that $L^{A}\triangleq Z$ is \emph{not} a monad unless $Z=\bbnum 1$. +For an arbitrary monoid type $Z$, the functor $L^{A}\triangleq Z$ +is only a semimonad. The identity functor $L^{A}\triangleq A$ is applicative and commutative: the \lstinline!zip! operation is the identity function of type $A\times B\rightarrow A\times B$, @@ -2333,8 +2331,8 @@ \subsubsection{Statement \label{subsec:Statement-applicative-composition}\ref{su {\color{greenunder}\text{right identity law of }\text{zip}_{G}:}\quad & =p\triangleright(\gunderline{g\rightarrow g\,\triangleright}\,\text{iru}^{\uparrow G})^{\uparrow F}=p\triangleright(\text{iru}^{\uparrow G})^{\uparrow F}=p\triangleright\text{iru}^{\uparrow L}\quad. 
\end{align*} -To verify the associativity law, first substitute the definition of -$\text{zip}_{L}$ into one side: +To verify the associativity law, substitute the definition of $\text{zip}_{L}$ +into one side: \begin{align*} & \quad{\color{greenunder}\text{left-hand side}:}\quad\\ & \text{zip}_{L}\big(p\times\text{zip}_{L}(q\times r)\big)\triangleright\varepsilon_{1,23}^{\uparrow L}=\big(p\times\text{zip}_{L}(q\times r)\big)\triangleright\text{zip}_{F}\bef\text{zip}_{G}^{\uparrow F}\bef\gunderline{\varepsilon_{1,23}^{\uparrow L}}\\ @@ -2345,7 +2343,7 @@ \subsubsection{Statement \label{subsec:Statement-applicative-composition}\ref{su & \quad{\color{greenunder}\text{composition under }^{\uparrow F}:}\quad\\ & =\text{zip}_{F}\big(p\times\text{zip}_{F}(q\times r)\big)\triangleright\big(g\times(h\times j)\rightarrow\text{zip}_{G}(g\times\text{zip}_{G}(h\times j))\triangleright\varepsilon_{1,23}^{\uparrow G}\big)^{\uparrow F}\quad. \end{align*} -Now rewrite the right-hand side in a similar way: +Now rewrite the other side in a similar way: \begin{align*} & \quad{\color{greenunder}\text{right-hand side}:}\quad\\ & \text{zip}_{L}\big(\text{zip}_{L}(p\times q)\times r\big)\triangleright\varepsilon_{12,3}^{\uparrow L}=\big(\text{zip}_{L}(p\times q)\times r\big)\triangleright\text{zip}_{F}\bef\text{zip}_{G}^{\uparrow F}\bef\varepsilon_{12,3}^{\uparrow L}\\ @@ -2434,8 +2432,8 @@ \subsubsection{Statement \label{subsec:Statement-applicative-product}\ref{subsec The underlined expressions in both sides are equal due to associativity laws of $\text{zip}_{F}$ and $\text{zip}_{G}$. -To verify the commutativity law of $L$ assuming it holds for $F$ -and $G$: +To verify the commutativity law of $L$ when it holds for $F$ and +$G$: \begin{align*} & \quad{\color{greenunder}\text{expect to equal }\text{zip}_{L}\big((p\times q)\times(m\times n)\big):}\quad\\ & \text{zip}_{L}\big((m\times n)\times(p\times q)\big)\triangleright\text{swap}^{\uparrow L}\\ @@ -2450,17 +2448,17 @@ \subsubsection{Statement \label{subsec:Statement-applicative-product}\ref{subsec \paragraph{Co-products} -The co-product $F^{A}+G^{A}$ of two arbitrary applicative functors -$F$ and $G$ is not always applicative (just as with monads). One -case where the \lstinline!zip! method cannot be implemented for $F^{A}+G^{A}$ -was shown in Example~\ref{subsec:tc-Example-10}(b). However, the -following statements demonstrate that the type constructors $L^{A}\triangleq Z+F^{A}$ +The co-product $F+G$ of two arbitrary applicative functors $F$ and +$G$ is not always applicative (just as with monads). One case where +the \lstinline!zip! method cannot be implemented for $F+G$ was shown +in Example~\ref{subsec:tc-Example-10}(b). However, the following +statements demonstrate that the type constructors $L^{A}\triangleq Z+F^{A}$ and $L^{A}\triangleq A+F^{A}$ are applicative functors. 
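As a preview of the first of these constructions ($L^{A}\triangleq Z+F^{A}$),
here is a minimal Scala sketch in which the disjunction is encoded as
\lstinline!Either[Z, F[A]]!; the \lstinline!Monoid! and \lstinline!Applicative!
traits below are simplified stand-ins, and the names \lstinline!zipL! and
\lstinline!wuL! are chosen only for illustration:
\begin{lstlisting}
// A sketch only; simplified typeclasses rather than full definitions.
trait Monoid[Z] { def empty: Z; def combine(x: Z, y: Z): Z }
trait Applicative[F[_]] {
  def zip[A, B](fa: F[A], fb: F[B]): F[(A, B)]  // zip_F
  def wu: F[Unit]                               // the "wrapped unit"
}
// L[A] = Either[Z, F[A]] encodes Z + F^A; the Left part holds the monoid value.
def zipL[Z, F[_], A, B](p: Either[Z, F[A]], q: Either[Z, F[B]])(implicit
    mz: Monoid[Z], af: Applicative[F]): Either[Z, F[(A, B)]] = (p, q) match {
  case (Left(z1), Left(z2))   => Left(mz.combine(z1, z2)) // combine both Z values
  case (Left(z1), Right(_))   => Left(z1)  // cannot produce F[(A, B)] without both F values
  case (Right(_), Left(z2))   => Left(z2)
  case (Right(fa), Right(fb)) => Right(af.zip(fa, fb))    // zip within F
}
def wuL[Z, F[_]](implicit af: Applicative[F]): Either[Z, F[Unit]] = Right(af.wu)
\end{lstlisting}
When $Z$ is a monoid of error messages, this \lstinline!zip! accumulates the
errors from both arguments instead of keeping only the first one.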
\subsubsection{Statement \label{subsec:Statement-co-product-with-constant-functor-applicative}\ref{subsec:Statement-co-product-with-constant-functor-applicative}} -If $F^{\bullet}$ is applicative and $Z$ is a fixed monoid type then -$L^{A}\triangleq Z+F^{A}$ is applicative: +If $F$ is applicative and $Z$ is a fixed monoid type then $L^{A}\triangleq Z+F^{A}$ +is applicative: \begin{align*} & \text{zip}_{L}:(Z+F^{A})\times(Z+F^{B})\rightarrow Z+F^{A\times B}\quad,\\ & \text{zip}_{L}\triangleq\,\begin{array}{|c||cc|} @@ -2473,8 +2471,8 @@ \subsubsection{Statement \label{subsec:Statement-co-product-with-constant-functo \end{align*} The \textsf{``}wrapped unit\textsf{''} ($\text{wu}_{L}:Z+F^{\bbnum 1}$) is defined as $\text{wu}_{L}\triangleq\bbnum 0^{:Z}+\text{wu}_{F}$. If $Z$ -is a commutative monoid and $F^{\bullet}$ is commutative then $L^{\bullet}$ -is also commutative. +is a commutative monoid and $F$ is commutative then $L$ is also +commutative. \subparagraph{Proof} @@ -2537,22 +2535,22 @@ \subsubsection{Statement \label{subsec:Statement-co-product-with-constant-functo {\color{greenunder}\text{right-hand side}:}\quad & \text{zip}_{L}\big(\text{zip}_{L}(p^{:Z+F^{A}}\times q^{:Z+F^{B}})\times r^{:Z+F^{C}}\big)\triangleright\varepsilon_{12,3}^{\uparrow L}\quad. \end{align*} Since each of the arguments $p$, $q$, $r$ may be in one of the -two parts of the disjunction type $Z+F^{\bullet}$, we have 8 cases. -We note, however, that the code of $\text{zip}_{L}(p\times q)$ will +two parts of the disjunction type $Z+F^{A}$, we have 8 cases. We +note, however, that the code of $\text{zip}_{L}(p\times q)$ will return a value of type $Z+\bbnum 0$ whenever at least one of the arguments ($p$, $q$) is of type $Z+\bbnum 0$. So, a composition of two \lstinline!zip! operations will also return a value of type $Z+\bbnum 0$ whenever at least one of the arguments ($p$, $q$, -$r$) is of type $Z+\bbnum 0$. So, we need to consider the following -two cases: +$r$) is of type $Z+\bbnum 0$. It means that we need to consider +the following two cases: \textbf{(1)} At least one of $p$, $q$, $r$ is of type $Z+\bbnum 0$. -In this case, any arguments of type $\bbnum 0+F^{\bullet}$ are ignored +In this case, any arguments of type $\bbnum 0+F^{A}$ are ignored by $\text{zip}_{L}$, while the arguments of type $Z+\bbnum 0$ are combined using the monoid $Z$\textsf{'}s binary operation ($\oplus$). So, the result of the \lstinline!zip! operation is the same if we replace -any arguments ($p$, $q$, $r$) of type $\bbnum 0+F^{\bullet}$ by -the empty value $e_{Z}$. For example: +any arguments ($p$, $q$, $r$) of type $\bbnum 0+F^{A}$ by the +empty value $e_{Z}$. For example: \[ \text{zip}_{L}\big((z+\bbnum 0)\times(\bbnum 0+k^{:F^{A}})\big)=z+\bbnum 0=\text{zip}_{L}\big((z+\bbnum 0)\times(e_{Z}+\bbnum 0)\big)\quad. \] @@ -2568,7 +2566,7 @@ \subsubsection{Statement \label{subsec:Statement-co-product-with-constant-functo \end{align*} \end{comment} -\textbf{(2)} All of $p$, $q$, $r$ are of type $\bbnum 0+F^{\bullet}$. +\textbf{(2)} All of $p$, $q$, $r$ are of type $\bbnum 0+F^{A}$. In this case, $\text{zip}_{L}$ reduces to $\text{zip}_{F}$, which satisfies the associativity law by assumption. @@ -2598,7 +2596,7 @@ \subsubsection{Statement \label{subsec:Statement-co-product-with-constant-functo \end{array}\quad. \end{align*} By assumption, $\text{swap}\bef\text{zip}_{F}=\text{zip}_{F}\bef\text{swap}^{\uparrow F}$. 
-Next, we need the code for the lifted $\text{swap}^{\uparrow L}$: +Next, we need the code for the lifted function $\text{swap}^{\uparrow L}$: \[ \text{swap}^{\uparrow L}=\,\begin{array}{|c||cc|} & Z & F^{B\times A}\\ @@ -2617,12 +2615,11 @@ \subsubsection{Statement \label{subsec:Statement-co-product-with-constant-functo \end{array}\quad. \] The difference between the sides disappears if $Z$ is a commutative -monoid ($z_{1}\oplus z_{2}=z_{2}\oplus z_{1}$). $\square$ +monoid ($z_{1}\oplus z_{2}=z_{2}\oplus z_{1}$). \subsubsection{Statement \label{subsec:Statement-co-product-with-identity-applicative}\ref{subsec:Statement-co-product-with-identity-applicative}} -If $F^{\bullet}$ is applicative then $L^{A}\triangleq A+F^{A}$ is -also applicative: +If $F$ is applicative then $L^{A}\triangleq A+F^{A}$ is also applicative: \begin{align*} & \text{zip}_{L}:(A+F^{A})\times(B+F^{B})\rightarrow A\times B+F^{A\times B}\quad,\\ & \text{zip}_{L}\triangleq\,\begin{array}{|c||cc|} @@ -2634,16 +2631,16 @@ \subsubsection{Statement \label{subsec:Statement-co-product-with-identity-applic \end{array}\quad. \end{align*} The \lstinline!wu! method is defined by $\text{wu}_{L}\triangleq1+\bbnum 0^{:F^{\bbnum 1}}$. -If $F^{\bullet}$ is commutative then $L^{\bullet}$ is also commutative. +If $F$ is commutative then $L$ is also commutative. \subparagraph{Proof} -We will use Statement~\ref{subsec:Statement-co-product-with-co-pointed-applicative}, -where the same properties are demonstrated for a more general functor -$L^{A}\triangleq H^{A}+F^{A}$. We will set $H^{A}\triangleq A$ in -Statement~\ref{subsec:Statement-co-product-with-co-pointed-applicative} -and obtain the present statement because the compatibility law holds -automatically for $\text{ex}_{H}\triangleq\text{id}$ and $\text{zip}_{H}\triangleq\text{id}$. +We defer to Statement~\ref{subsec:Statement-co-product-with-co-pointed-applicative} +below, where the same properties are demonstrated for a more general +functor $L^{A}\triangleq H^{A}+F^{A}$. We will set $H^{A}\triangleq A$ +in Statement~\ref{subsec:Statement-co-product-with-co-pointed-applicative}, +which is justified because the compatibility law holds automatically +for $\text{ex}_{H}\triangleq\text{id}$ and $\text{zip}_{H}\triangleq\text{id}$. $\square$ It is important that Statements~\ref{subsec:Statement-co-product-with-constant-functor-applicative}\textendash \ref{subsec:Statement-co-product-with-identity-applicative} @@ -2659,8 +2656,8 @@ \subsubsection{Statement \label{subsec:Statement-co-product-with-identity-applic or as $L^{A}\triangleq A+F^{A}$ with $F^{A}\triangleq Z$ (via Statement~\ref{subsec:Statement-co-product-with-identity-applicative}). The following statement generalizes the construction $L^{A}\triangleq A+F^{A}$ -to $L^{A}\triangleq H^{A}+F^{A}$ where $H^{\bullet}$ is applicative -and at the same time co-pointed (see Section~\ref{subsec:Co-pointed-functors}). +to $L^{A}\triangleq H^{A}+F^{A}$ where $H$ is applicative and at +the same time co-pointed (see Section~\ref{subsec:Co-pointed-functors}). The code for $\text{zip}_{L}$ will use the co-pointed functor\textsf{'}s \lstinline!extract! method. 
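As a brief reminder, a co-pointed functor is one with a fully parametric
\lstinline!extract! method. A minimal sketch (the trait name \lstinline!Copointed!
and the example instance are chosen only for illustration):
\begin{lstlisting}
// A sketch of a simplified co-pointed functor typeclass.
trait Copointed[H[_]] { def extract[A](ha: H[A]): A }  // ex_H : H^A -> A

// Example: H[A] = (A, List[A]) is co-pointed via the first component (ex_H = pi_1).
type WithList[A] = (A, List[A])
implicit val copointedWithList: Copointed[WithList] = new Copointed[WithList] {
  def extract[A](ha: WithList[A]): A = ha._1
}
\end{lstlisting}
A code sketch that uses such an \lstinline!extract! method together with the
applicative methods of $F$ is shown after the next statement.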
The next statement shows that $\text{zip}_{L}$ will obey the applicative laws if a special \textbf{compatibility law}\index{compatibility law!of extract and zip@of \texttt{extract} and \texttt{zip}} @@ -2684,7 +2681,7 @@ \subsubsection{Statement \label{subsec:Statement-co-pointed-applicative-example} product constructions of applicative functors (Statement~\ref{subsec:Statement-applicative-product}) and co-pointed functors (Section~\ref{subsec:Co-pointed-functors}). To verify the compatibility law~(\ref{eq:compatibility-law-of-extract-and-zip}), -we write the definition of $\text{zip}_{H}$: +take the definition of $\text{zip}_{H}$: \[ \text{zip}_{H}\big((a^{:A}\times g_{1}^{:G^{A}})\times(b^{:B}\times g_{2}^{:G^{B}})\big)=(a\times b)\times\text{zip}_{G}(g_{1}\times g_{2})\quad, \] @@ -2711,13 +2708,13 @@ \subsubsection{Statement \label{subsec:Statement-co-pointed-applicative-example- \subparagraph{Proof} The product construction (Statement~\ref{subsec:Statement-applicative-product}) -shows that $F^{\bullet}$ is applicative: it is a product of a constant -functor ($Z$) and a \lstinline!Reader! functor ($Z\rightarrow A$). -The \lstinline!zip! method is: +shows that $F$ is applicative: it is a product of a constant functor +($Z$) and a \lstinline!Reader! functor ($Z\rightarrow A$). The +\lstinline!zip! method is: \[ \text{zip}_{F}\big((z_{1}^{:Z}\times r_{1}^{:Z\rightarrow A})\times(z_{2}^{:Z}\times r_{2}^{:Z\rightarrow B})\big)\triangleq(z_{1}\oplus z_{2})\times(z^{:Z}\rightarrow r_{1}(z)\times r_{2}(z))\quad. \] - The functor $F^{\bullet}$ is co-pointed because it has a fully parametric + The functor $F$ is co-pointed because it has a fully parametric \lstinline!extract! method defined by: \[ \text{ex}_{F}\triangleq z^{:Z}\times r^{:Z\rightarrow A}\rightarrow r(z)\quad. @@ -2737,11 +2734,11 @@ \subsubsection{Statement \label{subsec:Statement-co-pointed-applicative-example- \subsubsection{Statement \label{subsec:Statement-co-product-with-co-pointed-applicative}\ref{subsec:Statement-co-product-with-co-pointed-applicative}} -The functor $L^{A}\triangleq H^{A}+F^{A}$ is applicative if $F^{\bullet}$ -and $H^{\bullet}$ are applicative and in addition $H^{\bullet}$ -is co-pointed with a method $\text{ex}_{H}:H^{A}\rightarrow A$ that -satisfies the compatibility law~(\ref{eq:compatibility-law-of-extract-and-zip}). -The applicative methods of $L^{\bullet}$ are defined by: +The functor $L^{A}\triangleq H^{A}+F^{A}$ is applicative if $F$ +and $H$ are applicative and in addition $H$ is co-pointed with a +method $\text{ex}_{H}:H^{A}\rightarrow A$ that satisfies the compatibility +law~(\ref{eq:compatibility-law-of-extract-and-zip}). The applicative +methods of $L$ are defined by: \begin{align*} & \text{zip}_{L}:(H^{A}+F^{A})\times(H^{B}+F^{B})\rightarrow H^{A\times B}+F^{A\times B}\quad,\\ & \text{zip}_{L}\triangleq\,\begin{array}{|c||cc|} @@ -2753,13 +2750,12 @@ \subsubsection{Statement \label{subsec:Statement-co-product-with-co-pointed-appl \end{array}\quad. \end{align*} The method $\text{wu}_{L}:H^{\bbnum 1}+F^{\bbnum 1}$ is defined by -$\text{wu}_{L}\triangleq\text{wu}_{H}+\bbnum 0$. If $F^{\bullet}$ -and $H^{\bullet}$ are commutative applicative functors then $L^{\bullet}$ -is also commutative. +$\text{wu}_{L}\triangleq\text{wu}_{H}+\bbnum 0$. If $F$ and $H$ +are commutative applicative functors then $L$ is also commutative. 
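A possible Scala sketch of this construction is shown below, using simplified
\lstinline!Applicative! and \lstinline!Copointed! traits (repeated here for
completeness); the method \lstinline!pure! stands for $\text{pu}_{F}$, which
is derivable from $\text{wu}_{F}$ and \lstinline!map! for a functor $F$, and
the names \lstinline!zipL!, \lstinline!wuL! are chosen only for illustration:
\begin{lstlisting}
// A sketch only; simplified typeclasses as in the previous sketches.
trait Applicative[F[_]] {
  def zip[A, B](fa: F[A], fb: F[B]): F[(A, B)]
  def wu: F[Unit]
  def pure[A](a: A): F[A]  // pu_F, derivable from wu and map.
}
trait Copointed[H[_]] { def extract[A](ha: H[A]): A }

// L[A] = Either[H[A], F[A]] encodes H^A + F^A.
def zipL[H[_], F[_], A, B](p: Either[H[A], F[A]], q: Either[H[B], F[B]])(implicit
    ah: Applicative[H], ch: Copointed[H], af: Applicative[F]
): Either[H[(A, B)], F[(A, B)]] = (p, q) match {
  case (Left(ha), Left(hb))   => Left(ah.zip(ha, hb))                       // zip_H
  case (Left(ha), Right(fb))  => Right(af.zip(af.pure(ch.extract(ha)), fb)) // use ex_H and pu_F
  case (Right(fa), Left(hb))  => Right(af.zip(fa, af.pure(ch.extract(hb))))
  case (Right(fa), Right(fb)) => Right(af.zip(fa, fb))                      // zip_F
}
def wuL[H[_], F[_]](implicit ah: Applicative[H]): Either[H[Unit], F[Unit]] = Left(ah.wu)
\end{lstlisting}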
\subparagraph{Proof} -The lifting to $L^{\bullet}$ is defined by: +The lifting to $L$ is defined by: \[ (f^{:A\rightarrow B})^{\uparrow L}\triangleq\,\begin{array}{|c||cc|} & H^{B} & F^{B}\\ @@ -2797,7 +2793,7 @@ \subsubsection{Statement \label{subsec:Statement-co-product-with-co-pointed-appl \hline H^{B} & h\rightarrow\text{zip}_{H}(\text{wu}_{H}\times h) & \bbnum 0\\ F^{B} & \bbnum 0 & f\rightarrow\text{zip}_{F}((\text{wu}_{H}\triangleright\text{ex}_{H}\triangleright\text{pu}_{F})\times f) \end{array}\\ - & \quad=\,\begin{array}{|c||cc|} + & =\,\begin{array}{|c||cc|} & H^{\bbnum 1\times B} & F^{\bbnum 1\times B}\\ \hline H^{B} & \text{ilu}^{\uparrow H} & \bbnum 0\\ F^{B} & \bbnum 0 & \text{ilu}^{\uparrow F} @@ -2845,8 +2841,8 @@ \subsubsection{Statement \label{subsec:Statement-co-product-with-co-pointed-appl \textbf{(2)} The argument $q$ has type $\bbnum 0+F^{B}$. In this case, $\text{zip}_{L}$ reduces to $\text{zip}_{F}$ after converting -arguments of type $H^{\bullet}+0$ to type $F^{\bullet}$ when needed. -We may define this conversion as a helper function \lstinline!toF!: +arguments of type $H^{A}+0$ to type $F^{A}$ when needed. We may +define this conversion as a helper function \textsf{``}\lstinline!toF!\textsf{''}: \begin{align*} & \text{toF}:H^{A}+F^{A}\rightarrow F^{A}\quad,\quad\quad\text{toF}\triangleq\,\begin{array}{|c||c|} & F^{A}\\ @@ -2872,7 +2868,7 @@ \subsubsection{Statement \label{subsec:Statement-co-product-with-co-pointed-appl \textbf{(3)} Either $p=\bbnum 0+a^{:F^{A}}$ while $q=b^{:H^{B}}+\bbnum 0$ and $r=c^{:H^{C}}+\bbnum 0$; or $r=\bbnum 0+c^{:F^{C}}$ while $p=a^{:H^{A}}+\bbnum 0$ and $q=b^{:H^{B}}+\bbnum 0$. The two situations are symmetric, so -let us consider the first one: +it suffices to consider the first one: \begin{align*} {\color{greenunder}\text{left-hand side}:}\quad & \text{zip}_{L}(p\times\text{zip}_{L}(q\times r))\triangleright\varepsilon_{1,23}^{\uparrow L}\\ & =\bbnum 0+\text{zip}_{F}\big(\text{toF}\,(p)\times\text{toF}\,(\text{zip}_{H}(b\times c)+\bbnum 0)\big)\triangleright\varepsilon_{1,23}^{\uparrow F}\quad. @@ -2967,31 +2963,30 @@ \subsubsection{Statement \label{subsec:Statement-co-product-with-co-pointed-appl The two sides are now equal. $\square$ The constructions shown so far define applicative methods for all -polynomial functors. +polynomial functors: \subsubsection{Statement \label{subsec:Statement-polynomial-functor-applicative}\ref{subsec:Statement-polynomial-functor-applicative}} -\textbf{(a)} Any polynomial functor $L^{A}$ whose fixed types are -monoids can be made into an applicative functor. +\textbf{(a)} Any polynomial functor $L$ whose fixed types are monoids +can be made into an applicative functor. -\textbf{(b)} If $P^{A}$ and $Q^{A}$ are polynomial functors with -monoidal fixed types and $R^{A}$ is any applicative functor then -$L^{A}\triangleq P^{A}+Q^{A}\times R^{A}$ is applicative. +\textbf{(b)} If $P$ and $Q$ are polynomial functors with monoidal +fixed types and $R$ is any applicative functor then $L^{A}\triangleq P^{A}+Q^{A}\times R^{A}$ +is applicative. -\textbf{(c)} A recursive polynomial functor $L^{\bullet}$ defined -by $L^{A}\triangleq S^{A,L^{F^{A}}}$ is applicative if $S^{\bullet,\bullet}$ -and $F^{\bullet}$ are polynomial (bi)functors with monoidal fixed -types. +\textbf{(c)} A recursive polynomial functor $L$ defined by $L^{A}\triangleq S^{A,L^{F^{A}}}$ +is applicative if $S$ and $F$ are polynomial (bi)functors with monoidal +fixed types. 
\textbf{(d)} If all fixed types used in parts \textbf{(a)}\textendash \textbf{(c)} -are commutative monoids, $L^{\bullet}$ will be also commutative. +are commutative monoids, $L$ will be also commutative. \subparagraph{Proof} -\textbf{(a)} Any polynomial functor $L^{A}$ is built (in at least -one way) by combining fixed types and the type parameter $A$ using -products and co-products. By rearranging the type expression $L^{A}$, -we may bring it to the following equivalent form: +\textbf{(a)} Any polynomial functor $L$ is built (in at least one +way) by combining fixed types and the type parameter $A$ using products +and co-products. By rearranging the type expression $L^{A}$, we may +bring it to the following equivalent form: \[ L^{A}\cong Z_{0}+A\times(Z_{1}+A\times(...\times(Z_{n-1}+A\times Z_{n})...))\quad. \] @@ -3004,41 +2999,40 @@ \subsubsection{Statement \label{subsec:Statement-polynomial-functor-applicative} P^{A}=Z_{1}+A\times S_{1}^{A}\quad,\quad\quad Q^{A}=Z_{2}+A\times S_{2}^{A}\quad, \] with some fixed types $Z_{1}$, $Z_{2}$ and some polynomial functors -$S_{1}^{A}$, $S_{2}^{A}$. We can then rewrite the type $L^{A}$ -equivalently as: +$S_{1}$, $S_{2}$. We can then rewrite the type $L^{A}$ equivalently +as: \[ P^{A}+Q^{A}\times R^{A}=Z_{1}+A\times S_{1}^{A}+(Z_{2}+A\times S_{2}^{A})\times R^{A}\cong(Z_{1}+Z_{2}\times R^{A})+A\times(S_{1}^{A}+S_{2}^{A}\times R^{A})\quad. \] -Since $S_{1}^{A}$ and $S_{2}^{A}$ are polynomial functors of smaller -degree than $P^{A}$ and $Q^{A}$, we may assume by induction that -the property we are proving will already hold for the functor $G^{A}\triangleq S_{1}^{A}+S_{2}^{A}\times R^{A}$. +Since $S_{1}$ and $S_{2}$ are polynomial functors of smaller degree +than $P$ and $Q$, we may assume by induction that the property we +are proving will already hold for the functor $G^{A}\triangleq S_{1}^{A}+S_{2}^{A}\times R^{A}$. Statements~\ref{subsec:Statement-applicative-product}, \ref{subsec:Statement-co-product-with-constant-functor-applicative}, \ref{subsec:Statement-co-product-with-identity-applicative}, and~\ref{subsec:Statement-co-product-with-co-pointed-applicative} then show that $F^{A}\triangleq Z_{1}+Z_{2}\times R^{A}$ is applicative -and $F^{A}+A\times G^{A}\cong L^{A}$ is applicative. +and that $F^{A}+A\times G^{A}\cong L^{A}$ is also applicative. \textbf{(c)} Write the recursive definition $L^{A}\triangleq S^{A,L^{F^{A}}}$. To prove that $S^{A,L^{F^{A}}}$ is an applicative functor, we may -use the inductive assumption that $L^{A}$ is applicative when used -in the recursive position (i.e., as the second argument of $S^{\bullet,\bullet}$). -By Statement~\ref{subsec:Statement-applicative-composition}, $L^{F^{A}}$ -is applicative when used in that position. Denoting $N^{A}\triangleq L^{F^{A}}$, -we now rewrite $L^{A}$ as: +use the inductive assumption that $L$ is applicative when used in +the recursive position (i.e., as the second argument of $S$). By +Statement~\ref{subsec:Statement-applicative-composition}, $L\circ F$ +is applicative when used in that position. Denoting $N\triangleq L\circ F$, +we now rewrite $L$ as: \[ L^{A}=S^{A,L^{F^{A}}}=S^{A,N^{A}}\quad. \] It remains to prove that $S^{A,N^{A}}$ is applicative given that $N$ is applicative. 
-The polynomial bifunctor $S^{\bullet,\bullet}$ can be expressed as: +The polynomial bifunctor $S$ can be expressed as: \[ S^{A,R}=P_{0}^{A}+R\times(P_{1}^{A}+R\times(...\times(P_{n-1}^{A}+R\times P_{n}^{A})...))\quad, \] -where $P_{0}^{A}$, ..., $P_{n}^{A}$ are some polynomial functors -(with respect to the type parameter $A$) with monoidal fixed types. -We need to set $R=N^{A}$ in the type expression above. Then it follows -from part \textbf{(b)} that $P_{n-1}^{A}+N^{A}\times P_{n}^{A}$ is -applicative. In the same way, it follows that $P_{n-2}^{A}+N^{A}\times(P_{n-1}^{A}+N^{A}\times P_{n}^{A})$ +where $P_{0}$, ..., $P_{n}$ are some polynomial functors with monoidal +fixed types. We need to set $R=N^{A}$ in the type expression above. +Then it follows from part \textbf{(b)} that $P_{n-1}^{A}+N^{A}\times P_{n}^{A}$ +is applicative. In the same way, it follows that $P_{n-2}^{A}+N^{A}\times(P_{n-1}^{A}+N^{A}\times P_{n}^{A})$ is applicative, and so on, until we show that $S^{A,N^{A}}$ is applicative. \textbf{(d)} The proofs of parts \textbf{(a)}\textendash \textbf{(c)} @@ -3051,16 +3045,15 @@ \subsubsection{Statement \label{subsec:Statement-polynomial-functor-applicative} \paragraph{Function types} We have seen in Section~\ref{subsec:The-applicative-Reader-functor} -that the \lstinline!Reader! functor ($L^{A}\triangleq R\rightarrow A$) -has a \lstinline!zip! operation. That \lstinline!zip! operation -can be derived from the monadic methods of the \lstinline!Reader! -monad (which is commutative). Statement~\ref{subsec:Statement-monad-construction-2} +that the \lstinline!Reader! functor has a \lstinline!zip! operation. +That \lstinline!zip! operation can be derived from the monadic methods +of the \lstinline!Reader! monad (which is commutative). Statement~\ref{subsec:Statement-monad-construction-2} generalized the \lstinline!Reader! monad to a wider class of monads -with type $L^{A}\triangleq H^{A}\rightarrow A$, where $H^{\bullet}$ -is an arbitrary contrafunctor. The lawful monad gives up to two definitions +with type $L^{A}\triangleq H^{A}\rightarrow A$, where $H$ is an +arbitrary contrafunctor. The lawful monad gives up to two definitions of a \lstinline!zip! method for the functors $L$ of this type. However, -commutativity is not guaranteed for arbitrary $H^{\bullet}$. One -can implement a \lstinline!zip! method with type signature: +commutativity is not guaranteed for arbitrary $H$. One can implement +a \lstinline!zip! method with type signature: \[ \text{zip}:(H^{A}\rightarrow A)\times(H^{B}\rightarrow B)\rightarrow H^{A\times B}\rightarrow A\times B\quad, \] @@ -3112,8 +3105,9 @@ \subsubsection{Statement \label{subsec:Statement-applicative-recursive-type}\ref because we may assume that $L^{A}$ is already a lawful applicative functor when we use its methods in recursive calls. -Since all these constructions preserve commutative applicative functors, -we conclude that $L$ will be commutative if $F$ and $H$ are. +Since all these constructions preserve commutativity of applicative +functors, we conclude that $L$ will be commutative if $F$ and $H$ +are. The constructions also give us the code for the applicative methods \lstinline!zip! and \lstinline!wu! of $L$. 
This code is similar @@ -3225,7 +3219,7 @@ \subsubsection{Statement \label{subsec:Statement-applicative-recursive-type}\ref It remains to consider the case when the three arguments are of the -form +form: \[ p\triangleq\bbnum 0+h_{1}^{:H^{A}}\times k_{1}^{:F^{L^{A}}}\quad,\quad\quad q\triangleq\bbnum 0+h_{2}^{:H^{B}}\times k_{2}^{:F^{L^{B}}}\quad,\quad\quad r\triangleq\bbnum 0+h_{3}^{:H^{C}}\times k_{3}^{:F^{L^{C}}}\quad. \] @@ -3277,14 +3271,13 @@ \subsubsection{Statement \label{subsec:Statement-applicative-recursive-type}\ref leaf values of type $A$ and branch shapes described by a functor $F$. In addition, each branch may carry a value of type $H^{A}$. -When $H^{A}$ is a constant functor, the resulting $L^{\bullet}$ -is a monad (Statement~\ref{subsec:Statement-monad-construction-4-free-monad}). +When $H$ is a constant functor, the resulting $L$ is a monad (Statement~\ref{subsec:Statement-monad-construction-4-free-monad}). It is important that the applicative implementations of \lstinline!map2! and \lstinline!zip! for this tree-like monad are \emph{not} compatible -with its monad methods. To see this, it is sufficient to note that -even the simplest tree-like monad (the binary tree, $L^{A}\triangleq A+L^{A}\times L^{A}$) +with its monad methods. To see this, it suffices to note that even +a simple tree-like monad (the binary tree, $L^{A}\triangleq A+L^{A}\times L^{A}$) is not commutative. The applicative functor $L$, however, is commutative -because it is built from $H^{A}\triangleq1$ and $F^{A}\triangleq A\times A$; +because it is built from $H^{A}\triangleq\bbnum 1$ and $F^{A}\triangleq A\times A$; those are polynomial functors with commutative monoidal coefficients. The next statement generalizes the \lstinline!List! functor, @@ -3303,20 +3296,19 @@ \subsubsection{Statement \label{subsec:Statement-applicative-recursive-type-1}\r $G$, and $H$ are commutative then $L$ is also commutative. \textbf{(b)} The same properties hold for the functor $P$ defined -by $P^{A}\triangleq F^{G^{A}+H^{A}\times P^{A}}$ instead. +by $P^{A}\triangleq F^{G^{A}+H^{A}\times P^{A}}$. \subparagraph{Proof} \textbf{(a)} We will avoid long derivations if we show that the functor -$L$ is built via known type constructions. At the top level, $L^{A}$ +$L$ is built via known type constructions. At the top level, $L$ is the co-pointed co-product construction (Statement~\ref{subsec:Statement-co-product-with-co-pointed-applicative}) -with functors $G^{\bullet}$ and $H^{\bullet}\times F^{L^{\bullet}}$. -The functor $H^{\bullet}\times F^{L^{\bullet}}$ is co-pointed because -$H$ is (Section~\ref{subsec:Co-pointed-functors}). The compatibility -law holds for $H^{\bullet}\times F^{L^{\bullet}}$ due to Exercise~\ref{subsec:Exercise-applicative-II-4-1}. -The functor $F^{L^{\bullet}}$ is applicative because it is a composition -of $F$ and the recursively used $L$. As usual, we may assume that -recursive uses of $L$\textsf{'}s methods will satisfy all required laws. +with functors $G$ and $H\times(F\circ L)$. The functor $H\times(F\circ L)$ +is co-pointed because $H$ is (Section~\ref{subsec:Co-pointed-functors}). +The compatibility law holds for $H\times(F\circ L)$ due to Exercise~\ref{subsec:Exercise-applicative-II-4-1}. +The functor $F\circ L$ is applicative because it is a composition +of $F$ and the recursively used $L$. (As usual, we assume that recursive +uses of $L$\textsf{'}s methods will satisfy all required laws.) The constructions give us the code of $L$\textsf{'}s methods \lstinline!zip! and \lstinline!wu!. 
Define $N^{A}\triangleq H^{A}\times F^{L^{A}}$ @@ -3350,9 +3342,9 @@ \subsubsection{Statement \label{subsec:Statement-applicative-recursive-type-1}\r \] It suggests that $P^{A}\cong F^{L^{A}}$. Then the required properties hold for $P$ due to the functor composition construction (Statement~\ref{subsec:Statement-applicative-composition}). -To establish the type equivalence $P^{A}\cong F^{L^{A}}$ rigorously, +To establish a type equivalence $P^{A}\cong F^{L^{A}}$ rigorously, we use Statement~\ref{subsec:Statement-unrolling-trick} below, where -we need to set $R^{\bullet}\triangleq F^{\bullet}$, $S^{T}\triangleq G^{A}+H^{A}\times T$, +we need to set $R\triangleq F$, $S^{T}\triangleq G^{A}+H^{A}\times T$, $U\triangleq P^{A}$, and $V\triangleq L^{A}$. $\square$ Note that the construction shown in Statement~\ref{subsec:Statement-applicative-recursive-type-1} @@ -3393,10 +3385,10 @@ \subsubsection{Statement \label{subsec:Statement-applicative-recursive-type-1}\r There could exist other recursive constructions that produce lawful applicative functors. For instance, one could assume an \textsf{``}biapplicative -bifunctor\textsf{''} $P^{A,R}$ having a \lstinline!bizip! method with this -type signature: +bifunctor\textsf{''} $P$ having a \lstinline!bizip! method with this type +signature: \[ -\text{bizip}_{P}^{A,B,F^{\bullet}}:P^{A,F^{A}}\times P^{B,F^{B}}\rightarrow P^{A\times B,F^{A}\times F^{B}}\quad(\text{for all functors }F)\quad. +\text{bizip}_{P}^{A,B,F}:P^{A,F^{A}}\times P^{B,F^{B}}\rightarrow P^{A\times B,F^{A}\times F^{B}}\quad(\text{for all functors }F)\quad. \] The bifunctor $P$ should also have a designated \textsf{``}wrapped unit\textsf{''} value, $\text{wu}_{P}:P^{\bbnum 1,\bbnum 1}$. Then the functor $L$ @@ -3410,19 +3402,18 @@ \subsubsection{Statement \label{subsec:Statement-applicative-recursive-type-1}\r \end{align*} To find what biapplicative bifunctors $P$ exist, one could continue -with structural analysis (considering products $P_{1}^{\bullet,\bullet}\times P_{2}^{\bullet,\bullet}$, -co-products $P_{1}^{\bullet,\bullet}+P_{2}^{\bullet,\bullet}$, and -so on). This book will not pursue that analysis further, because we -already found sufficiently many type constructions required for practical -applications. +with structural analysis (considering products $P_{1}\times P_{2}$, +co-products $P_{1}+P_{2}$, and so on). This book will not pursue +that analysis further, because we already found sufficiently many +type constructions required for practical applications. We conclude this section with a proof of one version of the \textsf{``}unrolling trick\textsf{''} for recursive types:\index{unrolling trick for recursive types!proof}\index{recursive types!unrolling trick!proof} \subsubsection{Statement \label{subsec:Statement-unrolling-trick}\ref{subsec:Statement-unrolling-trick}} -Given two functors $R^{\bullet}$ and $S^{\bullet}$, define two recursive -types $U$ and $V$ by $U\triangleq R^{S^{U}}$ and $V\triangleq S^{R^{V}}$. +Given two functors $R$ and $S$, define two recursive types $U$ +and $V$ by $U\triangleq R^{S^{U}}$ and $V\triangleq S^{R^{V}}$. The \textsf{``}unrolling trick\textsf{''} writes (non-rigorously) $U=R^{S^{R^{S^{\iddots}}}}\!$ and $V=S^{R^{S^{R^{\iddots}}}}\!$, which suggests that $U$ and $R^{V}$ are the same type. 
In fact, the type $U$ is rigorously equivalent @@ -3481,13 +3472,13 @@ \subsection{Applicative contrafunctors: Laws and constructions} \subsubsection{Definition \label{subsec:Definition-applicative-contrafunctor}\ref{subsec:Definition-applicative-contrafunctor}} -A contrafunctor $C^{\bullet}$ is \textbf{applicative} if there exist -methods \lstinline!zip! and \lstinline!wu! such that: +A contrafunctor $C$ is \textbf{applicative} if there exist methods +\lstinline!zip! and \lstinline!wu! such that: \begin{align} & \text{zip}_{C}:C^{A}\times C^{B}\rightarrow C^{A\times B}\quad,\quad\quad\text{wu}_{C}:C^{\bbnum 1}\quad,\nonumber \\ - & \quad{\color{greenunder}\text{associativity law}:}\quad\\ + & \quad{\color{greenunder}\text{associativity law}:}\quad\nonumber \\ & \text{zip}_{C}(p\times\text{zip}_{C}(q\times r))\triangleright\tilde{\varepsilon}_{1,23}^{\downarrow C}=\text{zip}_{C}(\text{zip}_{C}(p\times q)\times r)\triangleright\tilde{\varepsilon}_{12,3}^{\downarrow C}\quad,\label{eq:applicative-contrafunctor-associativity-law}\\ - & \quad{\color{greenunder}\text{left and right identity laws}:}\quad\\ + & \quad{\color{greenunder}\text{left and right identity laws}:}\quad\nonumber \\ & \text{zip}_{C}(\text{wu}_{C}\times p)\triangleright\text{ilu}^{\downarrow C}=p\quad,\quad\quad\text{zip}_{C}(p\times\text{wu}_{C})\triangleright\text{iru}^{\downarrow C}=p\quad.\label{eq:applicative-contrafunctor-identity-laws} \end{align} Here the tuple-rearranging isomorphisms $\tilde{\varepsilon}_{1,23}$, @@ -3597,8 +3588,8 @@ \subsubsection{Statement \label{subsec:Statement-applicative-contrafunctor-compo & \text{zip}_{G}(g\times\text{zip}_{G}(h\times j))\triangleright\tilde{\varepsilon}_{1,23}^{\downarrow G}=\text{zip}_{G}(\text{zip}_{G}(g\times h)\times j)\triangleright\tilde{\varepsilon}_{12,3}^{\downarrow G}\quad. \end{align*} -To verify the commutativity law for $C$, we assume that the law holds -for $F$ and $G$: +To verify the commutativity law for $C$, we assume that the same +law holds for $F$ and $G$: \begin{align*} {\color{greenunder}\text{expect to equal }(\text{zip}_{C}\bef\text{swap}^{\downarrow C}):}\quad & \text{swap}\bef\text{zip}_{C}=\gunderline{\text{swap}\bef\text{zip}_{F}}\bef\text{zip}_{G}^{\uparrow F}\\ {\color{greenunder}\text{commutativity law of }F:}\quad & =\text{zip}_{F}\bef\gunderline{\text{swap}^{\uparrow F}\bef\text{zip}_{G}^{\uparrow F}}=\text{zip}_{F}\bef(\gunderline{\text{swap}\bef\text{zip}_{G}})^{\uparrow F}\\ @@ -3613,9 +3604,9 @@ \subsubsection{Statement \label{subsec:Statement-applicative-contrafunctor-compo \subsubsection{Statement \label{subsec:Statement-applicative-contrafunctor-product}\ref{subsec:Statement-applicative-contrafunctor-product}} -If $F^{\bullet}$ and $G^{\bullet}$ are applicative contrafunctors -then the contrafunctor $C^{A}\triangleq F^{A}\times G^{A}$ is also -applicative. If $F$ and $G$ are commutative then $C$ is also commutative. +If $F$ and $G$ are applicative contrafunctors then the contrafunctor +$C^{A}\triangleq F^{A}\times G^{A}$ is also applicative. If $F$ +and $G$ are commutative then $C$ is also commutative. \subparagraph{Proof} @@ -3627,9 +3618,9 @@ \subsubsection{Statement \label{subsec:Statement-applicative-contrafunctor-produ \subsubsection{Statement \label{subsec:Statement-applicative-contrafunctor-co-product}\ref{subsec:Statement-applicative-contrafunctor-co-product}} -If $F^{\bullet}$ and $G^{\bullet}$ are applicative contrafunctors -then the contrafunctor $C^{A}\triangleq F^{A}+G^{A}$ is also applicative. 
-If $F$ and $G$ are commutative then $C$ is also commutative. +If $F$ and $G$ are applicative contrafunctors then the contrafunctor +$C^{A}\triangleq F^{A}+G^{A}$ is also applicative. If $F$ and $G$ +are commutative then $C$ is also commutative. \subparagraph{Proof} @@ -3674,7 +3665,7 @@ \subsubsection{Statement \label{subsec:Statement-applicative-contrafunctor-co-pr \end{array}\quad. \] This function will sometimes ignore its argument when that argument -has type $\bbnum 0+G^{\bullet}$. +has type $\bbnum 0+G^{A}$ or $\bbnum 0+G^{B}$. Looking at the possible implementations of $\text{wu}_{C}$ (of type $F^{\bbnum 1}+G^{\bbnum 1}$), we find two choices: @@ -3687,10 +3678,10 @@ \subsubsection{Statement \label{subsec:Statement-applicative-contrafunctor-co-pr \text{zip}_{C}(\text{wu}_{C}\times p)\triangleright\text{ilu}^{\downarrow C}\overset{?}{=}p\quad. \] We know that $\text{zip}_{C}$ will sometimes ignore its argument -of type $\bbnum 0+G^{\bullet}$, and yet we need to guarantee that -no information is lost from the argument $p$. At the same time, it -is acceptable if $\text{zip}_{C}(\text{wu}_{C}\times p)$ ignored -the argument $\text{wu}_{C}$. So, we need to choose $\text{wu}_{C}\triangleq\bbnum 0+\text{wu}_{G}$. +of type $\bbnum 0+G^{B}$, and yet we need to guarantee that no information +is lost from the argument $p$. At the same time, it is acceptable +if $\text{zip}_{C}(\text{wu}_{C}\times p)$ ignored the argument $\text{wu}_{C}$. +So, we need to choose $\text{wu}_{C}\triangleq\bbnum 0+\text{wu}_{G}$. With this choice, we can now verify the left identity law: \begin{align*} @@ -3701,7 +3692,7 @@ \subsubsection{Statement \label{subsec:Statement-applicative-contrafunctor-co-pr F^{\bbnum 1}\times G^{B} & p\times\_^{:G^{B}}\rightarrow p\triangleright\pi_{1}^{\downarrow F} & \bbnum 0\\ G^{\bbnum 1}\times F^{B} & \_^{:G^{\bbnum 1}}\times q^{:F^{B}}\rightarrow q\triangleright\pi_{2}^{\downarrow F} & \bbnum 0\\ G^{\bbnum 1}\times G^{B} & \bbnum 0 & \text{zip}_{G} -\end{array}\bef\text{ilu}^{\downarrow C}\\ +\end{array}\,\bef\text{ilu}^{\downarrow C}\\ & =p\triangleright\,\begin{array}{|c||cc|} & F^{\bbnum 1\times B} & G^{\bbnum 1\times B}\\ \hline F^{B} & \pi_{2}^{\downarrow F} & \bbnum 0\\ @@ -3755,18 +3746,18 @@ \subsubsection{Statement \label{subsec:Statement-applicative-contrafunctor-co-pr has eight cases depending on whether $p$, $q$, and $r$ are in the $F$ or in the $G$ parts of their disjunctive types. Let us look at the conditions for the result of a \lstinline!zip! operation to -be of type $F^{\bullet}+\bbnum 0$ or $\bbnum 0+G^{\bullet}$. According -to the code matrix of $\text{zip}_{C}$, the result of computing $\text{zip}_{C}(p\times q)$ -is type $\bbnum 0+G^{\bullet}$ only when both $p$ and $q$ are in -their $G$ parts. In that case, we find that $\text{zip}_{C}$ is -reduced to $\text{zip}_{G}$: +be of type $F^{A\times B}+\bbnum 0$ or $\bbnum 0+G^{A\times B}$. +According to the code matrix of $\text{zip}_{C}$, the result of computing +$\text{zip}_{C}(p\times q)$ is type $\bbnum 0+G^{A\times B}$ only +when both $p$ and $q$ are in their $G$ parts. In that case, we +find that $\text{zip}_{C}$ is reduced to $\text{zip}_{G}$: \[ \text{zip}_{C}\big(p^{:\bbnum 0+G^{A}}\times q^{:\bbnum 0+G^{B}}\big)=\text{zip}_{C}\big((\bbnum 0+g^{:G^{A}})\times(\bbnum 0+h^{:G^{B}})\big)=\bbnum 0^{:F^{A\times B}}+\text{zip}_{G}(g\times h)\quad. 
\] -So, if all of $p$, $q$, $r$ are of type $\bbnum 0+G^{\bullet}$, -the associativity law of $\text{zip}_{C}$ is reduced to the associativity +So, if all of $p$, $q$, $r$ are of types $\bbnum 0+G^{...}$, the +associativity law of $\text{zip}_{C}$ is reduced to the associativity law of $\text{zip}_{G}$. Similarly, if all of $p$, $q$, $r$ are -of type $F^{\bullet}+\bbnum 0$, the associativity law of $\text{zip}_{C}$ +of types $F^{...}+\bbnum 0$, the associativity law of $\text{zip}_{C}$ is reduced to the associativity law of $\text{zip}_{F}$. Since the laws of $\text{zip}_{F}$ and $\text{zip}_{G}$ hold by assumption, we will not need to consider these two cases any further. @@ -3786,7 +3777,7 @@ \subsubsection{Statement \label{subsec:Statement-applicative-contrafunctor-co-pr \end{array}\quad. \end{align*} We can now compute $\text{zip}_{C}(p\times\text{zip}_{C}(q\times r))$, -which always returns values of type $F^{\bullet}+\bbnum 0$: +which always returns values of type $F^{A\times(B\times C)}+\bbnum 0$: \begin{align*} & \text{zip}_{C}(p\times\text{zip}_{C}(q\times r))\\ & =(p\times q\times r)\triangleright\,\begin{array}{|c||cc|} @@ -3823,7 +3814,7 @@ \subsubsection{Statement \label{subsec:Statement-applicative-contrafunctor-co-pr \begin{align*} & \text{zip}_{F}(f\times(h\triangleright\pi_{2}^{\downarrow F}))\triangleright\tilde{\varepsilon}_{1,23}^{\downarrow F}\\ & \quad=\text{zip}_{F}(f\times h)\triangleright(a\times(b\times c)\rightarrow a\times c)^{\downarrow F}\bef(a\times b\times c\rightarrow a\times(b\times c))^{\downarrow F}\\ - & \quad=\text{zip}_{F}(f\times h)\triangleright(a\times b\times c\rightarrow a\times c)^{\downarrow F}\quad.\\ + & \quad=\text{zip}_{F}(f\times h)\triangleright(a\times b\times c\rightarrow a\times c)^{\downarrow F}\quad,\\ & \text{zip}_{F}((f\triangleright\pi_{1}^{\downarrow F})\times h)\triangleright\tilde{\varepsilon}_{12,3}^{\downarrow F}\\ & \quad=\text{zip}_{F}(f\times h)\triangleright((a\times b)\times c\rightarrow a\times c)^{\downarrow F}\bef(a\times b\times c\rightarrow(a\times b)\times c)^{\downarrow F}\\ & \quad=\text{zip}_{F}(f\times h)\triangleright(a\times b\times c\rightarrow a\times c)^{\downarrow F}\quad. @@ -3862,15 +3853,14 @@ \subsubsection{Statement \label{subsec:Statement-applicative-contrafunctor-co-pr The construction $P^{A}\triangleq H^{A}\rightarrow G^{A}$ for applicative contrafunctors has no analog for applicative \emph{functors}. Exercise~\ref{subsec:Exercise-function-type-construction-not-applicative} shows simple examples where a function type construction fails to -produce applicative functors. However, the function type construction -works for a wide class of applicative contrafunctors: +produce applicative functors. However, this construction works for +a wide class of applicative contrafunctors: \subsubsection{Statement \label{subsec:Statement-applicative-contrafunctor-exponential}\ref{subsec:Statement-applicative-contrafunctor-exponential}} -If $G^{\bullet}$ is an applicative contrafunctor and $H^{\bullet}$ -is \emph{any functor} then the contrafunctor $P^{A}\triangleq H^{A}\rightarrow G^{A}$ -is applicative. If $G^{\bullet}$ is commutative then $P^{\bullet}$ -is also commutative. +If $G$ is an applicative contrafunctor and $H$ is \emph{any functor} +then the contrafunctor $P^{A}\triangleq H^{A}\rightarrow G^{A}$ is +applicative. If $G$ is commutative then $P$ is also commutative. 
\subparagraph{Proof} @@ -3911,8 +3901,8 @@ \subsubsection{Statement \label{subsec:Statement-applicative-contrafunctor-expon & =\text{zip}_{G}(p(h)\times\text{wu}_{G})\triangleright\text{iru}^{\downarrow G}=p(h)\quad. \end{align*} -To verify the associativity law, we use properties such as $\varepsilon_{12,3}\bef\pi_{2}=\pi_{3}$ -and so on: +To verify the associativity law, we use properties such as $\varepsilon_{12,3}\bef\pi_{2}=\pi_{3}$, +etc.: \begin{align*} & h^{:H^{A\times B\times C}}\triangleright\tilde{\varepsilon}_{1,23}^{\uparrow H}\triangleright\text{zip}_{P}(p\times\text{zip}_{P}(q\times r))\triangleright\tilde{\varepsilon}_{1,23}^{\downarrow G}\\ & \quad=\text{zip}_{G}\big(p(h\triangleright\gunderline{\tilde{\varepsilon}_{1,23}^{\uparrow H}\bef\pi_{1}^{\uparrow H}})\times\text{zip}_{P}(q\times r)(h\triangleright\gunderline{\tilde{\varepsilon}_{1,23}^{\uparrow H}\bef\pi_{2}^{\uparrow H}})\big)\triangleright\tilde{\varepsilon}_{1,23}^{\downarrow G}\\ @@ -3924,8 +3914,8 @@ \subsubsection{Statement \label{subsec:Statement-applicative-contrafunctor-expon The two sides are now equal due to the assumed associativity law of $\text{zip}_{G}$. -It remains to verify the commutativity law, assuming that $\text{zip}_{G}$ -satisfies that law: +It remains to verify the commutativity law when $\text{zip}_{G}$ +obeys that law: \begin{align*} & \text{zip}_{P}(q\times p)=\Delta\bef(\pi_{1}^{\uparrow H}\boxtimes\pi_{2}^{\uparrow H})\bef(q\boxtimes p)\bef\text{zip}_{G}\quad,\\ & \text{zip}_{P}(p\times q)\triangleright\text{swap}^{\downarrow P}=\text{swap}^{\uparrow H}\bef\Delta\bef(\pi_{1}^{\uparrow H}\boxtimes\pi_{2}^{\uparrow H})\bef(p\boxtimes q)\bef\gunderline{\text{zip}_{G}\bef\text{swap}^{\downarrow G}}\\ @@ -3949,8 +3939,9 @@ \subsection{Applicative profunctors: Laws and constructions} constructor $P^{X,Y}$ a profunctor when it is contravariant in $X$ and covariant in $Y$. Given such a profunctor $P^{X,Y}$, we can set $X=Y$ and obtain a type constructor $Q^{A}\triangleq P^{A,A}$, -which is neither covariant nor contravariant in $A$. This new type -constructor ($Q$) is also called a \textsf{``}profunctor\textsf{''} for brevity. +which is neither covariant nor contravariant in $A$. In this book, +such type constructors $Q$ are also called \textsf{``}profunctors\textsf{''} for +brevity. When a given type constructor $Q$ is fully parametric, we can always separate the covariant and the contravariant occurrences of the type @@ -3975,7 +3966,7 @@ \subsection{Applicative profunctors: Laws and constructions} values of type $U^{A}$ cannot be created for arbitrary type parameter $A$ (only for $A=\text{Int}$ or $A=\text{Long}$ or $A=\bbnum 1$). This prevents us from implementing any \lstinline!map!-like methods -for $U^{\bullet}$. +for $U$. Profunctors have an \lstinline!xmap! method instead of a \lstinline!map! method: @@ -3993,8 +3984,8 @@ \subsection{Applicative profunctors: Laws and constructions} \subsubsection{Definition \label{subsec:Definition-applicative-profunctor}\ref{subsec:Definition-applicative-profunctor}} -A profunctor $P^{\bullet}$ is \textbf{applicative} if there exist -methods \lstinline!zip! and \lstinline!wu! such that: +A profunctor $P$ is \textbf{applicative} if there exist methods \lstinline!zip! +and \lstinline!wu! 
such that: \begin{align*} & \text{zip}_{P}:P^{A}\times P^{B}\rightarrow P^{A\times B}\quad,\quad\quad\text{zip}_{P}(p\times\text{zip}_{P}(q\times r))\cong\text{zip}_{P}(\text{zip}_{P}(p\times q)\times r)\quad,\\ & \text{wu}_{P}:P^{\bbnum 1}\quad,\quad\text{zip}_{P}(\text{wu}_{P}\times p)\cong p\quad,\quad\quad\text{zip}_{P}(p\times\text{wu}_{P})\cong p\quad. @@ -4020,9 +4011,8 @@ \subsubsection{Definition \label{subsec:Definition-applicative-profunctor}\ref{s It is important that the \lstinline!pure! method ($\text{pu}_{P}$) for profunctors is defined via the wrapped unit ($\text{wu}_{P}$). The presence of a value $\text{wu}_{P}:P^{\bbnum 1}$ means that the -profunctor $P^{\bullet}$ is pointed\index{profunctor!pointed} (see -Section~\ref{subsec:Pointed-functors-motivation-equivalence}). For -functors and contrafunctors, the naturality law of \lstinline!pure! +profunctor $P$ is pointed\index{profunctor!pointed} (see Section~\ref{subsec:Pointed-functors-motivation-equivalence}). +For functors and contrafunctors, the naturality law of \lstinline!pure! is enough to enforce the equivalence of \lstinline!pure! and \lstinline!wu!. For profunctors, however, the value $\text{wu}_{P}:P^{\bbnum 1}$ is \emph{not} equivalent to the type of fully parametric functions @@ -4040,14 +4030,14 @@ \subsubsection{Example \label{subsec:Example-profunctor-pure-not-equivalent}\ref We can rewrite the type of $\text{wu}_{P}$ equivalently as $P^{\bbnum 1}=\left(\bbnum 1\rightarrow\bbnum 1\right)\rightarrow\bbnum 1\cong\bbnum 1$. So, there is only one value of this type (a function that ignores its argument and always returns the unit value $1$). The corresponding -\lstinline!pure! method is a function that ignores its argument always -returns the given value: +\lstinline!pure! method is a function that ignores its argument and +always returns the given value: \[ \text{pu}_{P}\triangleq a^{:A}\rightarrow\_^{:A\rightarrow A}\rightarrow a\quad. \] But there are many more functions with the same type signature as -$\text{pu}_{P}$. To see this, it is convenient swap the curried arguments -of $\text{pu}_{P}$ and obtain an equivalent type: +$\text{pu}_{P}$. To see this, we swap the curried arguments of $\text{pu}_{P}$ +and obtain an equivalent type: \[ A\rightarrow P^{A}=A\rightarrow\left(A\rightarrow A\right)\rightarrow A\cong\left(A\rightarrow A\right)\rightarrow A\rightarrow A\quad. \] @@ -4074,10 +4064,10 @@ \subsubsection{Example \label{subsec:Example-profunctor-pure-not-equivalent}\ref \subsubsection{Statement \label{subsec:Statement-applicative-profunctor-composition}\ref{subsec:Statement-applicative-profunctor-composition}} -If $F^{\bullet}$ is an applicative \emph{functor} and $G^{\bullet}$ -is an applicative profunctor then the profunctor $P^{A}\triangleq F^{G^{A}}$ -(equivalently written as $P\triangleq F\circ G$) is applicative. -If both $F$ and $G$ are commutative then so is $P$. +If $F$ is an applicative \emph{functor} and $G$ is an applicative +profunctor then the profunctor $P^{A}\triangleq F^{G^{A}}$ (equivalently +written as $P\triangleq F\circ G$) is applicative. If both $F$ and +$G$ are commutative then so is $P$. \subparagraph{Proof} @@ -4085,9 +4075,8 @@ \subsubsection{Statement \label{subsec:Statement-applicative-profunctor-composit \subsubsection{Statement \label{subsec:Statement-applicative-profunctor-product}\ref{subsec:Statement-applicative-profunctor-product}} -If $G^{\bullet}$ and $H^{\bullet}$ are applicative profunctors then -so is $P^{A}\triangleq G^{A}\times H^{A}$. 
If both $G$ and $H$ -are commutative then so is $P$. +If $G$ and $H$ are applicative profunctors then so is $P^{A}\triangleq G^{A}\times H^{A}$. +If both $G$ and $H$ are commutative then so is $P$. \subparagraph{Proof} @@ -4154,9 +4143,8 @@ \subsubsection{Statement \label{subsec:Statement-applicative-profunctor-product} \subsubsection{Statement \label{subsec:Statement-applicative-profunctor-co-product-1}\ref{subsec:Statement-applicative-profunctor-co-product-1}} -If $F^{\bullet}$ is an applicative profunctor and $Z$ is a fixed -monoid type then the profunctor $P^{A}\triangleq Z+F^{A}$ is also -applicative: +If $F$ is an applicative profunctor and $Z$ is a fixed monoid type +then the profunctor $P^{A}\triangleq Z+F^{A}$ is also applicative: \begin{align*} & \text{zip}_{P}:(Z+F^{A})\times(Z+F^{B})\rightarrow Z+F^{A\times B}\quad,\\ & \text{zip}_{P}\triangleq\,\begin{array}{|c||cc|} @@ -4168,8 +4156,8 @@ \subsubsection{Statement \label{subsec:Statement-applicative-profunctor-co-produ \end{array}\quad. \end{align*} The method $\text{wu}_{P}:Z+F^{\bbnum 1}$ is defined by $\text{wu}_{P}\triangleq\bbnum 0^{:Z}+\text{wu}_{F}$. -If $Z$ is a commutative monoid and $F^{\bullet}$ is commutative -then $P^{\bullet}$ is also commutative. +If $Z$ is a commutative monoid and $F$ is commutative then $P$ +is also commutative. \subparagraph{Proof} @@ -4234,7 +4222,7 @@ \subsubsection{Statement \label{subsec:Statement-applicative-profunctor-co-produ {\color{greenunder}\text{right-hand side}:}\quad & \text{zip}_{P}\big(\text{zip}_{P}(p^{:Z+F^{A}}\times q^{:Z+F^{B}})\times r^{:Z+F^{C}}\big)\triangleright\varepsilon_{12,3}^{\uparrow P}\tilde{\varepsilon}_{12,3}^{\downarrow P}\quad. \end{align*} Since each of the arguments $p$, $q$, $r$ may be in one of the -two parts of the disjunction type $Z+F^{\bullet}$, we have 8 cases. +two parts of the disjunctive types $Z+F^{...}$, we have 8 cases. We note, however, that the code of $\text{zip}_{P}(p\times q)$ will return a value of type $Z+\bbnum 0$ whenever at least one of the arguments ($p$, $q$) is of type $Z+\bbnum 0$. So, a composition @@ -4243,7 +4231,7 @@ \subsubsection{Statement \label{subsec:Statement-applicative-profunctor-co-produ $r$) is of type $Z+\bbnum 0$. It remains to consider only two cases: \textbf{(1)} At least one of $p$, $q$, $r$ is of type $Z+\bbnum 0$. -In this case, any arguments of type $\bbnum 0+F^{\bullet}$ are ignored +In this case, any arguments of types $\bbnum 0+F^{...}$ are ignored by $\text{zip}_{P}$, while the arguments of type $Z+\bbnum 0$ are combined using the monoid $Z$\textsf{'}s binary operation ($\oplus$). So, the result of the \lstinline!zip! operation is the same if we replace @@ -4257,7 +4245,7 @@ \subsubsection{Statement \label{subsec:Statement-applicative-profunctor-co-produ and the function $\text{zip}_{P}$ reduces to the operation $\oplus$, for which the associativity law holds by assumption. -\textbf{(2)} All of $p$, $q$, $r$ are of type $\bbnum 0+F^{\bullet}$. +\textbf{(2)} All of $p$, $q$, $r$ are of types $\bbnum 0+F^{...}$. In this case, $\text{zip}_{P}$ reduces to $\text{zip}_{F}$, which satisfies the associativity law by assumption. @@ -4308,13 +4296,14 @@ \subsubsection{Statement \label{subsec:Statement-applicative-profunctor-co-produ \end{array}\quad. \] The difference between the sides disappears if $Z$ is a commutative -monoid ($z_{1}\oplus z_{2}=z_{2}\oplus z_{1}$). $\square$ +monoid: in that case, we have $z_{1}\oplus z_{2}=z_{2}\oplus z_{1}$. 
+$\square$ \subsubsection{Statement \label{subsec:Statement-applicative-profunctor-co-product-2}\ref{subsec:Statement-applicative-profunctor-co-product-2}} -If $F^{\bullet}$ and $H^{\bullet}$ are applicative profunctors and -$H^{\bullet}$ is co-pointed with the method $\text{ex}_{F}:H^{A}\rightarrow A$ -such that the compatibility law~(\ref{eq:compatibility-law-of-extract-and-zip}) +If $F$ and $H$ are applicative profunctors and $H$ is co-pointed +with the method $\text{ex}_{F}:H^{A}\rightarrow A$ such that the +compatibility law~(\ref{eq:compatibility-law-of-extract-and-zip}) holds, then $P^{A}\triangleq H^{A}+F^{A}$ is also applicative: \begin{align*} & \text{zip}_{P}:(H^{A}+F^{A})\times(H^{B}+F^{B})\rightarrow H^{A\times B}+F^{A\times B}\quad,\\ @@ -4328,13 +4317,12 @@ \subsubsection{Statement \label{subsec:Statement-applicative-profunctor-co-produ \end{align*} The method $\text{wu}_{P}:H^{\bbnum 1}+F^{\bbnum 1}$ is defined by $\text{wu}_{P}\triangleq\text{wu}_{H}+\bbnum 0^{:F^{\bbnum 1}}$. -If $F^{\bullet}$ and $H^{\bullet}$ are commutative then $P^{\bullet}$ -is also commutative. +If $F$ and $H$ are commutative then $P$ is also commutative. \subparagraph{Proof} -We follow the proof of Statement~\ref{subsec:Statement-co-product-with-co-pointed-applicative}. -The lifting to $P^{\bullet}$ is defined by: +Follow the proof of Statement~\ref{subsec:Statement-co-product-with-co-pointed-applicative}. +The lifting to $P$ is defined by: \[ (f^{:A\rightarrow B})^{\uparrow P}(g^{:B\rightarrow A})^{\downarrow P}\triangleq\,\begin{array}{|c||cc|} & H^{B} & F^{B}\\ @@ -4399,7 +4387,7 @@ \subsubsection{Statement \label{subsec:Statement-applicative-profunctor-co-produ The associativity law is an equation between values of type $H^{A\times B\times C}+F^{A\times B\times C}$: \begin{align*} & \text{zip}_{P}(p^{:H^{A}+F^{A}}\times\text{zip}_{P}(q^{:H^{B}+F^{B}}\times r^{:H^{C}+F^{C}}))\triangleright\varepsilon_{1,23}^{\uparrow P}\tilde{\varepsilon}_{1,23}^{\downarrow P}\\ - & \quad=\text{zip}_{P}(\text{zip}_{P}(p\times q)\times r)\triangleright\varepsilon_{12,3}^{\uparrow P}\tilde{\varepsilon}_{12,3}^{\downarrow P}\quad. + & \quad\overset{?}{=}\text{zip}_{P}(\text{zip}_{P}(p\times q)\times r)\triangleright\varepsilon_{12,3}^{\uparrow P}\tilde{\varepsilon}_{12,3}^{\downarrow P}\quad. \end{align*} The operation $\text{zip}_{P}(p\times q)$ is defined in such a way that it returns a value of type $H^{A\times B}+\bbnum 0$ only when @@ -4423,9 +4411,9 @@ \subsubsection{Statement \label{subsec:Statement-applicative-profunctor-co-produ \textbf{(2)} The argument $q$ has type $\bbnum 0+F^{B}$. In this case, $\text{zip}_{P}$ reduces to $\text{zip}_{F}$ after converting -arguments of type $H^{\bullet}+0$ to type $F^{\bullet}$ when needed. -We may define this conversion as a helper function \lstinline!toF! -in the same way as in the proof of Statement~\ref{subsec:Statement-co-product-with-co-pointed-applicative}. +arguments of types $H^{...}+0$ to types $F^{...}$ when needed. We +may define this conversion as a function \textsf{``}\lstinline!toF!\textsf{''} as +in the proof of Statement~\ref{subsec:Statement-co-product-with-co-pointed-applicative}. 
 The associativity law of $\text{zip}_{P}$ is then reduced to the same
 law of $\text{zip}_{F}$:
 \begin{align*}
@@ -4489,7 +4477,7 @@ \subsubsection{Statement \label{subsec:Statement-applicative-profunctor-co-produ
 It remains to verify the commutativity law in case that law holds
 for $F$ and $H$:
 \[
-\text{swap}\bef\text{zip}_{F}\overset{!}{=}\text{zip}_{F}\bef\text{swap}^{\uparrow F}\text{swap}^{\downarrow F}\quad,\quad\text{swap}\bef\text{zip}_{H}\overset{!}{=}\text{zip}_{H}\bef\text{swap}^{\uparrow H}\text{swap}^{\downarrow H}\quad\quad.
+\text{swap}\bef\text{zip}_{F}\overset{!}{=}\text{zip}_{F}\bef\text{swap}^{\uparrow F}\text{swap}^{\downarrow F}\quad,\quad\text{swap}\bef\text{zip}_{H}\overset{!}{=}\text{zip}_{H}\bef\text{swap}^{\uparrow H}\text{swap}^{\downarrow H}\quad.
 \]
 Begin with the left-hand side of the commutativity law for $\text{zip}_{P}$:
 \[
@@ -4544,9 +4532,8 @@ \subsubsection{Statement \label{subsec:Statement-applicative-profunctor-co-produ
 
 \subsubsection{Statement \label{subsec:Statement-applicative-profunctor-exponential}\ref{subsec:Statement-applicative-profunctor-exponential}}
 
-If $G^{\bullet}$ is an applicative profunctor and $H^{\bullet}$
-is \emph{any functor} then the profunctor $P^{A}\triangleq H^{A}\rightarrow G^{A}$
-is applicative.
+If $G$ is an applicative profunctor and $H$ is \emph{any functor}
+then the profunctor $P^{A}\triangleq H^{A}\rightarrow G^{A}$ is applicative.
 
 \subparagraph{Proof}
 
@@ -4612,9 +4599,8 @@ \subsubsection{Statement \label{subsec:Statement-applicative-profunctor-exponent
 
 Note that the functor $H$ must be \emph{covariant} in the last construction
 ($P^{A}\triangleq H^{A}\rightarrow G^{A}$). Otherwise, the profunctor
-$P$ will not be applicative even the simple cases such as $P^{A}\triangleq H^{A}\rightarrow A$
-where $H$ is an arbitrary profunctor. (We omit the proof of that
-statement.)
+$P$ will not be applicative even in simple cases such as $P^{A}\triangleq H^{A}\rightarrow A$.
+(We omit the proof of that statement.)
 
 \section{Discussion and further developments}
 
@@ -4718,8 +4704,7 @@ \subsection{Equivalence of typeclass methods with laws}
 we could define a function $p$ like this:
 \begin{lstlisting}
 def p[A]: A => A = {
-  case a: Int => (a + 123)
-    .asInstanceOf[A]
+  case a: Int => (a + 123).asInstanceOf[A]
   case a => a
 }
 \end{lstlisting}
@@ -4764,8 +4749,8 @@ \subsection{Equivalence of typeclass methods with laws}
 type parameters while \lstinline!map2! has three; \lstinline!flatten!
 has one type parameter while \lstinline!flatMap! has two. For this
 reason, we have systematically derived the laws of all the equivalent
-typeclass methods. In many cases, we found a formulation of the laws
-that was either conceptually simpler or more straightforward to verify.
+typeclass methods. In many cases, we found formulations of the laws
+that are simpler and more straightforward to verify.
 
 \subsection{Relationship between monads and applicative functors}
 
@@ -4774,11 +4759,11 @@ \subsection{Relationship between monads and applicative functors}
 we may define the \lstinline!map2! method via \lstinline!map! and
 \lstinline!flatMap! in two ways (which will give the same code if
 the monad is commutative). The \lstinline!map2! methods defined in
-this way will have the right type signature and will satisfy the laws
-of applicative functors. This is due to the fact that the laws of
-\lstinline!map2! are derived (as shown in Section~\ref{subsec:Motivation-for-the-laws-of-map2})
-from the monad laws precisely by considering the \lstinline!map2! 
-method defined via the monad\textsf{'}s \lstinline!flatMap!. +this way will have the right type signature and will satisfy the applicative +laws. This is due to the fact that those laws are derived (as shown +in Section~\ref{subsec:Motivation-for-the-laws-of-map2}) from the +monad laws precisely by considering the \lstinline!map2! method defined +via the monad\textsf{'}s \lstinline!flatMap!. However, in many cases we need to define the \lstinline!map2! method in a different way because expressing \lstinline!map2! via \lstinline!flatMap! @@ -4788,21 +4773,21 @@ \subsection{Relationship between monads and applicative functors} the standard \lstinline!flatMap! methods of those functors. So, it is rarely useful to define an \lstinline!Applicative! typeclass -instance automatically for all monads. In most cases, we need to define -the applicative instance separately from the monad instance. (Automatic -derivation of \lstinline!Applicative! instances is made difficult -also by the fact that many type constructors will admit more than -one lawful implementation of \lstinline!map2!.) +instance automatically for a given monad. In many cases, we need to +define the applicative instance separately from the monad instance. +Automatic derivation of \lstinline!Applicative! instances is difficult +also because many type constructors admit several lawful implementations +of \lstinline!map2!. In addition, some applicative functors are not monads. We have shown in Section~\ref{subsec:Constructions-of-applicative-functors} that a lawful \lstinline!Applicative! instance exists for all polynomial -functors with monoidal fixed types. (Accordingly, all our examples -of non-applicative functors involve non-polynomial functors.) This -does not hold for monads; not all polynomial functors are monadic.\footnote{It is unknown how to characterize or enumerate all polynomial functors -that are monads (see Problems~\ref{par:Problem-monads}\textendash \ref{par:Problem-monads-1}).} Example~\ref{subsec:Example-applicative-not-monad} shows a simple -applicative functor ($L^{A}\triangleq\bbnum 1+A\times A$) that \emph{cannot} -have a lawful monad implementation (Exercise~\ref{subsec:Exercise-1-monads-7-not-a-monad}). +functors with monoidal fixed types. (All our examples of non-applicative +functors involve non-polynomial functors.) But we have seen that not +all polynomial functors are monads.\footnote{It is unknown how to characterize or enumerate all polynomial functors +that are monads (see Problems~\ref{par:Problem-monads-1}\textendash \ref{par:Problem-monads}).} Example~\ref{subsec:Example-applicative-not-monad} shows a simple +applicative functor ($L^{A}\triangleq\bbnum 1+A\times A$) that does +\emph{not} have a lawful monad implementation (Exercise~\ref{subsec:Exercise-1-monads-7-not-a-monad}). We have also seen that the \lstinline!zip! and \lstinline!wu! operations exist for some type constructors that are not covariant. We conclude @@ -4818,7 +4803,7 @@ \subsection{Applicative morphisms\label{subsec:Applicative-morphisms}} as $\text{ex}_{H}^{A}:H^{A}\rightarrow\text{Id}^{A}$, where $\text{Id}^{A}\triangleq A$ is the identity functor (which is also applicative). Viewed in this way, the \lstinline!extract! method is an example of a mapping between -two applicative functors ($H$ and $\text{Id}$). So, the compatibility +two applicative functors ($H$ and $\text{Id}$). The compatibility law requires that the operation $\text{zip}_{H}$ be mapped to the tupling operation, which is the same as the \lstinline!zip! 
 operation of the identity functor: $\text{zip}_{\text{Id}}(a\times b)=a\times b$.
@@ -4840,8 +4825,8 @@ \subsection{Applicative morphisms\label{subsec:Applicative-morphisms}}
 law}: $\text{pu}_{H}\bef\phi=\text{pu}_{K}$.\index{identity laws!of applicative morphisms}
 Functions $\phi$ satisfying these two laws are called \textbf{applicative
 morphisms}\index{applicative morphism} between two applicative functors
-$H$ and $K$. The two laws make applicative morphisms fully analogous
-to monad morphisms (Section~\ref{subsec:Monads-in-category-theory-monad-morphisms}).
+$H$ and $K$. The two laws display the similarity between applicative
+morphisms and monad morphisms (Section~\ref{subsec:Monads-in-category-theory-monad-morphisms}).
 
 \subsection{Deriving the laws of \texttt{ap} using category theory}
 
@@ -4869,14 +4854,14 @@ \subsection{Deriving the laws of \texttt{ap} using category theory}
 A categorical functor can exist only between two categories. So, we
 need to show that values of type $L^{A\rightarrow B}$ (\textsf{``}wrapped
 functions\textsf{''}) can play the role of morphisms in a suitably defined
-category. To define that category, we need to produce objects, morphisms,
+category. To define that category, we need to define objects, morphisms,
 the identity morphism, and the composition operation, and prove their
 laws. Finally, we will need to prove that \lstinline!ap! satisfies
 the identity and composition laws appropriate for a (categorical)
 functor.
 
-This section will follow these considerations in order to derive and
-verify the laws of \lstinline!ap!.
+This section will derive and verify the laws of \lstinline!ap!
+in that way.
 
 The first step is to define two categories between which we will establish
 a categorical functor. The objects of both categories are ordinary
@@ -4910,8 +4895,8 @@ \subsubsection{Statement \label{subsec:Statement-ap-category-laws}\ref{subsec:St
 {\color{greenunder}\text{definition (a) of }\odot:}\quad & p^{:L^{A\rightarrow B}}\odot q^{:L^{B\rightarrow C}}\triangleq(p\times q)\triangleright\text{zip}\triangleright(f^{:A\rightarrow B}\times g^{:B\rightarrow C}\rightarrow f\bef g)^{\uparrow L}\quad,\\
 {\color{greenunder}\text{definition (b) of }\odot:}\quad & p^{:L^{A\rightarrow B}}\odot q^{:L^{B\rightarrow C}}\triangleq(q\times p)\triangleright\text{zip}\triangleright(g^{:B\rightarrow C}\times f^{:A\rightarrow B}\rightarrow f\bef g)^{\uparrow L}\quad.
 \end{align*}
-Moreover, the special \textsf{``}wrapped identity\textsf{''} value, \lstinline!wid!,
-of type $L^{A\rightarrow A}$, is defined by $\text{wid}_{L}^{A}\triangleq\text{pu}_{L}(\text{id}^{A})$.
+Moreover, the special \textsf{``}wrapped identity\textsf{''} value (\lstinline!wid!$:L^{A\rightarrow A}$)
+is defined by $\text{wid}_{L}^{A}\triangleq\text{pu}_{L}(\text{id}^{A})$.
 The following properties then hold for each of the two definitions:\index{identity laws!of applicative composition}\index{associativity law!of applicative composition}
 \begin{align*}
 {\color{greenunder}\text{left identity law of }\odot:}\quad & \text{wid}_{L}^{A}\odot p^{:L^{A\rightarrow B}}=p\quad,\\
@@ -4972,7 +4957,7 @@ \subsubsection{Statement \label{subsec:Statement-ap-category-laws}\ref{subsec:St
 & \big((f\times g)\times h\rightarrow f\bef g\bef h\big)=\varepsilon_{12,3}\bef(f\times g\times h\rightarrow f\bef g\bef h)\quad,\\
 & \big(f\times(g\times h)\rightarrow f\bef g\bef h\big)=\varepsilon_{1,23}\bef(f\times g\times h\rightarrow f\bef g\bef h)\quad. 
\end{align*} -Using these equations, we show that the two sides of the associativity +Using these equations, we find that the two sides of the associativity law of $\odot$ are equal: \begin{align*} {\color{greenunder}\text{left-hand side}:}\quad & \gunderline{\text{zip}\left(\text{zip}\left(p\times q\right)\times r\right)\triangleright\varepsilon_{12,3}}\bef(f\times g\times h\rightarrow f\bef g\bef h)\\ @@ -5022,7 +5007,7 @@ \subsubsection{Statement \label{subsec:Statement-ap-category-laws}\ref{subsec:St & \big(h\times(g\times f)\rightarrow f\bef g\bef h\big)=\varepsilon_{1,23}\bef(h\times g\times f\rightarrow f\bef g\bef h)\quad,\\ & \big((h\times g)\times f\rightarrow f\bef g\bef h\big)=\varepsilon_{12,3}\bef(h\times g\times f\rightarrow f\bef g\bef h)\quad. \end{align*} -Using these equations, we show that the two sides of the associativity +Using these equations, we find that the two sides of the associativity law of $\odot$ are equal: \begin{align*} {\color{greenunder}\text{left-hand side}:}\quad & \gunderline{\text{zip}\left(r\times\text{zip}\left(q\times p\right)\right)\triangleright\varepsilon_{1,23}}\bef(h\times g\times f\rightarrow f\bef g\bef h)^{\uparrow L}\\ @@ -5093,12 +5078,11 @@ \subsubsection{Statement \label{subsec:Statement-ap-functor-laws}\ref{subsec:Sta To verify the composition law~(\ref{eq:composition-law-of-ap}), apply both sides to an arbitrary value $r^{:L^{A}}$: \begin{align*} -{\color{greenunder}\quad\text{left-hand side}:}\quad & r\triangleright\text{ap}\big(p^{:L^{A\rightarrow B}}\odot q^{:L^{B\rightarrow C}}\big)=\text{ap}\,(p\odot q)(r)\\ - & =((p\odot q)\times r)\triangleright\text{zip}\bef\text{eval}^{\uparrow L} +{\color{greenunder}\text{left-hand side}:}\quad & r\triangleright\text{ap}\big(p^{:L^{A\rightarrow B}}\odot q^{:L^{B\rightarrow C}}\big)=\text{ap}\,(p\odot q)(r)\\ + & =((p\odot q)\times r)\triangleright\text{zip}\bef\text{eval}^{\uparrow L}\\ +{\color{greenunder}\text{definition (b) of }\odot:}\quad & =\big(\big(\text{zip}\left(q\times p\right)\triangleright\gunderline{(h\times g\rightarrow g\bef h)^{\uparrow L}}\big)\times r\big)\triangleright\gunderline{\text{zip}}\bef\text{eval}^{\uparrow L} \end{align*} \begin{align*} - & \quad{\color{greenunder}\text{definition (b) of }\odot:}\quad\\ - & =\big(\big(\text{zip}\left(q\times p\right)\triangleright\gunderline{(h\times g\rightarrow g\bef h)^{\uparrow L}}\big)\times r\big)\triangleright\gunderline{\text{zip}}\bef\text{eval}^{\uparrow L}\\ & \quad{\color{greenunder}\text{naturality law of }\text{zip}:}\quad\\ & =\big(\text{zip}\left(q\times p\right)\times r\big)\triangleright\text{zip}\triangleright\gunderline{((h\times g)\times a\rightarrow(g\bef h)\times a)^{\uparrow L}\bef\text{eval}^{\uparrow L}}\\ & \quad{\color{greenunder}\text{composition under }^{\uparrow L}:}\quad\\ @@ -5193,7 +5177,7 @@ \subsection{The pattern of \textquotedblleft functorial\textquotedblright{} type \hline {\footnotesize{}contrafilterable} & {\footnotesize{}$B\rightarrow\bbnum 1+A$} & {\footnotesize{}$\text{liftOpt}:\left(B\rightarrow\bbnum 1+A\right)\rightarrow(F^{A}\rightarrow F^{B})$}\tabularnewline \hline -{\footnotesize{}comonad} & {\footnotesize{}$F^{A}\rightarrow B$} & {\footnotesize{}$\text{coflm}:(F^{A}\rightarrow B)\rightarrow(F^{A}\rightarrow F^{B})$}\tabularnewline +{\footnotesize{}comonad\index{comonad}} & {\footnotesize{}$F^{A}\rightarrow B$} & {\footnotesize{}$\text{coflm}:(F^{A}\rightarrow B)\rightarrow(F^{A}\rightarrow F^{B})$}\tabularnewline \hline \end{tabular} \par\end{centering} @@ -5340,7 +5324,7 @@ 
\subsubsection{Exercise \label{subsec:Exercise-additional-law-of-ap}\ref{subsec: \subsubsection{Exercise \label{subsec:Exercise-applicative-of-monoid-is-monoid}\ref{subsec:Exercise-applicative-of-monoid-is-monoid}} Show that $P^{S}$ is a monoid if $S$ is a fixed monoidal type and -$P^{\bullet}$ is any applicative functor, contrafunctor, or profunctor. +$P$ is any applicative functor, contrafunctor, or profunctor. \subsubsection{Exercise \label{subsec:Exercise-applicative-II-1}\ref{subsec:Exercise-applicative-II-1}} @@ -5372,9 +5356,9 @@ \subsubsection{Exercise \label{subsec:Exercise-applicative-II-5}\ref{subsec:Exer \subsubsection{Exercise \label{subsec:Exercise-applicative-II-7}\ref{subsec:Exercise-applicative-II-7}} Show that the recursive functor $F^{A}\triangleq\bbnum 1+G^{A\times F^{A}}$ -is applicative if $G^{A}$ is applicative and $\text{wu}_{F}$ is -defined recursively as $\text{wu}_{F}\triangleq\bbnum 0+\text{pu}_{G}\left(1\times\text{wu}_{F}\right)$. -Use applicative functor constructions. +is applicative if $\text{wu}_{F}$ is defined recursively as $\text{wu}_{F}\triangleq\bbnum 0+\text{pu}_{G}\left(1\times\text{wu}_{F}\right)$, +assuming $G$ is an applicative functor. (Use applicative functor +constructions.) \subsubsection{Exercise \label{subsec:Exercise-applicative-profunctor-composition}\ref{subsec:Exercise-applicative-profunctor-composition}} @@ -5387,26 +5371,26 @@ \subsubsection{Exercise \label{subsec:Exercise-profunctor-example}\ref{subsec:Ex Q^{A}\triangleq\left(A\rightarrow\text{Int}\right)\times A\times\left(A\rightarrow A\right)\quad. \] Show that $Q^{A}$ is neither covariant nor contravariant in $A$, -and express $Q^{A}$ via a profunctor. Is $Q^{A}$ applicative? +and express $Q^{A}$ via a profunctor. Is $Q$ applicative? \subsubsection{Exercise \label{subsec:Exercise-applicative-II-11}\ref{subsec:Exercise-applicative-II-11}} -\textbf{(a)} For any given profunctor $P^{A}$, implement a function -of type $A\times P^{B}\rightarrow P^{A\times B}$. +\textbf{(a)} For any given profunctor $P$, implement a function of +type $A\times P^{B}\rightarrow P^{A\times B}$. -\textbf{(b)} Show that, for some profunctors $P^{A}$, one \emph{cannot} +\textbf{(b)} Show that, for some profunctors $P$, one \emph{cannot} implement a fully parametric function of type $A\times P^{B}\rightarrow P^{A}$. \subsubsection{Exercise \label{subsec:Exercise-applicative-II-10}\ref{subsec:Exercise-applicative-II-10}} Implement profunctor and applicative instances for $P^{A}\triangleq A+Z\times G^{A}$ -where $G^{A}$ is a given applicative profunctor and $Z$ is a monoid. +where $G$ is a given applicative profunctor and $Z$ is a monoid. \subsubsection{Exercise \label{subsec:Exercise-profunctor-pure-not-equivalent-1}\ref{subsec:Exercise-profunctor-pure-not-equivalent-1}} For the profunctor $P^{A}\triangleq A\rightarrow A$: -\textbf{(a)} Show that $P^{A}$ is pointed: there exist a value \lstinline!wu! +\textbf{(a)} Show that $P$ is pointed: there exist a value \lstinline!wu! of type $P^{\bbnum 1}$ and a method \lstinline!pure! of type $A\rightarrow P^{A}$. 
\textbf{(b)} Show that the type of fully parametric functions $\text{pu}_{P}:A\rightarrow P^{A}$ diff --git a/sofp-src/tex/sofp-back-cover-no-bg.tex b/sofp-src/tex/sofp-back-cover-no-bg.tex index 36cd9e17a..61d00eb6d 100644 --- a/sofp-src/tex/sofp-back-cover-no-bg.tex +++ b/sofp-src/tex/sofp-back-cover-no-bg.tex @@ -22,13 +22,13 @@ analysis, and code for functors, monads, and other typeclasses based on exponential-polynomial data types; techniques of symbolic derivation and proof; free typeclass constructions; and -parametricity theorems. +practical applications of parametricity. Long and difficult, yet boring explanations are logically -developed in excruciating detail through 1893 +developed in excruciating detail through 1892 Scala code snippets, 191 statements with step-by-step derivations, 103 diagrams, 223 examples -with tested Scala code, and 300 exercises. Discussions +with tested Scala code, and 308 exercises. Discussions build upon each chapter\textsf{'}s material further. Beginners in FP will find tutorials about the \texttt{map}/\texttt{reduce} diff --git a/sofp-src/tex/sofp-curry-howard.tex b/sofp-src/tex/sofp-curry-howard.tex index 044aae7e5..1e3f08c62 100644 --- a/sofp-src/tex/sofp-curry-howard.tex +++ b/sofp-src/tex/sofp-curry-howard.tex @@ -186,7 +186,7 @@ \subsection{Motivation and outlook\label{subsec:Motivation-and-outlook}} is equivalent to \textsf{``}${\cal CH}(A)$ is true assuming ${\cal CH}(X)$, ${\cal CH}(Y)$, ..., ${\cal CH}(Z)$ are true\textsf{''}. In mathematical logic, a statement of this form is called a \textbf{sequent} and\index{sequent (in logic)} -is denoted using the symbol $\vdash$ (called the \textsf{``}\textbf{turnstile}\textsf{''}):\index{0@$\vdash$ (turnstile) symbol}\index{turnstile (vdash) symbol@turnstile ($\vdash$) symbol} +is denoted using the symbol $\vdash$ (called the \textsf{``}\textbf{turnstile}\textsf{''}):\index{$\triangleright$@$\vdash$ (turnstile) symbol}\index{turnstile (vdash) symbol@turnstile ($\vdash$) symbol} \begin{align} {\color{greenunder}\text{sequent}:}\quad & {\cal CH}(X),{\cal CH}(Y),...,{\cal CH}(Z)\vdash{\cal CH}(A)\quad.\label{eq:ch-example-sequent} \end{align} @@ -1206,7 +1206,7 @@ \subsection{The rules of proof for ${\cal CH}$-propositions\label{subsec:The-rul \[ \frac{\Gamma,\alpha\vdash\beta}{\Gamma\vdash\alpha\Rightarrow\beta}\quad(\text{create function})\quad\quad. \] -The \textbf{turnstile}\index{0@$\vdash$ (turnstile) symbol}\index{turnstile (vdash) symbol@turnstile ($\vdash$) symbol} +The \textbf{turnstile}\index{$\triangleright$@$\vdash$ (turnstile) symbol}\index{turnstile (vdash) symbol@turnstile ($\vdash$) symbol} symbol ($\vdash$) groups weaker than other operators. So, we can write sequents such as $(\Gamma,\alpha)\vdash(\beta\Rightarrow\gamma)$ with fewer parentheses: $\Gamma,\alpha\vdash\beta\Rightarrow\gamma$. @@ -3641,7 +3641,7 @@ \subsubsection{Example \label{subsec:ch-Example-type-identity-5}\ref{subsec:ch-E \] Here we used the symbol $\triangleright$ to separate an argument from a function when the argument is written to the \emph{left} of -the function. The symbol $\triangleright$ (pronounced \textsf{``}pipe\textsf{''}\index{pipe notation}\index{0@$\triangleright$-notation!see \textsf{``}pipe notation\textsf{''}}) +the function. The symbol $\triangleright$ (pronounced \textsf{``}pipe\textsf{''}\index{pipe notation}\index{triangleright-notation@$\triangleright$-notation!see \textsf{``}pipe notation\textsf{''}}) is defined by $x\triangleright f\triangleq f(x)$. 
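For example, here is a minimal sketch of the pipe notation in Scala code; it assumes the \lstinline!pipe! extension method from the standard library\textsf{'}s \lstinline!scala.util.chaining! package:
\begin{lstlisting}
import scala.util.chaining._

// `x.pipe(f)` computes f(x), so the argument is written to the left of the function.
assert(10.pipe(x => x + 1) == 11)
assert(List(3, 1, 2).pipe(_.sorted) == List(1, 2, 3))
\end{lstlisting}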
In Scala, this operation is available as \lstinline!x.pipe(f)! as of Scala 2.13. diff --git a/sofp-src/tex/sofp-disjunctions.tex b/sofp-src/tex/sofp-disjunctions.tex index a5aaa0d3a..3b2023804 100644 --- a/sofp-src/tex/sofp-disjunctions.tex +++ b/sofp-src/tex/sofp-disjunctions.tex @@ -533,8 +533,8 @@ \subsection{Examples: Pattern matching for disjunctive types\index{examples (wit res0: String = "one real root: 2.0" \end{lstlisting} Each \lstinline!case! pattern will introduce its own pattern variables, -such as \lstinline!r!, \lstinline!x!, \lstinline!y! as in the code -at left. Each pattern variable is defined only within the \emph{local +such as \lstinline!r!, \lstinline!x!, \lstinline!y! in the code +above. Each pattern variable is defined only within the \emph{local scope}\index{local scope}, that is, within the scope of its \lstinline!case! expression. It is impossible to make a mistake where we, say, refer to the variable \lstinline!r! within the code that handles the case diff --git a/sofp-src/tex/sofp-essay2.tex b/sofp-src/tex/sofp-essay2.tex index 1a3e19f78..f51a02b3d 100644 --- a/sofp-src/tex/sofp-essay2.tex +++ b/sofp-src/tex/sofp-essay2.tex @@ -137,14 +137,13 @@ \subsection*{No mathematical formalism guides software development} technical terms they are using (such as \textsf{``}object-oriented\textsf{''} or \textsf{``}module\textsf{'}s responsibilities\textsf{''}). Some of those books\footnote{E.g., \texttt{\href{https://amzn.com/dp/0073376256}{https://amzn.com/dp/0073376256}}} also have almost no program code in them. Some of those books are -written by practitioners such as R.\ C.\ Martin never studied any -formalisms and do not think in terms of formalisms. Instead, they -summarize their programming experience in vaguely formulated heuristic -\textquotedblleft principles\textquotedblright .\footnote{\texttt{\href{https://blog.cleancoder.com/uncle-bob/2016/03/19/GivingUpOnTDD.html}{https://blog.cleancoder.com/uncle-bob/2016/03/19/GivingUpOnTDD.html}}} +written by practitioners such as R.\ C.\ Martin who do not explain +their material in terms of formalisms. Instead, they summarize their +programming experience in heuristically formulated \textquotedblleft principles\textquotedblright .\footnote{\texttt{\href{https://blog.cleancoder.com/uncle-bob/2016/03/19/GivingUpOnTDD.html}{https://blog.cleancoder.com/uncle-bob/2016/03/19/GivingUpOnTDD.html}}} The programmers are told: \textsf{``}code is about detail\textsf{''}, \textsf{``}never abandon the big picture\textsf{''}, \textsf{``}avoid tight coupling in your modules\textsf{''}, \textsf{``}a class must serve a single responsibility\textsf{''}, \textsf{``}strive for good interfaces\textsf{''}, -etc. +and so on. In contrast, textbooks on mechanical or electrical engineering include a significant amount of mathematics. The design of a microwave antenna @@ -176,9 +175,8 @@ \subsection*{No mathematical formalism guides software development} Donald Knuth\textsf{'}s classic textbook \textsf{``}\emph{The Art of Programming}\textsf{''} indeed treats programming as an art and not as a science. Knuth shows -many algorithms and derives their mathematical properties but gives -almost no examples of realistic program code and does not provide -any theory that could guide programmers in actually \emph{writing} +many algorithms and derives their mathematical properties but does +not provide any theory that could guide programmers in actually \emph{writing} programs (say, choosing the data types to be used). 
Knuth assumes that the reader who understands the mathematical properties of an algorithm will be able \emph{somehow} to write correct code. @@ -213,10 +211,10 @@ \subsection*{No mathematical formalism guides software development} In a sense, program analysis and verification is analogous to writing mathematical equations for the surface of a shoe made by a fashion designer. The resulting \textsf{``}shoe equations\textsf{''} are mathematically rigorous -and can be analyzed or \textsf{``}verified\textsf{''}. But the equations are merely -written after the fact, they do not guide the fashion designers in -actually making shoes. It is understandable that fashion designers -do not study the mathematical theory of surfaces. +and could be analyzed or verified. But the equations are merely written +after the fact, they do not guide the fashion designers in actually +making shoes. It is understandable that fashion designers do not study +the mathematical theory of geometric surfaces. \subsection*{Programmers avoid academic terminology } @@ -259,19 +257,20 @@ \subsection*{Programmers avoid academic terminology } work using artisanal methods, and their education and design processes are typical of a crafts guild. -True software engineering means having a mathematical theory that -guides the process of writing programs, \textemdash{} not just theory -that describes or analyzes programs after they are \emph{somehow} -written. +Software engineering in the proper sense would mean having a mathematical +theory that guides the process of writing programs, \textemdash{} +not just theory that describes or analyzes programs after they are +\emph{somehow} written. -It is not enough that the numerical methods required for physics or -the matrix calculations required for data science are \textsf{``}mathematical\textsf{''}. -These programming tasks are indeed formulated using mathematical theory. +It is true that numerical methods required for physics and matrix +calculations required for data science are \textsf{``}mathematical\textsf{''}. Those +programming tasks are indeed formulated using mathematical theory. However, mathematical \emph{subject matter} (aerospace control, physics -simulations, or statistics) does not mean that mathematics is used -to guide the process of writing code. Data scientists, aerospace engineers, -and physicists almost always work as artisans when converting their -computations into program code. +simulations, or statistics) corresponds to a relatively small part +of written code and does not by itself guide the process of writing +code. Data scientists, aerospace engineers, and physicists almost +always work as artisans when converting their computations into program +code. We expect that software engineers\textsf{'} textbooks should be full of equations and derivations. What theory would those equations represent? @@ -325,8 +324,8 @@ \subsection*{Programmers avoid academic terminology } A recent example of a development in applied functional type theory is the \textsf{``}free applicative functor\textsf{''} construction. 
It was first described -in a 2014 paper;\footnote{\texttt{\href{https://arxiv.org/pdf/1403.0749.pdf}{https://arxiv.org/pdf/1403.0749.pdf}}} -a couple of years later, a combined free applicative / free monad +in a 2014 paper.\footnote{\texttt{\href{https://arxiv.org/pdf/1403.0749.pdf}{https://arxiv.org/pdf/1403.0749.pdf}}} +A couple of years later, a combined free applicative / free monad data type was designed and its implementation proposed in Scala\footnote{\texttt{\href{https://github.com/typelevel/cats/issues/983}{https://github.com/typelevel/cats/issues/983}}} as well as in Haskell.\footnote{\texttt{\href{https://elvishjerricco.github.io/2016/04/08/applicative-effects-in-free-monads.html}{https://archive.is/kwD2a}}} This technique allows programmers to implement declarative side-effect @@ -373,7 +372,8 @@ \subsection*{Programmers avoid academic terminology } So, it is probably impossible to train as many software engineers in the true sense of the word. Modern computer science courses do not actually train engineers in that sense. Instead, they train academic -researchers who can also work as software artisans and write code. +researchers who will in most cases go on to work as software artisans +writing code. Looking at the situation in construction business in the U.S.A., we find that it employs about $10$ times more construction workers as @@ -386,7 +386,7 @@ \subsection*{Programmers avoid academic terminology } Software practitioners have long bemoaned the permanent state of \textsf{``}crisis\textsf{''} in software development. Code \textsf{``}rots with time\textsf{''}, its complexity grows \textsf{``}out of control\textsf{''}, and operating systems have been notorious -for constantly appearing new security flaws\footnote{\texttt{\href{http://archive.fo/HtQzw}{http://archive.fo/HtQzw}}} +for a steady stream of new security flaws\footnote{\texttt{\href{http://archive.fo/HtQzw}{http://archive.fo/HtQzw}}} despite many thousands of programmers and testers employed. It appears that the growing complexity of software tends to overwhelm the capacity of the human brain for correct \emph{artisanal} programming. @@ -532,33 +532,32 @@ \subsection*{Programmers avoid academic terminology } basic features of Standard ML \textemdash{} immutable polynomial data types, pattern-matching, higher-order functions, and parametric polymorphism with a static type inference \textemdash{} have become standard, so -that many new languages, such as F\#, Scala, Swift, and Rust, include -them, while older languages (Java, C\#, Python) have also added some -of these features. +that many new languages (such as F\#, Scala, Swift, and Rust) include +them by design, while older languages (Java, C\#, Python) have retrofitted +some of these features. \addsec{What is \textquotedblleft declarative programming\textquotedblright} I first encountered the concept of \textsf{``}declarative programming\textsf{''} when -I started studying Haskell and then Prolog. Both languages are claimed -up front to be declarative, as opposed to imperative languages such -as C++ or Java. It was confusing, however, that two languages that -are so different can be both deemed declarative. It was also clear -that Prolog would be quite awkward for, say, numerical calculations, -while Haskell would require a lot of hard-to-read, imperative code -for tasks such as downloading a file from a Web server. 
The book \textsf{``}Real -World Haskell\textsf{''}\footnote{\texttt{\href{https://amzn.com/dp/B0026OR2FY}{https://amzn.com/dp/B0026OR2FY}}} -shows some examples.\texttt{}\footnote{\texttt{\href{http://book.realworldhaskell.org/read/extended-example-web-client-programming.html}{http://book.realworldhaskell.org/read/extended-example-web-client-programming.html}}} - -So I then tried to understand what people mean by declarative programming. +I started studying Haskell and Prolog. Both languages are claimed +to be declarative, as opposed to imperative languages such as C++ +or Java. It was confusing, however, that two languages that are so +different can be both deemed declarative. It was also clear that Prolog +would be quite awkward for, say, numerical calculations, while Haskell +would require a lot of hard-to-read, imperative code for tasks such +as downloading a file from a Web server. (The book \textsf{``}Real World Haskell\textsf{''}\footnote{\texttt{\href{https://amzn.com/dp/B0026OR2FY}{https://amzn.com/dp/B0026OR2FY}}} +shows some examples.\texttt{}\footnote{\texttt{\href{http://book.realworldhaskell.org/read/extended-example-web-client-programming.html}{http://book.realworldhaskell.org/read/extended-example-web-client-programming.html}}}\texttt{)} + +I tried to understand what people mean by declarative programming. The Wikipedia definition\footnote{\texttt{\href{https://en.wikipedia.org/wiki/Declarative_programming}{https://en.wikipedia.org/wiki/Declarative\_programming}}} essentially says that declarative is \textsf{``}not imperative\textsf{''}, and yet that definition is so vague that one could easily claim that Haskell is imperative while Visual Basic is declarative. Essentially, \textsf{``}declarative\textsf{''} -is understood as a feature of a programming language as a whole, and -any programming language could be argued to be either \textsf{``}declarative\textsf{''} +is understood as a feature of a programming language as a whole, as +if any programming language could be argued to be either \textsf{``}declarative\textsf{''} or not. -I was never satisfied with this definition and kept thinking about +I was never satisfied with that definition and kept thinking about this question until I found a better definition, which I will explain now. @@ -573,11 +572,10 @@ \subsection*{Programmers avoid academic terminology } Haskell to compilers. An important consequence is that the same languages were not suitable -for other problem domains! Prolog was not easily suitable for matrix -multiplication, nor Fortran for expert systems, nor Haskell for GUI -programs. +for other problem domains! Prolog was not suitable for matrix multiplication, +nor Fortran for expert systems, nor Haskell for GUI programs. -Therefore, \textsf{``}declarativeness\textsf{''} is not really a property of a programming +Therefore, \textsf{``}declarativeness\textsf{''} is not a property of a programming language, but a \emph{relation} between a programming language and a problem domain. A language can be declarative for one specific problem domain but not for another. @@ -660,8 +658,8 @@ \subsection*{Programmers avoid academic terminology } require significantly more code, and the code will not syntactically resemble the specification. 
-By looking at the \textsf{``}silver bullet\textsf{''} examples, we can now formulate -the principle of declarative programming: +By looking at the \textsf{``}silver bullet\textsf{''} examples, we arrive at the following +definition of declarative programming: \begin{quote} \emph{A program is declarative if it syntactically resembles a human-written specification of the required task, expressed in a commonly used specification @@ -680,7 +678,7 @@ \subsection*{Programmers avoid academic terminology } written within the syntactic conventions of a specific programming language. -\addsec{The problem of choosing a specification language} +\addsec{Specification languages} A key question remains: what exactly is a \textsf{``}human-readable specification\textsf{''}? Again, we need to look at known history to understand what specification @@ -706,14 +704,14 @@ \subsection*{Programmers avoid academic terminology } It is safe to say that the task of developing a \textsf{``}good\textsf{''} notation \textemdash{} i.e., an unambiguous, expressive, and yet readable specification -language \textemdash{} for a given problem domain is an extremely -difficult task that may take a long time for newer problem domains. -The main reason for the difficulty is that a successful specification -language must be somehow convenient for human practitioners (whose -detailed behavior, to date, has evaded a formal description). A person -reading a description of a task in a good specification language must -be able to understand the task quickly and should have no further -questions or ambiguities to clarify. +language \textemdash{} for a given problem domain is a difficult task +that may take a long time for newer problem domains. The main reason +for the difficulty is that a successful specification language must +be convenient for human practitioners (whose detailed behavior, to +date, has evaded a formal description). A person reading a description +of a task in a good specification language must be able to understand +the task quickly and should have no further questions or ambiguities +to clarify. It is precisely in areas where the specification language is well understood (for example: mathematical formulas, formal grammars, relational @@ -722,13 +720,13 @@ \subsection*{Programmers avoid academic terminology } cases, the programming language syntactically reproduces the specification language and, in this sense, makes the specifications \textsf{``}executable\textsf{''}. However, blind attempts to use the same language for other problem -domains did not bring any advantages. The widely expressed disappointment -with structural programming, with OOP, or with functional programming -is probably due to the fact that people expected a \textsf{``}declarativeness\textsf{''} -in one domain to transfer to advantages in all other problem domains. -But this does not seem to be the case with any of the attempts so -far. Different problem domains are incompatible and require quite -different specification languages. +domains did not bring any advantages. The widely expressed disappointments +with structural programming, natural-language programming, OOP, or +functional programming is probably due to the fact that people expected +a \textsf{``}declarativeness\textsf{''} in one domain to transfer to advantages in +all other problem domains. But this does not seem to be the case with +any of the attempts so far. Different problem domains are incompatible +and require quite different specification languages. 
Without an accepted specification language, there is no hope of reaping the full benefits of declarative programming. One domain where a specification @@ -740,12 +738,12 @@ \subsection*{Programmers avoid academic terminology } the same time readily understandable to humans. When people design GUIs, they communicate their designs to each other informally and in multiple stages, gradually resolving the inevitable ambiguities. -(\textquotedbl And what if I now press this button in that window -while the old message box is still visible?\textquotedbl ) As a result, -GUI programming remains a difficult and error-prone exercise. Established -GUI environments (X Window, MS Windows, macOS, iOS, Android) predominantly -use the object-oriented paradigm, which turned out to be not a silver -bullet for complex GUI design. Accordingly, programming a GUI application +(\textsf{``}And what if I now press this button in that window while the old +message box is still visible?\textsf{''}) As a result, GUI programming remains +a difficult and error-prone exercise. Established GUI environments +(X Window, MS Windows, macOS, iOS, Android) predominantly use the +object-oriented paradigm, which turned out to be not a silver bullet +for complex GUI design. Accordingly, programming a GUI application in these environments is a messy and painful affair (I am speaking from first-hand experience). Newer developments based on functional reactive programming are more promising but yet to be proven declarative, @@ -821,7 +819,7 @@ \subsection*{Programmers avoid academic terminology } matrices by writing \lstinline!m * n!, we should not be able to write \lstinline!m * n! by mistake when \lstinline!n! is a telephone number rather than a matrix, only to discover the error much later when the -code is running in production. The host language must have a tight +code is running in production. The host language must maintain strict control over the abstractions behind the DSL. Because of the importance of DSL-friendly features, some programming diff --git a/sofp-src/tex/sofp-essay3.tex b/sofp-src/tex/sofp-essay3.tex index 63c6a3c45..072a3ae3a 100644 --- a/sofp-src/tex/sofp-essay3.tex +++ b/sofp-src/tex/sofp-essay3.tex @@ -488,7 +488,7 @@ components. The component at object \lstinline!X! is a function of type \lstinline!F[X] => G[X]!; this must be defined for all \lstinline!X!. Some programming languages support functions with type parameters. -In Scala, the syntax is +In Scala, the syntax is: \begin{lstlisting} def t[X]: F[X] => G[X] = ... \end{lstlisting} @@ -509,7 +509,7 @@ states that applying the endofunctor \lstinline!F!\textsf{'}s morphism map before a natural transformation \lstinline!t! must be equal to applying the endofunctor \lstinline!G!\textsf{'}s map after \lstinline!t!. In Scala -syntax, the law is written as +syntax, the law is written as: \begin{lstlisting} t(fmap_F(f)(x)) == fmap_G(f)(t(x)) \end{lstlisting} diff --git a/sofp-src/tex/sofp-filterable.tex b/sofp-src/tex/sofp-filterable.tex index 129b9b3b8..2e3617432 100644 --- a/sofp-src/tex/sofp-filterable.tex +++ b/sofp-src/tex/sofp-filterable.tex @@ -20,16 +20,15 @@ \section{Practical uses of filtering\label{sec:Practical-uses-of-filterable-func on sequences, sets, and other data structures. An example of using \lstinline!filter! is the following calculation: \[ -\sum_{x\in\mathbb{Z};\,0\leq x\leq100;\,\cos x>0}\sqrt{\cos\left(x\right)}\approx38.71\quad. +\sum_{x\in\mathbb{Z};\,0\leq x\leq100;\,\cos x>0}\sqrt{\cos x}\approx38.71\quad. 
\] \begin{lstlisting} scala> (0 to 100).map(x => math.cos(x)).filter(_ > 0).map(math.sqrt).sum res0: Double = 38.71218949848382 \end{lstlisting} -The role of \lstinline!filter! in this computation is to select only -the positive values of $\cos\left(x\right)$. It is safe to apply -the square root function to positive values, so the code will work -correctly. +The role of \lstinline!filter! in this computation is to remove all +non-positive values of $\cos x$. It is safe to apply the square root +function to positive values. The code above is a chain of methods, but the same code can be written using Scala\textsf{'}s \lstinline!for!/\lstinline!yield! syntax, which is @@ -84,7 +83,7 @@ \section{Practical uses of filtering\label{sec:Practical-uses-of-filterable-func the embedded \lstinline!if! keyword, the type constructor must support a method called \lstinline!withFilter! with the same type signature as the \lstinline!filter! method. The type signatures of \lstinline!map! -and \lstinline!withFilter! methods for a type constructor \lstinline!F[_]! +and \lstinline!withFilter! methods for a type constructor \lstinline!F! can be written as: \begin{lstlisting} class F[A] { @@ -99,7 +98,7 @@ \section{Practical uses of filtering\label{sec:Practical-uses-of-filterable-func The main focus of this chapter is to explore filterable functors in detail. Programmers would intuitively expect the filtering operation to have certain properties. We will now examine the expected properties -and translate them into mathematical laws for the function \lstinline!withFilter!. +and translate them into mathematical laws for the function \lstinline!filter!. \subsection{Examples and intuitions for the filtering operation} @@ -117,12 +116,12 @@ \subsection{Examples and intuitions for the filtering operation} \end{lstlisting} In an intuitive view, a functor wraps one or more data values, and the filtering operation may decrease the number of values wrapped. -e.g., the length of the sequence \lstinline!List(64, 128)! is decreased +E.g., the length of the sequence \lstinline!List(64, 128)! is decreased from $2$ to $1$ after filtering with the condition \lstinline!_ > 100!. The \lstinline!Option! functor can wrap at most one value, so filtering with a predicate returning \lstinline!false! will return an empty \lstinline!Option! value (i.e., \lstinline!None!). In all cases, -the resulting collection will not contain values that fail the filtering +the resulting data structure will not store values that fail the filtering predicate. Note that \lstinline!Option[T]! is written in the type notation as @@ -138,7 +137,7 @@ \subsection{Examples and intuitions for the filtering operation} So, we expect that a filterable functor should contain a disjunctive type supporting a different number of values of $T$, including \emph{zero} -values. When the filtering operation \lstinline!.filter(p)! is applied, +values. When the filtering operation \lstinline!_.filter(p)! is applied, some values of type $T$ will fail the predicate \lstinline!p! and will be removed from the collection. This example: \begin{lstlisting} @@ -166,8 +165,8 @@ \subsubsection{Example \label{subsec:Example-filtering-orders-tue-fri}\ref{subse \lstinline!withFilter!: \begin{lstlisting} final case class Orders[A](tue: Option[A], fri: Option[A]) { - def map[B](f: A => B): Orders[B] = Orders(tue.map(f), fri.map(f)) // Functor. - def withFilter(p: A => Boolean): Orders[A] = Orders(tue.filter(p), fri.filter(p)) // Filterable. 
+ def map[B](f: A => B): Orders[B] = Orders(tue.map(f), fri.map(f)) + def withFilter(p: A => Boolean): Orders[A] = Orders(tue.filter(p), fri.filter(p)) } scala> Orders(Some(500), Some(2000)).withFilter(_ < 1000) // Approved if the amount is below $1000. @@ -178,14 +177,14 @@ \subsubsection{Example \label{subsec:Example-filtering-orders-tue-fri}\ref{subse a \textsf{``}source\textsf{''} of data in functor blocks: \begin{lstlisting} scala> for { - x <- Orders(Some(500), Some(2000)) // "Source" of type Orders[Int]. + x <- Orders(Some(500), Some(2000)) // "Source" has type Orders[Int]. y = x - 200 // Apply discount of $200 to each order. - if y < 500 // Orders are approved if the amount is below $500 after discount. + if y < 500 // Orders are approved if amount < 500 after discount. } yield y * 1.10 // Add 10% tax. Result is of type Orders[Double]. res1: Orders[Double] = Orders(Some(330.0), None) \end{lstlisting} -Suppose we are considering an additional business rule, such as: +Suppose we are considering additional business rules, such as: \textbf{(a)} Both orders must be approved, or else no orders can be placed that week. @@ -211,26 +210,23 @@ \subsection{The laws of filtering: Motivation and derivation\label{subsec:Motiva val result = for { // Some computations in the context of the `List` functor. x <- List(...) // For each x in the given list... y = f(x) // ... compute y - if p1(y) // ... impose condition p1: discard all y for which p1(y) == false + if p1(y) // ... impose condition p1; continue only if p1(y) == true if p2(y) // ... same for condition p2 z = g(x, y) // ... compute z - if q(x, y, z) // ... impose another condition -} yield // For all x in the given list, such that all the conditions hold, - k(x, y, z) // compute the values k, put them into a list, and return as the list `result`. + if q(x, y, z) // ... impose another condition q(x, y, z) +} yield // For those x for which all the conditions hold, + k(x, y, z) // compute the list of values k as the `result`. \end{lstlisting} {\footnotesize\par} There are several properties that one intuitively expects such programs -to have. One property is that computing \lstinline!y = f(x)! in line -$3$ and then checking a condition for \lstinline!y!, such as \textsf{``}\lstinline!if p1(y)!\textsf{''} -in line $4$, should be the same as checking the condition \lstinline!p1(f(x))! -and then computing \lstinline!y = f(x)!: since the code says that -\lstinline!y = f(x)!, we expect the conditions \lstinline!p1(y)! -and \lstinline!p1(f(x))! to be equivalent. - -Translating this equivalence into code, we obtain the requirement -that the following two expressions (\lstinline!result1! and \lstinline!result2!) -should be equal to each other: +to have. For example, the code says \lstinline!y = f(x)! in line +$3$. Then we expect that checking a condition for \lstinline!y!, +such as \textsf{``}\lstinline!if p1(y)!\textsf{''} in line $4$, should be the same +as checking the condition \textsf{``}\lstinline!if p1(f(x))!\textsf{''}. Translating +this equivalence into code, we obtain the requirement that the following +two expressions (\lstinline!result1! and \lstinline!result2!) should +be equal: \vspace{0.3\baselineskip} @@ -268,11 +264,11 @@ \subsection{The laws of filtering: Motivation and derivation\label{subsec:Motiva only values that satisfy the condition \lstinline!p1!, and the second filtering operation is applied to the results of the first one, additionally imposing the condition \lstinline!p2!. 
So, we expect that applying -these two filtering operations is equivalent to filtering by the condition -\textsf{``}\lstinline!if p1(y) && p2(y)!\textsf{''}. +these two filtering operations is equivalent to filtering by the single +condition \textsf{``}\lstinline!if p1(y) && p2(y)!\textsf{''}. -We can translate this expectation into equality of the following two -code expressions: +We translate this expectation into the requirement that the following +values \lstinline!result1! and \lstinline!result2! should be equal: \vspace{0.3\baselineskip} @@ -318,9 +314,9 @@ \subsection{The laws of filtering: Motivation and derivation\label{subsec:Motiva for certain values \lstinline!x!. Then we expect those values \lstinline!x! to be excluded from any computations performed \emph{after} the line \textsf{``}\lstinline!if p(x)!\textsf{''}. In particular, we should be able to use -a partial function safely as long as that function is defined for -\lstinline!x! such that \lstinline!p(x) == true!. To express this -in code, first define a general \textsf{``}factory\textsf{''} for partial functions: +a partial function safely as long as that function is well-defined +for \lstinline!x! such that \lstinline!p(x) == true!. To express +this in code, first define a general \textsf{``}factory\textsf{''} for partial functions: \begin{lstlisting} def if_p[A, B](p: A => Boolean)(f: A => B): A => B = x => p(x) match { case true => f(x) } \end{lstlisting} @@ -366,8 +362,8 @@ \subsection{The laws of filtering: Motivation and derivation\label{subsec:Motiva \begin{lstlisting} val result2 = for { x <- xs - if p(x) - y = fp(x) // Here fp = if_p(p)(f) + if p(x) // def fp = if_p(p)(f) + y = fp(x) } yield y // Rewritten via method chains: val result2 = xs.filter(p).map(fp) @@ -377,7 +373,7 @@ \subsection{The laws of filtering: Motivation and derivation\label{subsec:Motiva \vspace{0\baselineskip} -We found $4$ requirements for the \lstinline!filter! function, written +We found four requirements for the \lstinline!filter! function, written in terms of equal code fragments. These requirements are the four \textsf{``}laws\textsf{''} (i.e., equations) that any reasonable \lstinline!filter! must satisfy. In the code notation, \lstinline!filter! is $\text{filt}_{F}$: @@ -396,7 +392,7 @@ \subsection{The laws of filtering: Motivation and derivation\label{subsec:Motiva \end{align} The following type diagram illustrates the naturality law of \lstinline!filter!: \[ -\xymatrix{\xyScaleY{1.4pc}\xyScaleX{7.0pc}F^{A}\ar[r]\sp(0.5){\text{filt}_{F}(f^{:A\rightarrow B}\bef q^{:B\rightarrow\bbnum 2})}\ar[d]\sb(0.45){(f^{:A\rightarrow B})^{\uparrow F}} & F^{A}\ar[d]\sp(0.45){(f^{:A\rightarrow B})^{\uparrow F}}\\ +\xymatrix{\xyScaleY{1.4pc}\xyScaleX{7.0pc}F^{A}\ar[r]\sp(0.5){\text{filt}_{F}(f^{:A\rightarrow B}\bef q^{:B\rightarrow\bbnum 2})}\ar[d]\sb(0.45){(f^{:A\rightarrow B})^{\uparrow F}} & F^{A}\ar[d]\sp(0.5){(f^{:A\rightarrow B})^{\uparrow F}}\\ F^{B}\ar[r]\sp(0.5){\text{filt}_{F}(q^{:B\rightarrow\bbnum 2})} & F^{B} } \] @@ -415,15 +411,15 @@ \subsection{The laws of filtering: Motivation and derivation\label{subsec:Motiva \end{lstlisting} It is intuitively clear why functors such as \lstinline!Option! and -\lstinline!List! obey the filtering laws: those types can be viewed -as \textsf{``}containers\textsf{''} holding zero or more items of data, and the \lstinline!filter! -operation removes all data that fails the filtering condition. What -about the custom data type \lstinline!Orders! 
from Example~\ref{subsec:Example-filtering-orders-tue-fri}? +\lstinline!List! obey the filtering laws: those types are \textsf{``}containers\textsf{''} +holding zero or more items of data, and the \lstinline!filter! operation +removes all data that fails the filtering condition. What about the +custom data type \lstinline!Orders! from Example~\ref{subsec:Example-filtering-orders-tue-fri}? In principle, we would need to verify all four laws symbolically, using the code of \lstinline!withFilter! as we implemented it for \lstinline!Orders!. Later in this chapter we will see that the four laws can be simplified, reduced to just two laws, and proved more -quickly. For now, we can use the \texttt{scalacheck} library\index{scalacheck library@\texttt{scalacheck} library}\index{verifying laws with scalacheck@verifying laws with \texttt{scalacheck}}2 +quickly. For now, we can use the \texttt{scalacheck} library\index{scalacheck library@\texttt{scalacheck} library}\index{verifying laws with scalacheck@verifying laws with \texttt{scalacheck}} to implement randomized tests for the four filtering laws: \begin{lstlisting} def checkFilteringLaws[F[_] : Filterable : Functor, A, B](implicit @@ -454,7 +450,7 @@ \subsection{The laws of filtering: Motivation and derivation\label{subsec:Motiva \subsection{Examples of non-filterable functors\label{subsec:Examples-of-non-filterable-functors}} -As usual with typeclasses, the code of the \lstinline!Filterable! +As usual with Scala typeclasses, the code of the \lstinline!Filterable! typeclass fixes the type signature of the \lstinline!filter! function but does not enforce its laws. It is up to the programmer to verify that the implementation of \lstinline!filter! satisfies the laws. @@ -496,11 +492,12 @@ \subsection{Examples of non-filterable functors\label{subsec:Examples-of-non-fil } yield y // But the final result does not correspond to this intuition: res2: Orders[String] = Orders(Some("Amount: 500"), Some("Amount: 2000")) \end{lstlisting} -This computation violates the partial function law because the value -\lstinline!x = 2000! is not excluded from further computations despite -filtering with the predicate \textsf{``}\lstinline!x < 1000!\textsf{''}. This happened -because the code of \lstinline!filter! does not remove the value -\lstinline!x = 2000! from the data structure in that case. +This computation violates the partial function law: the value \lstinline!x = 2000! +is not excluded from further computations despite filtering with the +predicate \textsf{``}\lstinline!x < 1000!\textsf{''}. This happened because the code +of \lstinline!filter! does not remove the value \lstinline!x = 2000! +from the data structure in case there is another value that passes +the predicate. The four laws of filtering are a rigorous formulation of our intuitions about what it means to \textsf{``}filter data\textsf{''}. The type \lstinline!Orders! @@ -511,8 +508,8 @@ \subsection{Examples of non-filterable functors\label{subsec:Examples-of-non-fil that order approval according to rule \textbf{(b)} is not a filtering operation. For instance, applying two order approvals one after another will not give the intuitively expected results. Nevertheless, this -may be acceptable in applications where only one order approval is -ever applied. +may be acceptable in applications where one order approval is never +applied after another. Violations of the filtering laws by business rule \textbf{(b)} also does not mean that the functor \lstinline!Orders! is not filterable. 
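As a quick sanity check (a minimal sketch using a few hand-picked sample
values instead of the \texttt{scalacheck} tests shown above), we may verify
the composition law~(\ref{eq:composition-law-of-filter}) for the lawful
\lstinline!withFilter! of \lstinline!Orders! from
Example~\ref{subsec:Example-filtering-orders-tue-fri}; the value names and
sample data below are chosen ad hoc for this illustration:
\begin{lstlisting}
val sample = Orders(Some(500), Some(2000))
val p1: Int => Boolean = _ < 3000
val p2: Int => Boolean = _ < 1000

val sequential = sample.withFilter(p1).withFilter(p2)    // Apply the two filters one after another.
val combined   = sample.withFilter(x => p1(x) && p2(x))  // Filter by the combined predicate.

sequential == combined   // true: both are Orders(Some(500), None).
\end{lstlisting}
A failing check of this kind is enough to show that a given implementation
violates the filtering laws, although a passing check does not prove them.
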
@@ -550,17 +547,17 @@ \subsection{Examples of non-filterable functors\label{subsec:Examples-of-non-fil } \end{lstlisting} This code discards information and violates the identity law: the -result of filtering with an identically \lstinline!true! predicate -is not the identity function of type \lstinline!Option[A] => Option[A]!. +filtering with an identically \lstinline!true! predicate is \emph{not} +the identity function of type \lstinline!Option[A] => Option[A]!. Finally, one could violate the naturality law by defining \lstinline!filter! in a special way when the type parameter $A$ is set to, say, \lstinline!Int!. To obey the naturality law, the \lstinline!filter! function must -be fully parametric and must not use hard-coded values of specific +be fully parametric and may not use hard-coded values of specific types or make decisions based on specific types. -Note that the \lstinline!Boolean! type is equivalent to $\bbnum 2\cong\bbnum 1+\bbnum 1$; -in other words, this type can be expressed via the basic type constructions +Note that the \lstinline!Boolean! type is equivalent to $\bbnum 2\cong\bbnum 1+\bbnum 1$. +In other words, that type can be expressed via the basic type constructions (disjunction and the \lstinline!Unit! type) without using any externally defined values. For this reason, it is allowed to use the \lstinline!Boolean! type in fully parametric functions. @@ -569,7 +566,7 @@ \subsection{Examples: Programming with filterable functors\index{examples (with \subsubsection{Example \label{subsec:filt-solved-example-1}\ref{subsec:filt-solved-example-1}} -A cluster has $2$ servers; each server needs to have valid credentials, +A cluster has two servers; each server needs to have valid credentials, which expire periodically. If credentials expire for one server, it may copy valid credentials from the other server. If no server has valid credentials, the cluster is down. Is this setup described by @@ -589,8 +586,8 @@ \subsubsection{Example \label{subsec:filt-solved-example-1}\ref{subsec:filt-solv case Some((a1, a2)) => ??? } \end{lstlisting} -In line~4, we need to compute a value of type $F^{A}$ using the -given values \lstinline!a1! and \lstinline!a2!. We need to check +In line~4, we need to compute a value of type \lstinline!F[A]! using +the given values \lstinline!a1! and \lstinline!a2!. We need to check whether the predicate \lstinline!p! holds for \lstinline!a1! and \lstinline!a2!. What if \lstinline!p(a1) == false! but \lstinline!p(a2) == true!? We need to remove \lstinline!a1! from the result, or else the filtering @@ -599,14 +596,15 @@ \subsubsection{Example \label{subsec:filt-solved-example-1}\ref{subsec:filt-solv requires two such values or none. So, we may return \lstinline!Some((a2, a2))! or \lstinline!None!. -Looking at the business requirements, we see that \lstinline!p(a1) == false! -means the first server\textsf{'}s credentials expired. In that case, if \lstinline!p(a2) == true!, -the first server copies the second server\textsf{'}s valid credentials, \lstinline!a2!. +We may describe the validity of credentials by a predicate \lstinline!p: A => Boolean!. +If \lstinline!p(a1) == false! then the first server\textsf{'}s credentials +expired. In that case, if \lstinline!p(a2) == true!, the first server +will copy the second server\textsf{'}s valid credentials (\lstinline!a2!). So, we must return \lstinline!Some((a2, a2))!. Other cases are handled similarly. 
The full code is: \begin{lstlisting} def filter[A](p: A => Boolean): F[A] => F[A] = { - case None => None // Cluster is down, no valid credentials. + case None => None // No credentials to validate. case Some((a1, a2)) => (p(a1), p(a2)) match { case (true, true) => Some((a1, a2)) // Both credentials are still valid. case (true, false) => Some((a1, a1)) // Server 2 copies credentials from server 1. @@ -642,7 +640,7 @@ \subsubsection{Example \label{subsec:filt-solved-example-1}\ref{subsec:filt-solv The code for the right-hand side of the law, $\text{filt}_{F}(f\bef p)\bef f^{\uparrow F}$, is: \begin{lstlisting} -filter(f andThen p) = { +filter(f andThen p) == { case None => None case Some((a1, a2)) => (p(f(a1)), p(f(a2))) match { case (true, true) => Some((a1, a2)) @@ -650,7 +648,7 @@ \subsubsection{Example \label{subsec:filt-solved-example-1}\ref{subsec:filt-solv case (false, true) => Some((a2, a2)) case (false, false) => None } -} andThen fmap(f) = { +} andThen fmap(f) == { case None => None case Some((a1, a2)) => (p(f(a1)), p(f(a2))) match { case (true, true) => Some((f(a1), f(a2))) @@ -663,7 +661,7 @@ \subsubsection{Example \label{subsec:filt-solved-example-1}\ref{subsec:filt-solv Since the code is exactly the same, the law holds. This computation illustrates why fully parametric functions such as -\lstinline!filter! obey the naturality law: such functions manipulate +\lstinline!filter! obey the naturality law. Such functions manipulate their arguments purely as symbols of unknown types, without referring to any specific types or values. Applying a lifted function $f^{\uparrow F}$ before \lstinline!filter! is the same as inserting \lstinline!f(...)! @@ -698,10 +696,10 @@ \subsubsection{Example \label{subsec:filt-solved-example-1}\ref{subsec:filt-solv need to consider two cases: the cluster is down, or both servers have valid credentials. -In the first case, the value of $F^{A}$ is \lstinline!None! and -remains \lstinline!None! after any filtering operation. (If the cluster -is down, a check of credentials will not bring it up.) So, the composition -law holds for that case. +In the first case, the value is \lstinline!None! (denoted by $1+\bbnum 0^{:A\times A}$) +and remains \lstinline!None! after any filtering operation. (If the +cluster is down, a check of credentials will not bring it up.) So, +the composition law holds for that case. In the second case, we have two credentials $a_{1},a_{2}$ in a value $s\triangleq\bbnum 0+a_{1}\times a_{2}$. The filtering operation @@ -722,11 +720,11 @@ \subsubsection{Example \label{subsec:filt-solved-example-1}\ref{subsec:filt-solv \hline {\small{}}\lstinline!true! & {\small{}}\lstinline!false! & {\small{}}\lstinline!true! & {\small{}}\lstinline!true! & {\small{}}\lstinline!true! & {\small{}}\lstinline!false! & {\small{}$\bbnum 0+a_{1}\times a_{1}$} & {\small{}$\bbnum 0+a_{1}\times a_{1}$} & {\small{}$\bbnum 0+a_{1}\times a_{1}$}\tabularnewline \hline -{\small{}}\lstinline!???! & {\small{}}\lstinline!???! & {\small{}}\lstinline!false! & {\small{}}\lstinline!false! & {\small{}}\lstinline!false! & {\small{}}\lstinline!false! & {\small{}}\lstinline!???! & {\small{}$\bbnum 1+\bbnum 0^{:A\times A}$} & {\small{}$\bbnum 1+\bbnum 0^{:A\times A}$}\tabularnewline +{\small{}}\lstinline!???! & {\small{}}\lstinline!???! & {\small{}}\lstinline!false! & {\small{}}\lstinline!false! & {\small{}}\lstinline!false! & {\small{}}\lstinline!false! & {\small{}}\lstinline!???! 
& {\small{}$1+\bbnum 0^{:A\times A}$} & {\small{}$1+\bbnum 0^{:A\times A}$}\tabularnewline \hline -{\small{}}\lstinline!true! & {\small{}}\lstinline!false! & {\small{}}\lstinline!false! & {\small{}}\lstinline!true! & {\small{}}\lstinline!false! & {\small{}}\lstinline!false! & {\small{}$\bbnum 0+a_{1}\times a_{1}$} & {\small{}$\bbnum 1+\bbnum 0^{:A\times A}$} & {\small{}$\bbnum 1+\bbnum 0^{:A\times A}$}\tabularnewline +{\small{}}\lstinline!true! & {\small{}}\lstinline!false! & {\small{}}\lstinline!false! & {\small{}}\lstinline!true! & {\small{}}\lstinline!false! & {\small{}}\lstinline!false! & {\small{}$\bbnum 0+a_{1}\times a_{1}$} & {\small{}$1+\bbnum 0^{:A\times A}$} & {\small{}$1+\bbnum 0^{:A\times A}$}\tabularnewline \hline -{\small{}}\lstinline!false! & {\small{}}\lstinline!false! & {\small{}}\lstinline!???! & {\small{}}\lstinline!???! & {\small{}}\lstinline!false! & {\small{}}\lstinline!false! & {\small{}$\bbnum 1+\bbnum 0^{:A\times A}$} & {\small{}$\bbnum 1+\bbnum 0^{:A\times A}$} & {\small{}$\bbnum 1+\bbnum 0^{:A\times A}$}\tabularnewline +{\small{}}\lstinline!false! & {\small{}}\lstinline!false! & {\small{}}\lstinline!???! & {\small{}}\lstinline!???! & {\small{}}\lstinline!false! & {\small{}}\lstinline!false! & {\small{}$1+\bbnum 0^{:A\times A}$} & {\small{}$1+\bbnum 0^{:A\times A}$} & {\small{}$1+\bbnum 0^{:A\times A}$}\tabularnewline \hline \end{tabular} \par\end{center} @@ -745,10 +743,10 @@ \subsubsection{Example \label{subsec:filt-solved-example-1}\ref{subsec:filt-solv So, we have proved that all filtering laws hold for the \lstinline!filter! function shown above. If the program\textsf{'}s requirements change, the \lstinline!filter! function will need to be changed. For instance, suppose the first -server is now the only source of credentials. The second server may -copy the first server\textsf{'}s credentials if needed, but the cluster will -go down whenever the first server\textsf{'}s credentials expire. This corresponds -to the code: +server becomes the only source of valid credentials. The second server +may copy the first server\textsf{'}s credentials if needed, but the cluster +will go down whenever the first server\textsf{'}s credentials expire. This +corresponds to the code: \begin{lstlisting} def filter[A](p: A => Boolean): F[A] => F[A] = { @@ -760,7 +758,7 @@ \subsubsection{Example \label{subsec:filt-solved-example-1}\ref{subsec:filt-solv } } \end{lstlisting} -Alternatively, we may have a requirement that credentials \emph{cannot} +Alternatively, we may get a new requirement that credentials \emph{cannot} be copied between servers: \begin{lstlisting} def filter[A](p: A => Boolean): F[A] => F[A] = { @@ -855,6 +853,7 @@ \subsubsection{Example \label{subsec:filt-solved-example-3}\ref{subsec:filt-solv \begin{lstlisting} final case class Server[A](requests: Seq[A]) \end{lstlisting} +Suppose a predicate \lstinline!p: A => Boolean! checks the authentication. The filtering operation truncates the sequence when the predicate \lstinline!p! first returns \lstinline!false!:\index{filterable!defined via takeWhile@defined via \texttt{takeWhile}} \begin{lstlisting} @@ -865,15 +864,15 @@ \subsubsection{Example \label{subsec:filt-solved-example-3}\ref{subsec:filt-solv below). Intuitively, we expect laws to hold because the \lstinline!filter! function always removes values that fail the predicate \lstinline!p!. The filtering function also removes other values that may or may not -fail the predicate, but the filtering laws allow removing \emph{more} -values. 
+fail the predicate, but the filtering laws do not forbid removing +\emph{more} values than strictly necessary. \subsubsection{Example \label{subsec:filt-solved-example-4}\ref{subsec:filt-solved-example-4}} If possible, implement a \lstinline!Filterable! typeclass instance for: -\textbf{(a)} The functor $F^{T}$ defined by the Scala code: +\textbf{(a)} The functor $F$ defined by the Scala code: \begin{lstlisting} final case class F[T](x: Option[T], yy: Option[(T, T)]) \end{lstlisting} @@ -961,7 +960,7 @@ \subsubsection{Example \label{subsec:filt-solved-example-4}\ref{subsec:filt-solv $\text{Int}\times Z\times A\times A$ pass the filter, we will need to remove both of them and to return a value of type $Z$. Luckily, we have a value of type $Z$ within $\text{Int}\times Z\times A\times A$. -So, we can implement \lstinline!filter! e.g., like this: +So, we may implement \lstinline!filter! like this: \begin{lstlisting} type F[A] = Either[Z, (Int, Z, A, A)] // The type `Z` must be already defined. def filter[A](p: A => Boolean): F[A] => F[A] = { @@ -1027,13 +1026,13 @@ \subsubsection{Exercise \label{subsec:filt-exercise-3}\ref{subsec:filt-exercise- typeclass instance (law checking is optional) for: \textbf{(a)} The functor $Q^{A,Z}$ with respect to the type parameter -$A$, where $Q^{\bullet,\bullet}$ is defined by this Scala code:\texttt{\textcolor{blue}{\footnotesize{}}} +$A$, where $Q$ is defined by this Scala code:\texttt{\textcolor{blue}{\footnotesize{}}} \begin{lstlisting} final case class Q[A, Z](id: Long, user1: Option[(A, Z)], user2: Option[(A, Z)]) \end{lstlisting} {\footnotesize\par} -\textbf{(b)} The functor $R^{A}$ defined by the Scala code: +\textbf{(b)} The functor $R$ defined by the Scala code: \begin{lstlisting} final case class R[A](x: Int, y: Int, z: A, data: List[A]) \end{lstlisting} @@ -1044,8 +1043,7 @@ \subsubsection{Exercise \label{subsec:filt-exercise-3}\ref{subsec:filt-exercise- \textbf{(d)} The functor \lstinline!MyTree[A] = Option[Tree2[A]]!, where \lstinline!Tree2! was defined in Section~\ref{subsec:Binary-trees}. -\textbf{(e)} The functor $\text{Tree22}^{A}$ defined recursively -as: +\textbf{(e)} The functor $\text{Tree22}$ defined recursively as: \[ \text{Tree22}^{A}\triangleq\bbnum 1+A\times A\times\text{Tree22}^{A}\times\text{Tree22}^{A}\quad. \] @@ -1053,9 +1051,9 @@ \subsubsection{Exercise \label{subsec:filt-exercise-3}\ref{subsec:filt-exercise- \subsubsection{Exercise \label{subsec:filt-exercise-4-2}\ref{subsec:filt-exercise-4-2}} -Is the simplest perfect-shaped tree $R^{A}$ defined by $R^{A}\triangleq A+R^{A\times A}$ +Is the perfect-shaped tree $R$ defined by $R^{A}\triangleq A+R^{A\times A}$ filterable? Implement a \lstinline!filter! function for a perfect-shaped\index{perfect-shaped tree} -tree $R^{A}$ defined by: +tree $R$ defined by: \textbf{(a)} $R^{A}\triangleq\bbnum 1+A+R^{A\times A}\quad.\quad$ \textbf{(b)} $R^{A}\triangleq A+R^{(\bbnum 1+A)\times(\bbnum 1+A)}\quad.$ @@ -1111,11 +1109,11 @@ \subsection{Simplifying the filtering laws: Motivation for \texttt{deflate\label & \xymatrix{\xyScaleX{5pc}\xyScaleY{0.8pc}F^{A}\ar[r]\sp(0.45){\text{inflate}} & F^{\bbnum 1+A}\ar[r]\sp(0.55){\big(\text{filt}_{\text{Opt}}(p)\big)^{\uparrow F}} & F^{\bbnum 1+A}\ar[r]\sp(0.55){\text{deflate}} & F^{A}} \end{align*} Here $\text{filt}_{\text{Opt}}$ is the standard \lstinline!filter! -method defined for the \lstinline!Option[_]! types. +method of \lstinline!Option!. 
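To make this decomposition concrete, here is a minimal sketch with $F$
chosen to be \lstinline!List! (the code below, including the helper name
\lstinline!filterViaDeflate!, is written by hand for this example and is
not a general definition):
\begin{lstlisting}
def inflate[A]: List[A] => List[Option[A]] = _.map(Some(_))   // F[A] => F[Option[A]]
def deflate[A]: List[Option[A]] => List[A] = _.flatten        // F[Option[A]] => F[A]
def filterViaDeflate[A](p: A => Boolean): List[A] => List[A] =
  xs => deflate(inflate(xs).map(_.filter(p)))   // Inflate, lift Option's filter, then deflate.

scala> filterViaDeflate[Int](_ > 0)(List(1, -2, 3))
res0: List[Int] = List(1, 3)                    // Same as List(1, -2, 3).filter(_ > 0).
\end{lstlisting}
For \lstinline!List!, the function \lstinline!deflate! simply discards the
empty \lstinline!Option! values.
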
We notice that both functions in the composition $\text{inflate}\bef(\text{filt}_{\text{Opt}}(p))^{\uparrow F}$ -are some lifted functions in the functor $F$, and so we can simplify -that composition to a single lifted function: +are lifted to the functor $F$. So, we can simplify that composition +to a single lifted function: \begin{align*} & \gunderline{\text{inflate}}\bef(\text{filt}_{\text{Opt}}(p))^{\uparrow F}\\ {\color{greenunder}\text{definition of }\text{inflate}:}\quad & =(x^{:A}\rightarrow\bbnum 0^{:\bbnum 1}+x)^{\uparrow F}\bef(\text{filt}_{\text{Opt}}(p))^{\uparrow F}\\ @@ -1156,7 +1154,7 @@ \subsection{Simplifying the filtering laws: Motivation for \texttt{deflate\label _.filter(_.nonEmpty).map(_.get) \end{lstlisting} \begin{align} - & \text{deflate}:\xymatrix{\xyScaleX{4.5pc}F^{\bbnum 1+A}\ar[r]\sp(0.5){\text{filt}_{F}(\text{nonEmpty)}} & F^{\bbnum 1+A}\ar[r]\sp(0.5){\text{get}^{\uparrow F}} & F^{A}} + & \text{deflate}:\xymatrix{\xyScaleX{7.0pc}F^{\bbnum 1+A}\ar[r]\sp(0.5){\text{filt}_{F}(\text{nonEmpty)}} & F^{\bbnum 1+A}\ar[r]\sp(0.5){\text{get}^{\uparrow F}} & F^{A}} \nonumber \\ & \text{deflate}^{:F^{\bbnum 1+A}\rightarrow F^{A}}=\text{filt}_{F}(\text{nonEmpty})\bef\text{get}^{\uparrow F}\quad.\label{eq:def-deflate-via-filter} \end{align} @@ -1248,11 +1246,12 @@ \subsubsection{Example \label{subsec:Example-ff-deflate-2}\ref{subsec:Example-ff \text{deflate}_{F}:\bbnum 1+A+\left(\bbnum 1+A\right)\times\left(\bbnum 1+A\right)\times\text{String}\rightarrow A+A\times A\times\text{String}\quad. \] An immediate problem is that we need to map all disjunctive cases, -including $\bbnum 1+0+0$, into a value of type $F^{A}$, which contains -values of type $A$ in every disjunctive case. So, implementing \lstinline!deflate! -requires us to produce a value of type $A$ from scratch, $\forall A.\,\bbnum 1\rightarrow A$, -which is impossible in a fully parametric function. Since \lstinline!deflate! -is not implementable, the functor $F$ is not filterable. +including $1+\bbnum 0+\bbnum 0$, into a value of type $F^{A}$, which +contains values of type $A$ in every disjunctive case. So, implementing +\lstinline!deflate! requires us to produce a value of type $A$ from +a unit value, which is impossible in a fully parametric function. +Since \lstinline!deflate! is not implementable, the functor $F$ +is not filterable. $\square$ These examples show that \lstinline!deflate! is easier to implement and to reason about than \lstinline!filter!. 
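For instance, consider again the cluster functor of
Example~\ref{subsec:filt-solved-example-1}. One possible \lstinline!deflate!
function, chosen to be consistent with the \lstinline!filter! shown in that
example and with Eq.~(\ref{eq:def-filter-through-deflate}), may be written
as the following sketch (the type alias \lstinline!F! and the function
\lstinline!fmap! are written out by hand here):
\begin{lstlisting}
type F[A] = Option[(A, A)]    // The cluster functor: 1 + A x A.
def fmap[A, B](f: A => B): F[A] => F[B] = _.map { case (a1, a2) => (f(a1), f(a2)) }

def deflate[A]: F[Option[A]] => F[A] = {
  case None                       => None            // The cluster is down.
  case Some((Some(a1), Some(a2))) => Some((a1, a2))  // Both credentials remain valid.
  case Some((Some(a1), None))     => Some((a1, a1))  // Server 2 copies from server 1.
  case Some((None, Some(a2)))     => Some((a2, a2))  // Server 1 copies from server 2.
  case Some((None, None))         => None            // No valid credentials remain.
}
// Recover `filter` from `deflate`: lift psi(p) into F, then apply deflate.
def filter[A](p: A => Boolean): F[A] => F[A] =
  fa => deflate(fmap[A, Option[A]](x => Some(x).filter(p))(fa))
\end{lstlisting}
This \lstinline!filter! goes through the same four cases as the code shown
in Example~\ref{subsec:filt-solved-example-1}, but the case analysis is now
confined to \lstinline!deflate!, which never mentions a predicate.
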
@@ -1280,21 +1279,21 @@ \subsubsection{Statement \label{subsec:Statement-filter-to-deflate-equivalence}\ \subparagraph{Proof} -We need to show that $\text{filt}_{F}(p)$ is the same as $\text{filter}^{\prime}(p)$ +We need to show that $\text{filt}_{F}(p)$ is the same as \lstinline!filter!$^{\prime}(p)$ for any predicate $p^{:A\rightarrow\bbnum 2}$: \begin{align} {\color{greenunder}\text{expect to equal }\text{filt}_{F}(p):}\quad & \text{filter}^{\prime}(p)=\gunderline{\psi_{p}^{\uparrow F}\bef\text{filt}_{F}}(\text{nonEmpty})\bef\text{get}^{\uparrow F}\nonumber \\ {\color{greenunder}\text{naturality law of }\text{filt}_{F}:}\quad & =\text{filt}_{F}(\psi_{p}\bef\text{nonEmpty})\bef\gunderline{\psi_{p}^{\uparrow F}\bef\text{get}^{\uparrow F}}\nonumber \\ {\color{greenunder}\text{composition law of }F:}\quad & =\text{filt}_{F}(\psi_{p}\bef\text{nonEmpty})\bef\big(\psi_{p}\bef\text{get}\big)^{\uparrow F}\quad.\label{eq:filter-prime-derivation-1} \end{align} -To proceed with the calculation, we need to simplify the two expressions +To proceed with the calculation, we need to simplify the expressions $\psi_{p}\bef\text{get}$ and $\psi_{p}\bef\text{nonEmpty}$. Begin -with writing the code for the standard methods \lstinline!nonEmpty! +by writing the code for the standard methods \lstinline!nonEmpty! and \lstinline!get!, using the equivalent type \lstinline!Option[Unit]! (i.e., $\bbnum 1+\bbnum 1$) instead of \lstinline!Boolean! (i.e., $\bbnum 2$): \begin{lstlisting}[mathescape=true] -//Use Option[Unit] instead of Boolean, as Option[Unit] $\color{dkgreen}\cong$ Boolean +// Use Option[Unit] instead of Boolean, as Option[Unit] $\color{dkgreen}\cong$ Boolean def nonEmpty[A]: Option[A] => Option[Unit] = _.map(_ => ()) def get[A]: Option[A] => A = { case Some(a) => a } @@ -1318,7 +1317,7 @@ \subsubsection{Statement \label{subsec:Statement-filter-to-deflate-equivalence}\ These methods are fully parametric since their code uses only the eight standard code constructions (see Section~\ref{subsec:Short-notation-for-eight-code-constructions}). The function $\psi$ is also fully parametric because we can implement -using the type \lstinline!Option[Unit]!: +it using the type \lstinline!Option[Unit]!: \begin{lstlisting}[mathescape=true] //Use Option[Unit] instead of Boolean, as Option[Unit] $\color{dkgreen}\cong$ Boolean def psi[A](p: A => Option[Unit]): A => Option[A] = x => p(x).map(_ => x) @@ -1406,16 +1405,16 @@ \subsubsection{Statement \label{subsec:Statement-deflate-to-filter-equivalence}\ \begin{equation} \text{deflate}^{\prime}=\text{filt}_{F}(\text{nonEmpty})\bef\text{get}^{\uparrow F}=\psi_{\text{nonEmpty}}^{\uparrow F}\bef\text{deflate}\bef\text{get}^{\uparrow F}\overset{?}{=}\text{deflate}\quad.\label{eq:deflate-prime-derivation-2} \end{equation} -The derivation is stuck here: we cannot simplify the last expression -unless we can somehow switch the order of function compositions so -that $\psi_{\text{nonEmpty}}^{\uparrow F}$ and $\text{get}^{\uparrow F}$ -are placed together and the functor composition law of $F$ can be -applied. To achieve that, we need a law that switches the order of -lifted function compositions around \lstinline!deflate!. The naturality -law~(\ref{eq:naturality-law-of-filter}) of \lstinline!filter! has -that form, so we can try deriving a similar naturality law for \lstinline!deflate!. -To switch the order of composition of \lstinline!deflate! 
with a -lifted function, the law must have the form: +The derivation is stuck here: we cannot prove the last equality unless +we somehow switch the order of function compositions, so that $\psi_{\text{nonEmpty}}^{\uparrow F}$ +and $\text{get}^{\uparrow F}$ are placed together and the functor +composition law of $F$ can be applied. To achieve that, we need a +law that switches the order of lifted function compositions around +\lstinline!deflate!. The naturality law~(\ref{eq:naturality-law-of-filter}) +of \lstinline!filter! has that form, so we can try deriving a similar +naturality law for \lstinline!deflate!. To switch the order of composition +of \lstinline!deflate! with a lifted function, the law must have +the form: \[ \text{deflate}\bef f^{\uparrow F}=(\text{???})^{\uparrow F}\bef\text{deflate}\quad, \] @@ -1558,8 +1557,8 @@ \subsubsection{Statement \label{subsec:Statement-deflate-to-filter-equivalence}\ \subsubsection{Statement \label{subsec:Statement-partial-functionlaw-deflate-to-filter}\ref{subsec:Statement-partial-functionlaw-deflate-to-filter}} -The partial function law always holds for the \lstinline!filter! -function defined via \lstinline!deflate!. +The partial function law always holds for a \lstinline!filter! function +defined via \lstinline!deflate!. \subparagraph{Proof} @@ -1580,7 +1579,8 @@ \subsubsection{Statement \label{subsec:Statement-partial-functionlaw-deflate-to- {\color{greenunder}\text{right-hand side}:}\quad & \psi_{p}^{\uparrow F}\bef\gunderline{\text{deflate}\bef f_{|p}^{\uparrow F}}=\gunderline{\psi_{p}^{\uparrow F}\bef f_{|p}^{\uparrow\text{Opt}\uparrow F}}\bef\text{deflate}=\big(\psi_{p}\bef f_{|p}^{\uparrow\text{Opt}}\big)^{\uparrow F}\bef\text{deflate}\quad. \end{align*} It remains to show that $\psi_{p}\bef f_{|p}^{\uparrow\text{Opt}}=\psi_{p}\bef f^{\uparrow\text{Opt}}$. -Apply the function $\psi_{p}\bef f^{\uparrow\text{Opt}}$ to an $x^{:A}$: +Apply the function $\psi_{p}\bef f^{\uparrow\text{Opt}}$ to an arbitrary +value $x^{:A}$: \begin{lstlisting} psi(p)(x).map(f) == (p(x) match { @@ -1643,10 +1643,10 @@ \subsubsection{Statement \label{subsec:Statement-naturality-law-of-deflate-from- \end{align*} The two sides will be equal if we prove that $f^{\uparrow\text{Opt}}\bef\text{nonEmpty}=\text{nonEmpty}$ and that $f^{\uparrow\text{Opt}}\bef\text{get}=\text{get}\bef f$, -which can be viewed as the two naturality laws specific to these functions. -Use the definitions~(\ref{eq:def-of-get-option}) and~(\ref{eq:def-of-nonempty-option}) -of \lstinline!get! and \lstinline!nonEmpty!, set the type parameters -as needed to match the types, and compute: +which can be viewed as the naturality laws specific to the functions +\lstinline!nonEmpty! and \lstinline!get!. Use the definitions~(\ref{eq:def-of-nonempty-option}) +and~(\ref{eq:def-of-get-option}) of \lstinline!nonEmpty! and \lstinline!get!, +set the type parameters as needed to match the types, and compute: \begin{align*} & f^{\uparrow\text{Opt}}\bef\text{nonEmpty}=\,\begin{array}{|c||cc|} & \bbnum 1 & B\\ @@ -1699,14 +1699,14 @@ \subsubsection{Statement \label{subsec:Statement-naturality-law-of-deflate-from- \subsubsection{Statement \label{subsec:Statement-naturality-for-deflate-entails-naturality-for-filter}\ref{subsec:Statement-naturality-for-deflate-entails-naturality-for-filter}} If the naturality law~(\ref{eq:naturality-law-of-deflate}) holds -for \lstinline!deflate! and the function \lstinline!filter! is defined -via \lstinline!deflate! 
by Eq.~(\ref{eq:def-filter-through-deflate}) +for \lstinline!deflate! and if \lstinline!filter! is defined via +\lstinline!deflate! by Eq.~(\ref{eq:def-filter-through-deflate}) then the naturality law~(\ref{eq:naturality-law-of-filter}) holds for \lstinline!filter!. \subparagraph{Proof} -Begin by writing the two sides of the naturality law~(\ref{eq:naturality-law-of-filter}): +Begin by writing the two sides of the law~(\ref{eq:naturality-law-of-filter}): \begin{align*} & \quad{\color{greenunder}\text{left-hand side of Eq.~(\ref{eq:naturality-law-of-filter})}:}\quad\\ & f^{\uparrow F}\bef\gunderline{\text{filt}\,(p)}=\gunderline{f^{\uparrow F}\bef\psi_{p}^{\uparrow F}}\bef\text{deflate}=(f\bef\psi_{p})^{\uparrow F}\bef\text{deflate}\quad.\\ @@ -1729,7 +1729,7 @@ \subsubsection{Statement \label{subsec:Statement-naturality-for-deflate-entails- x^{:A}\triangleright\psi_{p}\triangleq x^{:A}\triangleright p^{:A\rightarrow\text{Opt}^{\bbnum 1}}\bef(1\rightarrow x)^{\uparrow\text{Opt}}\quad.\label{eq:def-of-psi} \end{equation} Using this definition of $\psi_{p}$, we can derive Eq.~(\ref{eq:naturality-law-of-psi}) -by applying both sides to an $x^{:A}$: +by applying both sides to an arbitrary value $x^{:A}$: \begin{align*} {\color{greenunder}\text{left-hand side}:}\quad & x\triangleright f\bef\psi_{p}=x\triangleright f\triangleright\psi_{p}\\ {\color{greenunder}\text{use Eq.~(\ref{eq:def-of-psi})}:}\quad & \quad=x\triangleright f\triangleright p\bef(1\rightarrow x\triangleright f)^{\uparrow\text{Opt}}\\ @@ -1795,8 +1795,8 @@ \subsubsection{Statement \label{subsec:Statement-liftOpt-equivalent-to-deflate}\ The types of functions \lstinline!liftOpt! and \lstinline!deflate! are equivalent via Eqs.~(\ref{eq:def-liftOpt-via-deflate})\textendash (\ref{eq:def-deflate-via-liftOpt}), -assuming that a naturality law (Eq.~(\ref{eq:left-naturality-law-of-liftOpt}) -below) holds for \lstinline!liftOpt!. +assuming that \lstinline!liftOpt! obeys a naturality law (Eq.~(\ref{eq:left-naturality-law-of-liftOpt}) +below). \subparagraph{Proof} @@ -1809,11 +1809,11 @@ \subsubsection{Statement \label{subsec:Statement-liftOpt-equivalent-to-deflate}\ Then the new \lstinline!deflate!$^{\prime}$ will be the same function as the initial \lstinline!deflate!. -\textbf{(b)} Given a \lstinline!liftOpt! function, compute a \lstinline!deflate! -function via Eq.~(\ref{eq:def-deflate-via-liftOpt}) and then a new -\lstinline!liftOpt!$^{\prime}$ function via Eq.~(\ref{eq:def-deflate-via-liftOpt}). +\textbf{(b)} Given a \lstinline!liftOpt! function that obeys Eq.~(\ref{eq:left-naturality-law-of-liftOpt}), +compute a \lstinline!deflate! function via Eq.~(\ref{eq:def-deflate-via-liftOpt}) +and then a new \lstinline!liftOpt!$^{\prime}$ function via Eq.~(\ref{eq:def-deflate-via-liftOpt}). The new \lstinline!liftOpt!$^{\prime}$ will be the same as the initial -\lstinline!liftOpt!, assuming Eq.~(\ref{eq:left-naturality-law-of-liftOpt}). +\lstinline!liftOpt!. Proof for \textbf{(a)} directly derives the formula \lstinline!deflate!$^{\prime}\negmedspace=\,$\lstinline!deflate! by this calculation: @@ -1856,6 +1856,7 @@ \subsubsection{Statement \label{subsec:Statement-liftOpt-equivalent-to-deflate}\ {\color{greenunder}\text{expect to equal }\text{liftOpt}\,(f):}\quad & \text{liftOpt}^{\prime}(f)=\gunderline{f^{\uparrow F}}\bef\text{liftOpt}\,(\gunderline{\text{id}})\\ & =\text{liftOpt}\,(\gunderline{f\bef\text{id}})=\text{liftOpt}\,(f)\quad. \end{align*} +$\square$ Since \lstinline!deflate! is equivalent to \lstinline!filter! 
(Statement~\ref{subsec:Statement-deflate-to-filter-equivalence}), it follows that \lstinline!filter! is equivalent to \lstinline!liftOpt!. @@ -1892,8 +1893,8 @@ \subsubsection{Statement \label{subsec:Statement-liftOpt-equivalent-to-deflate}\ \text{filt}\,(\_\rightarrow\text{true})=\text{liftOpt}\,(\psi_{(\_\rightarrow\text{true})})=\text{liftOpt}\,(x^{:A}\rightarrow\bbnum 0+x)\quad. \] The function $\psi_{(\_\rightarrow\text{true})}$ is equivalent to -a simpler function $x^{:A}\rightarrow\bbnum 0+x$ (i.e., \lstinline!x => Some(x)! -in Scala): +a simpler function $x^{:A}\rightarrow\bbnum 0+x$ (in Scala, this +is \lstinline!x => Some(x)!): \begin{align} {\color{greenunder}\text{use Eq.~(\ref{eq:def-of-psi})}:}\quad & x^{:A}\triangleright\psi_{(\_\rightarrow\text{true})}\nonumber \\ & =x^{:A}\triangleright(\_^{:A}\rightarrow\gunderline{\text{true}^{:\text{Opt}^{\bbnum 1}}})\bef(1\rightarrow x)^{\uparrow\text{Opt}}\\ @@ -1928,7 +1929,7 @@ \subsubsection{Statement \label{subsec:Statement-identity-law-of-liftOpt}\ref{su \subparagraph{Proof} -\textbf{(a)} Compute the identity law of \lstinline!liftOpt!: +\textbf{(a)} Verify the identity law of \lstinline!liftOpt!: \begin{align*} {\color{greenunder}\text{expect to equal }\text{id}^{A}:}\quad & \text{liftOpt}\,(x^{:A}\rightarrow\bbnum 0+x)\\ & =\gunderline{(x^{:A}\rightarrow\bbnum 0+x)^{\uparrow F}\bef\text{filt}^{\bbnum 1+A}}(\text{nonEmpty})\bef\text{get}^{\uparrow F}\\ @@ -1938,18 +1939,18 @@ \subsubsection{Statement \label{subsec:Statement-identity-law-of-liftOpt}\ref{su {\color{greenunder}\text{compute composition}:}\quad & =(\text{id}^{A})^{\uparrow F}=\text{id}^{A}\quad. \end{align*} -\textbf{(b)} Compute the identity law of \lstinline!filter! using +\textbf{(b)} Verify the identity law of \lstinline!filter! using Eq.~(\ref{eq:psi-of-true-equals-Some-derivation}): \begin{align*} {\color{greenunder}\text{use Eq.~(\ref{eq:filter-via-liftOpt})}:}\quad & \text{filt}\,(\_\rightarrow\text{true})=\text{liftOpt}\,(\psi_{(\_\rightarrow\text{true})})\\ {\color{greenunder}\text{use Eq.~(\ref{eq:psi-of-true-equals-Some-derivation})}:}\quad & =\text{liftOpt}\,(x\rightarrow\bbnum 0+x)\\ {\color{greenunder}\text{use Eq.~(\ref{eq:identity-law-of-liftOpt})}:}\quad & =\text{id}\quad. \end{align*} -This completes the proof. +$\square$ The function $x^{:A}\rightarrow\bbnum 0+x$ plays the role of the \lstinline!pure! method for the \lstinline!Option! type, if we view -\lstinline!Option[_]! as a pointed functor (see Section~\ref{subsec:Pointed-functors-motivation-equivalence}). +\lstinline!Option! as a pointed functor (see Section~\ref{subsec:Pointed-functors-motivation-equivalence}). Denote that \lstinline!pure! method for brevity by $\text{pu}_{\text{Opt}}$: \[ \text{pu}_{\text{Opt}}^{:A\rightarrow\bbnum 1+A}\triangleq x^{:A}\rightarrow\bbnum 0+x\quad. @@ -2005,14 +2006,15 @@ \subsubsection{Statement \label{subsec:Statement-identity-law-of-liftOpt}\ref{su correct: a value \lstinline!x: A! will be present within the \lstinline!Option[Option[A]]! wrapper only if \emph{both} \lstinline!p1(x)! and \lstinline!p2(x)! return \lstinline!true!. To convert the result to the required type -\lstinline!Option[A]!, we apply \lstinline!Option!\textsf{'}s method \lstinline!flatten!: +\lstinline!Option[A]!, we apply \lstinline!flatten!: \begin{lstlisting} -psi(p) == x => Some(x).filter(p1).map { y => Some(y).filter(p2) }.flatten // Use flatMap instead. +psi(p) == x => Some(x).filter(p1).map { y => Some(y).filter(p2) }.flatten + // Use flatMap instead. 
== x => Some(x).filter(p1).flatMap { y => Some(y).filter(p2) } - == psi(p1) andThen (_.flatMap(psi(p2))) // Using standard flatten and flatMap for Option. + == psi(p1) andThen (_.flatMap(psi(p2))) // Use the standard methods (flatten and flatMap) for Option. \end{lstlisting} Denote this combination of the functions $\psi_{p_{1}}$ and $\psi_{p_{2}}$ -by the symbol $\diamond_{_{\text{Opt}}}$, so that we may write: +by the symbol $\diamond_{_{\text{Opt}}}$: \[ \psi_{p}=\psi_{p_{1}}\diamond_{_{\text{Opt}}}\psi_{p_{2}}\triangleq x^{:A}\rightarrow x\triangleright\psi_{p_{1}}\triangleright\text{flm}_{\text{Opt}}(\psi_{p_{2}})=\psi_{p_{1}}\bef(y\rightarrow y\triangleright\text{flm}_{\text{Opt}}(\psi_{p_{2}}))\quad. \] @@ -2042,8 +2044,9 @@ \subsubsection{Statement \label{subsec:Statement-identity-law-of-liftOpt}\ref{su The Kleisli composition $f\diamond_{_{\text{Opt}}}g$ yields a function of type $A\rightarrow\bbnum 1+C$ and is similar to the ordinary composition $f\bef g$ except for using \textsf{``}twisted\textsf{''} types, e.g., $A\rightarrow\text{Opt}^{B}$ -instead of $A\rightarrow B$. (The \textsf{``}twisted\textsf{''} functions cannot -be composed as $f\bef g$ because their types do not match.) +instead of $A\rightarrow B$. The \textsf{``}twisted\textsf{''} functions cannot be +composed via the ordinary composition operation ($f\bef g$) because +the types would not match. We can now derive the composition law of \lstinline!liftOpt! starting from Eq.~(\ref{eq:composition-law-of-filter}): @@ -2114,17 +2117,17 @@ \subsubsection{Statement \label{subsec:Statement-2-laws-of-liftOpt-entail-other- \begin{equation} f^{:A\rightarrow\bbnum 1+B}\diamond_{_{\text{Opt}}}g^{:B\rightarrow\bbnum 1+C}\triangleq f\bef\text{flm}_{\text{Opt}}(g)\quad.\label{eq:def-of-Kleisli-product} \end{equation} -Then we compute $f\diamond_{_{\text{Opt}}}g$ using this definition -of $\diamond_{_{\text{Opt}}}$ as: +We use this definition to compute $f\diamond_{_{\text{Opt}}}g$: \begin{align*} {\color{greenunder}\text{definition of }f:}\quad & \gunderline f\diamond_{_{\text{Opt}}}g=(h\bef\text{pu}_{\text{Opt}})\,\gunderline{\diamond_{_{\text{Opt}}}}\,g\\ - & =h\bef\gunderline{\text{pu}_{\text{Opt}}\bef\text{flm}_{\text{Opt}}}(g)\\ +{\color{greenunder}\text{use Eq.~(\ref{eq:def-of-Kleisli-product})}:}\quad & =h\bef\gunderline{\text{pu}_{\text{Opt}}\bef\text{flm}_{\text{Opt}}}(g)\\ {\color{greenunder}\text{compute composition (see below)}:}\quad & =h\bef g\quad. \end{align*} The result of composing \lstinline!pure! and \lstinline!flatMap! -for \lstinline!Option! is not obvious, but it turns out that $\text{pu}_{\text{Opt}}$ -followed by $\text{flm}_{\text{Opt}}(g)$ is equal to just $g$. To -verify that, let us first use the syntax of Scala: +for \lstinline!Option! is perhaps not obvious, but it turns out that +$\text{pu}_{\text{Opt}}$ followed by $\text{flm}_{\text{Opt}}(g)$ +is equal to just $g$. To verify that, let us first use the syntax +of Scala: \begin{lstlisting} pure(x) == Some(x) // By definition of `pure` for `Option`. p.flatMap(g) == p match { // By definition of `flatMap` for `Option`. 
@@ -2133,8 +2136,8 @@ \subsubsection{Statement \label{subsec:Statement-2-laws-of-liftOpt-entail-other- } pure(x).flatMap(g) == Some(x).flatMap(g) == g(x) \end{lstlisting} -The same symbolic computation is written in the code notation like -this: + +Now we write the same symbolic computation in the code notation: \begin{align} \text{pu}_{\text{Opt}}=\,\begin{array}{|c||cc|} & \bbnum 1 & A\\ @@ -2254,7 +2257,7 @@ \subsubsection{Exercise \label{subsec:Exercise-derive-composition-law-for-liftOp \subsection{Constructions of filterable functors\label{subsec:Constructions-of-filterable-functors}} -How can we recognize a filterable functor $F^{A}$ by its type expression, +How can we recognize a filterable functor $F$ by its type expression, without having to prove laws? One intuition is that the type $F^{A}$ must be able to accommodate replacing values of $A$ by unit values; this replacement is performed by the function \lstinline!deflate!. @@ -2263,12 +2266,11 @@ \subsection{Constructions of filterable functors\label{subsec:Constructions-of-f new filterable functors out of existing ones while preserving the laws. -To begin, we note that \lstinline!Option[_]!, \lstinline!Either[L, _]!, -\lstinline!Try[_]!, \lstinline!Seq[_]!, and \lstinline!Map[K, _]! -are filterable. Let us now go through all constructions available -for exponential-polynomial types. To check whether a functor is filterable, -it is convenient to use the \lstinline!liftOpt! function and its -two laws~(\ref{eq:combined-naturality-identity-law-of-liftOpt}), +To begin, we note that \lstinline!Option!, \lstinline!Either!, \lstinline!Try!, +\lstinline!Seq!, and \lstinline!Map! are filterable. Let us now +go through all constructions available for exponential-polynomial +types. To check whether a functor is filterable, it is convenient +to use the \lstinline!liftOpt! function and its two laws~(\ref{eq:combined-naturality-identity-law-of-liftOpt}), (\ref{eq:composition-law-of-liftOpt}). \paragraph{Type parameters} @@ -2276,7 +2278,8 @@ \subsection{Constructions of filterable functors\label{subsec:Constructions-of-f There are three constructions that work solely by manipulating type parameters: the identity functor $\text{Id}^{A}\triangleq A$, the constant functor $\text{Const}^{Z,A}\triangleq Z$ (where $Z$ is -a fixed type), and the functor composition, $F^{A}\triangleq G^{H^{A}}$. +a fixed type), and the functor composition, $F^{A}\triangleq G^{H^{A}}$ +(or $F\triangleq G\circ H$). The identity functor is \emph{not} filterable because \lstinline!deflate! of type $\bbnum 1+A\rightarrow A$ cannot be implemented. @@ -2288,20 +2291,20 @@ \subsection{Constructions of filterable functors\label{subsec:Constructions-of-f laws usually hold for an identity function. To verify the laws, note that the lifting to the $\text{Const}$ functor is also an identity function: $f^{\uparrow\text{Const}}=\text{id}^{:Z\rightarrow Z}$ -for any $f^{:A\rightarrow B}$. +for any $f^{:A\rightarrow B}$. We write: \begin{align*} {\color{greenunder}\text{verify law~(\ref{eq:combined-naturality-identity-law-of-liftOpt})}:}\quad & \text{liftOpt}_{\text{Const}}(f\bef\text{pu}_{\text{Opt}})=\text{id}=f^{\uparrow\text{Const}}\quad,\\ {\color{greenunder}\text{verify law~(\ref{eq:composition-law-of-liftOpt})}:}\quad & \text{liftOpt}_{\text{Const}}(f)\bef\text{liftOpt}_{\text{Const}}(g)=\text{id}\bef\text{id}=\text{id}\\ & =\text{liftOpt}_{\text{Const}}(f\diamond_{_{\text{Opt}}}g)\quad. 
\end{align*} -The functor composition $F^{A}\triangleq G^{H^{A}}$ requires only +The functor composition $F^{A}\triangleq G^{H^{A}}$ requires \emph{only} $H$ to be a filterable functor: \subsubsection{Statement \label{subsec:Statement-filterable-composition-functors}\ref{subsec:Statement-filterable-composition-functors}} -The functor $F^{A}\triangleq G^{H^{A}}$ is filterable when $H^{A}$ -is filterable and $G^{A}$ is \emph{any} functor. +The functor $F^{A}\triangleq G^{H^{A}}$ is filterable when $H$ is +filterable and $G$ is \emph{any} functor. \subparagraph{Proof} @@ -2333,14 +2336,13 @@ \subsubsection{Statement \label{subsec:Statement-filterable-composition-functors \paragraph{Products} To show that the product of two filterable functors is filterable, -we will use a definition of $\text{liftOpt}_{G^{\bullet}\times H^{\bullet}}$ -and a proof quite similar to what we did for the product of functors -(Statement~\ref{subsec:functor-Statement-functor-product}).\index{functor product} +we will use a definition of $\text{liftOpt}_{G\times H}$ and a proof +quite similar to what we did for the product of functors (Statement~\ref{subsec:functor-Statement-functor-product}).\index{functor product} \subsubsection{Statement \label{subsec:Statement-filterable-functor-product}\ref{subsec:Statement-filterable-functor-product}} The functor $F^{A}\triangleq G^{A}\times H^{A}$ is filterable if -$G^{\bullet}$ and $H^{\bullet}$ are filterable functors. +$G$ and $H$ are filterable functors. \subparagraph{Proof} @@ -2370,11 +2372,11 @@ \subsubsection{Statement \label{subsec:Statement-filterable-functor-product}\ref & \quad{\color{greenunder}\text{definition of }\text{liftOpt}_{F}:}\quad\\ & =\text{liftOpt}_{F}(f\diamond_{_{\text{Opt}}}g)\quad. \end{align*} -In this calculation, we used the composition property: +In this calculation, we used the distributive property: \begin{equation} (f\boxtimes g)\bef(p\boxtimes q)=(f\bef p)\boxtimes(g\bef q)\quad,\label{eq:function-product-distributive-property-over-composition} \end{equation} -which follows from the definition of the pair product operation $\boxtimes$, +which follows from the definition of the pair product operation $\boxtimes$: \begin{align*} & (f\boxtimes g)\bef(p\boxtimes q)\\ & =\big(a\times b\rightarrow f(a)\times g(b)\big)\bef\big(c\times d\rightarrow p(c)\times q(d)\big)\\ @@ -2388,12 +2390,12 @@ \subsubsection{Statement \label{subsec:Statement-filterable-functor-product}\ref There are two constructions that produce new filterable functors involving disjunctive types (co-product types). The first construction is the -filterable co-product $F^{A}\triangleq G^{A}+H^{A}$, where $G^{\bullet}$ -and $H^{\bullet}$ are filterable functors. This is similar to the -functor co-product (Statement~\ref{subsec:functor-Statement-functor-coproduct}). -The second construction is $F^{A}\triangleq\bbnum 1+A\times G^{A}$ -where $G^{\bullet}$ is a filterable functor. This cannot be reduced -to the first construction because $A\times G^{A}$ is not filterable. +filterable co-product $F^{A}\triangleq G^{A}+H^{A}$, where $G$ and +$H$ are filterable functors. This is similar to the functor co-product +(Statement~\ref{subsec:functor-Statement-functor-coproduct}). The +second construction is $F^{A}\triangleq\bbnum 1+A\times G^{A}$, where +$G$ is a filterable functor. This cannot be reduced to the first +construction because $A\times G^{A}$ is never filterable. 
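As an illustration of the product construction
(Statement~\ref{subsec:Statement-filterable-functor-product}), here is a
minimal sketch with $G$ being \lstinline!Seq! and $H$ being \lstinline!Option!;
the names \lstinline!FProd! and \lstinline!filterProd! are chosen ad hoc:
\begin{lstlisting}
final case class FProd[A](g: Seq[A], h: Option[A])      // F[A] = G[A] x H[A].
def filterProd[A](p: A => Boolean): FProd[A] => FProd[A] = {
  case FProd(g, h) => FProd(g.filter(p), h.filter(p))   // Filter each part with its own lawful filter.
}

scala> filterProd[Int](_ > 0)(FProd(Seq(1, -2, 3), Some(-4)))
res0: FProd[Int] = FProd(List(1, 3), None)
\end{lstlisting}
Filtering each part independently corresponds to the pair product
$\text{liftOpt}_{G}(f)\boxtimes\text{liftOpt}_{H}(f)$ used in the proof above.
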
\subsubsection{Statement \label{subsec:Statement-filterable-coproduct}\ref{subsec:Statement-filterable-coproduct}} @@ -2417,7 +2419,7 @@ \subsubsection{Statement \label{subsec:Statement-filterable-coproduct}\ref{subse H^{A} & \bbnum 0 & \text{liftOpt}_{H}(f) \end{array}\quad. \] -Lifting to the functor $F^{A}$ is defined as in Statement~\ref{subsec:functor-Statement-functor-coproduct}: +Lifting to the functor $F$ is defined as in Statement~\ref{subsec:functor-Statement-functor-coproduct}: \[ f^{\uparrow F}\triangleq\begin{array}{|c||cc|} & G^{A} & H^{A}\\ @@ -2468,8 +2470,8 @@ \subsubsection{Statement \label{subsec:Statement-filterable-coproduct}\ref{subse \subsubsection{Statement \label{subsec:Statement-filterable-coproduct-1}\ref{subsec:Statement-filterable-coproduct-1}} -The functor $F^{A}\triangleq\bbnum 1+A\times G^{A}$ is filterable -if $G$ is a filterable functor. +If $G$ is a filterable functor then $F^{A}\triangleq\bbnum 1+A\times G^{A}$ +is filterable . \subparagraph{Proof} @@ -2579,7 +2581,7 @@ \subsubsection{Statement \label{subsec:Statement-filterable-coproduct-1}\ref{sub Additional work is necessary to check \lstinline!liftOpt!\textsf{'}s composition law: \begin{equation} -\text{liftOpt}_{F}(f)\bef\text{liftOpt}_{F}(f^{\prime})=\text{liftOpt}_{F}(f\diamond_{_{\text{Opt}}}f^{\prime})\quad.\label{eq:liftOpt-composition-law-derivation1} +\text{liftOpt}_{F}(f)\bef\text{liftOpt}_{F}(g)=\text{liftOpt}_{F}(f\diamond_{_{\text{Opt}}}g)\quad.\label{eq:liftOpt-composition-law-derivation1} \end{equation} Since $\text{liftOpt}_{F}(f)$ is equal to $\text{flm}_{\text{Opt}}(...)$, so we need somehow to transform an expression of the form $\text{flm}_{\text{Opt}}(...)\bef\text{flm}_{\text{Opt}}(...)$ @@ -2603,7 +2605,7 @@ \subsubsection{Statement \label{subsec:Statement-filterable-coproduct-1}\ref{sub So, \lstinline!Option!\textsf{'}s \lstinline!flatMap! method equals \lstinline!liftOpt! and obeys a law similar to \lstinline!liftOpt!\textsf{'}s composition law: \begin{equation} -\text{flm}_{\text{Opt}}(f)\bef\text{flm}_{\text{Opt}}(f^{\prime})=\text{flm}_{\text{Opt}}(f\diamond_{_{\text{Opt}}}f^{\prime})=\text{flm}_{\text{Opt}}\big(f\bef\text{flm}_{\text{Opt}}(f^{\prime})\big)\quad.\label{eq:associativity-law-of-flatMap-for-Option} +\text{flm}_{\text{Opt}}(f)\bef\text{flm}_{\text{Opt}}(g)=\text{flm}_{\text{Opt}}(f\diamond_{_{\text{Opt}}}g)=\text{flm}_{\text{Opt}}\big(f\bef\text{flm}_{\text{Opt}}(g)\big)\quad.\label{eq:associativity-law-of-flatMap-for-Option} \end{equation} We call Eq.~(\ref{eq:associativity-law-of-flatMap-for-Option}) the \textbf{associativity law} of \lstinline!flatMap!\index{associativity law!of flatMap for Option@of \texttt{flatMap} for \texttt{Option}} @@ -2650,14 +2652,15 @@ \subsubsection{Statement \label{subsec:Statement-filterable-coproduct-1}\ref{sub & (p^{:A\rightarrow B})^{\uparrow\text{Opt}}\bef\text{flm}_{\text{Opt}}(q^{:B\rightarrow\text{Opt}^{C}})=\text{flm}_{\text{Opt}}(\text{???}^{:A\rightarrow\text{Opt}^{C}})\quad,\\ & \text{flm}_{\text{Opt}}(p^{:A\rightarrow\text{Opt}^{B}})\bef(q^{:B\rightarrow C})^{\uparrow\text{Opt}}=\text{flm}_{\text{Opt}}(\text{???}^{:A\rightarrow\text{Opt}^{C}})\quad. 
\end{align*} -The typed holes must be filled using the only available data (the -functions $p$ and $q$): +The typed holes must be filled using the only available data \textemdash{} +the functions $p$ and $q$: \begin{align} & (p^{:A\rightarrow B})^{\uparrow\text{Opt}}\bef\text{flm}_{\text{Opt}}(q^{:B\rightarrow\text{Opt}^{C}})=\text{flm}_{\text{Opt}}(p\bef q)\quad,\label{eq:left-naturality-flatmap-option}\\ & \text{flm}_{\text{Opt}}(p^{:A\rightarrow\text{Opt}^{B}})\bef(q^{:B\rightarrow C})^{\uparrow\text{Opt}}=\text{flm}_{\text{Opt}}(p\bef q^{\uparrow\text{Opt}})\quad.\label{eq:right-naturality-flatmap-option} \end{align} -We omit the proofs for these \textbf{naturality laws}\index{naturality law!of flatMap for Option@of \texttt{flatMap} for \texttt{Option}} -of \lstinline!flatMap!. With them, we transform Eqs.~(\ref{eq:composition-law-lhs-remaining-derivation1})\textendash (\ref{eq:composition-law-rhs-remaining-derivation1}): +The last two equations are the \textbf{naturality laws}\index{naturality law!of flatMap for Option@of \texttt{flatMap} for \texttt{Option}} +of \lstinline!flatMap! for \lstinline!Option! (see Exercise~\ref{subsec:Exercise-filterable-laws-2-1}). +Using those laws, we transform Eqs.~(\ref{eq:composition-law-lhs-remaining-derivation1})\textendash (\ref{eq:composition-law-rhs-remaining-derivation1}): \begin{align*} & \gunderline{r_{f,g}^{\uparrow\text{Opt}}\bef}\,\text{flm}_{\text{Opt}}\big(a^{\prime}\times g^{\prime}\rightarrow a^{\prime}\triangleright f^{\prime}\bef r_{f^{\prime},g^{\prime}}^{\uparrow\text{Opt}}\big)\\ {\color{greenunder}\text{use Eq.~(\ref{eq:left-naturality-flatmap-option})}:}\quad & =\text{flm}_{\text{Opt}}\big(\gunderline{r_{f,g}}\bef\big(a^{\prime}\times g^{\prime}\rightarrow a^{\prime}\triangleright f^{\prime}\bef r_{f^{\prime},g^{\prime}}^{\uparrow\text{Opt}}\big)\big)\\ @@ -2669,8 +2672,8 @@ \subsubsection{Statement \label{subsec:Statement-filterable-coproduct-1}\ref{sub & \text{flm}_{\text{Opt}}(f^{\prime})\,\gunderline{\bef r_{f\bef\text{flm}_{\text{Opt}}(f^{\prime}),g}^{\uparrow\text{Opt}}}=\text{flm}_{\text{Opt}}\big(\gunderline{f^{\prime}}\bef r_{f\bef\text{flm}_{\text{Opt}}(f^{\prime}),g}^{\uparrow\text{Opt}}\big)\\ {\color{greenunder}\text{expand function }f^{\prime}:}\quad & =\text{flm}_{\text{Opt}}\big(a\rightarrow a\triangleright f^{\prime}\bef r_{f\bef\text{flm}_{\text{Opt}}(f^{\prime}),g}^{\uparrow\text{Opt}}\big)\quad. \end{align*} -The difference between sub-expressions has become smaller; it just -remains to show the following: +The difference between sub-expressions has become smaller. It remains +to show the following: \[ r_{f^{\prime},\text{liftOpt}_{G}(f)(g)}\overset{?}{=}r_{f\bef\text{flm}_{\text{Opt}}(f^{\prime}),g}\quad. \] @@ -2694,16 +2697,16 @@ \subsubsection{Statement \label{subsec:Statement-filterable-coproduct-1}\ref{sub implements a special kind of filtering where the value $a^{:A}$ in the pair of type $A\times G^{A}$ needs to pass the filter for any data to remain in the functor after filtering. We can use the same -construction repeatedly with $G^{\bullet}\triangleq\bbnum 1$ and -obtain the type: +construction repeatedly with $G^{A}\triangleq\bbnum 1$ and obtain +the type: \[ L_{n}^{A}\triangleq\underbrace{\bbnum 1+A\times\left(\bbnum 1+A\times\left(\bbnum 1+...\times(\bbnum 1+A\times\bbnum 1)\right)\right)}_{\text{parameter }A\text{ is used }n\text{ times}}\quad, \] which is equivalent to a list of up to $n$ elements. 
The construction -defines a filtering operation for $L_{n}^{\bullet}$ that will delete -any data beyond the first value of type $A$ that does fails the predicate. -It is clear that this filtering operation implements the standard -\lstinline!takeWhile! method defined on sequences.\index{filterable!defined via takeWhile@defined via \texttt{takeWhile}} +defines a filtering operation for $L_{n}$ that will delete any data +beyond the first value of type $A$ that fails the predicate. It is +clear that this filtering operation implements the standard \lstinline!takeWhile! +method defined on sequences.\index{filterable!defined via takeWhile@defined via \texttt{takeWhile}} \label{proof-that-takeWhile-is-a-lawful-filter}So, \lstinline!takeWhile! is a lawful filtering operation (see Example~\ref{subsec:filt-solved-example-3} where it was used). @@ -2724,10 +2727,9 @@ \subsubsection{Statement \label{subsec:Statement-filterable-coproduct-1}\ref{sub As we have seen in Chapter~\ref{chap:Functors,-contrafunctors,-and} (Statement~\ref{subsec:functor-Statement-functor-exponential}), functors involving a function type, such as $F^{A}\triangleq G^{A}\rightarrow H^{A}$, -require $G^{\bullet}$ to be a \emph{contrafunctor} rather than a -functor. It turns out that the functor $G^{A}\rightarrow H^{A}$ is -filterable only if the contrafunctor $G^{\bullet}$ has certain properties -(Eqs.~(\ref{eq:naturality-identity-law-filterable-contrafunctor})\textendash (\ref{eq:composition-law-filterable-contrafunctor}) +require $G$ to be a \emph{contrafunctor} rather than a functor. It +turns out that the functor $G^{A}\rightarrow H^{A}$ is filterable +only if the contrafunctor $G$ has certain properties (Eqs.~(\ref{eq:naturality-identity-law-filterable-contrafunctor})\textendash (\ref{eq:composition-law-filterable-contrafunctor}) below) that are quite similar to the properties of filterable functors. We will call such contrafunctors \textbf{filterable}.\index{filterable!contrafunctor} @@ -2739,27 +2741,28 @@ \subsubsection{Statement \label{subsec:Statement-filterable-coproduct-1}\ref{sub \end{align*} Assume that $H$ is filterable, so that we have the function $\text{liftOpt}_{H}(f):H^{A}\rightarrow H^{B}$. We will fill the typed hole $\text{???}^{:H^{B}}$ if we somehow get -a value of type $H^{A}$; that is only possible if we apply $p^{:G^{A}\rightarrow H^{A}}$, +a value of type $H^{A}$. That is only possible if we apply $p^{:G^{A}\rightarrow H^{A}}$ +to something of type $G^{A}$: \[ \text{liftOpt}_{F}(f)=p^{:G^{A}\rightarrow H^{A}}\rightarrow g^{:G^{B}}\rightarrow\text{liftOpt}_{H}(f)(p(\text{???}^{:G^{A}}))\quad. \] The only way to proceed is to have a function $G^{B}\rightarrow G^{A}$. We cannot obtain such a function by lifting $f$ to the contrafunctor $G$: that gives $f^{\downarrow G}:G^{\bbnum 1+B}\rightarrow G^{A}$. -So, we need to require having a function: +We need to have a function $\text{liftOpt}_{G}$ with this type signature: \begin{equation} \text{liftOpt}_{G}(f^{:A\rightarrow\bbnum 1+B}):G^{B}\rightarrow G^{A}\quad.\label{eq:type-signature-liftOpt-contrafunctors} \end{equation} This function is analogous to \lstinline!liftOpt! for functors, except for the reverse direction of transformation ($G^{B}\rightarrow G^{A}$ -instead of $G^{A}\rightarrow G^{B}$). We can now complete the implementation -of $\text{liftOpt}_{F}$: +instead of $G^{A}\rightarrow G^{B}$). 
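+In Scala, the required type signature may be sketched as:
+\begin{lstlisting}
+// For a contrafunctor G, liftOpt works in the reverse direction: G[B] => G[A].
+def liftOpt_G[A, B](f: A => Option[B]): G[B] => G[A]
+\end{lstlisting}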
Assuming that $\text{liftOpt}_{G}$ +is available, we can now complete the implementation of $\text{liftOpt}_{F}$: \begin{align} & \text{liftOpt}_{F}(f^{:A\rightarrow\bbnum 1+B})\triangleq p^{:G^{A}\rightarrow H^{A}}\rightarrow g^{:G^{B}}\rightarrow\gunderline{\text{liftOpt}_{H}(f)\big(p(\text{\text{liftOpt}}_{G}(f)(g))\big)}\nonumber \\ - & \quad\triangleright\text{-notation}:\quad\nonumber \\ - & =p^{:G^{A}\rightarrow H^{A}}\rightarrow\gunderline{g^{:G^{B}}\rightarrow g\,\triangleright}\,\text{\text{liftOpt}}_{G}(f)\triangleright p\triangleright\text{liftOpt}_{H}(f)\\ - & \quad\text{omit }(g\rightarrow g\,\triangleright):\quad\label{eq:def-of-liftopt-function-type}\\ - & =p\rightarrow\text{\text{liftOpt}}_{G}(f)\bef p\bef\text{liftOpt}_{H}(f)\quad. + & \quad{\color{greenunder}\text{pipe notation}:}\quad\nonumber \\ + & =p^{:G^{A}\rightarrow H^{A}}\rightarrow\gunderline{g^{:G^{B}}\rightarrow g\,\triangleright}\,\text{\text{liftOpt}}_{G}(f)\triangleright p\triangleright\text{liftOpt}_{H}(f)\nonumber \\ + & \quad{\color{greenunder}\text{omit }(g\rightarrow g\,\triangleright):}\quad\nonumber \\ + & =p\rightarrow\text{\text{liftOpt}}_{G}(f)\bef p\bef\text{liftOpt}_{H}(f)\quad.\label{eq:def-of-liftopt-function-type} \end{align} Note that the last line is similar to Eq.~(\ref{eq:f-functor-exponential-def-of-fmap}) but with \lstinline!liftOpt! instead of \lstinline!map!: @@ -2767,29 +2770,28 @@ \subsubsection{Statement \label{subsec:Statement-filterable-coproduct-1}\ref{sub (f^{:A\rightarrow B})^{\uparrow F}=p^{:G^{A}\rightarrow H^{A}}\rightarrow f^{\downarrow G}\bef p\bef f^{\uparrow F}=p\rightarrow\text{cmap}_{G}(f)\bef p\bef\text{fmap}_{F}(f)\quad. \] -The laws for filterable contrafunctors are chosen such that $F^{A}\triangleq G^{A}\rightarrow H^{A}$ -can be shown to obey filtering laws when $H^{\bullet}$ is a filterable -functor and $G^{\bullet}$ is a filterable contrafunctor. +The laws for filterable contrafunctors ensure that $F^{A}\triangleq G^{A}\rightarrow H^{A}$ +obeys filtering laws when $H$ is a filterable functor and $G$ is +a filterable contrafunctor: \subsubsection{Statement \label{subsec:Statement-filterable-function-type}\ref{subsec:Statement-filterable-function-type}} -Assume that $H^{\bullet}$ is a lawful filterable functor and $G^{\bullet}$ -is a contrafunctor with a function $\text{liftOpt}_{G}$ having type -signature~(\ref{eq:type-signature-liftOpt-contrafunctors}) and obeying -the laws~(\ref{eq:naturality-identity-law-filterable-contrafunctor})\textendash (\ref{eq:composition-law-filterable-contrafunctor}) +Assume that $H$ is a lawful filterable functor and $G$ is a contrafunctor +with a function $\text{liftOpt}_{G}$ having type signature~(\ref{eq:type-signature-liftOpt-contrafunctors}) +and obeying the laws~(\ref{eq:naturality-identity-law-filterable-contrafunctor})\textendash (\ref{eq:composition-law-filterable-contrafunctor}) shown below. Then the functor $F^{A}\triangleq G^{A}\rightarrow H^{A}$ is filterable. \subparagraph{Proof} -We will arrive at the required laws for $G$ by trying to prove the -laws for $F$. +We will find the required laws for $G$ by trying to prove the laws +for $F$. -Because $F^{\bullet}$ has a function type, it is convenient to apply -both sides of the laws to an arbitrary value $p^{:G^{A}\rightarrow H^{A}}$. +Because $F$ contains a function type, it is convenient to apply both +sides of the laws to an arbitrary value $p^{:G^{A}\rightarrow H^{A}}$. 
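+In Scala, definition~(\ref{eq:def-of-liftopt-function-type}) may be
+sketched like this, assuming that lawful functions \lstinline!liftOpt_G!
+(for the contrafunctor $G$) and \lstinline!liftOpt_H! (for the functor
+$H$) are given:
+\begin{lstlisting}
+// Assume that the contrafunctor G and the functor H were defined previously.
+def liftOpt_G[A, B](f: A => Option[B]): G[B] => G[A] = ??? // Assumed given and lawful.
+def liftOpt_H[A, B](f: A => Option[B]): H[A] => H[B] = ??? // Assumed given and lawful.
+// A sketch of liftOpt for F[A] = G[A] => H[A]:
+def liftOpt_F[A, B](f: A => Option[B]): (G[A] => H[A]) => (G[B] => H[B]) =
+  p => liftOpt_G(f) andThen p andThen liftOpt_H(f)
+\end{lstlisting}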
Consider the naturality-identity law of $F$: \begin{align*} -{\color{greenunder}\text{expect to equal }p\triangleright f^{\uparrow F}=f^{\downarrow G}\bef p\bef f^{\uparrow H}:}\quad & p\triangleright\text{liftOpt}_{F}(f\bef\text{pu}_{\text{Opt}})\\ +{\color{greenunder}\text{expect to equal }f^{\downarrow G}\bef p\bef f^{\uparrow H}:}\quad & p\triangleright\text{liftOpt}_{F}(f\bef\text{pu}_{\text{Opt}})\\ {\color{greenunder}\text{definition~(\ref{eq:def-of-liftopt-function-type}) of }\text{liftOpt}_{F}:}\quad & =\text{\text{liftOpt}}_{G}(f\bef\text{pu}_{\text{Opt}})\bef p\bef\gunderline{\text{liftOpt}_{H}(f\bef\text{pu}_{\text{Opt}})}\\ {\color{greenunder}\text{naturality-identity law of }\text{liftOpt}_{H}:}\quad & =\text{\text{liftOpt}}_{G}(f\bef\text{pu}_{\text{Opt}})\bef p\bef\gunderline{f^{\uparrow H}}\quad. \end{align*} @@ -2806,8 +2808,8 @@ \subsubsection{Statement \label{subsec:Statement-filterable-function-type}\ref{s & \quad{\color{greenunder}\text{left-hand side of Eq.~(\ref{eq:composition-law-of-liftOpt}) for }F:}\quad\\ & p\triangleright\text{liftOpt}_{F}(f)\bef\text{liftOpt}_{F}(g)=\gunderline{p\triangleright\text{liftOpt}_{F}(f)}\triangleright\text{liftOpt}_{F}(g)\\ & \quad{\color{greenunder}\text{definition~(\ref{eq:def-of-liftopt-function-type}) of }\text{liftOpt}_{F}:}\quad\\ - & =\big(\text{\text{liftOpt}}_{G}(f)\bef p\bef\text{liftOpt}_{H}(f)\big)\,\gunderline{\triangleright\text{liftOpt}_{F}(g)}\\ - & \quad{\color{greenunder}\text{definition~(\ref{eq:def-of-liftopt-function-type})}:}\quad\\ + & =\big(\text{\text{liftOpt}}_{G}(f)\bef p\bef\text{liftOpt}_{H}(f)\big)\,\gunderline{\triangleright\,\text{liftOpt}_{F}(g)}\\ + & \quad{\color{greenunder}\text{again definition~(\ref{eq:def-of-liftopt-function-type})}:}\quad\\ & =\text{\text{liftOpt}}_{G}(g)\bef\big(\text{\text{liftOpt}}_{G}(f)\bef p\bef\gunderline{\text{liftOpt}_{H}(f)\big)\bef\text{liftOpt}_{H}(g)}\\ & \quad{\color{greenunder}\text{composition law~(\ref{eq:composition-law-of-liftOpt}) of }\text{liftOpt}_{H}:}\quad\\ & =\text{\text{liftOpt}}_{G}(g)\bef\text{\text{liftOpt}}_{G}(f)\bef p\bef\text{liftOpt}_{H}(f\diamond_{_{\text{Opt}}}g)\quad. @@ -2835,16 +2837,16 @@ \subsubsection{Statement \label{subsec:Statement-filterable-function-type}\ref{s \text{List}^{A}\triangleq\bbnum 1+A\times\text{List}^{A}\quad, \] and the recursive construction for ordinary functors (Statement~\ref{subsec:functor-Statement-functor-recursive}) -that requires a bifunctor $S^{\bullet,\bullet}$. +that requires a bifunctor $S$. \subsubsection{Statement \label{subsec:Statement-filterable-recursive-type}\ref{subsec:Statement-filterable-recursive-type}} -If $G^{\bullet}$ is a filterable functor, the recursive functor $F^{\bullet}$ -defined by: +If $G$ is a filterable functor, the recursive functor $F$ defined +by: \[ F^{A}\triangleq G^{A}+A\times F^{A} \] -is filterable. With $G^{A}\triangleq\bbnum 1$, this construction +is filterable. When $G^{A}\triangleq\bbnum 1$, this construction reproduces the standard filtering operation of \lstinline!List!. \subparagraph{Proof} @@ -2852,14 +2854,14 @@ \subsubsection{Statement \label{subsec:Statement-filterable-recursive-type}\ref{ We first need to implement the type constructor $F$ and the function $\text{liftOpt}_{F}$: \begin{lstlisting} -sealed trait F[A] // Assume that the functor G was defined previously. +sealed trait F[A] // Assume that the functor G was defined previously. 
final case class FG[A](g: G[A]) extends F[A] final case class FAF[A](a: A, rf: F[A]) extends F[A] - // Assume that liftOpt_G is available and define liftOpt_F: + // Assume that liftOpt_G is available and define liftOpt_F: def liftOpt_F[A, B](f: A => Option[B]): F[A] => F[B] = { case FG(g) => FG(liftOpt_G(f)(g)) - case FAF(a, rf) => f(a) match { // Does `a` pass the filtering predicate? - case None => liftOpt_F(f)(rf) // No. Drop `a` and filter `rf` recursively. + case FAF(a, rf) => f(a) match { // Does `a` pass the filtering predicate? + case None => liftOpt_F(f)(rf) // No. Drop `a` and filter `rf` recursively. case Some(b) => FAF[B](b, liftOpt_F(f)(rf)) // Yes. Keep `b` and filter `rf` recursively. } } @@ -2892,8 +2894,8 @@ \subsubsection{Statement \label{subsec:Statement-filterable-recursive-type}\ref{ The disjunctive type $F^{A}$ has two cases, $G^{A}+\bbnum 0^{:A\times F^{A}}$ and $\bbnum 0^{:G^{A}}+A\times F^{A}$. When applied to a value $g^{:G^{A}}+\bbnum 0$, the function $\text{liftOpt}_{F}$ is exactly the same as $\text{liftOpt}_{G}$, -so both laws are satisfied since $G^{\bullet}$ is a lawful filterable -functor. It remains to verify the laws when applied to a value $\bbnum 0^{:G^{A}}+a^{:A}\times r^{:F^{A}}$. +so both laws are satisfied since $G$ is a lawful filterable functor. +It remains to verify the laws when applied to a value $\bbnum 0^{:G^{A}}+a^{:A}\times r^{:F^{A}}$. To prepare for the calculations, write the result of applying $\text{liftOpt}_{F}$ to a value $\bbnum 0+a\times r$: @@ -2905,8 +2907,8 @@ \subsubsection{Statement \label{subsec:Statement-filterable-recursive-type}\ref{ B & b^{:B}\rightarrow\bbnum 0^{:G^{B}}+\left(b\times r\right)\triangleright\overline{\text{liftOpt}_{F}}(f) \end{array}\quad.\label{eq:expression-liftOpt-derivation2} \end{align} -To check the naturality-identity law~(\ref{eq:combined-naturality-identity-law-of-liftOpt}) -of $\text{liftOpt}_{F}$, begin with the left-hand side: +To check the naturality-identity law~(\ref{eq:combined-naturality-identity-law-of-liftOpt}), +begin with the left-hand side: \begin{align*} & (\bbnum 0+a\times r)\triangleright\text{liftOpt}_{F}(\gunderline{f\bef\text{pu}_{\text{Opt}}})\\ & =(\bbnum 0+a\times r)\triangleright\text{liftOpt}_{F}(x\rightarrow\bbnum 0+f(x))\\ @@ -2934,15 +2936,15 @@ \subsubsection{Statement \label{subsec:Statement-filterable-recursive-type}\ref{ {\color{greenunder}\text{use Eq.~(\ref{eq:expression-liftOpt-derivation2})}:}\quad & =f(a)\triangleright\,\begin{array}{||c|} 1\rightarrow r\triangleright\overline{\text{liftOpt}_{F}}(f)\\ b^{:B}\rightarrow\bbnum 0+\left(b\times r\right)\triangleright\overline{\text{liftOpt}_{F}}(f) -\end{array}\,\,\gunderline{\triangleright\,\text{liftOpt}_{F}(g)} +\end{array}\,\,\,\gunderline{\triangleright\,\text{liftOpt}_{F}(g)} \end{align*} \begin{align} - & \quad\text{apply }\text{liftOpt}_{F}(g):\quad\nonumber \\ + & \quad{\color{greenunder}\text{apply }\text{liftOpt}_{F}(g):}\quad\nonumber \\ & =f(a)\triangleright\,\,\begin{array}{||c|} 1\rightarrow r\triangleright\overline{\text{liftOpt}_{F}}(f)\,\triangleright\,\text{liftOpt}_{F}(g)\\ b^{:B}\rightarrow\big(\bbnum 0+\left(b\times r\right)\triangleright\overline{\text{liftOpt}_{F}}(f)\big)\triangleright\text{liftOpt}_{F}(g) \end{array}\nonumber \\ - & \quad\text{use Eq.~(\ref{eq:expression-liftOpt-derivation2})}:\quad\nonumber \\ + & \quad{\color{greenunder}\text{use Eq.~(\ref{eq:expression-liftOpt-derivation2})}:}\quad\nonumber \\ & =f(a)\triangleright\,\begin{array}{||c|} 1\rightarrow 
r\triangleright\overline{\text{liftOpt}_{F}}(f)\bef\text{liftOpt}_{F}(g)\\ b^{:B}\rightarrow g(b)\triangleright\,\begin{array}{||c|} @@ -2950,7 +2952,7 @@ \subsubsection{Statement \label{subsec:Statement-filterable-recursive-type}\ref{ c^{:C}\rightarrow\bbnum 0+\left(c\times r\right)\triangleright\overline{\text{liftOpt}_{F}}(f)\bef\overline{\text{liftOpt}_{F}}(g) \end{array} \end{array}\nonumber \\ - & \quad{\color{greenunder}\text{inductive assumption}:}\quad\\ + & \quad{\color{greenunder}\text{inductive assumption}:}\quad\nonumber \\ & =a\triangleright f\bef\,\,\begin{array}{||c|} 1\rightarrow r\triangleright\overline{\text{liftOpt}_{F}}(f\diamond_{_{\text{Opt}}}g)\\ b^{:B}\rightarrow g(b)\triangleright\,\begin{array}{||c|} @@ -2959,10 +2961,11 @@ \subsubsection{Statement \label{subsec:Statement-filterable-recursive-type}\ref{ \end{array} \end{array}\quad.\label{eq:lhs-comp-law-liftOpt-derivation2} \end{align} -We are justified to use the inductive assumption for $\overline{\text{liftOpt}_{F}}(f)\bef\text{liftOpt}_{F}(g)$ +We are justified in using the inductive assumption for $\overline{\text{liftOpt}_{F}}(f)\bef\text{liftOpt}_{F}(g)$ even though the second function call, $\text{liftOpt}_{F}(g)$, is -not a recursive call. It is sufficient that at least one term in the -function composition is a recursive call to $\overline{\text{liftOpt}_{F}}$. +not marked as a recursive call. This is because the symbol $\overline{\text{liftOpt}_{F}}$ +denotes the \emph{same} function as $\text{liftOpt}_{F}$. By the +inductive assumption, the laws already hold for $\overline{\text{liftOpt}_{F}}$. The right-hand side of Eq.~(\ref{eq:composition-law-of-liftOpt}) applied to $\bbnum 0+a\times r$ gives: @@ -2990,8 +2993,8 @@ \subsubsection{Statement \label{subsec:Statement-filterable-recursive-type}\ref{ where $p\triangleq1\rightarrow r\triangleright\text{liftOpt}_{F}(f\diamond_{_{\text{Opt}}}g)$ and $q\triangleq c\rightarrow\bbnum 0+c\times r\triangleright\text{liftOpt}_{F}(f\diamond_{_{\text{Opt}}}g)$ are some fixed functions. We can show that Eq.~(\ref{eq:comp-liftOpt-last-diff-derivation2}) -holds for arbitrary functions $p$ and $q$. Start from the right-hand -side: +holds for arbitrary functions $p$ and $q$ having suitable types. +Start from the right-hand side: \begin{align*} {\color{greenunder}\text{expect the l.h.s.~of Eq.~(\ref{eq:comp-liftOpt-last-diff-derivation2})}:}\quad & (\gunderline{f\diamond_{_{\text{Opt}}}g})\bef\,\begin{array}{||c|} p\\ @@ -3036,13 +3039,13 @@ \subsubsection{Statement \label{subsec:Statement-filterable-recursive-type}\ref{ method for sequences. $\square$ The next construction is for a functor defined via a filterable recursion -scheme. The filtering logic is then different from that used in Statement~\ref{subsec:Statement-filterable-recursive-type}. +scheme. Then the filtering logic is different from that used in Statement~\ref{subsec:Statement-filterable-recursive-type}. \subsubsection{Statement \label{subsec:Statement-filterable-recursive-type-1}\ref{subsec:Statement-filterable-recursive-type-1}} If $S^{A,R}$ is a bifunctor that is filterable with respect to $A$, -the recursive functor $F^{A}$ defined by the recursion scheme $S^{A,R}$ -(type equation $F^{A}\triangleq S^{A,F^{A}}$) is filterable. +the recursive functor $F$ defined by the type equation $F^{A}\triangleq S^{A,F^{A}}$ +is filterable. 
\subparagraph{Proof} @@ -3095,26 +3098,26 @@ \subsubsection{Statement \label{subsec:Statement-filterable-recursive-type-1}\re \subsection{Filterable contrafunctors: motivation and examples} -An intuitive view is that functors are \textsf{``}wrappers\textsf{''} of data, while -contrafunctors \textsf{``}consume\textsf{''} data. Filterable functors permit us to -exclude certain data from a wrapper; filterable contrafunctors permit -us to exclude certain data from being consumed. Let us now make this -intuition precise. +An intuitive view is that functors are \textsf{``}wrappers\textsf{''} that store data, +while contrafunctors \textsf{``}consume\textsf{''} data. Filterable functors permit +us to exclude certain data from storage; filterable contrafunctors +permit us to exclude certain data from being consumed. Let us now +make this intuition precise. A simple contrafunctor is $C^{A}\triangleq A\rightarrow Z$, where $Z$ is a constant type. This is a general form of an \textsf{``}extractor\textsf{''} \textemdash{} for example, a function that extracts logging information -of type $Z$ from data of an arbitrary type $A$. It is sometimes -necessary to exclude particular kinds of information (e.g., private -personal data) from logging. We can implement this by providing a -predicate of type \lstinline!A => Boolean! that decides, depending -on the given value $x^{:A}$, whether $x$ should be passed to the -extractor. That predicate will be attached to a given extractor $c^{:C^{A}}$ -by the \lstinline!filter! operation: +of type $Z$ from data of various types $A$. It is sometimes necessary +to exclude particular kinds of information (e.g., private personal +data) from logging. We can implement this by providing a predicate +of type \lstinline!A => Boolean! that decides, depending on the given +value $x^{:A}$, whether $x$ should be passed to the extractor. That +predicate will be attached to a given extractor $c^{:C^{A}}$ by the +\lstinline!filter! operation: \begin{lstlisting} def filter[A](p: A => Boolean): C[A] => C[A] -val extractor: C[Payload] = ??? // Original code for extracting metadata from payloads. -val noPrivateData: Payload => Boolean = ??? // Returns true only if payload has no private data. +val extractor: C[Payload] = ??? // Code that extracts metadata from payloads. +val noPrivateData: Payload => Boolean = ??? // Returns true only if payload has no private data. val filtered: C[Payload] = filter(noPrivateData)(extractor) // Will not extract private data. \end{lstlisting} How could filtering work when the predicate returns \lstinline!false!? @@ -3143,10 +3146,10 @@ \subsubsection{Example \label{subsec:Example-first-filterable-contrafunctor}\ref \begin{lstlisting} val c: A => Option[Z] = ??? \end{lstlisting} -we need somehow to impose a filter predicate $p^{:A\rightarrow\bbnum 2}$ -ensuring that the function $c$ is applied only to values that pass -the predicate. The result will be a new function $d^{:A\rightarrow\bbnum 1+Z}$ -that will use its argument only if it passes the predicate. The function +we need somehow to ensure that the function $c$ is applied only to +values that pass a given filter predicate $p^{:A\rightarrow\bbnum 2}$. +The result will be a new function $d^{:A\rightarrow\bbnum 1+Z}$ that +will use its argument only if it passes the predicate. The function $d$ could return \lstinline!None! for all arguments, but that implementation would lose information. 
If \lstinline!p(a) == true!, we may compute \lstinline!c(a)!, getting a value of type $\bbnum 1+Z$ that $d$ @@ -3156,11 +3159,11 @@ \subsubsection{Example \label{subsec:Example-first-filterable-contrafunctor}\ref val d: A => Option[Z] = { a => if (p(a)) c(a) else None } \end{lstlisting} The transformation from \lstinline!c! to \lstinline!d! is a filtering -operation for the contrafunctor $C^{A}$, implemented as: +operation for the contrafunctor $C$, implemented as: \begin{lstlisting} def filter[A](p: A => Boolean)(c: A => Option[Z]): A => Option[Z] = { a => if (p(a)) c(a) else None -} // Equivalent code is { a => Some(a).filter(p).flatMap(c) } +} // Equivalent code is { a => Some(a).filter(p).flatMap(c) } \end{lstlisting} \[ \text{filt}_{C}(p^{:A\rightarrow\bbnum 2})\triangleq c^{:A\rightarrow\bbnum 1+Z}\rightarrow\text{pu}_{\text{Opt}}\bef\text{filt}_{\text{Opt}}(p)\bef\text{flm}_{\text{Opt}}(c)=c\rightarrow\psi_{p}\bef\text{flm}_{\text{Opt}}(c)\quad. @@ -3196,9 +3199,9 @@ \subsubsection{Example \label{subsec:Example-first-filterable-contrafunctor-1}\r \lstinline!filter! function is implemented by: \begin{lstlisting} def filter[A](p: A => Boolean)(c: Option[A] => Z): Option[A] => Z = { - case Some(a) if p(a) => c(Some(a)) // Only apply `c` to `a` if `p(a) == true`. - case _ => c(None) // Return c(None) otherwise, or for empty Option. -} // Equivalent code is _.filter(p).pipe(c) (Scala 2.13). + case Some(a) if p(a) => c(Some(a)) // Only apply `c` to `a` if p(a) == true. + case _ => c(None) // Return c(None) if p(a) == false, or for empty Option. +} // Equivalent code is: filter(p)(c) = _.filter(p).pipe(c) \end{lstlisting} \begin{align*} \text{filt}_{C}(p^{:A\rightarrow\bbnum 2}) & \triangleq c^{:\bbnum 1+A\rightarrow Z}\rightarrow\gunderline{x^{:\bbnum 1+A}\rightarrow x}\triangleright\text{filt}_{\text{Opt}}(p)\triangleright c\\ @@ -3207,9 +3210,9 @@ \subsubsection{Example \label{subsec:Example-first-filterable-contrafunctor-1}\r Another motivation for filterable contrafunctors comes from the construction $F^{A}\triangleq G^{A}\rightarrow H^{A}$ (Statement~\ref{subsec:Statement-filterable-function-type}): -In order to assure the properties of a filterable functor for $F^{\bullet}$, -the contrafunctor $G^{\bullet}$ must have the \lstinline!liftOpt! -function as shown in Eq.~(\ref{eq:type-signature-liftOpt-contrafunctors}). +In order to assure the properties of a filterable functor for $F$, +the contrafunctor $G$ must have the \lstinline!liftOpt! function +as shown in Eq.~(\ref{eq:type-signature-liftOpt-contrafunctors}). The existence of the \lstinline!liftOpt! function for a contrafunctor turns out to be equivalent to the existence of the \lstinline!filter! function, as long as suitable laws hold. To verify that equivalence, @@ -3234,8 +3237,7 @@ \subsubsection{Example \label{subsec:Example-first-filterable-contrafunctor-1}\r has $4$ laws, $\text{inflate}_{C}$ has $3$, and $\text{liftOpt}_{C}$ has just $2$ laws. So, $\text{liftOpt}_{C}$ is the most convenient function for proving laws, while $\text{inflate}_{C}$ is the easiest -to implement in code (and to check whether a given contrafunctor is -filterable). The laws\index{identity laws!of liftOpt for contrafunctors@of \texttt{liftOpt} for contrafunctors}\index{composition law!of liftOpt for contrafunctors@of \texttt{liftOpt} for contrafunctors} +to implement in code. 
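+In Scala, the three equivalent operations of a filterable contrafunctor
+\lstinline!C! have the following type signatures (a sketch only; a lawful
+implementation must still be chosen for each specific \lstinline!C!):
+\begin{lstlisting}
+def filter[A](p: A => Boolean): C[A] => C[A]        // Filtering by a predicate.
+def inflate[A](c: C[A]): C[Option[A]]               // The easiest to implement.
+def liftOpt[A, B](f: A => Option[B]): C[B] => C[A]  // Has the fewest laws to check.
+\end{lstlisting}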
The laws\index{identity laws!of liftOpt for contrafunctors@of \texttt{liftOpt} for contrafunctors}\index{composition law!of liftOpt for contrafunctors@of \texttt{liftOpt} for contrafunctors} of $\text{liftOpt}_{C}$ are similar to the laws of \lstinline!liftOpt! for filterable functors (we omit the derivations): \begin{align*} @@ -3250,16 +3252,17 @@ \subsubsection{Example \label{subsec:Example-first-filterable-contrafunctor-1}\r \subsubsection{Example \label{subsec:filt-solved-example-5}\ref{subsec:filt-solved-example-5}} -Implement \lstinline!inflate! and \lstinline!liftOpt! for $C^{A}\triangleq A\rightarrow\bbnum 1+Z$, +Implement \lstinline!inflate! and \lstinline!liftOpt! for $C^{A}\triangleq A\rightarrow\bbnum 1+Z$ and verify Eq.~(\ref{eq:express-filter-via-inflate-for-contrafunctor}). \subparagraph{Solution } We implement the type signatures of \lstinline!inflate! and \lstinline!liftOpt!, -preserving information: +trying to preserve information as much as possible: \begin{lstlisting} def inflate[A](c: A => Option[Z]): Option[A] => Option[Z] = _.flatMap(c) -def liftOpt[A, B](f: A => Option[B])(c: B => Option[Z]): A => Option[Z] = { a => f(a).flatMap(c) } +def liftOpt[A, B](f: A => Option[B])(c: B => Option[Z]): A => Option[Z] = + { a => f(a).flatMap(c) } \end{lstlisting} \begin{align*} & \text{inflate}_{C}\triangleq c^{:A\rightarrow\bbnum 1+Z}\rightarrow\text{flm}_{\text{Opt}}(c)\quad,\\ @@ -3297,8 +3300,8 @@ \subsubsection{Example \label{subsec:filt-solved-example-5}\ref{subsec:filt-solv \subsubsection{Example \label{subsec:filt-solved-example-5-1}\ref{subsec:filt-solved-example-5-1}} Verify Eq.~(\ref{eq:express-filter-via-inflate-for-contrafunctor}) -for an arbitrary filterable contrafunctor $C^{\bullet}$, assuming -needed laws. +for an arbitrary filterable contrafunctor $C$, assuming naturality +laws as needed. \subparagraph{Solution} @@ -3310,13 +3313,13 @@ \subsubsection{Example \label{subsec:filt-solved-example-5-1}\ref{subsec:filt-so \begin{align*} {\color{greenunder}\text{expect to equal }\text{filt}_{C}(p):}\quad & \text{filt}_{C}^{\prime}(p)=\text{inflate}_{C}\bef\psi_{p}^{\downarrow C}=\text{get}^{\downarrow C}\bef\text{filt}_{C}(\text{nonEmpty})\bef\psi_{p}^{\downarrow C}\quad. \end{align*} -The computation gets stuck here: We could simplify the composition +The computation gets stuck here: We would simplify the composition $\psi_{p}\bef\text{get}$ using Eq.~(\ref{eq:composition-of-psi-p-and-get-simplified}), if only we could move these functions next to each other. It is clear -that we need a law that exchanges the order of compositions of $\text{filt}_{C}$ +that we need a law that exchanges the order of composition of $\text{filt}_{C}$ with lifted functions. Typically, that is done by naturality laws. 
By analogy with Eq.~(\ref{eq:naturality-law-of-filter}) and making -sure types match, we write a \textbf{naturality law} of\index{naturality law!of filter for contrafunctors@of \texttt{filter} for contrafunctors} +sure types match, we obtain a \textbf{naturality law} of\index{naturality law!of filter for contrafunctors@of \texttt{filter} for contrafunctors} $\text{filt}_{C}$ as: \begin{equation} \text{filt}_{C}(p^{:A\rightarrow\bbnum 2})\bef(f^{:B\rightarrow A})^{\downarrow C}=f^{\downarrow C}\bef\text{filt}_{C}(f\bef p)\quad.\label{eq:naturality-for-filter-for-contrafunctors} @@ -3371,9 +3374,10 @@ \subsubsection{Exercise \label{subsec:filt-exercise-derive-inflate-liftopt-for-1 \subsubsection{Exercise \label{subsec:filt-exercise-derive-liftOpt-equivalence-1}\ref{subsec:filt-exercise-derive-liftOpt-equivalence-1}} -Verify Eq.~(\ref{eq:express-liftOpt-via-inflate-for-contrafunctors}) -for an arbitrary filterable contrafunctor $C^{\bullet}$, assuming -needed laws. +Proceeding similarly to Example~\ref{subsec:filt-solved-example-5-1}, +verify Eq.~(\ref{eq:express-liftOpt-via-inflate-for-contrafunctors}) +for an arbitrary filterable contrafunctor $C$, assuming naturality +laws as needed. \subsection{Constructions of filterable contrafunctors\label{subsec:Constructions-of-filterable-contrafunctors}} @@ -3394,25 +3398,27 @@ \subsection{Constructions of filterable contrafunctors\label{subsec:Construction all methods are identity functions, so all laws hold. Further constructions that work with type parameters are functor compositions. -The composition $P\circ Q\triangleq P^{Q^{\bullet}}$ is a contrafunctor -when $P$ is a functor and $Q$ is a contrafunctor, or vice versa. -The contrafunctor $P^{Q^{\bullet}}$ is filterable if $Q^{\bullet}$ -(whether it is a functor or a contrafunctor) is filterable: +The composition $P\circ Q$ defined as $(P\circ Q)^{A}\triangleq P^{Q^{A}}$ +is a contrafunctor when $P$ is a functor and $Q$ is a contrafunctor, +or vice versa. It turns out that the contrafunctor $P\circ Q$ is +filterable if $Q$ (whether it is a functor or a contrafunctor) is +filterable: \subsubsection{Statement \label{subsec:Statement-filterable-contrafunctor-composition}\ref{subsec:Statement-filterable-contrafunctor-composition}} -\textbf{(a)} If $P^{\bullet}$ is any contrafunctor and $Q^{\bullet}$ -is a filterable functor then $P^{Q^{\bullet}}$ is filterable. +The type constructor $P\circ Q$ is filterable: -\textbf{(b)} If $P^{\bullet}$ is any functor and $Q^{\bullet}$ is -a filterable contrafunctor then $P^{Q^{\bullet}}$ is filterable. +\textbf{(a)} If $P$ is any contrafunctor and $Q$ is a filterable +functor. + +\textbf{(b)} If $P$ is any functor and $Q$ is a filterable contrafunctor. \subparagraph{Proof} -We follow the proof of Statement~\ref{subsec:Statement-filterable-composition-functors} -with necessary modifications. +We follow the proof of Statement~\ref{subsec:Statement-filterable-composition-functors}, +\emph{mutatis mutandis}. -\textbf{(a)} We define the \lstinline!liftOpt! operation for $P\circ Q$ +\textbf{(a)} Define the \lstinline!liftOpt! operation for $P\circ Q$ as: \[ \text{liftOpt}_{P\circ Q}(f^{:A\rightarrow\bbnum 1+B})\triangleq\big(\text{liftOpt}_{Q}(f)\big)^{\downarrow P}\quad. 
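+For case (a), a possible Scala sketch is (assuming that the contramap
+function \lstinline!cmap_P! of $P$ and a lawful \lstinline!liftOpt_Q!
+of $Q$ are given):
+\begin{lstlisting}
+def cmap_P[X, Y](f: X => Y): P[Y] => P[X] = ???             // Assumed: contramap for P.
+def liftOpt_Q[A, B](f: A => Option[B]): Q[A] => Q[B] = ???  // Assumed: lawful liftOpt for Q.
+// A sketch of liftOpt for the contrafunctor P[Q[_]]:
+def liftOpt_PQ[A, B](f: A => Option[B]): P[Q[B]] => P[Q[A]] =
+  cmap_P[Q[A], Q[B]](liftOpt_Q(f))
+\end{lstlisting}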
@@ -3436,7 +3442,7 @@ \subsubsection{Statement \label{subsec:Statement-filterable-contrafunctor-compos {\color{greenunder}\text{composition law~(\ref{eq:composition-law-of-liftOpt}) of }Q:}\quad & =\big(\text{liftOpt}_{Q}(f\diamond_{_{\text{Opt}}}g)\big)^{\downarrow P}=\text{liftOpt}_{P\circ Q}(f\diamond_{_{\text{Opt}}}g)\quad. \end{align*} -\textbf{(b)} We define the \lstinline!liftOpt! operation for $P\circ Q$ +\textbf{(b)} Define the \lstinline!liftOpt! operation for $P\circ Q$ as: \[ \text{liftOpt}_{P\circ Q}(f^{:A\rightarrow\bbnum 1+B})\triangleq\big(\text{liftOpt}_{Q}(f)\big)^{\uparrow P}\quad. @@ -3465,21 +3471,19 @@ \subsubsection{Statement \label{subsec:Statement-filterable-contrafunctor-compos \paragraph{Products and co-products} -If $G^{\bullet}$ and $H^{\bullet}$ are filterable contrafunctors, -the product $G^{A}\times H^{A}$ and the co-product $G^{A}+H^{A}$ -will also be filterable contrafunctors. Proofs are analogous to the -case of filterable functors and are delegated to Exercise~\ref{subsec:Exercise-filterable-laws-6-1}. +If $G$ and $H$ are filterable contrafunctors, the product contrafunctor +$G\times H$ and the co-product contrafunctor $G+H$ will also be +filterable. Proofs are analogous to the case of filterable functors +and are delegated to Exercise~\ref{subsec:Exercise-filterable-laws-6-1}. \paragraph{Functions} -We have a construction similar to that of Statement~\ref{subsec:Statement-filterable-function-type} -for filterable functors: +We have a construction similar to that of Statement~\ref{subsec:Statement-filterable-function-type}: \subsubsection{Statement \label{subsec:Statement-function-type-exponential-filterable-contrafunctor}\ref{subsec:Statement-function-type-exponential-filterable-contrafunctor}} The contrafunctor $F^{A}\triangleq G^{A}\rightarrow H^{A}$ is filterable -for any filterable functor $G^{A}$ and any filterable contrafunctor -$H^{A}$. +for any filterable functor $G$ and any filterable contrafunctor $H$. \subparagraph{Proof} @@ -3493,7 +3497,7 @@ \subsubsection{Statement \label{subsec:Statement-function-type-exponential-filte \begin{align*} & \text{liftOpt}_{F}(f)\\ & \triangleq p^{:G^{B}\rightarrow H^{B}}\rightarrow\gunderline{g^{:G^{A}}\rightarrow g}\triangleright\text{liftOpt}_{G}(f)\triangleright p\triangleright\text{liftOpt}_{H}(f)\\ -{\color{greenunder}\text{simplify }(x\rightarrow x\triangleright y)=y:}\quad & \quad=p^{:G^{B}\rightarrow H^{B}}\rightarrow\text{liftOpt}_{G}(f)\bef p\bef\text{liftOpt}_{H}(f)\quad. +{\color{greenunder}\text{simplify }(x\rightarrow x\triangleright y)=y:}\quad & =p^{:G^{B}\rightarrow H^{B}}\rightarrow\text{liftOpt}_{G}(f)\bef p\bef\text{liftOpt}_{H}(f)\quad. \end{align*} To verify the naturality-identity law~(\ref{eq:naturality-identity-law-filterable-contrafunctor}), @@ -3520,13 +3524,13 @@ \subsubsection{Example \label{subsec:Example-search-functor}\ref{subsec:Example- (the search functor)} An application of Statement~\ref{subsec:Statement-function-type-exponential-filterable-contrafunctor} -is the \textbf{search functor}\index{search functor} $S_{Z}^{\bullet}$ -defined by $S_{Z}^{A}\triangleq(A\rightarrow\bbnum 1+Z)\rightarrow\bbnum 1+A$, +is the \textbf{search functor}\index{search functor} $S_{Z}$ defined +by $S_{Z}^{A}\triangleq(A\rightarrow\bbnum 1+Z)\rightarrow\bbnum 1+A$, where $Z$ is a fixed type. This functor is filterable because it is a function from the filterable contrafunctor $A\rightarrow\bbnum 1+Z$ -to the filterable functor $\bbnum 1+A$. 
The simplest case of the -search functor is found by setting $Z\triangleq\bbnum 1$, which gives -the type constructor: +to the filterable functor $\bbnum 1+A$. A simple case of the search +functor is found by setting $Z\triangleq\bbnum 1$, which gives the +type constructor: \[ S_{\bbnum 1}^{A}\triangleq(A\rightarrow\bbnum 2)\rightarrow\bbnum 1+A\quad. \] @@ -3543,7 +3547,7 @@ \subsubsection{Example \label{subsec:Example-search-functor}\ref{subsec:Example- \subsubsection{Statement \label{subsec:Statement-function-a-to-1-+z-filterable-contrafunctor}\ref{subsec:Statement-function-a-to-1-+z-filterable-contrafunctor}} -If a contrafunctor $H^{\bullet}$ is filterable, so is the contrafunctor +If a contrafunctor $H$ is filterable then so is the contrafunctor $F^{A}\triangleq A\rightarrow\bbnum 1+H^{A}$. \subparagraph{Proof} @@ -3603,15 +3607,14 @@ \subsubsection{Statement \label{subsec:Statement-recursive-filterable-contrafunc \subparagraph{Proof} -The recursive contrafunctor $F^{\bullet}$ is implemented by wrapping -$S$ in a case class: +The recursive contrafunctor $F$ is implemented by wrapping $S$ in +a case class: \begin{lstlisting} type S[A, R] = ... final case class F[A](s: S[A, F[A]]) \end{lstlisting} -The code of the function \lstinline!liftOpt! for $F^{\bullet}$ is -recursive and uses the \lstinline!xmap! method of the profunctor -$S$. +The code of the function \lstinline!liftOpt! for $F$ is recursive +and uses the \lstinline!xmap! method of the profunctor $S$. \begin{lstlisting} def liftOpt_F[A, B](f: A => Option[B]): F[B] => F[A] = { case F(sbfb) => F( @@ -3624,8 +3627,8 @@ \subsubsection{Statement \label{subsec:Statement-recursive-filterable-contrafunc \xyScaleY{1.8pc}\xyScaleX{6pc} & S^{A,F^{A}} } \] -Note that $F^{B}\cong S^{B,F^{B}}$. As before, we use an overline -to mark recursive calls to the same function: +Note that $F^{B}\cong S^{B,F^{B}}$. We use an overline to mark recursive +calls: \begin{align*} & \text{liftOpt}_{F}(f^{:A\rightarrow\bbnum 1+B})\triangleq\text{liftOpt}_{S}(f)\bef\big(\overline{\text{liftOpt}_{F}}(f)\big)^{\uparrow S^{A,\bullet}}\\ & =\text{liftOpt}_{S}(f)\bef\text{xmap}_{S}(\text{id})(\overline{\text{liftOpt}_{F}}(f))\quad. @@ -3634,7 +3637,8 @@ \subsubsection{Statement \label{subsec:Statement-recursive-filterable-contrafunc To verify the laws, we need the code for lifting to the contrafunctor $F$: \begin{lstlisting} -def cmap_F[A, B](f: A => B): F[B] => F[A] = { case F(sbfb) => F( sbfb.xmap_S(f)(cmap_F(f)) ) } +def cmap_F[A, B](f: A => B): F[B] => F[A] = { case F(sbfb) => + F( sbfb.xmap_S(f)(cmap_F(f)) ) } \end{lstlisting} \[ f^{\downarrow F}\triangleq\text{xmap}_{S}(f)(\overline{f^{\downarrow F}})=f^{\downarrow S^{\bullet,F^{B}}}\bef\big(\overline{f^{\downarrow F}}\big)^{\uparrow S^{A,\bullet}}\quad. @@ -3662,7 +3666,8 @@ \subsubsection{Statement \label{subsec:Statement-recursive-filterable-contrafunc & =\text{liftOpt}_{S}(f\diamond_{_{\text{Opt}}}g)\bef\big(\overline{\text{liftOpt}_{F}}(f\diamond_{_{\text{Opt}}}g)\big)^{\uparrow S^{A,\bullet}}=\text{liftOpt}_{F}(f\diamond_{_{\text{Opt}}}g)\quad. \end{align*} In this derivation, we have used the naturality law of $\text{liftOpt}_{S}$ -with respect to lifting in the type parameter $A$ of $S^{A,R}$: +with respect to lifting in the type parameter $A$ of $S^{A,R}$. 
+That law is: \[ \xymatrix{S^{B,R}\ar[r]\sp(0.5){\text{liftOpt}_{S}(f^{:A\rightarrow\bbnum 1+B})}\ar[d]\sp(0.4){(h^{:R\rightarrow R^{\prime}})^{\uparrow S^{B,\bullet}}} & S^{A,R}\ar[d]\sb(0.4){h^{\uparrow S^{A,\bullet}}}\\ \xyScaleY{1.8pc}\xyScaleX{6.0pc}S^{B,R^{\prime}}\ar[r]\sp(0.45){\text{liftOpt}_{S}(f)} & S^{A,R^{\prime}} @@ -3691,26 +3696,25 @@ \section{Summary} \item Given a filterable type constructor, generate the code for \lstinline!filter! or \lstinline!liftOpt! automatically. \end{itemize} -This cannot be done because most non-trivial type constructors have -many lawful but \emph{inequivalent} implementations of \lstinline!filter!. -When we say \textsf{``}a type constructor $F$ is filterable\textsf{''} we mean that -there is at least one lawful implementation. It is not possible to -choose a \textsf{``}preferred\textsf{''} implementation automatically, since different -applications may need different filtering behavior. While in most -cases the standard library provides generally useful implementations -of filtering (e.g., the \lstinline!filter! or \lstinline!takeWhile! -methods on sequences), in some situations the programmer will need -to write a custom implementation of \lstinline!filter! for a custom -data type. The programmer must examine the given requirements and -decide whether those requirements can be implemented as a lawful \lstinline!filter! -function. +This cannot be done because many type constructors have several lawful +but \emph{inequivalent} implementations of \lstinline!filter!. When +we say \textsf{``}a type constructor $F$ is filterable\textsf{''} we mean that there +is at least one lawful implementation. It is not possible to choose +a \textsf{``}preferred\textsf{''} implementation automatically, since different applications +may need different filtering behavior. While in most cases the standard +library provides generally useful implementations of filtering (e.g., +the \lstinline!filter! or \lstinline!takeWhile! methods on sequences), +in some situations the programmer will need to write a custom implementation +of \lstinline!filter! for a custom data type. The programmer must +examine the given requirements and decide whether those requirements +can be implemented as a lawful \lstinline!filter! function. \subsection{Examples\index{examples (with code)}} \subsubsection{Example \label{subsec:Example-filterable-laws-1}\ref{subsec:Example-filterable-laws-1}} Show that the functor $F^{A}\triangleq G^{A}\rightarrow A$ is not -filterable (for any contrafunctor $G^{A}$). +filterable (for any contrafunctor $G$). \subparagraph{Solution} @@ -3720,14 +3724,14 @@ \subsubsection{Example \label{subsec:Example-filterable-laws-1}\ref{subsec:Examp \text{deflate}_{F}:(G^{\bbnum 1+A}\rightarrow\bbnum 1+A)\rightarrow G^{A}\rightarrow A\quad,\quad\text{deflate}_{F}=p^{:G^{\bbnum 1+A}\rightarrow\bbnum 1+A}\rightarrow g^{:G^{A}}\rightarrow\text{???}^{:A}\quad. \] We cannot extract a value of type $A$ from $g^{:G^{A}}$ since the -contrafunctor $G^{A}$ does not wrap any values of $A$. So, the only +contrafunctor $G$ does not wrap any values of $A$. So, the only hope of filling the typed hole $\text{???}^{:A}$ is to apply the function $p$ to an argument of type $G^{\bbnum 1+A}$. Even if we -are able to map $G^{A}\rightarrow G^{\bbnum 1+A}$ (e.g., if $G^{\bullet}$ +are able to map $G^{A}\rightarrow G^{\bbnum 1+A}$ (e.g., if $G$ is filterable), the result of applying $p$ will be a value of type $\bbnum 1+A$. We cannot compute a value of type $A$ out of that. 
-So, the type signature of \lstinline!deflate! for $F^{\bullet}$ -is not implementable. We conclude that $F^{\bullet}$ is not filterable. +So, the type signature of \lstinline!deflate! for $F$ is not implementable. +We conclude that $F$ is not filterable. \subsubsection{Example \label{subsec:Example-filterable-laws-2}\ref{subsec:Example-filterable-laws-2}} @@ -3740,9 +3744,9 @@ \subsubsection{Example \label{subsec:Example-filterable-laws-2}\ref{subsec:Examp \subparagraph{Solution} -We need to analyze the structure of the functor $F^{\bullet}$ to -decide which constructions we may use. Define some auxiliary functors -that represents sub-expressions in $F^{A}$: +We need to analyze the structure of the functor $F$ to decide which +constructions we may use. Define some auxiliary functors that represents +sub-expressions in $F^{A}$: \begin{align*} R_{1}^{A}\triangleq\text{Int}\times\text{String}\rightarrow A\quad, & \quad\quad R_{2}^{A}\triangleq\text{Int}\rightarrow A\quad,\\ G^{A}\triangleq\bbnum 1+\text{Int}\times A+A\times\left(\bbnum 1+A\right)\quad, & \quad\quad H^{A}\triangleq\bbnum 1+A+A\times A\times\text{String}\quad. @@ -3751,39 +3755,38 @@ \subsubsection{Example \label{subsec:Example-filterable-laws-2}\ref{subsec:Examp \[ F^{A}=R_{1}^{L^{A}}\quad,\quad\quad L^{A}\triangleq G^{A}+R_{2}^{H^{A}}\quad. \] -The type of $G^{\bullet}$ is a co-product, so we need to check which -of the two co-product constructions (Statements~\ref{subsec:Statement-filterable-coproduct} +The type of $G$ is a co-product, so we need to check which of the +two co-product constructions (Statements~\ref{subsec:Statement-filterable-coproduct} or~\ref{subsec:Statement-filterable-coproduct-1}) might apply. The first of them does not apply because the functors $\text{Int}\times A$ and $A\times\left(\bbnum 1+A\right)$ are not filterable. But the second construction applies if we write $G^{A}$ in the form $G^{A}=\bbnum 1+A\times K^{A}$ where $K^{A}\triangleq\bbnum 1+\text{Int}+A$. Since $K^{A}$ is the co-product of the \lstinline!Option! functor and a constant functor -(the fixed type \lstinline!Int!), $K^{\bullet}$ is filterable by -Statement~\ref{subsec:Statement-filterable-coproduct}. So, $G^{A}$ -is filterable. +(the fixed type \lstinline!Int!), $K$ is filterable by Statement~\ref{subsec:Statement-filterable-coproduct}. +So, $G$ is filterable. -Similarly, we find that $H^{A}$ is filterable by Statement~\ref{subsec:Statement-filterable-coproduct-1} +Similarly, we find that $H$ is filterable by Statement~\ref{subsec:Statement-filterable-coproduct-1} if we write $H^{A}=\bbnum 1+A\times(\bbnum 1+A\times\text{String})$, where the functor $\bbnum 1+A\times\text{String}$ is filterable by the same construction. -The functor $R_{2}^{H^{\bullet}}$ is filterable since it is a functor -composition (Statement~\ref{subsec:Statement-filterable-composition-functors}) -and $H^{\bullet}$ is filterable. The co-product $L^{A}\triangleq G^{A}+R_{2}^{H^{A}}$ +The functor $R_{2}\circ H$ is filterable: it is a functor composition +(Statement~\ref{subsec:Statement-filterable-composition-functors}) +and $H$ is filterable. The co-product $L^{A}\triangleq G^{A}+R_{2}^{H^{A}}$ is filterable by Statement~\ref{subsec:Statement-filterable-coproduct}, -and $R_{1}^{L^{A}}$ by Statement~\ref{subsec:Statement-filterable-composition-functors}. +and $R_{1}\circ L$ by Statement~\ref{subsec:Statement-filterable-composition-functors}. Each construction gives a specific code for the corresponding \lstinline!liftOpt! 
function, and so we could derive the code for $\text{liftOpt}_{F}$ that is guaranteed to obey the filter laws. However, keep in mind that there are several inequivalent ways of implementing a lawful \lstinline!liftOpt! for this functor. For instance, the filtering -operation for $H^{\bullet}$ could be defined similarly to that for -\lstinline!JillsCoupons! in Example~\ref{subsec:filt-solved-example-2} -and not through a co-product construction. The constructions give -one possibility out of many. The programmer needs to choose the implementation -according to the business requirements at hand. +operation for $H$ could be defined similarly to that for \lstinline!JillsCoupons! +in Example~\ref{subsec:filt-solved-example-2} and not through a +co-product construction. The constructions give one possibility out +of many. The programmer needs to choose the implementation according +to the business requirements at hand. \subsubsection{Example \label{subsec:Example-identity-law-of-deflate}\ref{subsec:Example-identity-law-of-deflate} (identity law of \texttt{deflate})} @@ -3899,10 +3902,11 @@ \subsubsection{Example \label{subsec:Example-identity-law-of-deflate}\ref{subsec \subsubsection{Example \label{subsec:Example-filterable-property-1+K}\ref{subsec:Example-filterable-property-1+K}} Assume that a given functor $H^{A}\triangleq\bbnum 1+K^{A}$ is filterable -(but $K^{\bullet}$ is not necessarily filterable). The functor $H^{\bullet}$ -is a \textsf{``}data wrapper\textsf{''} with a fixed empty value, $1+\bbnum 0^{:K^{A}}$. -Show that an empty wrapper remains empty after any filtering: the -function $\text{filt}_{H}$ satisfies, for any $p^{:A\rightarrow\bbnum 2}$, +(but $K$ is not necessarily filterable). The functor $H$ is a \textsf{``}data +wrapper\textsf{''} with a fixed empty value, $1+\bbnum 0^{:K^{A}}$. Show +that an empty wrapper must remain empty after any filtering. In other +words, for any $p^{:A\rightarrow\bbnum 2}$ the function $\text{filt}_{H}$ +satisfies the equation: \begin{equation} (1+\bbnum 0^{:K^{A}})\triangleright\text{filt}_{H}(p)=1+\bbnum 0^{:K^{A}}\quad.\label{eq:empty-filter-remains-empty-via-filt} \end{equation} @@ -3910,15 +3914,15 @@ \subsubsection{Example \label{subsec:Example-filterable-property-1+K}\ref{subsec \subparagraph{Solution} -We know nothing about $H^{\bullet}$ and $K^{\bullet}$ other than -the fact that $\text{filt}_{H}$ obeys the filtering laws. Rewrite -Eq.~(\ref{eq:empty-filter-remains-empty-via-filt}) via the simpler -function \lstinline!deflate!, which is equivalent to $\text{filt}_{H}$: +We know nothing about $H$ and $K$ other than the fact that $\text{filt}_{H}$ +obeys the filtering laws. Rewrite Eq.~(\ref{eq:empty-filter-remains-empty-via-filt}) +via the simpler function \lstinline!deflate!, which is equivalent +to $\text{filt}_{H}$: \[ (1+\bbnum 0^{:K^{A}})\triangleright\psi_{p}^{\uparrow H}\bef\text{deflate}_{H}\overset{?}{=}1+\bbnum 0^{:K^{A}}\quad. \] -Any function lifted to $H^{\bullet}$ works separately for the two -parts of the disjunctive type $H^{A}=\bbnum 1+K^{A}$. So: +Any function lifted to $H$ works separately for the two parts of +the disjunctive type $H^{A}=\bbnum 1+K^{A}$. 
So: \[ (1+\bbnum 0^{:K^{A}})\triangleright f^{\uparrow H}=1+\bbnum 0^{:K^{B}}\quad, \] @@ -3956,10 +3960,9 @@ \subsubsection{Example \label{subsec:Example-filterable-property-1+K}\ref{subsec \subsubsection{Example \label{subsec:Example-filterable-laws-3-1}\ref{subsec:Example-filterable-laws-3-1}} -Assuming that $G^{\bullet}$ and $H^{\bullet}$ are filterable functors -and $H^{\bullet}$ is of the form $H^{A}\triangleq\bbnum 1+K^{A}$ -(where $K^{\bullet}$ is not necessarily filterable), prove that the -functor $F^{\bullet}\triangleq G^{K^{\bullet}}$ is filterable. +Assuming that $G$ and $H$ are filterable functors and $H$ is of +the form $H^{A}\triangleq\bbnum 1+K^{A}$ (where $K$ is not necessarily +filterable), prove that the functor $F\triangleq G\circ K$ is filterable. \subparagraph{Solution} @@ -3982,7 +3985,7 @@ \subsubsection{Example \label{subsec:Example-filterable-laws-3-1}\ref{subsec:Exa \[ \text{pu}_{\text{Opt}}^{K^{A}}:K^{A}\rightarrow\bbnum 1+K^{A}\quad,\quad\quad\text{pu}_{\text{Opt}}^{K^{A}}\bef\text{liftOpt}_{H}^{K^{B},K^{B}}(f^{:A\rightarrow\bbnum 1+B}):K^{A}\rightarrow\bbnum 1+K^{B}\quad. \] - Now we are ready to write the code for $\text{liftOpt}_{F}$ as + Now we are ready to write the code for $\text{liftOpt}_{F}$ as: \[ \text{liftOpt}_{F}(f^{:A\rightarrow\bbnum 1+B})\triangleq\text{liftOpt}_{G}^{K^{A},K^{B}}\big(\text{pu}_{\text{Opt}}^{K^{A}}\bef\text{liftOpt}_{H}(f)\big)\quad. \] @@ -4063,18 +4066,18 @@ \subsubsection{Example \label{subsec:Example-filterable-laws-unrolling-trick}\re Rather than proving the laws by hand (as we did in a similar case in Statement~\ref{subsec:Statement-filterable-recursive-type}), we will use a trick that will make calculations shorter. The trick -is to \textsf{``}unroll\textsf{''} the recursive equation and to reduce $F^{A}$ to -the \lstinline!List! functor, which has a standard filtering operation. +is to \textsf{``}unroll\textsf{''} the recursive equation and to reduce $F$ to the +\lstinline!List! functor, which has a standard filtering operation. The \index{unrolling trick for recursive types}\index{recursive types!unrolling trick}unrolling trick gives, for any recursive definition of the form $F^{A}\triangleq P^{A}+Q^{A}\times F^{A}$: \begin{equation} \text{if}\quad F^{A}\triangleq P^{A}+Q^{A}\times F^{A}\quad\text{then}\quad F^{A}\cong P^{A}\times\text{List}^{Q^{A}}\quad,\label{eq:def-recursive-functor-unrolling} \end{equation} -where $P^{\bullet}$ and $Q^{\bullet}$ are arbitrary functors. The -functor $F^{A}$ given in this example is of this form with the functors -$P^{A}\triangleq\bbnum 1+A\times A$ and $Q^{A}\triangleq A\times\left(\bbnum 1+A\right)$. -Comparing with the definition of the \lstinline!List! functor, +where $P$ and $Q$ are arbitrary functors. The functor $F$ given +in this example is of that form with the functors $P^{A}\triangleq\bbnum 1+A\times A$ +and $Q^{A}\triangleq A\times\left(\bbnum 1+A\right)$. Comparing with +the definition of the \lstinline!List! 
functor, \begin{align*} \text{List}^{A} & \triangleq\bbnum 1+A\times\text{List}^{A}\cong\bbnum 1+A\times(\bbnum 1+A\times(\bbnum 1+...(\bbnum 1+A\times\text{List}^{A})))\\ & \cong\bbnum 1+A+A\times A+A\times A\times A+...+\underbrace{A\times...\times A}_{n\text{ times}}\times\,\text{List}^{A}\quad, @@ -4089,17 +4092,17 @@ \subsubsection{Example \label{subsec:Example-filterable-laws-unrolling-trick}\re The type equivalence~(\ref{eq:def-recursive-functor-unrolling}), $F^{A}\cong P^{A}\times\text{List}^{Q^{A}}$, follows by induction on the number of \textsf{``}unrolled\textsf{''} functors $F$. We can now use the -functor product construction for $F^{\bullet}$ if we show that $\text{List}^{Q^{\bullet}}$ -is filterable. This does not follow by functor composition because -$Q^{A}\triangleq A\times\left(\bbnum 1+A\right)$ is \emph{not} filterable -(we saw that in Section~\ref{subsec:Examples-of-non-filterable-functors}). +functor product construction for $F$ if we show that the functor +$\text{List}\circ Q$ is filterable. This does not follow by functor +composition because $Q^{A}\triangleq A\times\left(\bbnum 1+A\right)$ +is \emph{not} filterable (we saw that in Section~\ref{subsec:Examples-of-non-filterable-functors}). However, we can derive from Statement~\ref{subsec:Statement-filterable-coproduct-1}, setting $G^{A}\triangleq\bbnum 1+A$, that: \[ H^{A}\triangleq\bbnum 1+A\times\left(\bbnum 1+A\right)=\bbnum 1+Q^{A} \] is filterable. So, we can use the result of Example~\ref{subsec:Example-filterable-laws-3-1} -and conclude that $\text{List}^{Q^{\bullet}}$ is filterable. +and conclude that the functor $\text{List}\circ Q$ is filterable. \subsubsection{Example \label{subsec:Example-filterable-laws-4}\ref{subsec:Example-filterable-laws-4}} @@ -4109,7 +4112,7 @@ \subsubsection{Example \label{subsec:Example-filterable-laws-4}\ref{subsec:Examp \subparagraph{Solution} -Try to implement the function $\text{inflate}_{C}:C^{A}\rightarrow C^{\bbnum 1+A}$ +Try implementing the function $\text{inflate}_{C}:C^{A}\rightarrow C^{\bbnum 1+A}$ and write: \[ \text{inflate}_{C}:(A\rightarrow Z)\rightarrow\bbnum 1+A\rightarrow Z\quad,\quad\quad\text{inflate}_{C}=c^{:A\rightarrow Z}\rightarrow p^{:\bbnum 1+A}\rightarrow\text{???}^{:Z}\quad. @@ -4118,12 +4121,12 @@ \subsubsection{Example \label{subsec:Example-filterable-laws-4}\ref{subsec:Examp $c$ to an argument of type $A$. However, we do not have values of type $A$; we only have $p$ of type $\bbnum 1+A$, and $p$ might be the \textsf{``}empty\textsf{''} value, $1+\bbnum 0$. So, it is impossible to implement -$\text{inflate}_{C}$, and we conclude that $C$ is not filterable. +$\text{inflate}_{C}$. We conclude that $C$ is not filterable. \subsubsection{Example \label{subsec:Example-filterable-laws-4-1}\ref{subsec:Example-filterable-laws-4-1}} -Given a filterable functor $F^{\bullet}$, show that the type $F^{\bbnum 1}\rightarrow F^{\bbnum 0}$ -is \emph{not} void. +Show that the type $F^{\bbnum 1}\rightarrow F^{\bbnum 0}$ is \emph{not} +void when $F$ is a filterable functor. \subparagraph{Solution} @@ -4137,8 +4140,8 @@ \subsection{Exercises\index{exercises}} \subsubsection{Exercise \label{subsec:Exercise-filterable-laws}\ref{subsec:Exercise-filterable-laws}} Implement a \lstinline!Filterable! instance for the functor \lstinline!F[T] = G[H[T]]! -assuming that the contrafunctor \lstinline!H[_]! already has a \lstinline!Filterable! -instance and \lstinline!G[_]! is an arbitrary contrafunctor. Verify +assuming that the contrafunctor \lstinline!H! 
already has a \lstinline!Filterable! +instance and \lstinline!G! is an arbitrary contrafunctor. Verify the laws of filterable functor rigorously (by symbolic derivations, not tests). @@ -4152,12 +4155,17 @@ \subsubsection{Exercise \label{subsec:Exercise-filterable-laws-2}\ref{subsec:Exe Prove rigorously (not via tests) that $\text{flm}_{\text{Opt}}(\text{pu}_{\text{Opt}})=\text{id}^{:\bbnum 1+A\rightarrow\bbnum 1+A}$. +\subsubsection{Exercise \label{subsec:Exercise-filterable-laws-2-1}\ref{subsec:Exercise-filterable-laws-2-1}} + +Prove rigorously (not via tests) that Eqs.~(\ref{eq:left-naturality-flatmap-option})\textendash (\ref{eq:right-naturality-flatmap-option}) +hold. + \subsubsection{Exercise \label{subsec:Exercise-filterable-laws-7}\ref{subsec:Exercise-filterable-laws-7}} Show that one can define \lstinline!deflate!$~:C^{\bbnum 1+A}\rightarrow C^{A}$ -for any contrafunctor $C^{A}$ (not necessarily filterable). Prove -that \emph{in case} $C^{\bullet}$ is filterable, the \textbf{identity -law}\index{identity laws!of inflate@of \texttt{inflate}} will hold: +for any contrafunctor $C$ (not necessarily filterable). Prove that +\emph{in case} $C$ is filterable, the \textbf{identity law}\index{identity laws!of inflate@of \texttt{inflate}} +will hold: \[ \text{inflate}_{C}\bef\text{deflate}_{C}=\text{id}^{:C^{A}\rightarrow C^{A}}\quad. \] @@ -4165,9 +4173,9 @@ \subsubsection{Exercise \label{subsec:Exercise-filterable-laws-7}\ref{subsec:Exe \subsubsection{Exercise \label{subsec:Exercise-filterable-laws-3}\ref{subsec:Exercise-filterable-laws-3}} -Assuming that $G^{\bullet}$ is a filterable functor, prove rigorously -that the recursive functor $F^{A}\triangleq G^{A}+\text{Int}\times A\times A\times A\times F^{A}$ -is filterable. Implement a \lstinline!Filterable! instance for $F^{\bullet}$. +Assuming that $G$ is a filterable functor, prove rigorously that +the recursive functor $F^{A}\triangleq G^{A}+\text{Int}\times A\times A\times A\times F^{A}$ +is filterable. Implement a \lstinline!Filterable! instance for $F$. \subsubsection{Exercise \label{subsec:Exercise-filterable-laws-6}\ref{subsec:Exercise-filterable-laws-6}} @@ -4177,17 +4185,17 @@ \subsubsection{Exercise \label{subsec:Exercise-filterable-laws-6}\ref{subsec:Exe \subsubsection{Exercise \label{subsec:Exercise-filterable-laws-4}\ref{subsec:Exercise-filterable-laws-4}} Prove that $F^{A}\triangleq\bbnum 1+A\times G^{A}$ is in general -not filterable if $G^{A}$ is an arbitrary (non-filterable) functor; -give an example of a suitable $G^{A}$. Since $F^{\bbnum 1}\rightarrow F^{\bbnum 0}\cong\bbnum 1+G^{\bbnum 1}\rightarrow\bbnum 1\cong\bbnum 1$, +not filterable if $G$ is an arbitrary (non-filterable) functor; give +an example of a suitable $G$. Since $F^{\bbnum 1}\rightarrow F^{\bbnum 0}\cong\bbnum 1+G^{\bbnum 1}\rightarrow\bbnum 1\cong\bbnum 1$, this will demonstrate that Example~\ref{subsec:Example-filterable-laws-4-1} -gives a necessary but not a sufficient condition for a functor $F^{\bullet}$ +gives a necessary but not a sufficient condition for a functor $F$ to be filterable. \subsubsection{Exercise \label{subsec:Exercise-filterable-laws-5}\ref{subsec:Exercise-filterable-laws-5}} Show that $F^{A}\triangleq\bbnum 1+G^{A}+H^{A}$ is filterable if $\bbnum 1+G^{A}$ and $\bbnum 1+H^{A}$ are filterable (even when -$G^{\bullet}$ and $H^{\bullet}$ are not filterable). +$G$ and $H$ are not filterable). 
\subsubsection{Exercise \label{subsec:filt-exercise-4}\ref{subsec:filt-exercise-4}} @@ -4201,29 +4209,29 @@ \subsubsection{Exercise \label{subsec:Exercise-filterable-laws-6-2}\ref{subsec:E \subsubsection{Exercise \label{subsec:Exercise-filterable-laws-6-1}\ref{subsec:Exercise-filterable-laws-6-1}} -If $G^{A}$ and $H^{A}$ are filterable contrafunctors, prove that -the contrafunctors $G^{A}\times H^{A}$ and $G^{A}+H^{A}$ are also -filterable. +If $G$ and $H$ are filterable contrafunctors, prove that the contrafunctors +$P^{A}\triangleq G^{A}\times H^{A}$ and $Q^{A}\triangleq G^{A}+H^{A}$ +are also filterable. \subsubsection{Exercise \label{subsec:Exercise-filterable-laws-8}\ref{subsec:Exercise-filterable-laws-8}} Show that the contrafunctor $C^{A}\triangleq A\times F^{A}\rightarrow Z$ -is \emph{not} filterable for any functor $F^{\bullet}$ and any fixed -type $Z$ that does not have a known default value. +is \emph{not} filterable for any functor $F$ and any fixed type $Z$ +that does not have a known default value. \subsubsection{Exercise \label{subsec:Exercise-filterable-laws-8-1}\ref{subsec:Exercise-filterable-laws-8-1}} -Show that a \emph{necessary} condition for a contrafunctor $C^{\bullet}$ -to be filterable is that a function of type $C^{\bbnum 0}\rightarrow C^{\bbnum 1}$ +Show that a \emph{necessary} condition for a contrafunctor $C$ to +be filterable is that a function of type $C^{\bbnum 0}\rightarrow C^{\bbnum 1}$ can be implemented (i.e., the type $C^{\bbnum 0}\rightarrow C^{\bbnum 1}$ is not void). \subsubsection{Exercise \label{subsec:Exercise-filterable-laws-8-2}\ref{subsec:Exercise-filterable-laws-8-2}} -Show that a \emph{polynomial} functor $F^{\bullet}$ is filterable -(in some way) if and only if the type $F^{\bbnum 1}\rightarrow F^{\bbnum 0}$ -is not void. Find an example of a non-filterable polynomial functor -$F^{\bullet}$ that violates the condition $F^{\bbnum 1}\rightarrow F^{\bbnum 0}\not\cong\bbnum 0$. +Give an example of a non-filterable polynomial functor $F$ for which +$F^{\bbnum 1}\rightarrow F^{\bbnum 0}\cong\bbnum 0$. Show that a +polynomial functor $F$ is filterable (in some way) if and only if +the type $F^{\bbnum 1}\rightarrow F^{\bbnum 0}$ is not void. \section{Further developments} @@ -4299,7 +4307,7 @@ \subsection{Naturality laws and natural transformations\label{subsec:Naturality- \[ \text{headOpt}:\text{List}^{A}\rightarrow\text{Opt}^{A}\quad. \] -We can write an equivalent Scala code for this function as +We can write an equivalent Scala code for this function as: \begin{lstlisting} def headOption[A]: List[A] => Option[A] = { @@ -4317,7 +4325,7 @@ \subsection{Naturality laws and natural transformations\label{subsec:Naturality- this requirement as an equation called the \textbf{naturality law}\index{naturality law!of headOption@of \texttt{headOption}} of \lstinline!headOption!: \[ -\xymatrix{\text{List}^{A}\ar[r]\sp(0.55){\text{headOpt}^{A}}\ar[d]\sp(0.4){(f^{:A\rightarrow B})^{\uparrow\text{List}}} & \text{Opt}^{A}\ar[d]\sb(0.4){f^{\uparrow\text{Opt}}}\\ +\xymatrix{\text{List}^{A}\ar[r]\sp(0.55){\text{headOpt}^{A}}\ar[d]\sb(0.5){(f^{:A\rightarrow B})^{\uparrow\text{List}}} & \text{Opt}^{A}\ar[d]\sb(0.4){f^{\uparrow\text{Opt}}}\\ \xyScaleY{1.6pc}\xyScaleX{4.5pc}\text{List}^{B}\ar[r]\sp(0.55){\text{headOpt}^{B}} & \text{Opt}^{B} } \] @@ -4326,20 +4334,22 @@ \subsection{Naturality laws and natural transformations\label{subsec:Naturality- \] It is important to keep in mind that this law does not depend on the fact that \lstinline!headOption! 
extracts the \emph{first} element -of a list. The same naturality law will hold for any fully parametric -function of type $\text{List}^{A}\rightarrow\text{Opt}^{A}$, e.g., -the functions \lstinline!_.lastOption! or \lstinline!_.drop(2).headOption!. -The naturality law only expresses the property that the function \lstinline!headOption! -works in the same way for all types and has no special code for a -specific type or any specific values in the list. - -Other examples of natural transformations are the functions \lstinline!pure!, +of a list. The same law will hold for any fully parametric function +of type $\text{List}^{A}\rightarrow\text{Opt}^{A}$, e.g., for the +functions \lstinline!_.lastOption! or \lstinline!_.drop(2).headOption!. +The naturality law only expresses the property that the function works +in the same way for all types; the function should not behave differently +for any specific types or for any specific values in the list. This +is true for \lstinline!headOption!, for \lstinline!lastOption!, +and for many other functions. + +Further examples of natural transformations are the functions \lstinline!pure!, \lstinline!deflate!, and \lstinline!inflate!, whose naturality laws we have already seen. All these naturality laws are captured by the \textsf{``}natural transformation\textsf{''} pattern, which we can formulate for arbitrary -functors $F^{\bullet}$ and $G^{\bullet}$ as the following law: +functors $F$ and $G$ as the following law: \[ -\xymatrix{F^{A}\ar[r]\sp(0.55){t^{A}}\ar[d]\sp(0.4){(f^{:A\rightarrow B})^{\uparrow F}} & G^{A}\ar[d]\sp(0.4){f^{\uparrow G}}\\ +\xymatrix{F^{A}\ar[r]\sp(0.55){t^{A}}\ar[d]\sb(0.5){(f^{:A\rightarrow B})^{\uparrow F}} & G^{A}\ar[d]\sp(0.4){f^{\uparrow G}}\\ \xyScaleY{1.7pc}\xyScaleX{3.5pc}F^{B}\ar[r]\sp(0.55){t^{B}} & G^{B} } \] @@ -4352,14 +4362,14 @@ \subsection{Naturality laws and natural transformations\label{subsec:Naturality- function $f^{:A\rightarrow B}$ with two type parameters. Types will match only if the function $f$ is lifted to the functor $F$ when $f$ is applied before $t$, and to the functor $G$ when $f$ is -applied after $t$. So, the naturality law is $f^{\uparrow F}\bef t=t\bef f^{\uparrow G}$ +applied after $t$. So, naturality laws have the form $f^{\uparrow F}\bef t=t\bef f^{\uparrow G}$ with appropriate type parameters. -The analogous naturality law for natural transformations $t:C^{A}\rightarrow D^{A}$ -between contrafunctors $C$ and $D$ has exactly the same form, but -the order of type parameters must be swapped: +The naturality law for natural transformations $t^{A}:C^{A}\rightarrow D^{A}$ +between \emph{contrafunctors} $C$ and $D$ has exactly the same form, +but the order of type parameters must be swapped: \[ -\xymatrix{C^{A}\ar[r]\sp(0.55){t^{A}}\ar[d]\sp(0.4){(f^{:B\rightarrow A})^{\downarrow C}} & D^{A}\ar[d]\sp(0.4){f^{\downarrow D}}\\ +\xymatrix{C^{A}\ar[r]\sp(0.55){t^{A}}\ar[d]\sb(0.5){(f^{:B\rightarrow A})^{\downarrow C}} & D^{A}\ar[d]\sp(0.4){f^{\downarrow D}}\\ \xyScaleY{1.7pc}\xyScaleX{3.5pc}C^{B}\ar[r]\sp(0.55){t^{B}} & D^{B} } \] @@ -4395,27 +4405,29 @@ \subsection{Naturality laws and natural transformations\label{subsec:Naturality- \] It is clear that we must choose an arbitrary value $c$ of type $C^{B}$ (rather than $C^{A}$) for all types to match. It remains to fill -the typed hole, which must be of the form $t(...)\bef f^{\uparrow G}$, +the right-hand side, which must be of the form $t(...)\bef f^{\uparrow G}$. 
+We write: \[ f^{\uparrow F}\bef t^{B}(c^{:C^{B}})=t^{A}(\text{???}^{:C^{A}})\bef f^{\uparrow G}\quad. \] -The argument of $t$ of type $C^{A}$ is obtained by applying $f^{\downarrow C}$ +The value $\text{???}^{:C^{A}}$ is obtained by applying $f^{\downarrow C}$ to $c$. So, the law is: \begin{equation} f^{\uparrow F}\bef t(c)=t(c\triangleright f^{\downarrow C})\bef f^{\uparrow G}\quad.\label{eq:naturality-law-general-parameterized-transformation} \end{equation} -A similar law can be derived for the case when $C^{\bullet}$ is a -functor and $F^{\bullet},G^{\bullet}$ are contrafunctors. +A similar law can be derived for the case when $C$ is a functor and +$F$, $G$ are contrafunctors. Functions $t:C^{A}\rightarrow F^{A}\rightarrow G^{A}$ are reduced to natural transformations if we swap the order of curried arguments to $F^{A}\rightarrow C^{A}\rightarrow G^{A}$ and define the functor $H^{A}\triangleq C^{A}\rightarrow G^{A}$. Then $F^{A}\rightarrow C^{A}\rightarrow G^{A}=F^{A}\rightarrow H^{A}$, which is a type signature of a natural transformation. Denoting by -$\tilde{t}:F^{A}\rightarrow H^{A}$ the function $t$ with its arguments -swapped, we can write the naturality law of $\tilde{t}$ as: +$\tilde{t}^{A}:F^{A}\rightarrow H^{A}$ the function $t$ with its +arguments swapped, we can write the naturality law of $\tilde{t}$ +as: \[ -f^{\uparrow F}\bef\tilde{t}=\tilde{t}\bef f^{\uparrow H}\quad,\quad\text{where}\quad\tilde{t}\triangleq p^{:F^{A}}\rightarrow c^{:C^{A}}\rightarrow p\triangleright t(c)\quad. +f^{\uparrow F}\bef\tilde{t}=\tilde{t}\bef f^{\uparrow H}\quad,\quad\text{where}\quad\tilde{t}^{A}\triangleq p^{:F^{A}}\rightarrow c^{:C^{A}}\rightarrow p\triangleright t(c)\quad. \] To show that this law is equivalent to Eq.~(\ref{eq:naturality-law-general-parameterized-transformation}), we use the definition of $^{\uparrow H}$, @@ -4436,10 +4448,9 @@ \subsection{Naturality laws and natural transformations\label{subsec:Naturality- applied to an arbitrary $p^{:F^{A}}$and considered as functions of $c^{:C^{B}}$. -Reduction to natural transformations works similarly when $C^{\bullet}$ -is a functor and $F^{\bullet},G^{\bullet}$ are contrafunctors. A -naturality law of $t:C^{A}\rightarrow F^{A}\rightarrow G^{A}$ can -then be derived from $t$\textsf{'}s type signature. +Reduction to natural transformations works similarly when $C$ is +a functor and $F$, $G$ are contrafunctors. A naturality law of $t^{A}:C^{A}\rightarrow F^{A}\rightarrow G^{A}$ +can then be derived from $t$\textsf{'}s type signature. \paragraph{\textquotedblleft Liftings\textquotedblright} @@ -4453,13 +4464,13 @@ \subsection{Naturality laws and natural transformations\label{subsec:Naturality- & \text{flm}_{\text{Opt}}:(A\rightarrow\text{Opt}^{B})\rightarrow\text{Opt}^{A}\rightarrow\text{Opt}^{B}\quad,\\ & \text{liftOpt}_{F}:(A\rightarrow\text{Opt}^{B})\rightarrow F^{A}\rightarrow F^{B}\quad. \end{align*} -Replacing $\text{Opt}$ by an arbitrary functor $G$, we obtain the -type signature: +Replacing \textsf{``}$\text{Opt}$\textsf{''} by an arbitrary functor $G$, we obtain +the type signature: \[ -\text{lift}_{G,F}^{A,B}:(A\rightarrow G^{B})\rightarrow F^{A}\rightarrow F^{B}\quad, +\text{lift}_{G,F}^{A,B}:(A\rightarrow G^{B})\rightarrow F^{A}\rightarrow F^{B}\quad. 
\] -which we can view as a generalized \textsf{``}lifting\textsf{''} of functions with -a \textsf{``}twisted\textsf{''} type $A\rightarrow G^{B}$ (which we call \index{Kleisli!functions}\textbf{Kleisli +We can view this as a generalized \textsf{``}lifting\textsf{''} from functions of +type $A\rightarrow G^{B}$ (called \index{Kleisli!functions}\textbf{Kleisli functions}) to functions of type $F^{A}\rightarrow F^{B}$. We will look at properties of generalized liftings in the next subsection. Here we focus on the naturality laws for generalized liftings. @@ -4487,7 +4498,7 @@ \subsection{Naturality laws and natural transformations\label{subsec:Naturality- \paragraph{Parametricity theorem} -It turns out that the naturality law of a natural transformation $t:F^{A}\rightarrow G^{A}$ +It turns out that the naturality law of a natural transformation $t^{A}:F^{A}\rightarrow G^{A}$ will \emph{always hold} if the code of the function $t$ is fully parametric. More precisely, naturality holds if the code of $t$ is a combination of the eight standard code constructions (shown in Section~\ref{subsec:Short-notation-for-eight-code-constructions}) @@ -4498,16 +4509,16 @@ \subsection{Naturality laws and natural transformations\label{subsec:Naturality- whose code is known to be fully parametric. This saves a significant amount of work, since every method of every typeclass will have one naturality law per type parameter. Until now, we have been systematically -deriving and checking all naturality laws; but we will not check those -laws in the rest of the book. +deriving and checking all naturality laws; but we will not keep checking +those laws in the rest of the book. Even if naturality laws hold automatically, it is important to be able to recognize their form and to use them in derivations where they are frequently needed. The mnemonic recipe for naturality laws~(\ref{eq:law-natural-transformation-of-functors})\textendash (\ref{eq:law-natural-transformation-of-contrafunctors}) is that an arbitrary function $f$ is lifted to the functor $F$ at -the left side of $t:F^{A}\rightarrow G^{A}$ and to the functor $G$ -at the right side of $t$, matching the two sides of the type signature -$F^{A}\rightarrow G^{A}$. +the left side of $t^{A}:F^{A}\rightarrow G^{A}$ and to the functor +$G$ at the right side of $t$, matching the two sides of the type +signature $F^{A}\rightarrow G^{A}$. All methods of typeclasses considered in this book are covered by the natural transformation recipe. However, not all type signatures @@ -4526,21 +4537,21 @@ \subsection{Generalizing the laws of liftings. Kleisli functions\label{subsec:Ge \lstinline!liftOpt! methods. These methods and their laws are equivalent but play different roles: \lstinline!filter! is the most convenient to use in program code; \lstinline!deflate! is the easiest type signature -to implement and to reason about, especially in order to demonstrate -that a functor is not filterable; \lstinline!liftOpt! has the fewest -laws and is most convenient for proofs of general type constructions. +to implement and to reason about, especially in order to detect that +a functor is not filterable; \lstinline!liftOpt! has the fewest laws +and is most convenient for proofs of general type constructions. If we put the naturality laws aside, \lstinline!liftOpt! has the laws of identity~(\ref{eq:identity-law-of-liftOpt}) and composition~(\ref{eq:composition-law-of-liftOpt}). 
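+As a side-by-side reminder of the three formulations, we could bundle
+them into a single Scala trait. (This is only an illustrative sketch;
+the trait and method names below are chosen for this example and are
+not taken from any library.)
+\begin{lstlisting}
+// Three equivalent ways of describing a filterable functor F[_].
+trait FilterableOps[F[_]] {
+  def filter[A](p: A => Boolean): F[A] => F[A]        // convenient in program code
+  def deflate[A]: F[Option[A]] => F[A]                // easiest to reason about
+  def liftOpt[A, B](f: A => Option[B]): F[A] => F[B]  // has the fewest laws
+}
+\end{lstlisting}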
-It is notable how these two laws are similar to the functor laws~(\ref{eq:f-identity-law-functor-fmap})\textendash (\ref{eq:f-composition-law-functor-fmap}): +It is notable that those two laws are similar to the functor laws~(\ref{eq:f-identity-law-functor-fmap})\textendash (\ref{eq:f-composition-law-functor-fmap}): \begin{align*} \text{liftOpt}_{F}(\text{pu}_{F})=\text{id}\quad, & \quad\text{liftOpt}_{F}(f^{:A\rightarrow\bbnum 1+B})\bef\text{liftOpt}_{F}(g^{:B\rightarrow\bbnum 1+C})=\text{liftOpt}_{F}(f\diamond_{_{\text{Opt}}}g)\quad.\\ \text{fmap}_{F}(\text{id})=\text{id}\quad, & \quad\text{fmap}_{F}(f^{:A\rightarrow B})\bef\text{fmap}_{F}(g^{:B\rightarrow C})=\text{fmap}_{F}(f\bef g)\quad. \end{align*} -The only difference between these laws is in replacing $\text{id}^{:A\rightarrow A}$ +The only difference between those sets of laws is in replacing $\text{id}^{:A\rightarrow A}$ by $\text{pu}_{F}^{:A\rightarrow F^{A}}$ and the function composition $f\bef g$ by the Kleisli composition $f\diamond_{_{\text{Opt}}}g$. -We will now focus on the analogy between these laws, which goes far +We will now focus on the analogy between those laws, which goes far beyond the superficial similarity of form. Kleisli functions $f^{:A\rightarrow\bbnum 1+B}$ and $g^{:B\rightarrow\bbnum 1+C}$ @@ -4573,6 +4584,7 @@ \subsubsection{Statement \label{subsec:Statement-Kleisli-Option-laws}\ref{subsec {\color{greenunder}\text{use Eq.~(\ref{eq:associativity-law-of-flatMap-for-Option})}:}\quad & \quad=f\bef\text{flm}_{\text{Opt}}(g\diamond_{_{\text{Opt}}}h)=f\diamond_{_{\text{Opt}}}\big(g\diamond_{_{\text{Opt}}}h\big)\quad. \end{align*} This calculation motivates the name \textsf{``}associativity law\textsf{''} for Eq.~(\ref{eq:associativity-law-of-flatMap-for-Option}). +$\square$ The function \lstinline!liftOpt! can be viewed as a \textsf{``}generalized lifting\textsf{''} from Kleisli functions $A\rightarrow\bbnum 1+B$ to functions @@ -4590,30 +4602,30 @@ \subsubsection{Statement \label{subsec:Statement-Kleisli-Option-laws}\ref{subsec (filterable functor product). The same holds for the proofs of functor co-product and functor composition constructions. -The similarity between these proofs means, in the mathematical sense, -that we have been proving essentially the same statements twice but -did not look at the appropriate level of abstraction to see that. -While programmers may accept the work of writing these proofs twice, -a mathematician would prefer to define a \textsf{``}generalized lifting\textsf{''} -that replaces $\text{Opt}$ by a functor $M$: +The similarity between those proofs means, in a mathematician\textsf{'}s view, +that we have been proving essentially the same statement twice but +did not use an appropriate level of abstraction to see that. 
While +programmers may accept the work of writing those proofs twice, a mathematician +would prefer to define a \textsf{``}generalized lifting\textsf{''} that replaces $\text{Opt}$ +by a functor $M$: \begin{align*} & \text{lift}_{M,F}:(A\rightarrow M^{B})\rightarrow F^{A}\rightarrow F^{B}\quad,\\ & \text{pu}_{M}:A\rightarrow M^{A}\quad,\quad\quad\diamond_{_{M}}:(A\rightarrow M^{B})\rightarrow(B\rightarrow M^{C})\rightarrow(A\rightarrow M^{C})\quad, \end{align*} -and postulating the required properties as the set of identity, associativity, -and composition laws: +and postulating the required properties as the laws of identity, associativity, +and composition: \begin{align*} & \text{lift}_{M,F}(\text{pu}_{M}^{:A\rightarrow M^{A}})=\text{id}^{:F^{A}\rightarrow F^{A}}\quad,\quad\quad\text{lift}_{M,F}(f)\bef\text{lift}_{M,F}(g)=\text{lift}_{M,F}(f\diamond_{_{M}}g)\quad,\\ & \text{pu}_{M}\diamond_{_{M}}g=g\quad,\quad\quad f\diamond_{_{M}}\text{pu}_{M}=f\quad,\quad\quad\big(f\diamond_{_{M}}g\big)\diamond_{_{M}}h=f\diamond_{_{M}}\big(g\diamond_{_{M}}h\big)\quad. \end{align*} Now the two sets of proofs can be replaced by a single set of proofs formulated for an \textsf{``}$M$\textbf{-filterable}\textsf{''}\index{$M$-filterable functor} -functor $F$, where $M$ could be later set to the identity functor -or the $\text{Opt}$ functor. +functor $F$, where $M$ could be later set either to the identity +functor or to the \lstinline!Option! functor. Not all functors $M$ support the Kleisli composition $\diamond_{_{M}}$ -with the required laws. We will study such functors $M$, which are -known as \textbf{monads}\index{monads}, in Chapter~\ref{chap:Semimonads-and-monads}. +with the required laws. We will study such functors $M$, known as +\textbf{monads}\index{monads}, in Chapter~\ref{chap:Semimonads-and-monads}. \subsection{Motivation for using category theory\label{subsec:Motivation-for-using-category-theory}} @@ -4629,10 +4641,10 @@ \subsection{Motivation for using category theory\label{subsec:Motivation-for-usi composition but differ in the type of functions being lifted: the ordinary function $A\rightarrow B$, the \textsf{``}reversed\textsf{''} type $B\rightarrow A$, the Kleisli function $A\rightarrow M^{B}$, and the \textsf{``}reversed\textsf{''} -Kleisli function $B\rightarrow M^{A}$. In turn, all these function -types obey the laws of identity and composition. (For the types to -match, composition of reversed functions needs to be performed in -the reverse order.) +Kleisli function $B\rightarrow M^{A}$. In turn, all those function +types obey their own versions of the laws of identity and composition. +(For the types to match, composition of reversed functions needs to +be performed in the reverse order.) 
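+For the choice $M=\text{Opt}$, the four kinds of composition described
+above can be spelled out as plain Scala functions (an illustrative
+sketch with names chosen for this example; compare the table of
+morphism types given below):
+\begin{lstlisting}
+def composePlain[A, B, C](f: A => B, g: B => C): A => C = f andThen g
+// "Reversed" functions are composed in the reverse order:
+def composeReverse[A, B, C](f: B => A, g: C => B): C => A = g andThen f
+// Kleisli composition for Option:
+def composeKleisli[A, B, C](f: A => Option[B], g: B => Option[C]): A => Option[C] =
+  a => f(a).flatMap(g)
+// "Reversed" Kleisli functions are also composed in the reverse order:
+def composeRevKleisli[A, B, C](f: B => Option[A], g: C => Option[B]): C => Option[A] =
+  c => g(c).flatMap(f)
+// The corresponding identity morphisms are identity[A] for the first
+// two kinds and (a => Some(a)) for the two Kleisli kinds.
+\end{lstlisting}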
In order to avoid writing essentially the same proofs multiple times, we use a more abstract view of this situation: a new notion of a functor @@ -4658,11 +4670,11 @@ \subsection{Motivation for using category theory\label{subsec:Motivation-for-usi \hline {\small{}\textsf{``}plain\textsf{''}} & {\small{}$f:A\rightarrow B$} & {\small{}$\text{id}^{:A\rightarrow A}$} & {\small{}$f\bef g$}\tabularnewline \hline -{\small{}\textsf{``}reversed\textsf{''}} & {\small{}$f:B\rightarrow A$} & {\small{}$\text{id}^{:A\rightarrow A}$} & {\small{}$g\bef f$}\tabularnewline +{\small{}\textsf{``}reverse\textsf{''}} & {\small{}$f:B\rightarrow A$} & {\small{}$\text{id}^{:A\rightarrow A}$} & {\small{}$g\bef f$}\tabularnewline \hline {\small{}\textsf{``}$M$-Kleisli\textsf{''}} & {\small{}$f:A\rightarrow M^{B}$} & {\small{}$\text{pu}_{M}:A\rightarrow M^{A}$} & {\small{}$f\diamond_{_{M}}g$}\tabularnewline \hline -{\small{}\textsf{``}reversed $M$-Kleisli\textsf{''}} & {\small{}$f:B\rightarrow M^{A}$} & {\small{}$\text{pu}_{M}:A\rightarrow M^{A}$} & {\small{}$g\diamond_{_{M}}f$}\tabularnewline +{\small{}\textsf{``}reverse $M$-Kleisli\textsf{''}} & {\small{}$f:B\rightarrow M^{A}$} & {\small{}$\text{pu}_{M}:A\rightarrow M^{A}$} & {\small{}$g\diamond_{_{M}}f$}\tabularnewline \hline {\small{}\textsf{``}$F$-lifted\textsf{''}} & {\small{}$f:F^{A}\rightarrow F^{B}$} & {\small{}$\text{id}^{:F^{A}\rightarrow F^{A}}$} & {\small{}$f\bef g$}\tabularnewline \hline @@ -4698,7 +4710,7 @@ \subsubsection{Definition \label{subsec:Definition--(category)}\ref{subsec:Defin of category is general and does not require that morphisms be functions or that objects be types. -In functional programming, a \textsf{``}functor\textsf{''} is a type constructor $F^{\bullet}$ +In functional programming, a \textsf{``}functor\textsf{''} is a type constructor $F$ with a lawful lifting of functions $A\rightarrow B$ to functions $F^{A}\rightarrow F^{B}$. Category theory\index{category theory!functor|textit} defines a functor more\index{functor!in category theory|textit} generally @@ -4709,7 +4721,7 @@ \subsubsection{Definition \label{subsec:Definition--(category)}\ref{subsec:Defin \textemdash{} categorical functors from a category $\mathcal{C}$ to itself. The functional programming community says \textsf{``}functor\textsf{''} instead of \textsf{``}endofunctor\textsf{''} because almost all categorical functors -used in functional programming are endofunctors.}). +used in programming are endofunctors.}). \subsubsection{Definition \label{subsec:Definition--(categorical-functor)}\ref{subsec:Definition--(categorical-functor)} (categorical functor)} @@ -4761,7 +4773,7 @@ \subsubsection{Example \label{subsec:Example-category-definition-of-contrafuncto {\color{greenunder}\text{identity law}:}\quad & \big(\text{id}^{:A\rightarrow A}\big)^{\downarrow C}=\text{id}^{:C^{A}\rightarrow C^{A}}\quad,\\ {\color{greenunder}\text{composition law}:}\quad & (g\bef f)^{\downarrow C}=f^{\downarrow C}\bef g^{\downarrow C}\quad. \end{align*} -We derived these laws previously as the laws of contrafunctors. $\square$ +We derived these laws previously as the laws of contrafunctors. \subsubsection{Example \label{subsec:Example-category-definition-of-filterable-functor}\ref{subsec:Example-category-definition-of-filterable-functor}} @@ -4778,15 +4790,15 @@ \subsubsection{Example \label{subsec:Example-category-definition-of-filterable-f is mapped to the morphism $\text{liftOpt}_{F}(f):F^{A}\rightarrow F^{B}$ of the $F$-lifted category. 
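+To see how these definitions fit together, we can encode the data of
+a category from the table above as a Scala trait and write the Opt-Kleisli
+category as one of its instances. (This is an illustrative sketch;
+the names are chosen for this example.)
+\begin{lstlisting}
+// A category whose morphisms have the type Hom[A, B].
+trait Cat[Hom[_, _]] {
+  def id[A]: Hom[A, A]
+  def compose[A, B, C](f: Hom[A, B], g: Hom[B, C]): Hom[A, C]
+}
+object OptKleisli {
+  type OptK[A, B] = A => Option[B]   // morphisms of the Opt-Kleisli category
+  val optKleisliCat: Cat[OptK] = new Cat[OptK] {
+    def id[A]: OptK[A, A] = a => Some(a)                           // pu_Opt
+    def compose[A, B, C](f: OptK[A, B], g: OptK[B, C]): OptK[A, C] =
+      a => f(a).flatMap(g)                                         // Kleisli composition
+  }
+}
+\end{lstlisting}
+The $F$-lifted category is encoded similarly, with morphisms of type
+\lstinline!F[A] => F[B]!, the identity function as the identity morphism,
+and the ordinary forward composition of functions.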
-Formulate the laws of identity and composition for the (categorical) -functor using the definitions of the identity morphisms and the composition +Write the laws of identity and composition for that categorical functor +using the definitions of the identity morphisms and the composition operation in each category: \begin{align*} {\color{greenunder}\text{for the Opt-Kleisli category}:}\quad & \text{pu}_{\text{Opt}}^{:A\rightarrow\bbnum 1+A}\quad\text{and}\quad f\diamond_{_{\text{Opt}}}g\quad,\\ {\color{greenunder}\text{for the }F\text{-lifted category}:}\quad & \text{id}^{:F^{A}\rightarrow F^{A}}\quad\text{and}\quad f\bef g\quad. \end{align*} -Now we require that the first category\textsf{'}s identity morphism is mapped -to the second category\textsf{'}s identity morphism, and that a composition +The functor laws require that the first category\textsf{'}s identity morphism +is mapped to the second category\textsf{'}s identity morphism, and that a composition of any two morphisms (as defined in the first category) is mapped to a composition as defined in the second category: \begin{align*} @@ -4808,7 +4820,7 @@ \subsubsection{Example \label{subsec:Example-category-definition-of-filterable-f of \lstinline!filter! and did not assume more laws than necessary. In contrast, the two laws of the categorical functor are general and appear time and again in different areas of mathematics. This gives -us confidence that these laws are correctly chosen and will be useful +us confidence that those laws are correctly chosen and will be useful in a wide range of contexts. Proving that the four laws of \lstinline!filter! from Section~\ref{subsec:Motivation-for-and-derivation-of-laws-of-filtering} are equivalent to the two laws of a categorical functor gives assurance @@ -4824,17 +4836,19 @@ \subsubsection{Example \label{subsec:Example-category-definition-of-filterable-f four theorems (say, for the product of functors, contrafunctors, filterable functors, and filterable contrafunctors) by a single but more abstract theorem about the product of (categorical) functors being a functor -between suitably defined categories. We will not look at these proofs +between suitably defined categories. We will not look at those proofs here; Chapters~\ref{chap:Functors,-contrafunctors,-and}\textendash \ref{chap:Filterable-functors} already worked through a few almost identical proofs that show the required techniques. -The categorical view also shows us two directions for developing the -theory further, hoping to find useful applications. First, we can -look for functors $M$ (called \textsf{``}monads\textsf{''}) that admit the Kleisli -composition with the properties (the identity and the associativity -laws) required by an $M$-Kleisli category. Second, having found some -new monads $M$, we can look for \textsf{``}$M$-filterable\textsf{''} functors\index{$M$-filterable functor} +The categorical view also shows us some directions for developing +the theory further, hoping to find useful applications. We have found +a useful operation (Kleisli composition) and the properties it must +satisfy (the identity and the associativity laws). The \lstinline!Option! +functor obeys those properties, and so we may want to look for other +functors $M$ that also have these properties. (Those functors are +called \textsf{``}monads\textsf{''}.) 
Having found a new monad $M$, we can then look +for \textsf{``}$M$-filterable\textsf{''} functors\index{$M$-filterable functor} or contrafunctors $F$ that admit an operation $\text{lift}_{M,F}$ similar to $\text{liftOpt}_{F}$ but adapted to the monad $M$ instead of \lstinline!Option!. We will see some examples of $M$-filterable @@ -4846,21 +4860,20 @@ \subsubsection{Example \label{subsec:Example-category-definition-of-filterable-f To summarize, using the category theory\textsf{'}s notion of functor brings the following advantages: \begin{itemize} -\item We are assured that we found a correct set of laws of a typeclass. -We can derive the formulation of those laws from the standard laws -of categories and functors, without guessing. +\item We are assured that we have found a correct set of laws of a typeclass. +We can derive the formulation of those laws without guessing, by starting +from the standard laws of categories and functors. \item We may find some promising directions for obtaining more general type constructions. \item Several proofs may be replaced by a single proof for properties of some (categorical) functors. -\item We can formulate general constructions (e.g., functor product) that -work in the same way for many different typeclasses. +\item We may find general constructions (e.g., functor product) that work +in the same way for many different typeclasses. \end{itemize} -In this way, we find that category theory is a useful tool for reasoning -about abstract constructions that work with different typeclasses -(functor, contrafunctor, filterable, etc.). Category theory views -many typeclasses in a similar way and gives a systematic guidance -for deriving the typeclass laws. +We see that category theory is a useful tool for reasoning about abstract +constructions that work with different typeclasses (functor, contrafunctor, +filterable, etc.). Category theory views many typeclasses in a similar +way and gives a systematic guidance for deriving the typeclass laws. \medskip{} @@ -4872,8 +4885,8 @@ \subsubsection{Example \label{subsec:Example-category-definition-of-filterable-f typeclass for the \lstinline!takeWhile! function), CT gives neither proofs nor proof techniques that programmers could use. \item CT does not help determine whether a given type constructor (say, -\lstinline!type F[A] = Option[(A, A)]!) will belong to a specific -typeclass (e.g., a filterable functor, a pointed functor, or a monad). +\lstinline!type F[A] = Option[(A, A)]!) belongs to a specific typeclass +(e.g., a filterable functor, a pointed functor, or a monad). \item Even if we know that, say, a lawful \lstinline!Filterable! instance actually exists for the type constructor \lstinline!F[A] = Option[(A, A)]!, CT does not help in writing correct code for that typeclass instance. @@ -4881,7 +4894,7 @@ \subsubsection{Example \label{subsec:Example-category-definition-of-filterable-f two given type constructors, or how to implement one if it exists and how to verify the suitable naturality laws. \end{itemize} -Performing these tasks requires certain techniques of symbolic derivation +Performing those tasks requires certain symbolic derivation techniques adapted to \emph{applied} (that is, practically relevant) functional programming. Developing such techniques and selecting the necessary theoretical material is one of the main themes of this book. 
diff --git a/sofp-src/tex/sofp-free-type.tex b/sofp-src/tex/sofp-free-type.tex index 69cd37989..4c0320cc4 100644 --- a/sofp-src/tex/sofp-free-type.tex +++ b/sofp-src/tex/sofp-free-type.tex @@ -5391,10 +5391,8 @@ \subsection{The Church encoding of recursive types\label{subsec:The-Church-encod There is rarely an advantage in replacing a simple type $T$ by a more complicated function type, $\forall X.\,(T\rightarrow X)\rightarrow X$. However, the Church encoding has a different form when $T$ is a \emph{recursive} -type.\footnote{This \textsf{``}Church encoding\textsf{''} is known more precisely as \textsf{``}Boehm-Berarducci -encoding\textsf{''}. For the purposes of this book, they are the same. See -\texttt{\href{http://okmij.org/ftp/tagless-final/course/Boehm-Berarducci.html}{http://okmij.org/ftp/tagless-final/course/Boehm-Berarducci.html}} -for discussion.} +type.\footnote{The \textsf{``}Boehm-Berarducci encoding\textsf{''} discussed in \texttt{\href{http://okmij.org/ftp/tagless-final/course/Boehm-Berarducci.html}{http://okmij.org/ftp/tagless-final/course/Boehm-Berarducci.html}} +can be seen as a curried form of the Church encoding.} Consider a recursive type $T$ defined by a fixpoint equation $T\triangleq F^{T}$ with a given structure functor $F$. It turns out that a useful Church @@ -5413,8 +5411,8 @@ \subsection{The Church encoding of recursive types\label{subsec:The-Church-encod The Yoneda lemma only applies to types of the form $\forall X.\,(A\rightarrow X)\rightarrow F^{X}$, where the type $A$ cannot depend on the quantified type $X$. -The following statement\footnote{See also the papers \textsf{``}A note on strong dinaturality\textsf{''} (\texttt{\href{https://web.archive.org/web/20110601105059/http://www.cs.ioc.ee/~tarmo/papers/fics10.pdf}{http://www.cs.ioc.ee/$\sim$tarmo/papers/fics10.pdf}}) -and \textsf{``}Build, augment, and destroy universally\textsf{''} (\texttt{\href{https://kodu.ut.ee/~varmo/papers/aplas04.ps.gz}{https://kodu.ut.ee/$\sim$varmo/papers/aplas04.ps.gz}}).} shows that the Church encoding~(\ref{eq:Church-encoding-recursive-type}) +The following statement\footnote{See also the papers \texttt{\href{https://web.archive.org/web/20110601105059/http://www.cs.ioc.ee/~tarmo/papers/fics10.pdf}{http://www.cs.ioc.ee/$\sim$tarmo/papers/fics10.pdf}} +and \texttt{\href{https://kodu.ut.ee/~varmo/papers/aplas04.ps.gz}{https://kodu.ut.ee/$\sim$varmo/papers/aplas04.ps.gz}}.} shows that the Church encoding~(\ref{eq:Church-encoding-recursive-type}) is a fixpoint of $F$: \subsubsection{Statement \label{subsec:Statement-Church-encoding-recursive-type-covariant}\ref{subsec:Statement-Church-encoding-recursive-type-covariant}} @@ -5469,6 +5467,10 @@ \subsubsection{Statement \label{subsec:Statement-Church-encoding-recursive-type- \[ \text{fix}\triangleq f^{:F^{\forall X.\,(F^{X}\rightarrow X)\rightarrow X}}\rightarrow\forall Y.\,q^{:F^{Y}\rightarrow Y}\rightarrow f\triangleright\big(p^{:\forall X.\,(F^{X}\rightarrow X)\rightarrow X}\rightarrow q\triangleright p^{Y}\big)^{\uparrow F}\triangleright q\quad. \] +Omitting type annotations, we may write \lstinline!fix! as: +\[ +\text{fix}\triangleq f\rightarrow\,^{Y}\rightarrow q\rightarrow f\triangleright(p\rightarrow q\triangleright p^{Y})^{\uparrow F}\bef q\quad. 
+\] We turn to implementing \lstinline!unfix!: \begin{align*} @@ -5520,7 +5522,8 @@ \subsubsection{Statement \label{subsec:Statement-Church-encoding-recursive-type- of the variables, we get: \begin{align*} & \text{fix}^{\uparrow F}\bef f\overset{?}{=}f^{\uparrow F}\bef q\quad,\\ -{\color{greenunder}\text{or equivalently}:}\quad & \text{fix}^{\uparrow F}\bef\big(p^{:\forall X.\,(F^{X}\rightarrow X)\rightarrow X}\rightarrow q\triangleright p^{Y}\big)^{\uparrow F}\bef q\overset{?}{=}\big(p^{:\forall X.\,(F^{X}\rightarrow X)\rightarrow X}\rightarrow q\triangleright p^{Y}\big)^{\uparrow F\uparrow F}\bef q^{\uparrow F}\bef q\quad. +{\color{greenunder}\text{or equivalently}:}\quad & \text{fix}^{\uparrow F}\bef\big(p^{:\forall X.\,(F^{X}\rightarrow X)\rightarrow X}\rightarrow q\triangleright p^{Y}\big)^{\uparrow F}\bef q\\ + & \quad\overset{?}{=}\big(p^{:\forall X.\,(F^{X}\rightarrow X)\rightarrow X}\rightarrow q\triangleright p^{Y}\big)^{\uparrow F\uparrow F}\bef q^{\uparrow F}\bef q\quad. \end{align*} Evaluate the function composition in the left-hand side, and make both sides equal: @@ -5557,23 +5560,33 @@ \subsubsection{Statement \label{subsec:Statement-Church-encoding-recursive-type- r\triangleright p\triangleright f=s\triangleright p\quad. \] This law reproduces Eq.~(\ref{eq:fix-unfix-derivation2}) if we define -$r$ and $f$ by +$r$ and $f$ by: \[ -r\triangleq\text{fix}^{\uparrow F}\quad,\quad\quad f\triangleq u^{:F^{T}}\rightarrow s\triangleright(u\triangleright\text{fix})\quad. +r\triangleq\text{fix}^{\uparrow F}\quad,\quad\quad f\triangleq u^{:F^{T}}\rightarrow s\triangleright(u\triangleright\text{fix})^{B}\quad. \] +Substituting the definition of \lstinline!fix!, we get a simplified +formula for $f$: +\begin{align*} + & f=u^{:F^{T}}\rightarrow s\triangleright(u\triangleright\text{fix})^{B}\\ + & =u^{:F^{T}}\rightarrow u\triangleright(q^{:\forall X.\,(F^{X}\rightarrow X)\rightarrow X}\rightarrow s\triangleright q^{B})^{\uparrow F}\bef s\\ + & =(q^{:\forall X.\,(F^{X}\rightarrow X)\rightarrow X}\rightarrow s\triangleright q^{B})^{\uparrow F}\bef s\quad. +\end{align*} It remains to verify that the assumption of the strong dinaturality law holds: \begin{align*} & r\bef f\overset{?}{=}f^{\uparrow F}\bef s\quad,\\ -{\color{greenunder}\text{or equivalently}:}\quad & \text{fix}^{\uparrow F}\bef(u\rightarrow s\triangleright(u\triangleright\text{fix}))\overset{?}{=}\big(u^{:F^{T}}\rightarrow s\triangleright(u\triangleright\text{fix})\big)^{\uparrow F}\bef s\quad. +{\color{greenunder}\text{or equivalently}:}\quad & \text{fix}^{\uparrow F}\bef(q\rightarrow s\triangleright q^{B})^{\uparrow F}\bef s\overset{?}{=}f^{\uparrow F}\bef s\quad. \end{align*} +To prove the last equation, it is sufficient to prove that: +\[ +\text{fix}\bef(q\rightarrow s\triangleright q^{B})=f\quad. 
+\] Rewrite the left-hand side above until it becomes equal to the right-hand side: \begin{align*} - & \text{fix}^{\uparrow F}\bef(u\rightarrow s\triangleright(u\triangleright\text{fix}))=(u^{:F^{F^{T}}}\rightarrow u\triangleright\text{fix}^{\uparrow F})\bef(u\rightarrow s\triangleright(u\triangleright\text{fix}))\\ -{\color{greenunder}\text{compute composition}:}\quad & =u\rightarrow s\triangleright(u\triangleright\text{fix}^{\uparrow F}\triangleright\gunderline{\text{fix}})=\gunderline{u\rightarrow u\,\triangleright}\,\text{fix}^{\uparrow F}\triangleright(q\rightarrow s\triangleright q)^{\uparrow F}\bef s\\ -{\color{greenunder}\text{unexpand function}:}\quad & =\big(\text{fix}\bef(q\rightarrow s\triangleright q)\big)^{\uparrow F}\bef s=\big((u^{:F^{T}}\rightarrow u\triangleright\text{fix})\bef(q\rightarrow s\triangleright q)\big)^{\uparrow F}\bef s\\ -{\color{greenunder}\text{compute composition}:}\quad & =\big(u^{:F^{T}}\rightarrow s\triangleright(u\triangleright\text{fix})\big)^{\uparrow F}\bef s\quad. + & \text{fix}\bef(q\rightarrow s\triangleright q^{B})\\ + & =(u^{:F^{T}}\rightarrow u\triangleright\text{fix})\bef(q\rightarrow s\triangleright q^{B})\\ + & =u^{:F^{T}}\rightarrow s\triangleright(u\triangleright\text{fix})^{B}=f. \end{align*} The two sides are now equal. $\square$ diff --git a/sofp-src/tex/sofp-functors.tex b/sofp-src/tex/sofp-functors.tex index 6dbf0bec1..da2c3b69b 100644 --- a/sofp-src/tex/sofp-functors.tex +++ b/sofp-src/tex/sofp-functors.tex @@ -4,15 +4,15 @@ \chapter{Functors and contrafunctors\label{chap:Functors,-contrafunctors,-and}} \global\long\def\gunderline#1{\mathunderline{greenunder}{#1}}% \global\long\def\bef{\forwardcompose}% \global\long\def\bbnum#1{\custombb{#1}}% -Type constructors such as \lstinline!Seq[A]! or \lstinline!Array[A]! -are data structures that hold or \textsf{``}wrap\textsf{''} zero or more values of -a given type \lstinline!A!. These data structures are fully parametric: -they work in the same way for every type \lstinline!A!. Working with -parametric \textsf{``}data wrappers\textsf{''} or \textsf{``}data containers\textsf{''} turns out -to be a powerful design pattern of functional programming. To realize -all its benefits, we will formalize the concept of data wrapping through -a set of mathematical laws. We will then extend that design pattern -to all data types for which the laws hold. +Types \lstinline!Seq[A]! or \lstinline!Array[A]! represent data +structures that hold or \textsf{``}wrap\textsf{''} zero or more values of a given +type \lstinline!A!. Those data structures are fully parametric: they +work in the same way for every type \lstinline!A!. Working with parametric +\textsf{``}data wrappers\textsf{''} or \textsf{``}data containers\textsf{''} turns out to be a powerful +design pattern of functional programming. To realize all its benefits, +we will formalize the concept of data wrapping through a set of mathematical +laws. We will then extend that design pattern to all data types for +which the laws hold. \section{Practical use} @@ -26,8 +26,8 @@ \subsection{Motivation. Type constructors that wrap data} as \lstinline!{x => x * 2}! and obtain a \textsf{``}wrapped\textsf{''} value $246$. Let us look at some often used type constructors defined in the Scala -standard library, such as \lstinline!Seq[A]!, \lstinline!Try[A]!, -and \lstinline!Future[A]!. We notice the common features: +standard library, such as \lstinline!Seq!, \lstinline!Try!, and +\lstinline!Future!. 
We notice the common features: \begin{itemize} \item There are some methods for creating a data structure that wraps zero or more values of a given type. For example, the Scala code \lstinline!List.fill(10)(0)! @@ -40,7 +40,7 @@ \subsection{Motivation. Type constructors that wrap data} them wrapped. For example, \lstinline!List(10, 20, 30).map(_ + 5)! evaluates to \lstinline!List(15, 25, 35)!. \end{itemize} -The data types \lstinline!Seq[A]!, \lstinline!Try[A]!, and \lstinline!Future[A]! +The types \lstinline!Seq!, \lstinline!Try!, and \lstinline!Future! express quite different kinds of wrapping. The data structure implementing \lstinline!Seq[A]! can hold a variable number of values of type \lstinline!A!. The data structure \lstinline!Try[A]! holds either a successfully @@ -54,9 +54,9 @@ \subsection{Motivation. Type constructors that wrap data} values have different type signatures for each wrapper. However, the method \lstinline!map! is similar in all three examples. We can say generally that the \lstinline!map! method will apply a given function -\lstinline!f: A => B! to data of type \lstinline!A! stored inside -the wrapper, such that new data (of type \lstinline!B!) will remain -within a wrapper of the same type: +\lstinline!f: A => B! to all the data of type \lstinline!A! stored +inside the wrapper, putting new data (of type \lstinline!B!) into +a wrapper of the same type: \begin{lstlisting} val a = List(x,y,z).map(f) // Result is List(f(x), f(y), f(z)). val b = Try(x).map(f) // Result is Try(f(x)). @@ -78,10 +78,10 @@ \subsection{Motivation. Type constructors that wrap data} data, waiting until data becomes available, etc., \textemdash{} as they are implemented by methods specific to each wrapper type. -\subsection{Extended example: \texttt{Option} and the identity law\label{subsec:f-Example:-Option-and}} +\subsection{Example: \texttt{Option} and the identity law\label{subsec:f-Example:-Option-and}} As another example of a \textsf{``}data wrapper\textsf{''}, consider the type constructor -\lstinline!Option[A]!, which is written in the type notation as: +\lstinline!Option!, which is written in the type notation as: \[ \text{Opt}^{A}\triangleq\bbnum 1+A\quad. \] @@ -94,10 +94,10 @@ \subsection{Extended example: \texttt{Option} and the identity law\label{subsec: about manipulating data in a wrapper. Two possible implementations of \lstinline!map! will fit the type signature: \begin{lstlisting} -def mapX[A, B](oa: Option[A])(f: A => B): Option[B] = None +def mapX[A, B](p: Option[A])(f: A => B): Option[B] = None -def mapY[A, B](oa: Option[A])(f: A => B): Option[B] = - oa match { +def mapY[A, B](p: Option[A])(f: A => B): Option[B] = + p match { case None => None case Some(x) => Some(f(x)) } @@ -109,33 +109,33 @@ \subsection{Extended example: \texttt{Option} and the identity law\label{subsec: How can we formulate this property of \lstinline!mapY! in a rigorous way? The trick is to choose the argument $f^{:A\rightarrow B}$ in -the expression \lstinline!map(oa)(f)! to be the identity function +the expression \lstinline!map(p)(f)! to be the identity function $\text{id}^{:A\rightarrow A}$ (setting \lstinline!map!\textsf{'}s type parameters as $A=B$, so that the types match). Applying an identity function -to a value wrapped in an \lstinline!Option[A]! should not change -that value. To verify that, substitute the identity function instead -of \lstinline!f! into \lstinline!mapY! and compute: +to a value stored in an \lstinline!Option[A]! should not change that +value. 
To verify that, substitute the identity function instead of +\lstinline!f! into \lstinline!mapY! and compute: \begin{lstlisting} -mapY[A, A](x: Option[A])(identity[A]: A => A): Option[A] - == x match { +mapY[A, A](p: Option[A])(identity[A]: A => A): Option[A] + == p match { case None => None // No change. case Some(x) => Some(x) // No change. - } == x + } == p \end{lstlisting} -The result is always equal to \lstinline!x!. We can write that fact +The result is always equal to \lstinline!p!. We can write that fact as an equation: \[ -\forall x^{:\text{Opt}^{A}}.\,\,\text{map}\,(x)(\text{id})=x\quad. +\text{for all }p^{:\text{Opt}^{A}}:\,\,\text{map}\,(p)(\text{id})=p\quad. \] This equation is called the \textbf{identity law}\index{identity laws!of functors} of \lstinline!map!. The identity law is a formal way of expressing the information-preserving property of the \lstinline!map! function. The implementation \lstinline!mapX! violates the identity law since -it always returns \lstinline!None! and so \lstinline!mapX(oa)(id) == None! -and not equal to \lstinline!oa! for arbitrary values of \lstinline!oa!. -A data wrapper should not unexpectedly lose information when we manipulate -the wrapped data. So, the correct implementation of \lstinline!map! -is \lstinline!mapY!. The code notation for \lstinline!map! is: +it always returns \lstinline!None! and so \lstinline!mapX(p)(id) == None! +and not equal to \lstinline!p! for all \lstinline!p!. A data wrapper +should not unexpectedly lose information when we manipulate the wrapped +data. So, the correct implementation of \lstinline!map! is \lstinline!mapY!. +The code notation for that implementation of \lstinline!map! is: \[ \text{map}^{A,B}\triangleq p^{:\bbnum 1+A}\rightarrow f^{:A\rightarrow B}\rightarrow p\triangleright\,\begin{array}{|c||cc|} & \bbnum 1 & B\\ @@ -225,35 +225,48 @@ \subsection{Motivation for the composition law} We can formulate this property more generally: Liftings should preserve the function composition for arbitrary functions $f^{:A\rightarrow B}$ -and $g^{:B\rightarrow C}$. This is written as: +and $g^{:B\rightarrow C}$. So, we must have: \begin{lstlisting} -c.map(f).map(g) == c.map(f andThen g) == c.map(x => g(f(x))) +c.map(f).map(g) == c.map(f andThen g) \end{lstlisting} \[ -c^{:F^{A}}\triangleright\text{fmap}\,(f)\triangleright\text{fmap}\,(g)=c\triangleright\text{fmap}\,(f)\bef\text{fmap}\,(g)=c\triangleright\text{fmap}\,(f\bef g)\quad. +c^{:F^{A}}\triangleright\text{fmap}\,(f)\triangleright\text{fmap}\,(g)=c\triangleright\text{fmap}\,(f\bef g)\quad. \] -This equation has the form $c\triangleright p=c\triangleright q$ -with some functions $p$ and $q$, or equivalently $p(c)=q(c)$. When -we have $p(c)=q(c)$ for all $c$, it means \emph{the functions themselves} -are equal: $p=q$.\index{equality between functions} So, we may omit -the argument $c$ and rewrite the equation in a shorter form: +The pipe notation\index{pipe notation} allows us to write: +\[ +c\triangleright\text{fmap}\,(f)\triangleright\text{fmap}\,(g)=c\triangleright\text{fmap}\,(f)\bef\text{fmap}\,(g)\quad. +\] +Then we can express the property of \lstinline!map! as: +\[ +c\triangleright\text{fmap}\,(f)\bef\text{fmap}\,(g)=c\triangleright\text{fmap}\,(f\bef g)\quad. +\] +When $c\triangleright p=c\triangleright q$ for all $c$, where $p$ +and $q$ are some functions, it means \emph{the functions themselves} +are equal, \index{equality between functions} and we may simply write +$p=q$ instead of $c\triangleright p=c\triangleright q$. 
So, we omit +the argument $c$ in the last equation and rewrite it in a shorter +form: \[ \text{fmap}\,(f)\bef\text{fmap}\,(g)=\text{fmap}\,(f\bef g)\quad. \] -This equation is called the \textbf{composition law}\index{composition law!of functors} +This is called the \textbf{composition law}\index{composition law!of functors} of functors. Let us verify the composition law of the \lstinline!Option! type, whose \lstinline!fmap! function was shown in Section~\ref{subsec:f-Example:-Option-and}. For clarity and practice, we will perform the derivations both in -the code notation and in the Scala syntax. To evaluate $\text{fmap}\,(f\bef g$), -we apply \lstinline!fmap(f andThen g)!, where \lstinline!f: A => B! -and \lstinline!g: B => C! are arbitrary functions, to an arbitrary -value \lstinline!oa:Option[A]!. In Scala code, it is convenient to -use the method \lstinline!map! and write \lstinline!oa.map(f)! -instead of the equivalent expression \lstinline!fmap(f)(oa)!: -\begin{lstlisting} -fmap(f andThen g)(oa) == oa.map(f andThen g) == oa match { +the code notation and in the Scala syntax. + +The composition law must hold for arbitrary functions $f^{:A\rightarrow B}$ +and $g^{:B\rightarrow C}$. Then both sides of the composition law +are functions of type \lstinline!Option[A] => Option[C]!. To show +that both sides are equal, we apply both sides to an arbitrary value +\lstinline!p: Option[A]! and show that the results are equal. Begin +with the right-hand side, which is \lstinline!fmap(f andThen g)! +in Scala. It is convenient to use the method \lstinline!map! and +write \lstinline!p.map(f)! instead of the equivalent expression \lstinline!fmap(f)(p)!: +\begin{lstlisting} +fmap(f andThen g)(p) == p.map(f andThen g) == p match { case None => None case Some(x) => (f andThen g)(x) } @@ -261,25 +274,24 @@ \subsection{Motivation for the composition law} Since \lstinline!(f andThen g)(x) == g(f(x))!, we rewrite the result as: \begin{lstlisting} -oa.map(f andThen g) == oa match { +p.map(f andThen g) == p match { case None => None case Some(x) => g(f(x)) } \end{lstlisting} -Now we consider the left-hand side of the law, $\text{fmap}\,(f)\bef\text{fmap}\,(g)$, -and write the Scala expressions: +Now apply the left-hand side of the law to \lstinline!p!: \begin{lstlisting} -oa.map(f).map(g) == (oa match { +p.map(f).map(g) == (p match { case None => None case Some(x) => f(x) -}).map(g) == (oa match { +}).map(g) == (p match { case None => None case Some(x) => f(x) }) match { case None => None case Some(y) => g(y) -} == oa match { +} == p match { case None => None case Some(x) => g(f(x)) } @@ -316,7 +328,7 @@ \subsection{Motivation for the composition law} but, say, \lstinline!Some(1)! or \lstinline!None!, our ordinary intuitions about data transformations would become incorrect. In other words, violations of the composition law prevent us from understanding -the code via mathematical reasoning about transformation of data values. +the code via reasoning about transformation of data values. 
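+Although only the symbolic derivation shown above proves the law for
+all values and for all functions $f$ and $g$, a quick spot check in
+the Scala REPL can catch accidental violations early (the functions
+and values below are chosen arbitrarily):
+\begin{lstlisting}
+val f: Int => Int = _ + 5
+val g: Int => String = _.toString
+// Both composition paths give the same results:
+assert(Some(10).map(f).map(g) == Some(10).map(f andThen g))    // Some("15")
+assert((None: Option[Int]).map(f).map(g) ==
+       (None: Option[Int]).map(f andThen g))                   // None
+\end{lstlisting}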
The composition law is a rigorous formulation of the requirement that wrapped data should be transformed (by lifted functions) in the same @@ -345,14 +357,14 @@ \subsubsection{Statement \label{subsec:f-Statement-composition-associativy-law}\ \subsection{Functors: definition and examples\label{subsec:Functors:-definition-and-examples}} -Separating the functionality of \textsf{``}data wrapper\textsf{''} from any other -features of a data type, we obtain: +Separating the functionality of \textsf{``}data wrapper\textsf{''} from other features +of a data type, we obtain: \begin{itemize} \item A data type with a type parameter, e.g., \lstinline!L[A]!. We will -use the notation $L^{\bullet}$ (in Scala, \lstinline!L[_]!) for -the type constructor itself. +sometimes use the notation $L^{\bullet}$ (in Scala, \lstinline!L[_]!) +in order to emphasize that $L$ is a type constructor. \item A \index{fully parametric!function}fully parametric function \lstinline!fmap! -with this type signature: +with the type signature: \[ \text{fmap}_{L}:\left(A\rightarrow B\right)\rightarrow L^{A}\rightarrow L^{B}\quad. \] @@ -363,8 +375,7 @@ \subsection{Functors: definition and examples\label{subsec:Functors:-definition- {\color{greenunder}\text{composition law}:}\quad & \text{fmap}_{L}(f^{:A\rightarrow B}\bef g^{:B\rightarrow C})=\text{fmap}_{L}(f^{:A\rightarrow B})\bef\text{fmap}_{L}(g^{:B\rightarrow C})\quad.\label{eq:f-composition-law-functor-fmap} \end{align} -A type constructor $L^{\bullet}$ with these properties is called -a \textbf{functor}\index{functor}. +A type constructor $L$ with these properties is called a \textbf{functor}\index{functor}. When a law involves function compositions, it is helpful to draw a type diagram\index{type diagram} to clarify how the functions transform @@ -378,9 +389,9 @@ \subsection{Functors: definition and examples\label{subsec:Functors:-definition- L^{A}\ar[ru]\sp(0.4){\text{fmap}_{L}(f^{:A\rightarrow B})\ ~}\ar[rr]\sb(0.5){\text{fmap}_{L}(f^{:A\rightarrow B}\bef g^{:B\rightarrow C})\ } & & L^{C} } \] -There are two paths from $L^{A}$ to $L^{C}$; by Eq.~(\ref{eq:f-composition-law-functor-fmap}), -both paths must give the same result. Mathematicians call such diagrams -\textbf{commutative}\index{commutative diagram}. +There are two paths from $L^{A}$ to $L^{C}$. By Eq.~(\ref{eq:f-composition-law-functor-fmap}), +both paths must give the same result. Mathematicians say that the +diagram is \textsf{``}\textbf{commutative}\textsf{''}\index{commutative diagram}. Type diagrams are easier to read when using the \emph{forward} composition $\left(f\bef g\right)$ because the order of edges is the same as @@ -392,7 +403,7 @@ \subsection{Functors: definition and examples\label{subsec:Functors:-definition- \] The function \lstinline!map! is equivalent to \lstinline!fmap! and -can be defined through \lstinline!fmap! by: +can be expressed through \lstinline!fmap! by: \begin{align*} & \text{map}_{L}:L^{A}\rightarrow\left(A\rightarrow B\right)\rightarrow L^{B}\quad,\\ & \text{map}_{L}(x^{:L^{A}})(f^{:A\rightarrow B})=\text{fmap}_{L}(f^{:A\rightarrow B})(x^{:L^{A}})\quad. @@ -401,7 +412,7 @@ \subsection{Functors: definition and examples\label{subsec:Functors:-definition- Each of the type constructors \lstinline!Option!, \lstinline!Seq!, \lstinline!Try!, and \lstinline!Future! has its own definition of \lstinline!map! but the functor laws remain the same. 
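+For instance, with the standard library's \lstinline!Seq!, the two
+equivalent calling conventions can be compared directly (a small
+illustration only; \lstinline!fmapSeq! is our own name):
+\begin{lstlisting}
+// fmap for Seq, written as a standalone function via the map method:
+def fmapSeq[A, B](f: A => B): Seq[A] => Seq[B] = _.map(f)
+
+// map(x)(f) and fmap(f)(x) compute the same result:
+assert(Seq(1, 2, 3).map((x: Int) => x + 10) ==
+       fmapSeq((x: Int) => x + 10)(Seq(1, 2, 3)))
+\end{lstlisting}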
We use the -subscript $L$ when writing $\text{map}_{L}$ and $\text{fmap}_{L}$, +subscript $L$ when writing $\text{map}_{L}$ and $\text{fmap}_{L}$ in order to indicate clearly the type constructor those functions work with. @@ -424,8 +435,8 @@ \subsection{Functors: definition and examples\label{subsec:Functors:-definition- and \lstinline!Concurrent! (provided by the \texttt{cats-effect} library), \lstinline!ZIO! (provided by the \texttt{zio} library). \item Dictionaries: \lstinline!Map[K, V]! with respect to the type parameter -\lstinline!V!. The functor\textsf{'}s \lstinline!fmap! method transforms -the values in a dictionary, leaving the keys unchanged. +\lstinline!V!. The \lstinline!map! method transforms the values +in a dictionary, leaving the keys unchanged. \end{itemize} Application-specific, custom type constructors defined by the programmer, such as case classes with type parameters, are often functors. Their @@ -442,8 +453,9 @@ \subsection{Functors: definition and examples\label{subsec:Functors:-definition- \begin{lstlisting} final case class Counted[A](n: Int, a: A) \end{lstlisting} -We may implement \lstinline!map! for \lstinline!Counted[A]! as a -function: +The data type \lstinline!Counted[A]! may be used to describe \lstinline!n! +repetitions of a given value \lstinline!a: A!. We may implement \lstinline!map! +for \lstinline!Counted[A]! as a function: \begin{lstlisting} def map[A, B](c: Counted[A])(f: A => B): Counted[B] = c match { case Counted(n, a) => Counted(n, f(a)) @@ -458,7 +470,7 @@ \subsection{Functors: definition and examples\label{subsec:Functors:-definition- } \end{lstlisting} This code defines both the type \lstinline!Counted! and the method -\lstinline!map!, which can be used like this: +\lstinline!map!. Here is a usage example: \begin{lstlisting} scala> Counted(10, "abc").map(s => "prefix " + s) res0: Counted[String] = Counted(10,prefix abc) @@ -468,9 +480,10 @@ \subsection{Functors: definition and examples\label{subsec:Functors:-definition- \[ \text{Counted}^{A}\triangleq\text{Int}\times A\quad, \] -showing that \lstinline!Counted[_]! is a polynomial type constructor. -The existence of a \lstinline!map! method suggests that \lstinline!Counted[_]! -is a functor. Let us now verify that the functor laws hold for it. +showing that \lstinline!Counted! is a polynomial type constructor. +Let us verify that \lstinline!Counted! is a functor. As we already +have a \lstinline!map! method for \lstinline!Counted!, it remains +to check that the functor laws hold. \subsubsection{Example \label{subsec:f-Example-Int-x-A}\ref{subsec:f-Example-Int-x-A}\index{examples (with code)}} @@ -480,7 +493,7 @@ \subsubsection{Example \label{subsec:f-Example-Int-x-A}\ref{subsec:f-Example-Int \subparagraph{Solution} The implementation of \lstinline!map! is fully parametric since it -does not perform any type-specific operations; it uses the value \lstinline!n:Int! +does not perform any type-specific operations; it uses the value \lstinline!n: Int! as if \lstinline!Int! were a type parameter. It remains to check that the laws hold. We will first verify the laws using the Scala syntax and then using the code notation. @@ -512,12 +525,12 @@ \subsubsection{Example \label{subsec:f-Example-Int-x-A}\ref{subsec:f-Example-Int Let us now write a proof in the code notation, formulating the laws via the \lstinline!fmap! method: \[ -\text{fmap}_{\text{Counted}}(f^{:A\rightarrow B})\triangleq\big(n^{:\text{Int}}\times a^{:A}\rightarrow n\times f(a)\big)\quad. 
+\text{fmap}_{\text{Counted}}(f^{:A\rightarrow B})\triangleq n^{:\text{Int}}\times a^{:A}\rightarrow n\times f(a)\quad. \] To verify the identity law, we write: \begin{align*} {\color{greenunder}\text{expect to equal }\text{id}:}\quad & \text{fmap}_{\text{Counted}}(\text{id})\\ -{\color{greenunder}\text{definition of }\text{fmap}_{\text{Counted}}:}\quad & =\big(n\times a\rightarrow n\times\gunderline{\text{id}\,(a)}\big)\\ +{\color{greenunder}\text{definition of }\text{fmap}_{\text{Counted}}:}\quad & =n\times a\rightarrow n\times\gunderline{\text{id}\,(a)}\\ {\color{greenunder}\text{definition of }\text{id}:}\quad & =\left(n\times a\rightarrow n\times a\right)=\text{id}\quad. \end{align*} To verify the composition law (for brevity, denote $\text{fmap}_{\text{Counted}}$ @@ -526,7 +539,7 @@ \subsubsection{Example \label{subsec:f-Example-Int-x-A}\ref{subsec:f-Example-Int {\color{greenunder}\text{expect to equal }\text{fmap}(f\bef g):}\quad & \text{fmap}\,(f)\bef\text{fmap}\,(g)\\ {\color{greenunder}\text{definition of }\text{fmap}:}\quad & =\left(n\times a\rightarrow n\times f(a)\right)\bef\left(n\times b\rightarrow n\times g(b)\right)\\ {\color{greenunder}\text{compute composition}:}\quad & =n\times a\rightarrow n\times\gunderline{g(f(a))}\\ -{\color{greenunder}\text{definition of }\left(f\bef g\right):}\quad & =\left(n\times a\rightarrow n\times(f\bef g)(a)\right)=\text{fmap}\,(f\bef g)\quad. +{\color{greenunder}\text{definition of }\left(f\bef g\right):}\quad & =n\times a\rightarrow n\times(f\bef g)(a)=\text{fmap}\,(f\bef g)\quad. \end{align*} We will prove later that all polynomial type constructors have a definition @@ -549,8 +562,8 @@ \subsubsection{Example \label{subsec:f-Example-Int-x-A}\ref{subsec:f-Example-Int Counter(n, a) != map_bad(Counter(n, a))(identity) == Counter(n + 1, a) // Identity law does not hold. \end{lstlisting} -The failure of functor laws leads to surprising behavior because a -code refactoring changes the result: +The failure of functor laws means that code involving \lstinline!map! +cannot be refactored in usual ways: \begin{lstlisting} Counter(n, a).map(incr).map(incr) != Counter(n, a).map(x => x + 2) \end{lstlisting} @@ -582,9 +595,9 @@ \subsubsection{Example \label{subsec:f-Example-A-A-A}\ref{subsec:f-Example-A-A-A fail to preserve information about the values \lstinline!x!, \lstinline!y!, \lstinline!z! and about their ordering in the original data, \lstinline!Vec(x, y, z)!. For this reason, we use the implementation of \lstinline!fmap! shown -first. +above. -The type notation for the type constructor \lstinline!Vec3[_]! is: +The type notation for the type constructor \lstinline!Vec3! is: \[ \text{Vec}_{3}{}^{A}\triangleq A\times A\times A\quad, \] @@ -632,7 +645,7 @@ \subsubsection{Example \label{subsec:f-Example-P+QxA}\ref{subsec:f-Example-P+QxA methods tail-recursively! Since our main focus is to prove that various laws hold for \lstinline!fmap!, we will prefer shorter and more straightforward code even if it is not tail-recursive. Once the laws are proved for -that code, the programmer may use an equivalent but more efficient +that code, the programmer may look for an equivalent but more efficient and stack-safe implementation of the same function. Proving laws for a recursive function needs mathematical induction. @@ -687,9 +700,9 @@ \subsubsection{Example \label{subsec:Example-rec-poly-functor-List}\ref{subsec:E Note that the expression \lstinline!fmap(f)(tail)! in the code of \lstinline!fmap! is a \emph{recursive call} to \lstinline!fmap!. 
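For reference, the shape of such recursive code can be sketched as follows (the names \lstinline!MyList!, \lstinline!MyNil!, \lstinline!MyCons! are illustrative and not the definitions used in this example):
\begin{lstlisting}
sealed trait MyList[A]
final case class MyNil[A]() extends MyList[A]
final case class MyCons[A](head: A, tail: MyList[A]) extends MyList[A]

def fmap[A, B](f: A => B): MyList[A] => MyList[B] = {
  case MyNil()            => MyNil()
  case MyCons(head, tail) => MyCons(f(head), fmap(f)(tail))  // Recursive call to fmap.
}
\end{lstlisting}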
-So, the inductive assumption says that the law already holds for any -recursive calls of the function \lstinline!fmap!. We will see this -pattern in all proofs of laws for recursive functions. +The inductive assumption says that the law already holds for any recursive +calls of the function \lstinline!fmap!. We will use such inductive +assumptions in all proofs of laws for recursive functions. To prove the composition law of \lstinline!fmap!, take arbitrary functions \lstinline!f: A => B! and \lstinline!g: B => C!, and apply @@ -779,7 +792,9 @@ \subsubsection{Example \label{subsec:Example-rec-poly-functor-List}\ref{subsec:E A diagonal code matrix whose elements are identity functions will never change any values in any of the disjunctive parts. So, that matrix is equal to the identity function applied to the entire disjunctive -type. This concludes the proof of the identity law. +type. + +This concludes the proof of the identity law. To verify the composition law, we write (omitting types for brevity): \begin{align*} @@ -798,9 +813,9 @@ \subsubsection{Example \label{subsec:Example-rec-poly-functor-List}\ref{subsec:E \end{array}\quad. \end{align*} By the inductive assumption, the law already holds for recursive calls, -which we denoted as $\overline{\text{fmap}}$: +which we denoted by $\overline{\text{fmap}}$: \[ -\overline{\text{fmap}}\,(g)\big(\overline{\text{fmap}}\,(f)(t)\big)=t\triangleright\overline{\text{fmap}}\,(f)\bef\overline{\text{fmap}}\,(g)=t\triangleright\overline{\text{fmap}}\,(f\bef g)\quad. +\overline{\text{fmap}}\,(g)\big(\overline{\text{fmap}}\,(f)(t)\big)=t\triangleright\overline{\text{fmap}}\,(f)\bef\overline{\text{fmap}}\,(g)\overset{!}{=}t\triangleright\overline{\text{fmap}}\,(f\bef g)\quad. \] This allows us to complete the proof of the law: \[ @@ -814,7 +829,7 @@ \subsubsection{Example \label{subsec:Example-rec-poly-functor-List}\ref{subsec:E \subsubsection{Example \label{subsec:Example-rec-poly-functor}\ref{subsec:Example-rec-poly-functor}} Define a list of \emph{odd} length as a recursive type constructor -$\text{LO}^{\bullet}$: +$\text{LO}$: \begin{align} \text{LO}^{A} & \triangleq A+A\times A\times\text{LO}^{A}\label{eq:f-lo-def}\\ & \cong A+A\times A\times A+A\times A\times A\times A\times A+...\quad,\nonumber @@ -823,7 +838,7 @@ \subsubsection{Example \label{subsec:Example-rec-poly-functor}\ref{subsec:Exampl \subparagraph{Solution} -The Scala definition of the type constructor \lstinline!LO[_]! is: +The Scala definition of the type constructor \lstinline!LO! is: \begin{lstlisting} sealed trait LO[A] @@ -841,14 +856,13 @@ \subsubsection{Example \label{subsec:Example-rec-poly-functor}\ref{subsec:Exampl This code for \lstinline!fmap! is \emph{not} tail-recursive because \lstinline!fmap! is called inside the case class constructor \lstinline!LO2!. -The type constructor $\text{LO}^{\bullet}$ is a \textbf{recursive} -\index{polynomial functor!recursive}\textbf{polynomial} \textbf{functor} -because it is defined by a recursive type equation~(\ref{eq:f-lo-def}) +The type constructor $\text{LO}$ is a \textbf{recursive} \index{polynomial functor!recursive}\textbf{polynomial} +\textbf{functor} because it is defined by a recursive type equation~(\ref{eq:f-lo-def}) that uses only polynomial type constructions (\textsf{``}sums and products\textsf{''}) in its right-hand side. Other examples of recursive polynomial functors are lists and trees. We will prove later (Statement~\ref{subsec:functor-Statement-functor-recursive}) -that \lstinline!LO[_]! 
(as well as other list-like and tree-like -data types) are lawful functors. +that \lstinline!LO! and other list-like and tree-like data types +are lawful functors. \subsection{Functor block expressions\index{functor block}} @@ -866,9 +880,9 @@ \subsection{Functor block expressions\index{functor block}} \begin{lstlisting} scala> val result = for { (i, name) <- Map(1 -> "one", 2 -> "two", 3 -> "three") // For each (i, name): - x = i * i // define `x` by computing i * i... - product = s"$name * $name" // define `product`... -} yield s"$product is $x" // and add this to the `result` sequence. + x = i * i // define `x` by computing i * i... + product = s"$name * $name" // define `product`... +} yield s"$product is $x" // and add this to the `result` sequence. result: Seq[String] = List(one * one is 1, two * two is 4, three * three is 9) \end{lstlisting} Written in this way, the computations are easier to understand for @@ -906,7 +920,6 @@ \subsection{Functor block expressions\index{functor block}} x <- p // The first line must contain a left arrow. y = f(x) // Some computation involving x. z = g(x, y) // Another computation, uses x and y. - ... } yield q(x, y, z) // The `yield` may use any defined variables. \end{lstlisting} The above functor block assumes that \lstinline!q(x, y, z)! has type @@ -915,7 +928,6 @@ \subsection{Functor block expressions\index{functor block}} val result: L[B] = p .map { x => (x, f(x)) } // Create a tuple: we need to keep x and f(x). .map { case (x, y) => (x, y, g(x, y)) } // Need to keep x, y, and g(x, y). - ... .map { case (x, y, z) => q(x, y, z) } // Here we can use x, y, and z. \end{lstlisting} This code creates intermediate tuples only because the values \lstinline!x!, @@ -931,26 +943,27 @@ \subsection{Functor block expressions\index{functor block}} \end{lstlisting} A confusing feature of the \lstinline!for! / \lstinline!yield! syntax -is that, at first sight, functor blocks (such as this code: +is that, at first sight, this code: \begin{lstlisting} for { x <- p; ... } yield expr(x) \end{lstlisting} -appear to compute the value \lstinline!expr(x)! because the code +appears to compute the value \lstinline!expr(x)! because the code says \lstinline!yield expr(x)!. However, this is not so. As the above examples show, if \lstinline!p! is a sequence then the functor block -also computes a \emph{sequence}. In general, the result of a functor -block is a \textsf{``}wrapped\textsf{''} value, where the type of the \textsf{``}wrapper\textsf{''} -is determined by the first line of the functor block. The first line -must have a left arrow followed by a \textsf{``}source\index{functor block!source}\textsf{''}, +also computes a \emph{sequence}; that will be a sequence of values +of the form \lstinline!expr(x)! for various \lstinline!x!. In general, +the result of a functor block is a \textsf{``}wrapped\textsf{''} value, where the +type of the \textsf{``}wrapper\textsf{''} is determined by the first line of the functor +block. The first line must have a left arrow followed by a \textsf{``}source\index{functor block!source}\textsf{''}, which must be an expression of a functor type, i.e., of type \lstinline!L[A]! for some functor \lstinline!L[_]!. The result\textsf{'}s type will be \lstinline!L[B]! where \lstinline!B! is the type of the expression after the \lstinline!yield! keyword. -For instance, the first line of the following functor block contains -an \lstinline!Option! value, \lstinline!Some(123)!, as the \textsf{``}source\textsf{''}. 
-Because of that, the value of the entire functor block expression -will also be of type \lstinline!Option!: +As another example, the first line of the following functor block +contains the value \lstinline!Some(123)! as the \textsf{``}source\textsf{''}. Because +of that, the value of the entire functor block expression will also +be of type \lstinline!Option[...]!: \begin{lstlisting} scala> for { x <- Some(123) // "Source" is Option[Int]. @@ -960,17 +973,17 @@ \subsection{Functor block expressions\index{functor block}} \end{lstlisting} In this code, the \lstinline!yield! keyword is followed by an expression of type \lstinline!String!. So, the result of the entire functor -block is of type \lstinline!Option[String]!. Note that the expression -after the \textsf{``}\lstinline!yield!\textsf{''} can be a block of arbitrary code -containing new \lstinline!val!s, new \lstinline!def!s, and/or other -\lstinline!for!/\lstinline!yield! functor blocks if needed. +block is of type \lstinline!Option[String]!. The expression after +the \textsf{``}\lstinline!yield!\textsf{''} can be a block of arbitrary code containing +new \lstinline!val!s, new \lstinline!def!s, and other \lstinline!for!/\lstinline!yield! +functor blocks if needed. Functor blocks can be used with any functor that has a \lstinline!map! method, not only with library-defined type constructors such as \lstinline!Seq! or \lstinline!Option!. Here are some examples of defining the \lstinline!map! methods and using functor blocks with disjunctive types. -The type constructor \lstinline!QueryResult[_]! may define the \lstinline!map! +The type constructor \lstinline!QueryResult! may define the \lstinline!map! method on the trait itself and split its implementation between the case classes like this: \begin{lstlisting} @@ -1008,7 +1021,7 @@ \subsection{Functor block expressions\index{functor block}} def map[B](f: A => B): LO[B] = LO2[B](f(x), f(y), tail.map(f)) } \end{lstlisting} -After these definitions, we may use values of type \lstinline!LO[_]! +After these definitions, we may use values of type \lstinline!LO[...]! in functor blocks: \begin{lstlisting} scala> val result = for { @@ -1023,15 +1036,15 @@ \subsection{Functor block expressions\index{functor block}} \paragraph{Functor blocks and functor laws} There is an important connection between the functor laws and the -properties of code in functor blocks. Consider the following code: +properties of code in functor blocks. Consider this code: \begin{lstlisting} -def f(x: Int) = x * x // Some computations. -def g(x: Int) = x - 1 // More computations. +def f(x: Int) = x * x +def g(x: Int) = x - 1 scala> for { x <- List(10, 20, 30) y = x - z = f(y) // Perform computations. + z = f(y) } yield g(z) res0: List[Int] = List(99, 399, 899) \end{lstlisting} @@ -1040,17 +1053,16 @@ \subsection{Functor block expressions\index{functor block}} \begin{lstlisting} scala> for { x <- List(10, 20, 30) // Eliminated `y` from the code. - z = f(x) // Perform computations. + z = f(x) } yield g(z) res1: List[Int] = List(99, 399, 899) \end{lstlisting} -Another example of refactoring that appears reasonable is to combine -transformations: +Another example of a reasonable refactoring is to combine transformations: \begin{lstlisting} scala> for { x <- List(10, 20, 30) y = x + 1 - z = f(y) // Perform computations. + z = f(y) } yield g(z) res2: List[Int] = List(120, 440, 960) \end{lstlisting} @@ -1196,8 +1208,7 @@ \subsection{Functor block expressions\index{functor block}} \lstinline!p! of the appropriate type. 
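As a quick spot check with a concrete value (our own example, not a proof), the identity law holds for the standard \lstinline!List!:
\begin{lstlisting}
val p = List(10, 20, 30)
p.map(x => x) == p   // Evaluates to `true`.
\end{lstlisting}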
We also find that code fragments \lstinline!3b! and \lstinline!4b! are equal if we can replace \lstinline!.map(x => x + 1).map(f)! by \lstinline!.map(x => f(x + 1))!. This replacement is justified -as long as the \lstinline!map! method obeys the functor composition -law: +if the \lstinline!map! method obeys the functor composition law: \begin{lstlisting} p.map(h).map(f) == p.map(x => f(h(x))) \end{lstlisting} @@ -1223,15 +1234,15 @@ \subsection{Examples of non-functors\label{subsec:Examples-of-non-functors}} \item A given \lstinline!map! function is incorrect (does not satisfy the laws), although the error could be fixed: a different implementation of \lstinline!map! satisfies the laws. -\item A given \lstinline!map[A, B]! function satisfies the laws for most -types \lstinline!A! and \lstinline!B!, but violates the laws for -certain specially chosen types. +\item A given \lstinline!map[A, B]! function satisfies the laws for some +types \lstinline!A! and \lstinline!B! but violates the laws for +other \lstinline!A! and \lstinline!B!. \end{itemize} We will now look at examples illustrating these possibilities. \paragraph{Cannot implement \texttt{map}\textsf{'}s type signature} -Consider the type constructor $H^{\bullet}$ defined by: +Consider the type constructor $H$ defined by: \begin{lstlisting} final case class H[A](r: A => Int) \end{lstlisting} @@ -1246,8 +1257,8 @@ \subsection{Examples of non-functors\label{subsec:Examples-of-non-functors}} \text{map}^{A,B}:\left(A\rightarrow\text{Int}\right)\rightarrow\left(A\rightarrow B\right)\rightarrow\left(B\rightarrow\text{Int}\right)\quad. \] To see this, recall that a \index{fully parametric!function}fully -parametric function needs to treat all types as type parameters, including -the primitive type \lstinline!Int!. So, the code +parametric function needs to treat all types as type parameters. The +code: \begin{lstlisting} def map[A, B]: H[A] => (A => B) => H[B] = { r => f => C(_ => 123) } \end{lstlisting} @@ -1259,16 +1270,16 @@ \subsection{Examples of non-functors\label{subsec:Examples-of-non-functors}} \text{map}^{A,B,N}:\left(A\rightarrow N\right)\rightarrow\left(A\rightarrow B\right)\rightarrow B\rightarrow N\quad. \] We have seen in Example~\ref{subsec:ch-solvedExample-6} that this -type signature is not implementable. So, the type constructor $H$ -is not a functor. +type signature cannot be implemented by fully parametric code. So, +the type constructor $H$ is not a functor. Another important example where the \lstinline!map!\textsf{'}s type signature cannot be implemented are certain kinds of type constructors called \index{generalized algebraic data types!see GADT}\textbf{generalized algebraic data types} (\textbf{GADT}s\index{GADT|textit}). In this book we call a GADT a type constructor whose definition sets type -parameters to certain specific types. This makes it impossible to -implement \lstinline!map!. An example of a GADT is: +parameters to specific types. This makes it impossible to implement +\lstinline!map!. An example of a GADT is: \begin{lstlisting} sealed trait ServerAction[R] final case class StoreId(x: Long, y: String) extends ServerAction[Boolean] @@ -1281,8 +1292,8 @@ \subsection{Examples of non-functors\label{subsec:Examples-of-non-functors}} parameters and can only represent values of type \lstinline!ServerAction[Int]! but not, say, \lstinline!ServerAction[String]!. Similarly, \lstinline!StoreId! represents only values of type \lstinline!ServerAction[Boolean]!. -For this reason, \lstinline!ServerAction[A]! 
cannot have a fully -parametric \lstinline!map! function: +For this reason, \lstinline!ServerAction! cannot have a fully parametric +\lstinline!map! function: \begin{lstlisting} def map[A, B]: ServerAction[A] => (A => B) => ServerAction[B] \end{lstlisting} @@ -1294,17 +1305,17 @@ \subsection{Examples of non-functors\label{subsec:Examples-of-non-functors}} given type \lstinline!B!. However, it is possible to create only values either of type \lstinline!ServerAction[Boolean]! or of type \lstinline!ServerAction[Int]!, because the disjunctive type \lstinline!ServerAction[R]! -has only the two parts with the specified types. So, we cannot create +has only two parts with the specified types. So, we cannot create a value of type \lstinline!ServerAction[B]! with an arbitrary type \lstinline!B!. For this reason, it is impossible to implement a fully parametric \lstinline!map! function for \lstinline!ServerAction!. We are prevented from implementing \lstinline!map! because some type -parameters are already set in the definition of \lstinline!ServerAction[R]!. -One can say that \lstinline!ServerAction[_]! fails to be fully parametric +parameters are already set in the definition of \lstinline!ServerAction!. +One can say that \lstinline!ServerAction! fails to be fully parametric \emph{in its type definition}. This behavior of GADTs is intentional. GADTs are used only in situations where the lack of \lstinline!map! -does not lead to problems (see Chapter~\ref{chap:Free-type-constructions}). +does not lead to problems. \paragraph{Cannot implement a lawful \texttt{map}} @@ -1340,13 +1351,14 @@ \subsection{Examples of non-functors\label{subsec:Examples-of-non-functors}} \end{align*} The law must hold for arbitrary functions $q^{:A\rightarrow\text{Int}}$, but the function $\left(\_\rightarrow q(a)\right)$ always returns -the same value $q(a)$ and thus is not equal to the original function -$q$. So, the result of evaluating the expression $\text{map}(q\times a)(\text{id})$ +the same value $q(a)$ and is not equal to the original function $q$. +So, the result of evaluating the expression \textsf{``}$\text{map}(q\times a)(\text{id})$\textsf{''} is not always equal to the original value $q\times a$. -Since this \lstinline!map! function is the only available implementation -of the required type signature, we conclude that $Q$ is not a functor -(we cannot implement \lstinline!map! that satisfies the laws). +Since this \lstinline!map! function is the only available fully parametric +implementation of the required type signature, we conclude that $Q$ +is not a functor (we cannot implement \lstinline!map! that satisfies +the laws). \paragraph{Mistakes in implementing \texttt{map}} @@ -1360,9 +1372,9 @@ \subsection{Examples of non-functors\label{subsec:Examples-of-non-functors}} \begin{lstlisting} def map[A, B](p: (A, A))(f: A => B): (B, B) = p match { case (x, y) => (f(y), f(x)) } \end{lstlisting} -This code swaps the values in the pair \lstinline!(x, y)!; we could -say that it fails to preserve information about the order of those -values. The functor identity law does not hold: +This code swaps the values in the pair \lstinline!(x, y)!; it fails +to preserve information about the order of those values. 
The functor +identity law does not hold: \begin{align*} {\color{greenunder}\text{expect to equal }x\times y:}\quad & \text{map}\,(x^{:A}\times y^{:A})(\text{id}^{A})\\ {\color{greenunder}\text{definition of }\text{map}:}\quad & =\gunderline{\text{id}\,(y)}\times\gunderline{\text{id}\,(x)}\\ @@ -1378,27 +1390,27 @@ \subsection{Examples of non-functors\label{subsec:Examples-of-non-functors}} Example \ref{subsec:f-Example-A-A-A} shows the type constructor $\text{Vec}_{3}$ with an incorrect implementation of \lstinline!map! that reorders some parts of a tuple and duplicates other parts. The correct implementation -preserves the order of parts in a tuple and does not duplicate or -omit any parts. +preserves the order of parts in a tuple and neither duplicates nor +omits any data. -Another case of an an incorrect implementation is the following \lstinline!map! -function for \lstinline!Option[_]!: +Another case of an incorrect implementation is this \lstinline!map! +function for \lstinline!Option!: \begin{lstlisting} def map_bad[A, B]: Option[A] => (A => B) => Option[B] = { _ => _ => None } \end{lstlisting} This function always returns \lstinline!None!, losing information and violating the identity law. However, we have already seen that -\lstinline!Option[_]! has a different implementation of \lstinline!map! +\lstinline!Option! has a different implementation of \lstinline!map! that satisfies the functor laws. -Similarly, one could define \lstinline!map! for the \lstinline!List[_]! +Similarly, one could define \lstinline!map! for the \lstinline!List! type constructor to always return an empty list: \begin{lstlisting} def map_bad[A, B]: List[A] => (A => B) => List[B] = { _ => _ => List() } \end{lstlisting} This implementation loses information and violates the functor laws. Of course, the Scala library provides a correct implementation of -\lstinline!map! for \lstinline!List[_]!. +\lstinline!map! for \lstinline!List!. Example~\ref{subsec:f-Example-Int-x-A} is another situation where an incorrectly implemented \lstinline!map! violates functor laws. @@ -1414,7 +1426,7 @@ \subsection{Examples of non-functors\label{subsec:Examples-of-non-functors}} import scala.reflect.runtime.universe._ def getType[T: TypeTag]: Type = weakTypeOf[T] def equalTypes[A: TypeTag, B: TypeTag]: Boolean = getType[A] =:= getType[B] -def fmap_bad[A: TypeTag, B: TypeTag](f: A => B)(oa: Option[A]): Option[B] = oa match { +def fmap_bad[A: TypeTag, B: TypeTag](f: A => B): Option[A] => Option[B] = { case None => None case Some(x) => // If A = B, compute f(f(x)), else compute f(x). val z: B = if (equalTypes[A, B]) f(f(x).asInstanceOf[A]) else f(x) @@ -1430,7 +1442,8 @@ \subsection{Examples of non-functors\label{subsec:Examples-of-non-functors}} res1: Option[String] = Some(123 a a) \end{lstlisting} The function \lstinline!fmap_bad[A, B]! satisfies the identity law -but violates the composition law when \lstinline!A = B!: +but violates the composition law when \lstinline!A = B!. To see that, +we use functions \lstinline!(_ + " a")! and \lstinline!(_ + " b")!: \begin{lstlisting} scala> fmap_bad[String, String](_ + " b")(Some("123 a a")) res2: Option[String] = Some(123 a a b b) @@ -1440,11 +1453,8 @@ \subsection{Examples of non-functors\label{subsec:Examples-of-non-functors}} \end{lstlisting} In all these examples, we \emph{could} implement a \lstinline!map! -function that would obey the laws. It is not precise to say that, -e.g., the type constructor \lstinline!Vec3[_]! 
is \emph{by itself} -a functor: being a functor depends on having a lawful \lstinline!map! -function. Keeping that in mind, we will say that the type constructor -\lstinline!Vec3[_]! \textsf{``}is\textsf{''} a functor, meaning that a suitable lawful +function that obeys the laws. When we say that, e.g., the type constructor +\lstinline!Vec3! \textsf{``}is a functor\textsf{''}, we will mean that a lawful implementation of \lstinline!map! can be found. \paragraph{Laws hold for some types but not for others} @@ -1503,7 +1513,8 @@ \subsection{Examples of non-functors\label{subsec:Examples-of-non-functors}} are equal. The composition law of functors will fail when intermediate values of that type are used: \begin{lstlisting} -val f: OnlyA[Int, Int] => Int = { case OnlyA(Left(a)) => a; case OnlyA(Right(a)) => a } +val f: OnlyA[Int, Int] => Int = { case OnlyA(Left(a)) => a + case OnlyA(Right(a)) => a } val g: Int => OnlyA[Int, Int] = { a => OnlyA(Right(a)) } val xs = Seq(0, 0, 0).map(g).toSet @@ -1541,19 +1552,16 @@ \subsection{Examples of non-functors\label{subsec:Examples-of-non-functors}} res7: Set[IgnoreB[Int,Int]] = Set(IgnoreB(0,0)) \end{lstlisting} -The functor laws for a type constructor $L^{\bullet}$ do not require -that the types $A,B$ used in the function: +The functor laws~(\ref{eq:f-identity-law-functor-fmap})\textendash (\ref{eq:f-composition-law-functor-fmap}) +for a type constructor $L$ are imposed on the function: \[ -\text{fmap}_{L}:\left(A\rightarrow B\right)\rightarrow L^{A}\rightarrow L^{B} +\text{fmap}_{L}:\left(A\rightarrow B\right)\rightarrow L^{A}\rightarrow L^{B}\quad. \] -should have a mathematically lawful definition of the \lstinline!equals! -method (or of any other operation). The \lstinline!map! method of -a functor $L^{\bullet}$ must be \textbf{lawful}\index{lawful functor}\index{functor!laws of}, -i.e., must satisfy the functor laws~(\ref{eq:f-identity-law-functor-fmap})\textendash (\ref{eq:f-composition-law-functor-fmap}) -for all types $A,B$. The functor laws must hold even if a type $A$\textsf{'}s -implementation of some operations violate some other laws. For this -reason, \lstinline!Set[_]! cannot be considered a functor in a rigorous -sense. +The functor laws do not require that the types $A$ and $B$ should +have an \lstinline!equals! method (or any other operation). The +functor laws must hold even if a type $A$\textsf{'}s implementations of some +other operations violate some other laws. For this reason, \lstinline!Set! +cannot be considered a functor in a rigorous sense. The \lstinline!map! method for dictionaries has a similar problem: the keys of a dictionary must be distinct and will be compared using @@ -1564,30 +1572,20 @@ \subsection{Examples of non-functors\label{subsec:Examples-of-non-functors}} The Scala standard library still provides the \lstinline!map! and \lstinline!flatMap! methods for sets \lstinline!Set[K]! and dictionaries \lstinline!Map[K, V]! because most applications will use types \lstinline!K! -that have lawful \lstinline!equals! operations, and the functor laws -will hold. +that have a lawful \lstinline!equals! operation, and the functor +laws will hold for those \lstinline!K!. 
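For example, with \lstinline!Int! elements (whose \lstinline!equals! is lawful), spot checks of both laws pass (our own quick check on sample data, not a proof):
\begin{lstlisting}
val s = Set(1, 2, 3)
val f: Int => Int = _ * 10
val g: Int => Int = _ + 1
s.map(identity) == s                    // Identity law holds for this value.
s.map(f).map(g) == s.map(f andThen g)   // Composition law holds for these functions.
\end{lstlisting}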
\subsection{Contrafunctors\label{subsec:Contrafunctors}} As we have seen in Section~\ref{subsec:Examples-of-non-functors}, -the type constructor $H^{\bullet}$ defined by $H^{A}\triangleq A\rightarrow\text{Int}$ +the type constructor $H$ defined by $H^{A}\triangleq A\rightarrow\text{Int}$ is not a functor because it is impossible to implement the type signature of \lstinline!map! as a fully parametric function: \[ \text{map}^{A,B}:\left(A\rightarrow\text{Int}\right)\rightarrow\left(A\rightarrow B\right)\rightarrow B\rightarrow\text{Int}\quad. \] -To see why, begin writing the code with a typed hole: -\[ -\text{map}\,(h^{:A\rightarrow\text{Int}})(f^{:A\rightarrow B})(b^{:B})=\text{???}^{:\text{Int}}\quad. -\] -The only way of returning an \lstinline!Int! in fully parametric -code is by applying the function $h^{:A\rightarrow\text{Int}}$. Since -$h$ consumes (rather than wraps) values of type $A$, we have no -values of type $A$ and cannot apply the function $h^{:A\rightarrow\text{Int}}$. -However, it would be possible to apply a function of type $B\rightarrow A$ -since a value of type $B$ is given as one of the curried arguments, -$b^{:B}$. So, we can implement a function called \lstinline!contramap! -with a different type signature where the function type is $B\rightarrow A$ +However, it is possible to implement a function called \lstinline!contramap! +with a different type signature ,where the function type is $B\rightarrow A$ instead of $A\rightarrow B$: \[ \text{contramap}^{A,B}:\left(A\rightarrow\text{Int}\right)\rightarrow\left(B\rightarrow A\right)\rightarrow B\rightarrow\text{Int}\quad. @@ -1604,8 +1602,8 @@ \subsection{Contrafunctors\label{subsec:Contrafunctors}} Flipping the order of the curried arguments in \lstinline!contramap!, we define \lstinline!cmap! as: \begin{align} -\text{cmap}^{A,B} & :\left(B\rightarrow A\right)\rightarrow H^{A}\rightarrow H^{B}\quad,\nonumber \\ -\text{cmap} & \triangleq f^{:B\rightarrow A}\rightarrow h^{:A\rightarrow\text{Int}}\rightarrow\left(f\bef h\right)^{:B\rightarrow\text{Int}}\quad.\label{eq:f-example-1-contrafmap} + & \text{cmap}^{A,B}:\left(B\rightarrow A\right)\rightarrow H^{A}\rightarrow H^{B}\quad,\nonumber \\ + & \text{cmap}\triangleq f^{:B\rightarrow A}\rightarrow h^{:A\rightarrow\text{Int}}\rightarrow\left(f\bef h\right)^{:B\rightarrow\text{Int}}\quad.\label{eq:f-example-1-contrafmap} \end{align} The type signature of \lstinline!cmap! has the form of a \textsf{``}reverse lifting\textsf{''}: functions of type \lstinline!B => A! are lifted into @@ -1643,9 +1641,9 @@ \subsection{Contrafunctors\label{subsec:Contrafunctors}} {\color{greenunder}\text{use Eq.~(\ref{eq:f-example-1-contrafmap})}:}\quad & =\text{cmap}\left(g\bef f\right)\quad. \end{align*} -A type constructor with a fully parametric \lstinline!cmap! is called -a \textbf{contrafunctor}\index{contrafunctor} if the identity and -the composition laws are satisfied. +A type constructor with a fully parametric \lstinline!cmap! satisfying +the identity law and the composition law is called a \textbf{contrafunctor}\index{contrafunctor} +(short for \textsf{``}contravariant functor\textsf{''}). \subsubsection{Example \label{subsec:f-Example-contrafunctor}\ref{subsec:f-Example-contrafunctor}} @@ -1668,7 +1666,7 @@ \subsubsection{Example \label{subsec:f-Example-contrafunctor}\ref{subsec:f-Examp type $A$. We have two curried arguments of type $B$. So, we apply $f^{:B\rightarrow A}$ to those arguments, obtaining two values of type $A$. 
To avoid information loss, we need to preserve the order -of the curried arguments. So, the resulting expression is: +of the curried arguments. The resulting expression is: \[ \text{contramap}^{A,B}\triangleq d^{:A\rightarrow A\rightarrow\text{Int}}\rightarrow f^{:B\rightarrow A}\rightarrow b_{1}^{:B}\rightarrow b_{2}^{:B}\rightarrow d\left(f(b_{1})\right)\left(f(b_{2})\right)\quad. \] @@ -1676,7 +1674,7 @@ \subsubsection{Example \label{subsec:f-Example-contrafunctor}\ref{subsec:f-Examp \begin{lstlisting} def contramap[A, B](d: A => A => Int)(f: B => A): B => B => Int = { b1 => b2 => d(f(b1))(f(b2)) } \end{lstlisting} -To verify the laws, it is easier to use the equivalent \lstinline!cmap! +To verify the laws, it is easier to use the equivalent function \lstinline!cmap! defined by: \begin{equation} \text{cmap}^{A,B}(f^{:B\rightarrow A})\triangleq d^{:A\rightarrow A\rightarrow\text{Int}}\rightarrow b_{1}^{:B}\rightarrow b_{2}^{:B}\rightarrow d\left(f(b_{1})\right)\left(f(b_{2})\right)\quad.\label{eq:f-example-2-contrafmap} @@ -1684,8 +1682,8 @@ \subsubsection{Example \label{subsec:f-Example-contrafunctor}\ref{subsec:f-Examp To verify the identity law: \begin{align*} {\color{greenunder}\text{expect to equal }\text{id}:}\quad & \text{cmap}\left(\text{id}\right)\\ -{\color{greenunder}\text{use Eq.~(\ref{eq:f-example-2-contrafmap})}:}\quad & =d\rightarrow b_{1}\rightarrow b_{2}\rightarrow d\gunderline{\left(\text{id}\,(b_{1})\right)}\gunderline{\left(\text{id}\,(b_{2})\right)}\\ -{\color{greenunder}\text{definition of }\text{id}:}\quad & =d\rightarrow\gunderline{b_{1}\rightarrow b_{2}\rightarrow d(b_{1})(b_{2})}\\ +{\color{greenunder}\text{use Eq.~(\ref{eq:f-example-2-contrafmap})}:}\quad & =d\rightarrow b_{1}\rightarrow b_{2}\rightarrow d\,\gunderline{\left(\text{id}\,(b_{1})\right)}\gunderline{\left(\text{id}\,(b_{2})\right)}\\ +{\color{greenunder}\text{definition of }\text{id}:}\quad & =d\rightarrow\gunderline{b_{1}\rightarrow b_{2}\rightarrow d\left(b_{1}\right)\left(b_{2}\right)}\\ {\color{greenunder}\text{simplify curried function}:}\quad & =\left(d\rightarrow d\right)=\text{id}\quad. \end{align*} To verify the composition law, we rewrite its left-hand side: @@ -1724,25 +1722,25 @@ \subsubsection{Example \label{subsec:f-Example-contrafunctor}\ref{subsec:f-Examp N^{A}\triangleq\left(A\rightarrow\text{Int}\right)\times\left(\bbnum 1+A\right)\quad. \] We can implement neither \lstinline!map! nor \lstinline!contramap! -for $N^{\bullet}$. Intuitively, the type parameter $A$ is used both -to the left of a function arrow (being \textsf{``}consumed\textsf{''}) and outside -of a function (being \textsf{``}wrapped\textsf{''}). +for $N$. Intuitively, the type parameter $A$ is used both to the +left of a function arrow (being \textsf{``}consumed\textsf{''}) and outside of a function +(being \textsf{``}wrapped\textsf{''}). GADTs\index{GADT} (type constructors that lack full parametricity) also cannot be contrafunctors because the required type signature for \lstinline!contramap! cannot be implemented by a fully parametric -function. To show that \lstinline!ServerAction[_]! cannot be a contrafunctor, +function. To show that \lstinline!ServerAction! cannot be a contrafunctor, we can straightforwardly adapt the reasoning used in Section~\ref{subsec:Examples-of-non-functors} -when we showed that \lstinline!ServerAction[_]! cannot be a functor. +when we showed that \lstinline!ServerAction! cannot be a functor. 
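To conclude this subsection with a small usage sketch (our own example; the value names are illustrative): the contrafunctor $H^{A}\triangleq A\rightarrow\text{Int}$ lets us adapt a consumer of \lstinline!String! values into a consumer of \lstinline!Int! values via \lstinline!cmap!:
\begin{lstlisting}
def cmap[A, B](f: B => A): (A => Int) => (B => Int) = h => f andThen h

val length: String => Int = _.length                            // A consumer of String values.
val digits: Int => Int = cmap((n: Int) => n.toString)(length)   // Now a consumer of Int values.
digits(12345)   // Returns 5, the number of decimal digits.
\end{lstlisting}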
\subsection{Subtyping, covariance, and contravariance\label{subsec:Covariance,-contravariance,-and-subtyping}} Ordinarily, applying a function of type $Q\rightarrow R$ to a value -of type $P$ is an error: +of a different type $P$ is an error: \begin{lstlisting} def h(q: Q): R = ??? val p: P = ??? -h(p) // Type error: need type Q but found P. +h(p) // Type error: expected type Q but found P. \end{lstlisting} However, the Scala compiler admits this kind of code when $P$ is a \textsf{``}subtype\textsf{''} of $Q$. @@ -1752,11 +1750,10 @@ \subsection{Subtyping, covariance, and contravariance\label{subsec:Covariance,-c $P$ is a subtype of $Q$. Each programming language defines in some way what are the possible subtypes of every given type. -We may imagine that the language\textsf{'}s compiler can automatically convert -values of type $P$ into values of type $Q$ using a fixed, designated -\textsf{``}conversion function\textsf{''} (of type $P\rightarrow Q$) that is somehow -already available. It is convenient to \emph{define} subtyping through -the existence of a conversion function: +We may imagine that the language\textsf{'}s compiler automatically converts +values of type $P$ into values of type $Q$ using a fixed, compiler-provided +\textsf{``}conversion function\textsf{''} of type $P\rightarrow Q$. It is convenient +to \emph{define} subtyping through the existence of a conversion function: \subsubsection{Definition \label{subsec:Definition-subtyping}\ref{subsec:Definition-subtyping} } @@ -1833,24 +1830,24 @@ \subsubsection{Definition \label{subsec:Definition-subtyping}\ref{subsec:Definit \[ \text{p2q}\,(f^{:\text{AtMostTwo}\rightarrow\text{Int}})\triangleq t^{:\text{Two}}\rightarrow f(t)\quad. \] -The code $\text{p2q}\,(f)=t^{:\text{Two}}\rightarrow f(t)$ is almost -the same as $\text{p2q}\,(f)=f$, except that $f$ is applied to values -of type \lstinline!Two!. So, the code of \lstinline!p2q! is the -same as the code of an identity function. +Compare the code $\text{p2q}\,(f)=t^{:\text{Two}}\rightarrow f(t)$ +with the code of an identity function, $\text{id}\left(f\right)=f=t\rightarrow f(t)$. +We see that the code of \lstinline!p2q! an identity function that +just reassigns the type of its argument to be \lstinline!Two!. In these cases, it is easy for the compiler to insert the appropriate conversion functions automatically whenever necessary. Any function -that consumes an argument of type $Q$ will then automatically apply -to arguments of type $P$. The compiler does not actually need to -insert any new code, since the code of conversion functions does not -perform any data transformations. +that consumes an argument of type $Q$ will be then automatically +compatible with arguments of type $P$. The compiler does not actually +need to insert any new code, since the code of conversion functions +does not perform any data transformations. For this to work, we need to tell the Scala compiler that a certain type is a subtype of another. This can be done in three ways depending on the situation: First, one may declare a class that \textsf{``}\lstinline!extends!\textsf{''} -another class. Second, one may declare type parameters with a \textsf{``}variance -annotation\textsf{''} such as \lstinline!L[+A]! or \lstinline!L[-B]!. Third, -one may declare type parameters with a \textsf{``}subtyping annotation\textsf{''} +another class. Second, one may declare type parameters with a \index{variance annotation}\textbf{variance +annotation} such as \lstinline!L[+A]! or \lstinline!L[-B]!. 
Third, +one may declare type parameters with a \textbf{subtyping annotation}\index{subtyping annotation} (\lstinline!A <: B!). Parts of a disjunctive type are subtypes of that type since they are @@ -1879,10 +1876,10 @@ \subsubsection{Definition \label{subsec:Definition-subtyping}\ref{subsec:Definit The void type (\lstinline!Nothing!, denoted by $\bbnum 0$ in the type notation) is special:\index{void type} it is a subtype of \emph{every} type $A$. The reason is that the type $\bbnum 0$ has \emph{no} values, -so a conversion function \lstinline!absurd! $:\bbnum 0\rightarrow A$ +so the conversion function \lstinline!absurd! $:\bbnum 0\rightarrow A$ (shown in Example~\ref{subsec:ch-Example-type-identity-0-to-A}) -can never be actually applied. The Scala compiler recognizes this -automatically. +exists but never actually needs to be applied. The Scala compiler +recognizes this automatically. Let us now consider subtyping for type constructors. If a type constructor $L^{A}$ is a functor, we can use its $\text{fmap}_{L}$ method to @@ -1925,19 +1922,18 @@ \subsubsection{Definition \label{subsec:Definition-subtyping}\ref{subsec:Definit methods. The Scala compiler \emph{does} determine whether a given type constructor \lstinline!F[A]! is covariant or contravariant, but the features of subtype-co(ntra)variance are not activated automatically. -For that, the programmer needs to use a \index{subtyping variance annotation}\textbf{subtyping -variance annotation}, which looks like \lstinline!F[+A]!, on the -relevant type parameters. - -For example, the type constructor \lstinline!Counted[A]! defined -in Section~\ref{subsec:Functors:-definition-and-examples} is a functor -(has a lawful \lstinline!map! method), and so it is covariant in -its type parameter \lstinline!A!. If we write the variance annotation -\lstinline!Counted[+A]! in the definition of \lstinline!Counted!, -Scala will activate the subtype covariance for \lstinline!Counted!. -Then, for example, the type \lstinline!Counted[Two]! will be considered -a subtype of \lstinline!Counted[AtMostTwo]!. We will be able to use -values of type \lstinline!Counted[Two]! in place of \lstinline!Counted[AtMostTwo]!: +To do that, the programmer needs to use a \index{variance annotation}\textbf{variance +annotation}, which looks like \lstinline!F[+A]!. + +For example, the type constructor \lstinline!Counted! defined in +Section~\ref{subsec:Functors:-definition-and-examples} is a functor +(has a lawful \lstinline!map! method), and so it is covariant. If +we write the variance annotation \lstinline!Counted[+A]! in the definition +of \lstinline!Counted!, Scala will activate the subtype covariance +for \lstinline!Counted!. Then, for example, the type \lstinline!Counted[Two]! +will be automatically recognized a subtype of \lstinline!Counted[AtMostTwo]!. +We will be able to use values of type \lstinline!Counted[Two]! in +place of \lstinline!Counted[AtMostTwo]!: \begin{lstlisting} final case class Counted[+A](n: Int, a: A) @@ -1953,23 +1949,26 @@ \subsubsection{Definition \label{subsec:Definition-subtyping}\ref{subsec:Definit The subtype-contravariance property is annotated as \lstinline!F[-A]!. -It is important that the covariance or contravariance of a type constructor -is determined by its type structure alone, and \emph{not} by its subtyping -properties. For instance, \lstinline!Counted[A]! is covariant because -it can have a lawful \lstinline!map! method, whether or not we use -subtyping. The subtyping variance annotation \lstinline!Counted[+A]! 
-merely tells the Scala compiler to activate the subtyping features -for \lstinline!Counted!. It is a type error if the subtyping variance -annotation specified by the programmer does not match the actual covariance -or contravariance of the type constructor: +It is important that the covariance/contravariance properties of a +type constructor are determined by its type structure alone, regardless +of variance annotations or subtyping. For instance, \lstinline!Counted[A]! +is covariant because we may implement a lawful \lstinline!map! method +for \lstinline!Counted!. (This property holds also in programming +languages that do not support subtyping.) The variance annotation +\lstinline!Counted[+A]! merely tells the Scala compiler to activate +the automatic subtyping features for \lstinline!Counted!. Before +doing that, the Scala compiler will check that \lstinline!Counted! +is in fact covariant. It will be a type error if a variance annotation +specified by the programmer does not match the actual covariance or +contravariance of the type constructor: \begin{lstlisting} final case class Counted[-A](n: Int, a: A) // Compile-time error. final case class C[+A](run: A => Unit) // Compile-time error. \end{lstlisting} -This type constructor \lstinline!C! is in fact contravariant; this +The type constructor \lstinline!C! is in fact contravariant; this can be verified by implementing a lawful \lstinline!cmap! function -for it. The Scala compiler will accept a subtype contravariance annotation +for it. The Scala compiler will accept a contravariance annotation on \lstinline!C! if we choose to write it: \begin{lstlisting} final case class C[-A](run: A => Unit) // OK @@ -1991,9 +1990,9 @@ \subsection{Examples: implementing functors and contrafunctors\index{examples (w \subsubsection{Example \label{subsec:f-Example-functors}\ref{subsec:f-Example-functors}} Consider this implementation of \lstinline!map! for the type constructor -\lstinline!Option[_]!: +\lstinline!Option!: \begin{lstlisting} -def map[A, B](oa: Option[A])(f: A => B): Option[B] = oa match { +def map[A, B](p: Option[A])(f: A => B): Option[B] = p match { case None => None case Some(x: Int) => Some(f((x + 1).asInstanceOf[A])) case Some(x) => Some(f(x)) @@ -2001,21 +2000,21 @@ \subsubsection{Example \label{subsec:f-Example-functors}\ref{subsec:f-Example-fu \end{lstlisting} This code performs a non-standard computation if the type parameter \lstinline!A! is set to \lstinline!Int!. Show that this implementation -of \lstinline!map! violates the functor laws. +of \lstinline!map! does not obey the functor laws. \subparagraph{Solution} If the type parameter \lstinline!A! is not \lstinline!Int!, or if -the argument \lstinline!oa! is \lstinline!None!, the given code -is the same as the standard (correct) implementation of \lstinline!map! +the argument \lstinline!p! is \lstinline!None!, the given code is +the same as the standard (correct) implementation of \lstinline!map! for \lstinline!Option!. The function does something non-standard -when, e.g., \lstinline!oa == Some(123)!. Substitute this value of -\lstinline!oa! into the identity law, \lstinline!map(oa)(identity) == oa!, -and compute symbolically (using Scala syntax): +when, e.g., \lstinline!p == Some(123)!. Substitute this \lstinline!p! 
+into the identity law, \lstinline!map(p)(identity) == p!, and compute +symbolically (using Scala syntax): \begin{lstlisting} -map(oa)(identity) == Some(identity((123+1).asInstanceOf[Int])) == Some(124) != oa +map(p)(identity) == Some(identity((123+1).asInstanceOf[Int])) == Some(124) != p \end{lstlisting} -This shows a violation of the functor identity law. +This shows a violation of the functor identity law: \lstinline*Some(124) != Some(123)*. \subsubsection{Example \label{subsec:f-Example-functors-1}\ref{subsec:f-Example-functors-1}} @@ -2125,11 +2124,11 @@ \subsubsection{Example \label{subsec:f-Example-functors-1}\ref{subsec:f-Example- type Q[X, Y, A] = (X => Y => A, A) type Data[A] = Either[Q[String, Int, A], Q[Boolean, Double, A]] \end{lstlisting} -For clarity, we implement \lstinline!fmap! separately for $Q^{\bullet}$ -and $\text{Data}^{\bullet}$. +For clarity, we implement \lstinline!fmap! separately for $Q$ and +$\text{Data}$. -To derive the code of \lstinline!fmap! for $Q^{\bullet}$, we begin -with the type signature: +To derive the code of \lstinline!fmap! for $Q$, we begin with the +type signature: \[ \text{fmap}_{Q}^{A,B}:\left(A\rightarrow B\right)\rightarrow\left(X\rightarrow Y\rightarrow A\right)\times A\rightarrow\left(X\rightarrow Y\rightarrow B\right)\times B\quad, \] @@ -2185,37 +2184,37 @@ \subsubsection{Example \label{subsec:f-Example-functors-4}\ref{subsec:f-Example- Decide whether these types are functors or contrafunctors, and implement \lstinline!fmap! or \lstinline!cmap! as appropriate: -\textbf{(a)} $\text{Data}^{A}\triangleq\left(\bbnum 1+A\rightarrow\text{Int}\right)+(A\rightarrow A\times A\rightarrow\text{String})\quad.$ +\textbf{(a)} $\text{Data1}^{A}\triangleq\left(\bbnum 1+A\rightarrow\text{Int}\right)+(A\rightarrow A\times A\rightarrow\text{String})\quad.$ -\textbf{(b)} $\text{Data}^{A,B}\triangleq\left(A+B\right)\times\left(\left(A\rightarrow\text{Int}\right)\rightarrow B\right)\quad.$ +\textbf{(b)} $\text{Data2}^{A,B}\triangleq\left(A+B\right)\times\left(\left(A\rightarrow\text{Int}\right)\rightarrow B\right)\quad.$ \subparagraph{Solution} -\textbf{(a)} The type constructor $\text{Data}^{A}$ is defined in +\textbf{(a)} The type constructor \lstinline!Data1! is defined in Scala as: \begin{lstlisting} -type Data[A] = Either[Option[A] => Int, A => ((A, A)) => String] +type Data1[A] = Either[Option[A] => Int, A => ((A, A)) => String] \end{lstlisting} The type parameter $A$ is always located to the left of function -arrows. So, $\text{Data}^{A}$ \emph{consumes} values of type $A$, -and we expect that $\text{Data}^{A}$ is a contrafunctor. Indeed, +arrows. So, $\text{Data1}^{A}$ \emph{consumes} values of type $A$, +and we expect that \lstinline!Data1! is a contrafunctor. Indeed, we can implement \lstinline!cmap!: \begin{lstlisting} -def cmap[A, B](f: B => A): Data[A] => Data[B] = { +def cmap[A, B](f: B => A): Data1[A] => Data1[B] = { case Left(oa2Int) => Left(b => oa2Int(b.map(f))) case Right(a2aa2Str) => Right( b1 => { case (b2, b3) => a2aa2Str(f(b1))((f(b2), f(b3))) } ) } \end{lstlisting} -\textbf{(b)} The type constructor $\text{Data}^{A,B}$ has two type +\textbf{(b)} The type constructor \lstinline!Data2! has two type parameters, and so we need to answer the question separately for each of them. 
Write the Scala type definition: \begin{lstlisting} -type Data[A, B] = (Either[A, B], (A => Int) => B) +type Data2[A, B] = (Either[A, B], (A => Int) => B) \end{lstlisting} Begin with the type parameter $A$ and notice that a value of type -$\text{Data}^{A,B}$ possibly contains a value of type $A$ within +$\text{Data2}^{A,B}$ possibly contains a value of type $A$ within \lstinline!Either[A, B]!. In other words, $A$ is \textsf{``}wrapped\textsf{''}, i.e., it is in a covariant position within the first part of the tuple. It remains to check the second part of the tuple, which is a higher-order @@ -2225,7 +2224,7 @@ \subsubsection{Example \label{subsec:f-Example-functors-4}\ref{subsec:f-Example- contravariant in $A$, but it turns out that a \textsf{``}consumer of a consumer of $A$\textsf{''} is \emph{covariant} in $A$. So, we expect to be able to implement \lstinline!fmap! that applies to the type parameter $A$ -of $\text{Data}^{A,B}$. Renaming the type parameter $B$ to $Z$ +of $\text{Data2}^{A,B}$. Renaming the type parameter $B$ to $Z$ for clarity, we write the type signature for \lstinline!fmap! like this: \[ @@ -2262,38 +2261,38 @@ \subsubsection{Example \label{subsec:f-Example-functors-4}\ref{subsec:f-Example- In the resulting Scala code for \lstinline!fmap!, we write out some types for clarity: \begin{lstlisting} -def fmapA[A, Z, C](f: A => C): Data[A, Z] => Data[C, Z] = { +def fmapA[A, Z, C](f: A => C): Data2[A, Z] => Data2[C, Z] = { case (e: Either[A, Z], g: ((A => Int) => Z)) => val newE: Either[C, Z] = e match { case Left(x) => Left(f(x)) case Right(z) => Right(z) } val newG: (C => Int) => Z = { p => g(a => p(f(a))) } - (newE, newG) // This has type Data[C, Z]. + (newE, newG) // This has type Data2[C, Z]. } \end{lstlisting} -This suggests that $\text{Data}^{A,Z}$ is covariant with respect +This suggests that $\text{Data2}^{A,Z}$ is covariant with respect to the type parameter $A$. The results of Section~\ref{sec:f-Laws-and-structure} will show rigorously that the functor laws hold for this implementation of \lstinline!fmap!. The analysis is simpler for the type parameter $B$ because it is only used in covariant positions, never to the left of function arrows. -So, we expect $\text{Data}^{A,B}$ to be a functor with respect to +So, we expect $\text{Data2}^{A,B}$ to be a functor with respect to $B$. Implementing the corresponding \lstinline!fmap! is straightforward: \begin{lstlisting} -def fmapB[Z, B, C](f: B => C): Data[Z, A] => Data[Z, B] = { +def fmapB[Z, B, C](f: B => C): Data2[Z, A] => Data2[Z, B] = { case (e: Either[Z, B], g: ((Z => Int) => B)) => val newE: Either[Z, B] = e match { case Left(x) => Left(f(x)) case Right(z) => Right(z) } val newG: (C => Int) => Z = { p => g(a => p(f(a))) } - (newE, newG) // This has type Data[C, Z]. + (newE, newG) // This has type Data2[C, Z]. } \end{lstlisting} -The code indicates that $\text{Data}^{A,B}$ is a functor with respect +The code indicates that $\text{Data2}^{A,B}$ is a functor with respect to both $A$ and $B$. \subsubsection{Example \label{subsec:f-Example-functors-6}\ref{subsec:f-Example-functors-6}} @@ -2413,8 +2412,8 @@ \subsection{Functor laws in the pipe notation} & \text{map}_{L}(x^{:L^{A}})(\text{id}^{:A\rightarrow A})=x\quad,\\ & \text{map}_{L}(x^{:L^{A}})(f^{:A\rightarrow B}\bef g^{:B\rightarrow C})=\text{map}_{L}\big(\text{map}_{L}(x)(f)\big)(g)\quad. \end{align*} -The laws are easier to read when using \lstinline!map! as a class -method: +But the laws become easier to read when using \lstinline!map! 
in +Scala\textsf{'}s method syntax: \begin{lstlisting} x.map(identity) == x x.map(f).map(g) == x.map(f andThen g) @@ -2422,7 +2421,7 @@ \subsection{Functor laws in the pipe notation} To take advantage of this syntax, we use the \index{pipe notation}\textbf{pipe notation} with the symbol $\triangleright$ (\textsf{``}pipe\textsf{''}). Then $x\triangleright\text{fmap}(f)$ is the same as \lstinline!fmap(f)(x)! and \lstinline!x.map(f)!. -Then the functor laws become: +The functor laws become: \begin{align*} & x\triangleright\text{fmap}_{L}(\text{id})=x\quad,\\ & x\triangleright\text{fmap}_{L}(f)\triangleright\text{fmap}_{L}(g)=x\triangleright\text{fmap}_{L}(f\bef g)\quad. @@ -2458,11 +2457,11 @@ \subsection{Functor laws in the pipe notation} \[ x\triangleright\left(f\bef g\right)^{\uparrow L}=x\triangleright f^{\uparrow L}\bef g^{\uparrow L}=x\triangleright f^{\uparrow L}\triangleright g^{\uparrow L}\quad. \] -This equation directly represents the Scala code syntax: +This equation corresponds to the Scala code syntax: \begin{lstlisting} x.map(f andThen g) == x.map(f).map(g) \end{lstlisting} -if we make the pipe symbol $\left(\triangleright\right)$ group weaker +because the pipe symbol $\left(\triangleright\right)$ groups weaker than the composition symbol $\left(\bef\right)$. Written in the \emph{backward} notation ($f\circ g$), the functor @@ -2471,7 +2470,7 @@ \subsection{Functor laws in the pipe notation} \left(g\circ f\right)^{\uparrow L}=g^{\uparrow L}\circ f^{\uparrow L}\quad. \] -The analogous notation for a contrafunctor $C^{\bullet}$ is: +The lifting notation for a contrafunctor $C$ is: \[ f^{\downarrow C}\triangleq\text{cmap}_{C}(f)\quad. \] @@ -2484,31 +2483,31 @@ \subsection{Functor laws in the pipe notation} book, keeping in mind that one can straightforwardly and mechanically translate between forward and backward notations: \[ -f\bef g=g\circ f\quad,\quad\quad x\triangleright f=f(x)\quad,\quad\quad x\triangleright f\triangleright g=g(f(x))\quad. +f\bef g=g\circ f\quad,\quad\quad x\triangleright f\bef g=x\triangleright f\triangleright g=g(f(x))=(g\circ f)(x)\quad. \] \subsection{Bifunctors\label{subsec:Bifunctors}} -A type constructor can be a functor with respect to several type parameters. A \textbf{bifunctor}\index{bifunctor} is a type constructor with -\emph{two} type parameters that satisfies the functor laws with respect -to both parameters. +\emph{two} type parameters that has lawful \lstinline!fmap! methods +with respect to both parameters. As an example, consider the type constructor $F$ defined by: \[ F^{A,B}\triangleq A\times B\times B\quad. \] -If we fix the type parameter $B$ but let the parameter $A$ vary, -we get a type constructor that we denote by $F^{\bullet,B}$. We see -that the type constructor $F^{\bullet,B}$ is a functor with the \lstinline!fmap! -function: +If we fix the type parameter $B$ in $F^{A,B}$ but let the parameter +$A$ vary, we get a type constructor that we denote by $F^{\bullet,B}$. +We see that the type constructor $F^{\bullet,B}$ is a functor with +the following \lstinline!fmap! function: \[ \text{fmap}_{F^{\bullet,B}}(f^{:A\rightarrow C})\triangleq a^{:A}\times b_{1}^{:B}\times b_{2}^{:B}\rightarrow f(a)\times b_{1}\times b_{2}\quad. \] -Instead of saying that $F^{\bullet,B}$ is a functor, we can also +Instead of saying that $F^{\bullet,B}$ is a functor, we will also say more verbosely that $F^{A,B}$ is a functor with respect to $A$ -when $B$ is held fixed. +when $B$ is held fixed. For brevity, we will prefer the notation +$F^{\bullet,B}$. 
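In Scala, this \lstinline!fmap! with respect to the first type parameter may be sketched as follows (using a plain tuple for $F^{A,B}$; the name \lstinline!fmapA! is ours):
\begin{lstlisting}
type F[A, B] = (A, B, B)

def fmapA[A, B, C](f: A => C): F[A, B] => F[C, B] = {
  case (a, b1, b2) => (f(a), b1, b2)
}
\end{lstlisting}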
If we now fix the type parameter $A$, we find that the type constructor $F^{A,\bullet}$ is a functor with the following \lstinline!fmap! @@ -2517,22 +2516,20 @@ \subsection{Bifunctors\label{subsec:Bifunctors}} \text{fmap}_{F^{A,\bullet}}(g^{:B\rightarrow D})\triangleq a^{:A}\times b_{1}^{:B}\times b_{2}^{:B}\rightarrow a\times g(b_{1})\times g(b_{2})\quad. \] -Since the bifunctor $F^{\bullet,\bullet}$ is a functor with respect -to each type parameter separately, we can transform a value of type -$F^{A,B}$ to a value of type $F^{C,D}$ by applying the two \lstinline!fmap! -functions one after another. It is convenient to denote this transformation +Since the bifunctor $F$ is a functor with respect to each type parameter +separately, we can transform a value of type $F^{A,B}$ to a value +of type $F^{C,D}$ by applying the two \lstinline!fmap! functions +one after another. It is convenient to express that transformation by a single operation called \lstinline!bimap! that uses two functions $f^{:A\rightarrow C}$ and $g^{:B\rightarrow D}$ as arguments: \begin{align} \text{bimap}_{F}(f^{:A\rightarrow C})(g^{:B\rightarrow D}) & :F^{A,B}\rightarrow F^{C,D}\quad,\nonumber \\ \text{bimap}_{F}(f^{:A\rightarrow C})(g^{:B\rightarrow D}) & \triangleq\text{fmap}_{F^{\bullet,B}}(f^{:A\rightarrow C})\bef\text{fmap}_{F^{C,\bullet}}(g^{:B\rightarrow D})\quad.\label{eq:f-definition-of-bimap} \end{align} -In the condensed notation, this is written as: +In the lifting notation, this is written as: \[ -\text{bimap}_{F}(f^{:A\rightarrow C})(g^{:B\rightarrow D})\triangleq f^{\uparrow F^{\bullet,B}}\bef g^{\uparrow F^{C,\bullet}}\quad, +\text{bimap}_{F}(f^{:A\rightarrow C})(g^{:B\rightarrow D})\triangleq f^{\uparrow F^{\bullet,B}}\bef g^{\uparrow F^{C,\bullet}}\quad. \] -although in this case the longer notation in Eq.~(\ref{eq:f-definition-of-bimap}) -may be easier to reason about. What if we apply the two \lstinline!fmap! functions in the opposite order? Since these functions work with different type parameters, @@ -2558,18 +2555,20 @@ \subsection{Bifunctors\label{subsec:Bifunctors}} \begin{align*} & \quad{\color{greenunder}\quad\text{left-hand side}:}\quad\\ & \text{fmap}_{F^{\bullet,B}}(f^{:A\rightarrow C})\bef\text{fmap}_{F^{C,\bullet}}(g^{:B\rightarrow D})\\ - & \quad{\color{greenunder}\quad\text{definitions of }\text{fmap}_{F^{\bullet,\bullet}}:}\quad\\ + & \quad{\color{greenunder}\quad\text{definitions of }\text{fmap}_{F^{\bullet,B}}\text{ and }\text{fmap}_{F^{C,\bullet}}:}\quad\\ & \quad=(a^{:A}\times b_{1}^{:B}\times b_{2}^{:B}\rightarrow f(a)\times b_{1}\times b_{2})\bef(c^{:C}\times b_{1}^{:B}\times b_{2}^{:B}\rightarrow c\times g(b_{1})\times g(b_{2}))\\ & \quad{\color{greenunder}\quad\text{compute composition}:}\quad\\ - & \quad=a^{:A}\times b_{1}^{:B}\times b_{2}^{:B}\rightarrow f(a)\times g(b_{1})\times g(b_{2})\quad,\\ + & \quad=a^{:A}\times b_{1}^{:B}\times b_{2}^{:B}\rightarrow f(a)\times g(b_{1})\times g(b_{2})\quad. 
+\end{align*} +\begin{align*} & \quad{\color{greenunder}\quad\text{right-hand side}:}\quad\\ & \text{fmap}_{F^{A,\bullet}}(g^{:B\rightarrow D})\bef\text{fmap}_{F^{\bullet,D}}(f^{:A\rightarrow C})\\ - & \quad{\color{greenunder}\quad\text{definitions of }\text{fmap}_{F^{\bullet,\bullet}}:}\quad\\ + & \quad{\color{greenunder}\quad\text{definitions of }\text{fmap}_{F^{A,\bullet}}\text{ and }\text{fmap}_{F^{\bullet,D}}:}\quad\\ & \quad=(a^{:A}\times b_{1}^{:B}\times b_{2}^{:B}\rightarrow a\times g(b_{1})\times g(b_{2}))\bef(a^{:A}\times d_{1}^{:D}\times d_{2}^{:D}\rightarrow f(a)\times d_{1}\times d_{2})\\ & \quad{\color{greenunder}\quad\text{compute composition}:}\quad\\ & \quad=a^{:A}\times b_{1}^{:B}\times b_{2}^{:B}\rightarrow f(a)\times g(b_{1})\times g(b_{2})\quad. \end{align*} -Both sides of the law are equal. +Both sides of the law are now equal. The commutativity law~(\ref{eq:f-fmap-fmap-bifunctor-commutativity}) leads to the composition law of \lstinline!bimap!: @@ -2579,12 +2578,11 @@ \subsection{Bifunctors\label{subsec:Bifunctors}} The following type diagram shows the relationships between various \lstinline!bimap! and \lstinline!fmap! functions: \[ -\xymatrix{\xyScaleY{3.0pc}\xyScaleX{12.0pc}F^{A,B}\ar[rd]\sp(0.6){~~~\text{bimap}_{F}(f_{1})(g_{1})}\ar[r]\sp(0.4){\text{fmap}_{F^{\bullet,B}}(f_{1}^{:A\rightarrow C})}\ar[d]\sp(0.5){\text{fmap}_{F^{A,\bullet}}(g_{1}^{:B\rightarrow D})} & F^{C,B}\ar[rd]\sp(0.6){~~~\text{bimap}_{F}(f_{2})(g_{1})}\ar[r]\sp(0.4){~\text{fmap}_{F^{\bullet,B}}(f_{2}^{:C\rightarrow E})}\ar[d]\sp(0.5){\text{fmap}_{F^{C,\bullet}}(g_{1}^{:B\rightarrow D})~~~} & F^{E,B}\ar[d]\sp(0.5){\text{fmap}_{F^{E,\bullet}}(g_{1}^{:B\rightarrow D})}\\ -F^{A,D}\ar[rd]\sp(0.6){~~~\text{bimap}_{F}(f_{1})(g_{2})}\ar[r]\sp(0.4){\text{fmap}_{F^{\bullet,D}}(f_{1}^{:A\rightarrow C})}\ar[d]\sp(0.5){\text{fmap}_{F^{A,\bullet}}(g_{2}^{:D\rightarrow G})} & F^{C,D}\ar[rd]\sp(0.6){~~~\text{bimap}_{F}(f_{2})(g_{2})}\ar[r]\sp(0.4){~\text{fmap}_{F^{\bullet,D}}(f_{2}^{:C\rightarrow E})}\ar[d]\sp(0.5){\text{fmap}_{F^{C,\bullet}}(g_{2}^{:D\rightarrow G})~~~} & F^{E,D}\ar[d]\sp(0.5){\text{fmap}_{F^{E,\bullet}}(g_{2}^{:D\rightarrow G})}\\ -F^{A,G}\ar[r]\sp(0.4){~\text{fmap}_{F^{\bullet,G}}(f_{1}^{:A\rightarrow C})} & F^{C,G}\ar[r]\sp(0.4){~\text{fmap}_{F^{\bullet,G}}(f_{2}^{:C\rightarrow E})} & F^{E,G} +\xymatrix{\xyScaleY{3.0pc}\xyScaleX{11.0pc}F^{A,B}\ar[rd]\sp(0.6){~~~\text{bimap}_{F}(f_{1})(g_{1})}\ar[r]\sp(0.4){\text{fmap}_{F^{\bullet,B}}(f_{1}^{:A\rightarrow C})}\ar[d]\sp(0.5){\text{fmap}_{F^{A,\bullet}}(g_{1})} & F^{C,B}\ar[rd]\sp(0.6){~~~\text{bimap}_{F}(f_{2})(g_{1})}\ar[r]\sp(0.4){~\text{fmap}_{F^{\bullet,B}}(f_{2}^{:C\rightarrow E})}\ar[d]\sp(0.5){\text{fmap}_{F^{C,\bullet}}(g_{1})~~~} & F^{E,B}\ar[d]\sp(0.5){\text{fmap}_{F^{E,\bullet}}(g_{1}^{:B\rightarrow D})}\\ +F^{A,D}\ar[rd]\sp(0.6){~~~\text{bimap}_{F}(f_{1})(g_{2})}\ar[r]\sp(0.4){\text{fmap}_{F^{\bullet,D}}(f_{1})}\ar[d]\sp(0.5){\text{fmap}_{F^{A,\bullet}}(g_{2})} & F^{C,D}\ar[rd]\sp(0.6){~~~\text{bimap}_{F}(f_{2})(g_{2})}\ar[r]\sp(0.4){~\text{fmap}_{F^{\bullet,D}}(f_{2})}\ar[d]\sp(0.5){\text{fmap}_{F^{C,\bullet}}(g_{2})~~~} & F^{E,D}\ar[d]\sp(0.5){\text{fmap}_{F^{E,\bullet}}(g_{2}^{:D\rightarrow G})}\\ +F^{A,G}\ar[r]\sp(0.4){~\text{fmap}_{F^{\bullet,G}}(f_{1})} & F^{C,G}\ar[r]\sp(0.4){~\text{fmap}_{F^{\bullet,G}}(f_{2})} & F^{E,G} } \] - To derive the composition law from Eq.~(\ref{eq:f-fmap-fmap-bifunctor-commutativity}), write: \begin{align*} @@ -2623,7 +2621,7 @@ \subsection{Bifunctors\label{subsec:Bifunctors}} If $F^{A,B}$ is 
known to be a functor separately with respect to $A$ and $B$, will the commutativity law~(\ref{eq:f-fmap-fmap-bifunctor-commutativity}) -always hold? The calculation for the example $F^{A,B}\triangleq A\times B\times B$ +always hold? The calculation for $F^{A,B}\triangleq A\times B\times B$ shows that the two \lstinline!fmap! functions commute because they work on different parts of the data structure $F^{A,B}$. This turns out\footnote{A proof is given in Section~\ref{sec:Commutativity-laws-for-type-constructors} @@ -2661,7 +2659,7 @@ \subsection{Constructions of functors\label{subsec:f-Functor-constructions}} \hline {\footnotesize{}disjunctive type} & {\footnotesize{}$L^{A}\triangleq P^{A}+Q^{A}$} & {\footnotesize{}$P$ and $Q$ are functors}\tabularnewline \hline -{\footnotesize{}function type} & {\footnotesize{}$L^{A}\triangleq C^{A}\rightarrow P^{A}$} & {\footnotesize{}$P$ is a functor and $C$ a contrafunctor}\tabularnewline +{\footnotesize{}function type} & {\footnotesize{}$L^{A}\triangleq C^{A}\rightarrow P^{A}$} & {\footnotesize{}$P$ is a functor and $C$ is a contrafunctor}\tabularnewline \hline {\footnotesize{}type parameter} & {\footnotesize{}$L^{A}\triangleq Z$} & {\footnotesize{}$Z$ is a fixed type}\tabularnewline \hline @@ -2671,7 +2669,7 @@ \subsection{Constructions of functors\label{subsec:f-Functor-constructions}} \hline \end{tabular} \par\end{centering} -\caption{Type constructions defining a functor $L^{A}$.\label{tab:f-Functor-constructions}} +\caption{Type constructions defining a functor $L$.\label{tab:f-Functor-constructions}} \end{table} In each of these constructions, the \lstinline!fmap! function for @@ -2696,7 +2694,7 @@ \subsubsection{Statement \label{subsec:f-Statement-identity-functor}\ref{subsec: The identity function is the only fully parametric implementation of the type signature $\left(A\rightarrow B\right)\rightarrow A\rightarrow B$. Since the code of \lstinline!fmap! is the identity function, the -laws are satisfied automatically: +laws are satisfied: \begin{align*} {\color{greenunder}\text{identity law}:}\quad & \text{fmap}_{\text{Id}}(\text{id})=\text{id}(\text{id})=\text{id}\quad,\\ {\color{greenunder}\text{composition law}:}\quad & \text{\ensuremath{\text{fmap}_{\text{Id}}}}(f\bef g)=f\bef g=\text{fmap}_{\text{Id}}(f)\bef\text{fmap}_{\text{Id}}(g)\quad. @@ -2713,8 +2711,8 @@ \subsubsection{Statement \label{subsec:f-Statement-constant-functor}\ref{subsec: The \lstinline!fmap! function is defined by: \begin{align*} -\text{fmap}_{\text{Const}} & :\left(A\rightarrow B\right)\rightarrow\text{Const}^{Z,A}\rightarrow\text{Const}^{Z,B}\cong\left(A\rightarrow B\right)\rightarrow Z\rightarrow Z\quad,\\ -\text{fmap}_{\text{Const}}(f^{:A\rightarrow B}) & \triangleq(z^{:Z}\rightarrow z)=\text{id}^{:Z\rightarrow Z}\quad. + & \text{fmap}_{\text{Const}}:\left(A\rightarrow B\right)\rightarrow\text{Const}^{Z,A}\rightarrow\text{Const}^{Z,B}\cong\left(A\rightarrow B\right)\rightarrow Z\rightarrow Z\quad,\\ + & \text{fmap}_{\text{Const}}(f^{:A\rightarrow B})\triangleq(z^{:Z}\rightarrow z)=\text{id}^{:Z\rightarrow Z}\quad. \end{align*} It is a constant function that ignores $f$ and returns the identity $\text{id}^{:Z\rightarrow Z}$. 
The laws are satisfied: @@ -2728,10 +2726,9 @@ \subsubsection{Statement \label{subsec:f-Statement-constant-functor}\ref{subsec: def fmap[A, B](f: A => B): Const[Z, A] => Const[Z, B] = identity[Z] \end{lstlisting} -The identity functor $\text{Id}^{\bullet}$ and the constant functor -$\text{Const}^{Z,\bullet}$ are not often used: their \lstinline!fmap! -implementations are identity functions, and so they rarely provide -useful functionality. +The identity functor and the constant functor are not often used: +as their \lstinline!fmap! methods are identity functions, they rarely +provide useful functionality. We have seen that type constructors with product types, such as $L^{A}\triangleq A\times A\times A$, are functors. The next construction (the \index{functor product}\textbf{functor @@ -2739,7 +2736,7 @@ \subsubsection{Statement \label{subsec:f-Statement-constant-functor}\ref{subsec: \subsubsection{Statement \label{subsec:functor-Statement-functor-product}\ref{subsec:functor-Statement-functor-product}} -If $L^{\bullet}$ and $M^{\bullet}$ are two functors then the product\index{functor product} +If $L$ and $M$ are two functors then the product\index{functor product} $P^{A}\triangleq L^{A}\times M^{A}$ is also a functor. \subparagraph{Proof} @@ -2788,14 +2785,15 @@ \subsubsection{Statement \label{subsec:functor-Statement-functor-product}\ref{su Apply both sides of this equation to an arbitrary value of type $L^{A}\times M^{A}$: \begin{align*} {\color{greenunder}\text{expect to equal }(l\times m)\triangleright(f\bef g)^{\uparrow P}:}\quad & (l^{:L^{A}}\times m^{:M^{A}})\triangleright f^{\uparrow P}\gunderline{\,\bef\,}g^{\uparrow P}\\ -{\color{greenunder}\triangleright\text{ notation}:}\quad & =\gunderline (l^{:L^{A}}\times m^{:M^{A}}\gunderline{)\triangleright f^{\uparrow P}}\triangleright g^{\uparrow P}\\ +{\color{greenunder}\triangleright\text{-notation}:}\quad & =\gunderline (l^{:L^{A}}\times m^{:M^{A}}\gunderline{)\triangleright f^{\uparrow P}}\triangleright g^{\uparrow P}\\ {\color{greenunder}\text{use Eq.~(\ref{eq:f-def-of-functor-product-lift})}:}\quad & =\gunderline{\big(}(l\triangleright f^{\uparrow L})\times(m\triangleright f^{\uparrow M})\gunderline{\big)\triangleright g^{\uparrow P}}\\ {\color{greenunder}\text{use Eq.~(\ref{eq:f-def-of-functor-product-lift})}:}\quad & =(l\triangleright f^{\uparrow L}\gunderline{\,\triangleright\,}g^{\uparrow L})\times(m\triangleright f^{\uparrow M}\gunderline{\,\triangleright\,}g^{\uparrow M})\\ -{\color{greenunder}\triangleright\text{ notation}:}\quad & =(l\triangleright\gunderline{f^{\uparrow L}\bef g^{\uparrow L}})\times(m\triangleright\gunderline{f^{\uparrow M}\bef g^{\uparrow M}})\\ +{\color{greenunder}\triangleright\text{-notation}:}\quad & =(l\triangleright\gunderline{f^{\uparrow L}\bef g^{\uparrow L}})\times(m\triangleright\gunderline{f^{\uparrow M}\bef g^{\uparrow M}})\\ {\color{greenunder}\text{composition laws of }L\text{ and }M:}\quad & =(l\triangleright(f\bef g)^{\uparrow L})\times(m\triangleright(f\bef g)^{\uparrow M})\\ {\color{greenunder}\text{use Eq.~(\ref{eq:f-def-of-functor-product-lift})}:}\quad & =(l\times m)\triangleright(f\bef g)^{\uparrow P}\quad. 
\end{align*} -The calculations are shorter if we use the pair product operation: +The calculations are shorter if we use the pair product operation +($\boxtimes$): \begin{align*} {\color{greenunder}\text{expect to equal }(f\bef g)^{\uparrow P}:}\quad & f^{\uparrow P}\bef g^{\uparrow P}=(f^{\uparrow L}\boxtimes f^{\uparrow M})\bef(g^{\uparrow L}\boxtimes g^{\uparrow M})\\ {\color{greenunder}\text{composition of functions under }\boxtimes:}\quad & =(\gunderline{f^{\uparrow L}\bef g^{\uparrow L}})\boxtimes(\gunderline{f^{\uparrow M}\bef g^{\uparrow M}})\\ @@ -2822,28 +2820,27 @@ \subsubsection{Statement \label{subsec:functor-Statement-functor-product}\ref{su \[ m\triangleright f^{\uparrow M}\triangleright g^{\uparrow M}=m\triangleright f^{\uparrow M}\bef g^{\uparrow M}=m\triangleright(f\bef g)^{\uparrow M}\quad. \] -By the convention of the pipe notation, it groups to the left, so -we have: +By convention, the pipe symbol ($\triangleright$) groups to the left, +and so we can write: \[ \left(x\triangleright f\right)\triangleright g=x\triangleright f\triangleright g=x\triangleright f\bef g=x\triangleright(f\bef g)=(f\bef g)(x)=g(f(x))\quad. \] -We will often use this notation in derivations. (Chapter~\ref{chap:Reasoning-about-code} -gives an overview of the derivation techniques, including some more -details about the pipe notation.) +We will often use the pipe notation in derivations. (Chapter~\ref{chap:Reasoning-about-code} +gives an overview of the derivation techniques.) \subsubsection{Statement \label{subsec:functor-Statement-functor-coproduct}\ref{subsec:functor-Statement-functor-coproduct}} -If $P^{A}$ and $Q^{A}$ are functors then $L^{A}\triangleq P^{A}+Q^{A}$ -is a functor, with \lstinline!fmap! defined by: +If $P$ and $Q$ are functors then the type constructor $L$ defined +by $L^{A}\triangleq P^{A}+Q^{A}$ is a functor whose \lstinline!fmap! +method is given by this code: \begin{lstlisting} def fmap[A, B](f: A => B): Either[P[A], Q[A]] => Either[P[B], Q[B]] = { case Left(pa) => Left(fmap_P(f)(pa)) // Use fmap for P. case Right(qa) => Right(fmap_Q(f)(qa)) // Use fmap for Q. } \end{lstlisting} -The functor $L^{\bullet}$ is the \textbf{functor co-product}\index{functor co-product} -of $P^{\bullet}$ and $Q^{\bullet}$. The code notation for the \lstinline!fmap! -function is: +The functor $L$ is the \textbf{functor co-product}\index{functor co-product} +of $P$ and $Q$. The code notation for \lstinline!fmap! is: \[ \text{fmap}_{L}(f^{:A\rightarrow B})=f^{\uparrow L}\triangleq\,\begin{array}{|c||cc|} & P^{B} & Q^{B}\\ @@ -2904,11 +2901,11 @@ \subsubsection{Statement \label{subsec:functor-Statement-functor-coproduct}\ref{ $\times$) makes this analogy visually clear. Implementing \lstinline!fmap! for a polynomial functor is straightforward: -\lstinline!fmap! replaces each occurrence of the a value of type -$A$ by the corresponding value of type $B$, leaving constant types -unchanged and keeping the order of parts in all products and disjunctive -types. Previously, our implementations of \lstinline!fmap! for various -type constructors (such as shown in Example~\ref{subsec:f-Example-functors-1}) +\lstinline!fmap! replaces each occurrence of a value of type $A$ +by the corresponding value of type $B$, leaving constant types unchanged +and keeping the order of parts in all products and disjunctive types. +Previously, our implementations of \lstinline!fmap! for various type +constructors (such as shown in Example~\ref{subsec:f-Example-functors-1}) were guided by the idea of preserving information. 
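To illustrate that recipe with a sketch (the polynomial functor $Q^{A}\triangleq\bbnum 1+\text{Int}\times A+A\times A$ and all names in this code are chosen only for this illustration), we may write:
\begin{lstlisting}
sealed trait Q[A]                                  // Represents 1 + Int x A + A x A.
final case class Q0[A]() extends Q[A]              // The part 1.
final case class Q1[A](n: Int, a: A) extends Q[A]  // The part Int x A.
final case class Q2[A](a1: A, a2: A) extends Q[A]  // The part A x A.

def fmap_Q[A, B](f: A => B): Q[A] => Q[B] = {
  case Q0()       => Q0()                // No values of type A in this part.
  case Q1(n, a)   => Q1(n, f(a))         // The Int value is left unchanged.
  case Q2(a1, a2) => Q2(f(a1), f(a2))    // The order of parts is kept.
}
\end{lstlisting}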
Statements~\ref{subsec:functor-Statement-functor-product}\textendash \ref{subsec:functor-Statement-functor-coproduct} explain why those implementations of the \lstinline!fmap! are correct (i.e., obey the functor laws). @@ -2964,8 +2961,8 @@ \subsubsection{Statement \label{subsec:functor-Statement-functor-exponential}\re \end{align*} $\square$ -It is important for this proof that the order of function compositions -is reversed when lifting to a contrafunctor $C$: $(f\bef g)^{\downarrow C}=g^{\downarrow C}\bef f^{\downarrow C}$. +This proof uses the fact that the order of function compositions is +reversed when lifting to a contrafunctor $C$ as in $(f\bef g)^{\downarrow C}=g^{\downarrow C}\bef f^{\downarrow C}$. If $C$ were a functor, the proof would not work because we would have obtained $f^{\uparrow C}\bef g^{\uparrow C}$ instead of $g^{\downarrow C}\bef f^{\downarrow C}$. The order of composition cannot be permuted for arbitrary functions @@ -2974,34 +2971,38 @@ \subsubsection{Statement \label{subsec:functor-Statement-functor-exponential}\re Examples of functors obtained via the exponential\index{functor exponential} construction are $L^{A}\triangleq Z\rightarrow A$ (with the contrafunctor -$C^{A}$ chosen as the constant contrafunctor $Z$, where $Z$ is -a fixed type) and $L^{A}\triangleq\left(A\rightarrow Z\right)\rightarrow A$ +$C$ chosen as the constant contrafunctor, $C^{A}\triangleq Z$, where +$Z$ is a fixed type) and $L^{A}\triangleq\left(A\rightarrow Z\right)\rightarrow A$ (with the contrafunctor $C^{A}\triangleq A\rightarrow Z$). Statement~\ref{subsec:functor-Statement-functor-exponential} -generalizes those examples to arbitrary contrafunctors $C^{A}$ used -as arguments of function types. +generalizes those examples to arbitrary contrafunctors $C$. Similarly, one can prove that $P^{A}\rightarrow C^{A}$ is a contrafunctor -(Exercise~\ref{subsec:functor-Exercise-functor-laws}). Together -with Statements~\ref{subsec:functor-Statement-functor-product}\textendash \ref{subsec:functor-Statement-functor-exponential}, -this gives us the rules of reasoning about covariance and contravariance +(Exercise~\ref{subsec:functor-Exercise-functor-laws}). + +Together with Statements~\ref{subsec:functor-Statement-functor-product}\textendash \ref{subsec:functor-Statement-functor-exponential}, +this gives us rules of reasoning about covariance and contravariance of type parameters in arbitrary type expressions. Every function arrow ($\rightarrow$) flips the variance from covariant to contravariant and back. For instance, the identity functor $L^{A}\triangleq A$ is covariant in $A$, while $A\rightarrow Z$ is contravariant in -$A$, and $\left(A\rightarrow Z\right)\rightarrow Z$ is again covariant -in $A$. As we have seen, $A\rightarrow A\rightarrow Z$ is contravariant -in $A$, so any number of curried arrows count as one in this reasoning -(and, in any case, $A\rightarrow A\rightarrow Z\cong A\times A\rightarrow Z$). -Products and disjunctions do not change variance, so $\left(A\rightarrow Z_{1}\right)\times\left(A\rightarrow Z_{2}\right)+\left(A\rightarrow Z_{3}\right)$ -is contravariant in $A$. This is shown in more detail in Section~\ref{subsec:Solved-examples:-How-to-recognize-functors}. +$A$. But $\left(A\rightarrow Z\right)\rightarrow Z$ is again covariant +in $A$. -The remaining constructions set a type parameter to another type. 
-The \textbf{functor composition}\index{functor composition} $P^{Q^{A}}$, -written in Scala as \lstinline!P[Q[A]]!, is analogous to a function -composition such as $f(g(x))$ except for using type constructors. -Viewed in this way, type constructors are \textbf{type-level functions}\index{type-level function} -(i.e., mappings of types). So, functor composition may be denoted -by $P\circ Q$, like the function composition $f\circ g$. +As we have seen, $A\rightarrow A\rightarrow Z$ is contravariant in +$A$, so any number of curried arrows count as one in this reasoning +(and, in any case, $A\rightarrow A\rightarrow Z\cong A\times A\rightarrow Z$). +Products and co-products preserve variance; for example, $\left(A\rightarrow Z_{1}\right)\times\left(A\rightarrow Z_{2}\right)+\left(A\rightarrow Z_{3}\right)$ +is contravariant in $A$. More examples illustrating these techniques +are given in Section~\ref{subsec:Solved-examples:-How-to-recognize-functors} +below. + +The remaining construction is the \textbf{functor composition}\index{functor composition} +$P^{Q^{A}}$, written in Scala as \lstinline!P[Q[A]]!. This is analogous +to a function composition such as $f(g(x))$ except for using type +constructors. Viewed in this way, type constructors are \textbf{type-level +functions}\index{type-level function} (i.e., mappings from types +to types). So, we will denote functor composition by $P\circ Q$, +similarly to the function composition $f\circ g$. An example of functor composition in Scala is \lstinline!List[Option[A]]!. Since both \lstinline!List! and \lstinline!Option! have a \lstinline!map! @@ -3015,7 +3016,7 @@ \subsubsection{Statement \label{subsec:functor-Statement-functor-exponential}\re The code \lstinline!p.map(_.map(f))! lifts an $f^{:A\rightarrow B}$ into a function of type \lstinline!List[Option[A]] => List[Option[B]]!. In this way, we may perform the \lstinline!map! operation on the -nested data type \lstinline!List[Option[_]]!. +nested data type \lstinline!List[Option[A]]!. The next statement shows that this code always produces a lawful \lstinline!map! function. In other words, the composition\index{functor composition} @@ -3023,15 +3024,15 @@ \subsubsection{Statement \label{subsec:functor-Statement-functor-exponential}\re \subsubsection{Statement \label{subsec:functor-Statement-functor-composition-1}\ref{subsec:functor-Statement-functor-composition-1}} -If $P^{A}$ and $Q^{A}$ are functors then $L^{A}\triangleq P^{Q^{A}}$ -is also a functor, with \lstinline!fmap! defined by: +If $P$ and $Q$ are functors then $L^{A}\triangleq P^{Q^{A}}$ is +also a functor, with \lstinline!fmap! defined by: \begin{lstlisting} def fmap_L[A, B](f: A => B): P[Q[A]] => P[Q[B]] = fmap_P(fmap_Q(f)) \end{lstlisting} Here we assumed that the functions $\text{fmap}_{P}$ and $\text{fmap}_{Q}$ are known and satisfy the functor laws. 
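As an illustration (a sketch only), we may set $P=\text{List}$ and $Q=\text{Option}$ and use their standard \lstinline!map! methods in the roles of $\text{fmap}_{P}$ and $\text{fmap}_{Q}$:
\begin{lstlisting}
def fmap_List[A, B](f: A => B): List[A] => List[B] = _.map(f)
def fmap_Option[A, B](f: A => B): Option[A] => Option[B] = _.map(f)

// The composed functor L[A] = List[Option[A]]:
def fmap_L[A, B](f: A => B): List[Option[A]] => List[Option[B]] =
  fmap_List(fmap_Option(f))

fmap_L((x: Int) => x * 10)(List(Some(1), None, Some(2)))  // Returns List(Some(10), None, Some(20)).
\end{lstlisting}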
-In the code notation, $\text{fmap}_{L}$ is written equivalently as: +In the code notation, $\text{fmap}_{L}$ is written as: \begin{align} {\color{greenunder}\text{type signature}:}\quad & \text{fmap}_{L}:f^{:A\rightarrow B}\rightarrow P^{Q^{A}}\rightarrow P^{Q^{B}}\quad,\nonumber \\ {\color{greenunder}\text{implementation}:}\quad & \text{fmap}_{L}(f)\triangleq\text{fmap}_{P}(\text{fmap}_{Q}(f))\quad,\nonumber \\ @@ -3052,6 +3053,7 @@ \subsubsection{Statement \label{subsec:functor-Statement-functor-composition-1}\ \[ (f\bef g)^{\uparrow L}=\big((\gunderline{f\bef g})^{\uparrow Q}\big)^{\uparrow P}=(\gunderline{f^{\uparrow Q}\bef g^{\uparrow Q}}\big)^{\uparrow P}=f^{\uparrow Q\uparrow P}\bef g^{\uparrow Q\uparrow P}\quad. \] +$\square$ Finally, we consider recursive data types such as lists and trees (Section~\ref{sec:Lists-and-trees:recursive-disjunctive-types}). @@ -3072,20 +3074,20 @@ \subsubsection{Statement \label{subsec:functor-Statement-functor-composition-1}\ final case class Leaf[A](a: A) extends Tree2[A] final case class Branch[A](x: Tree2[A], y: Tree2[A]) extends Tree2[A] \end{lstlisting} -is defined by $\text{Tree}_{2}^{A}\triangleq A+\text{Tree}_{2}^{A}\times\text{Tree}_{2}^{A}$. +is denoted by $\text{Tree}_{2}^{A}\triangleq A+\text{Tree}_{2}^{A}\times\text{Tree}_{2}^{A}$. Such definitions of recursive types look like \textsf{``}type equations\textsf{''}. We can generalize these examples to a recursive definition: \begin{equation} L^{A}\triangleq S^{A,L^{A}}\quad,\label{eq:f-def-recursive-functor} \end{equation} where $S^{A,R}$ is a suitably chosen type constructor (called a \index{recursion scheme|textit}\textbf{recursion -scheme}) with two type parameters $A,R$. If a recursion scheme $S^{\bullet,\bullet}$ -is given, the Scala code defining $L^{\bullet}$ can be written as: +scheme}) with two type parameters $A,R$. If a recursion scheme $S$ +is given, the Scala code defining $L$ can be written as: \begin{lstlisting} -type S[A, R] = ... // Must be defined previously as type alias, class, or trait. +type S[A, R] = ... // Must be defined as type alias, class, or trait. final case class L[A](x: S[A, L[A]]) // Define a recursive type L via a recursion scheme S. \end{lstlisting} -We must use a \emph{class} to define \lstinline!L! because Scala +We must use a \emph{class} to define \lstinline!L[A]! because Scala does not support recursive type aliases: \begin{lstlisting} scala> type L[A] = Either[A, L[A]] @@ -3097,10 +3099,10 @@ \subsubsection{Statement \label{subsec:functor-Statement-functor-composition-1}\ Table~\ref{tab:Examples-of-recursive-disjunctive-type-equations} summarizes our previous examples of recursive disjunctive types and -shows the relevant choices of $S^{A,R}$, which turns out to be always -a bifunctor. For abstract syntax trees, the functors $P^{\bullet}$ -and $Q^{\bullet}$ must be given; they specify the available shapes -of leaves and branches respectively. +shows the relevant choices of $S$, which turns out to be always a +bifunctor. For abstract syntax trees, the functors $P$ and $Q$ must +be given; they specify the available shapes of leaves and branches +respectively. \begin{table} \begin{centering} @@ -3129,17 +3131,15 @@ \subsubsection{Statement \label{subsec:functor-Statement-functor-composition-1}\ \end{table} We will now prove that Eq.~(\ref{eq:f-def-recursive-functor}) always -defines a functor when $S^{\bullet,\bullet}$ is a bifunctor. 
The -proof is by induction with an inductive assumption that the laws already -hold for all recursive calls of \lstinline!fmap!. This generalizes -the technique used in the proof for the \lstinline!List! functor -(Example~\ref{subsec:Example-rec-poly-functor-List}). +defines a functor when $S$ is a bifunctor. The proof is by induction +with an inductive assumption that the laws already hold for all recursive +calls of \lstinline!fmap!. This generalizes the technique used in +the proof for the \lstinline!List! functor (Example~\ref{subsec:Example-rec-poly-functor-List}). \subsubsection{Statement \label{subsec:functor-Statement-functor-recursive}\ref{subsec:functor-Statement-functor-recursive}} -If $S^{A,B}$ is a bifunctor (a functor with respect to both type -parameters $A$ and $B$) then the recursively defined type constructor -$L^{A}$ is a lawful functor: +If $S$ is a bifunctor then the recursively defined type constructor +$L$ is a lawful functor: \[ L^{A}\triangleq S^{A,L^{A}}\quad. \] @@ -3151,7 +3151,7 @@ \subsubsection{Statement \label{subsec:functor-Statement-functor-recursive}\ref{ The corresponding Scala code (assuming that $S$ and $\text{bimap}_{S}$ are already defined): \begin{lstlisting} -final case class L[A](x: S[A, L[A]]) // Must define S[_, _] previously. +final case class L[A](x: S[A, L[A]]) // Must have defined S[_, _] before. def bimap_S[A, B, C, D](f: A => C)(g: B => D): S[A, B] => S[C, D] = ??? // Must define it here. @@ -3165,19 +3165,19 @@ \subsubsection{Statement \label{subsec:functor-Statement-functor-recursive}\ref{ \subparagraph{Proof} Usually, laws for a \index{recursive function!proving laws for}recursive -function (such as $\text{fmap}_{L}$) must be proved by induction. -In the recursive implementation of $\text{fmap}_{L}$, its code calls -itself in some cases but returns without recursive calls in other -cases. So, the base case of induction corresponds to the non-recursive -evaluations in the code of $\text{fmap}_{L}$, and we need to prove -that the law is then satisfied. The inductive step must prove that -the code of $\text{fmap}_{L}$ obeys the law under the inductive assumption -that all recursive calls to $\text{fmap}_{L}$ already obey that law. -In the proof, we do not need to separate the base case from the inductive +function (such as $\text{fmap}_{L}$) are proved by induction. The +code of a recursive implementation of $\text{fmap}_{L}$ calls itself +in some cases but returns without recursive calls in other cases. +So, the base case of induction corresponds to the non-recursive evaluations +in the code of $\text{fmap}_{L}$, and we need to prove that the law +is then satisfied. The inductive step must prove that the code of +$\text{fmap}_{L}$ obeys the law under the inductive assumption that +all recursive calls to $\text{fmap}_{L}$ already obey that law. In +the proof, we do not need to separate the base case from the inductive step; we just derive the law using the inductive assumption whenever needed. -For clarity, we add an overline to recursive calls in the code formula: +For clarity, we add an overline to recursive calls in the code formulas: \[ \text{fmap}_{L}(f)\triangleq\text{bimap}_{S}(f)(\overline{\text{fmap}_{L}}(f))\quad. \] @@ -3198,22 +3198,22 @@ \subsubsection{Statement \label{subsec:functor-Statement-functor-recursive}\ref{ {\color{greenunder}\text{definition of }\text{fmap}_{L}:}\quad & =\text{fmap}_{L}(f\bef g)\quad. 
\end{align*} -For the perfect-shaped binary tree, the construction~(\ref{eq:f-def-recursive-functor}) +For a perfect-shaped binary tree, the construction~(\ref{eq:f-def-recursive-functor}) is insufficient: no bifunctor $S^{A,L^{A}}$ can replace the type -argument $A$ in $L^{A}$ to obtain $L^{A\times A}$. To see that, -consider that $S^{A,L^{A}}$ is an application of a type-level function -$S^{\bullet,\bullet}$ to its two type parameters, which are set to -$A$ and $L^{A}$. In Scala syntax, $S^{A,L^{A}}$ is written as \lstinline!S[A,L[A]]!. -No matter how we define the type constructor $S$, the resulting type -expression \lstinline!S[A,L[A]]! will always use the type constructor -\lstinline!L! as \lstinline!L[A]! and not as \lstinline!L[(A,A)]!. - -To describe perfect-shaped trees, we need to modify the construction -by adding another arbitrary functor, $P^{\bullet}$, in the type argument -of $L^{\bullet}$: +argument $A$ in $L^{A}$ to obtain $L^{A\times A}$. Indeed, $S^{A,L^{A}}$ +is an application of a type-level function $S$ to its two type parameters, +which are set to $A$ and $L^{A}$. In Scala syntax, $S^{A,L^{A}}$ +is written as \lstinline!S[A, L[A]]!. No matter how we define the +type constructor $S$, the resulting type expression \lstinline!S[A, L[A]]! +will always use the type constructor \lstinline!L! as \lstinline!L[A]! +and not as \lstinline!L[(A, A)]!. + +To describe perfect-shaped trees, we need to modify Eq.~(\ref{eq:f-def-recursive-functor}) +by introducing another arbitrary functor ($P$) like this: \begin{equation} L^{A}\triangleq S^{A,L^{P^{A}}}\quad.\label{eq:f-def-recursive-functor-2} \end{equation} +The Scala syntax for $S^{A,L^{P^{A}}}$ is \lstinline!S[A, L[P[A]]]!. Perfect-shaped binary trees are defined by Eq.~(\ref{eq:f-def-recursive-functor-2}) with $S^{A,R}\triangleq A+R$ and $P^{A}\triangleq A\times A$. The Scala code for these definitions is: @@ -3223,8 +3223,8 @@ \subsubsection{Statement \label{subsec:functor-Statement-functor-recursive}\ref{ final case class L[A](s: S[A, L[P[A]]]) // Equivalently: case class L[A](s: Either[A, L[(A, A)]]) \end{lstlisting} -Different choices of $P^{\bullet}$ will define perfect-shaped trees -with different kinds of branching. +Different choices of $P$ will define perfect-shaped trees with different +kinds of branching. 
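For instance (a sketch only; the names \lstinline!P3! and \lstinline!L3! are chosen just for this illustration), choosing $P^{A}\triangleq A\times A\times A$ gives perfect-shaped trees with ternary branching:
\begin{lstlisting}
type P3[A] = (A, A, A)  // Ternary branching instead of P[A] = (A, A).
final case class L3[A](s: Either[A, L3[P3[A]]])  // A perfect-shaped ternary tree.

val t: L3[Int] = L3(Right(L3[(Int, Int, Int)](Left((1, 2, 3)))))  // Three leaf values at depth 1.
\end{lstlisting}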
Exercise~\ref{subsec:f-Exercise-recursive-functor-2} shows that type constructors defined by Eq.~(\ref{eq:f-def-recursive-functor-2}) @@ -3253,7 +3253,7 @@ \subsection{Constructions of contrafunctors\label{subsec:f-Contrafunctor-constru \hline {\footnotesize{}disjunctive type} & {\footnotesize{}$C^{A}\triangleq P^{A}+Q^{A}$} & {\footnotesize{}$P$ and $Q$ are contrafunctors}\tabularnewline \hline -{\footnotesize{}function type} & {\footnotesize{}$C^{A}\triangleq L^{A}\rightarrow H^{A}$} & {\footnotesize{}$L$ is a functor and $H$ a contrafunctor}\tabularnewline +{\footnotesize{}function type} & {\footnotesize{}$C^{A}\triangleq L^{A}\rightarrow H^{A}$} & {\footnotesize{}$L$ is a functor and $H$ is a contrafunctor}\tabularnewline \hline {\footnotesize{}type parameter} & {\footnotesize{}$C^{A}\triangleq Z$} & {\footnotesize{}$Z$ is a fixed type}\tabularnewline \hline @@ -3264,7 +3264,7 @@ \subsection{Constructions of contrafunctors\label{subsec:f-Contrafunctor-constru \hline \end{tabular} \par\end{centering} -\caption{Type constructions defining a contrafunctor $C^{A}$.\label{tab:f-Contrafunctor-constructions}} +\caption{Type constructions defining a contrafunctor $C$.\label{tab:f-Contrafunctor-constructions}} \end{table} Let us now prove the validity of some of these constructions. @@ -3291,8 +3291,8 @@ \subsubsection{Statement \label{subsec:functor-Statement-contrafunctor-constant} \subsubsection{Statement \label{subsec:functor-Statement-contrafunctor-composition-1}\ref{subsec:functor-Statement-contrafunctor-composition-1}} -If $P^{A}$ is a functor and $Q^{A}$ is a contrafunctor then $L^{A}\triangleq P^{Q^{A}}$ -is a contrafunctor with \lstinline!cmap! defined by +If $P$ is a functor and $Q$ is a contrafunctor then $L^{A}\triangleq P^{Q^{A}}$ +is a contrafunctor with \lstinline!cmap! defined by: \begin{lstlisting} def cmap[A, B](f: B => A): P[Q[A]] => P[Q[B]] = fmap_P(cmap_Q(f)) \end{lstlisting} @@ -3306,7 +3306,8 @@ \subsubsection{Statement \label{subsec:functor-Statement-contrafunctor-compositi \[ \text{cmap}_{L}(f^{:B\rightarrow A})\triangleq\text{fmap}_{P}(\text{cmap}_{Q}(f))\quad. \] -It is easier to reason about this function if we rewrite it as: +It is easier to reason about this function if we rewrite the previous +line as: \[ f^{\downarrow L}\triangleq\big(f^{\downarrow Q}\big)^{\uparrow P}\quad. \] @@ -3316,18 +3317,18 @@ \subsubsection{Statement \label{subsec:functor-Statement-contrafunctor-compositi {\color{greenunder}\text{composition law}:}\quad & f^{\downarrow L}\bef g^{\downarrow L}=(f^{\downarrow Q})^{\uparrow P}\gunderline{\,\bef\,}(g^{\downarrow Q})^{\uparrow P}\\ {\color{greenunder}\text{use }P\text{\textsf{'}s composition law}:}\quad & \quad=\big(\gunderline{f^{\downarrow Q}\bef g^{\downarrow Q}}\big)^{\uparrow P}=\big((g\bef f\gunderline{)^{\downarrow Q}\big)^{\uparrow P}}=\left(g\bef f\right)^{\downarrow L}\quad. \end{align*} +$\square$ -Finally, the recursive construction works for contrafunctors, except -that the type constructor $S^{A,R}$ must be a contrafunctor in $A$ -(but still a functor in $R$). An example of such a type constructor -is: +The recursive construction is the same for contrafunctors, except +that $S^{A,R}$ must be a contravariant in $A$ (but still covariant +in $R$). 
An example of such a type constructor $S$ is: \begin{equation} S^{A,R}\triangleq\left(A\rightarrow\text{Int}\right)+R\times R\quad.\label{eq:f-example-contra-bifunctor} \end{equation} -The type constructor $S^{\bullet,\bullet}$ is not a bifunctor because -it is contravariant in its first type parameter; so we cannot define -a \lstinline!bimap! function for it. However, we can define an analogous -function called \lstinline!xmap! with the type signature: +This $S$ is not a bifunctor because it is contravariant in its first +type parameter; so we cannot define a \lstinline!bimap! function +for $S$. However, we can define an analogous function called \lstinline!xmap! +with the type signature: \begin{lstlisting} def xmap[A, B, Q, R](f: B => A)(g: Q => R): S[A, Q] => S[B, R] \end{lstlisting} @@ -3349,8 +3350,7 @@ \subsubsection{Statement \label{subsec:functor-Statement-contrafunctor-compositi are fully parametric. We omit the details since they are quite similar to what we saw in Section~\ref{subsec:Bifunctors} for bifunctors. -If we define a type constructor $L^{\bullet}$ using the recursive -\textsf{``}type equation\index{type equation}\textsf{''}: +If we define a type constructor $L$ using the recursive \textsf{``}type equation\index{type equation}\textsf{''}: \[ L^{A}\triangleq S^{A,L^{A}}\triangleq\left(A\rightarrow\text{Int}\right)+L^{A}\times L^{A}\quad, \] @@ -3360,14 +3360,14 @@ \subsubsection{Statement \label{subsec:functor-Statement-contrafunctor-compositi \subsubsection{Statement \label{subsec:functor-Statement-contrafunctor-recursive-1}\ref{subsec:functor-Statement-contrafunctor-recursive-1}} -If $S^{A,R}$ is a contrafunctor with respect to $A$ and a functor +If $S^{A,R}$ is contravariant with respect to $A$ and covariant with respect to $R$ then the recursively defined type constructor -$C^{A}$ is a contrafunctor: +$C$ is a contrafunctor: \[ C^{A}\triangleq S^{A,C^{A}}\quad. \] Given the functions \lstinline!cmap!$_{S^{\bullet,R}}$ and \lstinline!fmap!$_{S^{A,\bullet}}$ -for $S$, we implement \lstinline!cmap!$_{C}$ as +for $S$, we implement \lstinline!cmap!$_{C}$ as: \begin{align*} \text{cmap}_{C}(f^{:B\rightarrow A}) & :C^{A}\rightarrow C^{B}\cong S^{A,C^{A}}\rightarrow S^{B,C^{B}}\quad,\\ \text{cmap}_{C}(f^{:B\rightarrow A}) & \triangleq\text{xmap}_{S}(f)(\overline{\text{cmap}_{C}}(f))\quad. @@ -3387,8 +3387,7 @@ \subsubsection{Statement \label{subsec:functor-Statement-contrafunctor-recursive \subparagraph{Proof} -The code of \lstinline!cmap! is recursive, and the recursive call -is marked by an overline: +We mark by an overline the recursive call in the code of \lstinline!cmap!: \[ \text{cmap}_{C}(f)\triangleq f^{\downarrow C}\triangleq\text{xmap}_{S}(f)(\overline{\text{cmap}_{C}}(f))\quad. \] @@ -3410,10 +3409,9 @@ \subsubsection{Statement \label{subsec:functor-Statement-contrafunctor-recursive \subsection{Examples: Recognizing functors and contrafunctors\label{subsec:Solved-examples:-How-to-recognize-functors}} Sections~\ref{subsec:f-Functor-constructions} and~\ref{subsec:f-Contrafunctor-constructions} -describe how functors and contrafunctors are built from other type -expressions. We can see from Tables~\ref{tab:f-Functor-constructions} -and~\ref{tab:f-Contrafunctor-constructions} that \emph{every} one -of the six basic type constructions\index{six type constructions} +describe how functors and contrafunctors are built from various type +expressions. 
Tables~\ref{tab:f-Functor-constructions} and~\ref{tab:f-Contrafunctor-constructions} +show that \emph{every} one of the six basic type constructions\index{six type constructions} (unit type, type parameters, product types, co-product types, function types, recursive types) gives either a new functor or a new contrafunctor. The six type constructions generate all \index{exponential-polynomial type}exponential-polynomial @@ -3461,10 +3459,10 @@ \subsection{Examples: Recognizing functors and contrafunctors\label{subsec:Solve This is shown in Exercises~\ref{subsec:functor-Exercise-functor-laws}(c), (d). \end{itemize} -To see how this works, consider any exponential-polynomial type expression, -such as Eq.~(\ref{eq:f-example-complicated-z}): +To see how this works in practice, consider any exponential-polynomial +type expression, such as Eq.~(\ref{eq:f-example-complicated-z}): \[ -Z^{A,R}\triangleq\left(\left(A\rightarrow A\rightarrow R\right)\rightarrow R\right)\times A+\left(\bbnum 1+R\rightarrow A+\text{Int}\right)+A\times A\times\text{Int}\times\text{Int}\quad, +Z^{A,R}\triangleq\left(\left(A\rightarrow A\rightarrow R\right)\rightarrow R\right)\times A+\left(\bbnum 1+R\rightarrow A+\text{Int}\right)+A\times A\times\text{Int}\times\text{Int}\quad. \] Mark the position of each type parameter as either covariant ($+$) or contravariant ($-$), according to the number of nested \emph{uncurried} @@ -3481,7 +3479,7 @@ \subsection{Examples: Recognizing functors and contrafunctors\label{subsec:Solve we need to implement a suitable \lstinline!map! method and verify that the functor laws hold. To do that from scratch, we could use the techniques explained in this and the previous chapters: starting -from the type signature +from the type signature, \[ \text{map}_{Z}:Z^{A,R}\rightarrow\left(A\rightarrow B\right)\rightarrow Z^{B,R}\quad, \] @@ -3490,13 +3488,13 @@ \subsection{Examples: Recognizing functors and contrafunctors\label{subsec:Solve and composition laws for that \lstinline!map! function. This would require a lot of work for a complicated type constructor such as $Z^{A,R}$. -However, that work can be avoided if we find a way of building up -$Z^{A,R}$ step by step via the known functor and contrafunctor constructions. -Each step automatically provides both a fragment of the code of \lstinline!map! -and a proof that the functor laws hold up to that step. In this way, -we will avoid the need to look for an implementation of \lstinline!map! -or prove the laws for each new functor and contrafunctor. The next -examples illustrate this procedure on a simpler type constructor. +All that work can be avoided if we build $Z^{A,R}$ step by step via +the known functor and contrafunctor constructions. Each step automatically +provides both a fragment of the code of \lstinline!map! and a proof +that the functor laws hold up to that step. In this way, we will avoid +the need to guess an implementation of \lstinline!map! or prove the +laws for each new functor and contrafunctor. The next examples illustrate +this procedure on a simpler type constructor. \subsubsection{Example \label{subsec:f-Example-recognize-type-variance-1}\ref{subsec:f-Example-recognize-type-variance-1}\index{examples (with code)}} @@ -3517,7 +3515,7 @@ \subsubsection{Example \label{subsec:f-Example-recognize-type-variance-1}\ref{su & G^{A,Z}\triangleq(\text{Int}+A)\times(\bbnum 1+(Z\rightarrow F^{A,Z}))=(\text{Int}+A)\times(\bbnum 1+(Z\rightarrow\text{Int}\rightarrow Z\rightarrow\text{Int}\times A))\quad. 
\end{align*} Mark the covariant and the contravariant positions in the type expressions -for $F$ and $G$: +for $F^{A,Z}$ and $G^{A,Z}$: \[ F^{A,Z}=\text{Int}\rightarrow\underset{-}{Z}\rightarrow\text{Int}\times\underset{+}{A}\quad,\quad G^{A,Z}=(\text{Int}+\underset{+}{A})\times(\bbnum 1+(\underset{-}{Z}\rightarrow\text{Int}\rightarrow\underset{-}{Z}\rightarrow\text{Int}\times\underset{+}{A}))\quad. \] @@ -3535,10 +3533,8 @@ \subsubsection{Example \label{subsec:f-Example-recognize-type-variance-1-1}\ref{ \subparagraph{Solution} -We need to build $G^{A,Z}$ via step-by-step constructions that start -from primitive types and type parameters. At the top level of its -type expression, $G^{A,Z}$ is a product type. So, we begin by using -the \textsf{``}functor product\textsf{''} construction (Statement~\ref{subsec:functor-Statement-functor-product}): +At the top level of its type expression, $G^{A,Z}$ is a product type. +So, we begin by using the \textsf{``}functor product\textsf{''} construction (Statement~\ref{subsec:functor-Statement-functor-product}): \begin{align*} & G^{A,Z}\cong G_{1}^{A}\times G_{2}^{A,Z}\quad,\\ \text{where }\quad & G_{1}^{A}\triangleq\text{Int}+A\quad\text{ and }\quad G_{2}^{A,Z}\triangleq\bbnum 1+(Z\rightarrow\text{Int}\rightarrow Z\rightarrow\text{Int}\times A)\quad. @@ -3565,19 +3561,19 @@ \subsubsection{Example \label{subsec:f-Example-recognize-type-variance-1-1}\ref{ $G_{2}^{A,Z}$: \begin{align*} & G_{2}^{A,Z}\triangleq\bbnum 1+(Z\rightarrow\text{Int}\rightarrow Z\rightarrow\text{Int}\times A)\quad.\\ -{\color{greenunder}\text{co-product}:}\quad & G_{2}^{A,Z}\cong\bbnum 1+G_{3}^{A,Z}\quad\text{ where }G_{3}^{A,Z}\triangleq Z\rightarrow\text{Int}\rightarrow Z\rightarrow\text{Int}\times A\quad.\\ -{\color{greenunder}\text{exponential}:}\quad & G_{3}^{A,Z}\cong Z\rightarrow G_{4}^{A,Z}\quad\text{ where }G_{4}^{A,Z}\triangleq\text{Int}\rightarrow Z\rightarrow\text{Int}\times A\quad.\\ -{\color{greenunder}\text{exponential}:}\quad & G_{4}^{A,Z}\cong\text{Int}\rightarrow G_{5}^{A,Z}\quad\text{ where }G_{5}^{A,Z}\triangleq Z\rightarrow\text{Int}\times A\quad.\\ -{\color{greenunder}\text{exponential}:}\quad & G_{5}^{A,Z}\cong Z\rightarrow G_{6}^{A}\quad\text{ where }G_{6}^{A}\triangleq\text{Int}\times A\quad.\\ +{\color{greenunder}\text{co-product}:}\quad & G_{2}^{A,Z}\cong\bbnum 1+G_{3}^{A,Z}\quad\text{ where }\quad G_{3}^{A,Z}\triangleq Z\rightarrow\text{Int}\rightarrow Z\rightarrow\text{Int}\times A\quad.\\ +{\color{greenunder}\text{exponential}:}\quad & G_{3}^{A,Z}\cong Z\rightarrow G_{4}^{A,Z}\quad\text{ where \quad}G_{4}^{A,Z}\triangleq\text{Int}\rightarrow Z\rightarrow\text{Int}\times A\quad.\\ +{\color{greenunder}\text{exponential}:}\quad & G_{4}^{A,Z}\cong\text{Int}\rightarrow G_{5}^{A,Z}\quad\text{ where }\quad G_{5}^{A,Z}\triangleq Z\rightarrow\text{Int}\times A\quad.\\ +{\color{greenunder}\text{exponential}:}\quad & G_{5}^{A,Z}\cong Z\rightarrow G_{6}^{A}\quad\text{ where }\quad G_{6}^{A}\triangleq\text{Int}\times A\quad.\\ {\color{greenunder}\text{product}:}\quad & G_{6}^{A}\cong\text{Int}\times A\cong\text{Const}^{\text{Int},A}\times\text{Id}^{A}\quad. \end{align*} Each of the type constructors $G_{1}$, ..., $G_{6}$ is a functor -in $A$ because all of the functor constructions preserve the functor -laws. Therefore, $G^{A,Z}$ is a functor in $A$. +in $A$ because all of the listed constructions preserve the functor +laws. Therefore, $G^{A,Z}$ is a functor in $A$. It remains to derive the code for the \lstinline!fmap! method of $G$. 
Each of the functor constructions combines the \lstinline!fmap! -implementations from previously defined functors into a new \lstinline!map! +implementations from previously defined functors into a new \lstinline!fmap! implementation, so we just need to combine the code fragments in the order of constructions. For brevity, we will use the notations $f^{\uparrow L}\triangleq\text{fmap}_{L}(f)$ and $x\triangleright f^{\uparrow L}$ instead of the Scala code \lstinline!x.map(f)! @@ -3587,7 +3583,9 @@ \subsubsection{Example \label{subsec:f-Example-recognize-type-variance-1-1}\ref{ {\color{greenunder}\text{co-product}:}\quad & G_{1}^{A}\triangleq\text{Int}+A\quad,\quad f^{\uparrow G_{1}}=\,\begin{array}{||cc|} \text{id} & \bbnum 0\\ \bbnum 0 & f -\end{array}\quad.\\ +\end{array}\quad. +\end{align*} +\begin{align*} {\color{greenunder}\text{co-product}:}\quad & G_{2}^{A,Z}\triangleq\bbnum 1+G_{3}^{A,Z}\quad,\quad f^{\uparrow G_{2}}=\,\begin{array}{||cc|} \text{id} & \bbnum 0\\ \bbnum 0 & f^{\uparrow G_{3}} @@ -3608,7 +3606,7 @@ \subsubsection{Example \label{subsec:f-Example-recognize-type-variance-1-1}\ref{ \begin{lstlisting} def fmap_G[A, B, Z](f: A => B): G[A, Z] => G[B, Z] = { case G(p, q) => val newP: Either[Int, B] = p.map(f) // Use the standard map method for Either[Int, A]. - val newQ: Option[Z => Int => Z => (Int, B)] = q.map { // Use the map method for Option[_]. + val newQ: Option[Z => Int => Z => (Int, B)] = q.map { // Use the map method for Option. (g3: Z => Int => Z => (Int, A)) => z1 => n => z2 => // The code of map for G_3. val (i, a) = g3(z1)(n)(z2) @@ -3618,10 +3616,10 @@ \subsubsection{Example \label{subsec:f-Example-recognize-type-variance-1-1}\ref{ } \end{lstlisting} In this way, the correct-by-construction code of $\text{fmap}_{G}$ -may be \emph{derived} for any functor $G$ given the type expression -of $G^{A}$, and similarly the code for $\text{cmap}_{C}$ for any +may be \emph{derived} for any functor $G$ whose type expression is +given, and similarly the code for $\text{cmap}_{C}$ for any given contrafunctor $C$. The corresponding algorithms could be implemented -as a Scala library that derives the code at compile time. +as a Scala macro library that derives the code at compile time. \section{Summary} @@ -3632,9 +3630,9 @@ \section{Summary} \item Implement a \lstinline!fmap! or a \lstinline!cmap! function that satisfies the appropriate laws. \item Use constructions to derive the correct code of \lstinline!fmap! -or \lstinline!cmap! without trial and error. -\item Use functor blocks to manipulate data wrapped in functors with more -readable code. +or \lstinline!cmap! with no guessing. +\item Write more readable code using functor blocks to manipulate data wrapped +in functors. \end{itemize} \subsection{Exercises: Functor and contrafunctor constructions \index{exercises}} @@ -3675,9 +3673,8 @@ \subsubsection{Exercise \label{subsec:functor-Exercise-functor-laws}\ref{subsec: \subsubsection{Exercise \label{subsec:functor-Exercise-functor-constructions-0}\ref{subsec:functor-Exercise-functor-constructions-0}} Show that $L^{A}\triangleq F^{A}\rightarrow G^{A}$ is, in general, -neither a functor nor a contrafunctor if both $F^{A}$ and $G^{A}$ -are functors or both are contrafunctors (give an example of suitable -$F^{A}$ and $G^{A}$). +neither a functor nor a contrafunctor if both $F$ and $G$ are functors +or both are contrafunctors (give examples of suitable $F$ and $G$). 
\subsubsection{Exercise \label{subsec:functor-Exercise-functor-constructions-1}\ref{subsec:functor-Exercise-functor-constructions-1}} @@ -3708,7 +3705,7 @@ \subsubsection{Exercise \label{subsec:functor-Exercise-functor-constructions-2}\ \subsubsection{Exercise \label{subsec:functor-Exercise-functor-constructions-3}\ref{subsec:functor-Exercise-functor-constructions-3}} -Show that the recursive type constructor $L^{\bullet}$ defined by: +Show that the recursive type constructor $L$ defined by: \[ L^{A}\triangleq\bbnum 1+A+L^{A} \] @@ -3717,7 +3714,7 @@ \subsubsection{Exercise \label{subsec:functor-Exercise-functor-constructions-3}\ \subsubsection{Exercise \label{subsec:functor-Exercise-functor-constructions-3-1}\ref{subsec:functor-Exercise-functor-constructions-3-1}} -Show that the perfect-shaped tree $L^{\bullet}$ defined by: +Show that the perfect-shaped tree $L$ defined by: \[ L^{A}\triangleq A\times A+L^{A\times A\times A} \] @@ -3732,9 +3729,9 @@ \subsubsection{Exercise \label{subsec:f-Exercise-recursive-functor-2}\ref{subsec \subsubsection{Exercise \label{subsec:functor-Exercise-functor-lifted-equivalence}\ref{subsec:functor-Exercise-functor-lifted-equivalence}} \textbf{(a)} Given any functor $F$, show that if two types $A$ and -$B$ are equivalent then the types $F^{A}$and $F^{B}$ are also equivalent. -\textbf{(b)} Show that the same property holds for any contrafunctor -$F$. +$B$ are equivalent then the types $F^{A}$ and $F^{B}$ are also +equivalent. \textbf{(b)} Show that the same property holds for any +contrafunctor $F$. \section{Further developments} @@ -3780,26 +3777,26 @@ \subsection{Profunctors\label{subsec:f-Profunctors}} is called a profunctor\index{profunctor}. We will sometimes also call the type constructor $P^{A}\triangleq\tilde{P}^{A,A}$ a profunctor. -Consider an exponential-polynomial type constructor $P^{A}$, no matter -how complicated, such as: +Consider an exponential-polynomial type constructor $P^{A}$ such +as: \[ P^{A}\triangleq\left(\bbnum 1+A\times A\rightarrow A\right)\times A\rightarrow\bbnum 1+\left(A\rightarrow A+\text{Int}\right)\quad. \] Each copy of the type parameter $A$ will occur either in covariant or in a contravariant position because no other possibility is available in exponential-polynomial types. So, we can always rename all contravariant -occurrences of the type parameter $A$ to $Z$ and so obtain a new -type constructor $\tilde{P}^{Z,A}$, which will be covariant in $A$ -and contravariant in $Z$. Since $\tilde{P}^{A,Z}$ is a functor in -$A$ and a contrafunctor in $Z$, we will be able to define a function +occurrences of the type parameter $A$ to $Z$ and obtain a new type +constructor $\tilde{P}^{Z,A}$, which will be covariant in $A$ and +contravariant in $Z$. Since $\tilde{P}^{A,Z}$ is a functor in $A$ +and a contrafunctor in $Z$, we will be able to define a function \lstinline!xmap!$_{\tilde{P}}$ satisfying the identity and composition laws. Setting $Z=A$, we will obtain a lawful \lstinline!xmap!$_{P}$, -which makes $P$ a profunctor. Thus, \emph{every} exponential-polynomial +which makes $P$ a profunctor. So, \emph{every} exponential-polynomial type constructor is a profunctor. GADTs\index{GADT}, such as the disjunctive type \lstinline!ServerAction[R]! shown in Section~\ref{subsec:Examples-of-non-functors}, cannot be -made into a profunctor. The type signature of \lstinline!xmap! cannot +made into profunctors. The type signature of \lstinline!xmap! cannot be implemented for \lstinline!ServerAction[R]! 
because it is not a fully parametric type constructor (and so is not exponential-polynomial). @@ -3844,7 +3841,7 @@ \subsection{Subtyping with injective or surjective conversion functions\label{su Another example of subtyping is found when using object-oriented inheritance.\index{object-oriented inheritance} Consider this code where a class inherits a value from a parent class: \begin{lstlisting} -sealed class HasID(val id: Long) +class HasID(val id: Long) final case class Person(name: String, override val id: Long) extends HasID(id) \end{lstlisting} The type \lstinline!Person! is a subtype of \lstinline!HasID!. The @@ -3867,8 +3864,11 @@ \subsection{Subtyping with injective or surjective conversion functions\label{su inverse} to $f$. We see that subtyping does not always involve an injective conversion -function. (Sometimes, the conversion function is surjective, and sometimes -neither injective nor surjective.) +function. Sometimes, the conversion function is surjective, and sometimes +neither injective nor surjective. As an example of the latter, consider +the product type \lstinline!(Option[A], HasID)! and its subtype \lstinline!(Some[A], Person)!. +The corresponding type conversion function is neither injective nor +surjective. Functors preserve subtyping; it turns out that functor liftings also preserve injectivity and surjectivity. If a function $f^{:A\rightarrow B}$ @@ -3894,7 +3894,7 @@ \subsubsection{Statement \label{subsec:f-Statement-functor-preserves-injective}\ {\color{greenunder}\text{use }f\bef g=\text{id}:}\quad & =\text{fmap}_{L}(\text{id})\\ {\color{greenunder}\text{identity law of }L:}\quad & =\text{id}\quad. \end{align*} -It follows that $\text{fmap}_{L}(f)\bef\text{fmap}_{L}(f)=\text{id}$. +It follows that $\text{fmap}_{L}(f)\bef\text{fmap}_{L}(g)=\text{id}$. So, $\text{fmap}_{L}(g)$ is a left inverse function for $\text{fmap}_{L}(f)$, which means that $\text{fmap}_{L}(f)$ is injective. diff --git a/sofp-src/tex/sofp-induction.tex b/sofp-src/tex/sofp-induction.tex index bd90069b6..b5016b0bd 100644 --- a/sofp-src/tex/sofp-induction.tex +++ b/sofp-src/tex/sofp-induction.tex @@ -1460,7 +1460,9 @@ \subsection{Tail recursion\label{subsec:Tail-recursion}} if (s.isEmpty) res else lengthT(s.tail, res + 1) \end{lstlisting} -Let us trace the evaluation of this function on an example: +(The \lstinline!import! declaration is needed whenever the code uses +the \lstinline!tailrec! annotation.) Let us trace the evaluation +of this function on an example: \begin{lstlisting} lengthT(Seq(1, 2, 3), 0) = lengthT(Seq(2, 3), 0 + 1) // = lengthT(Seq(2, 3), 1) @@ -1504,8 +1506,6 @@ \subsection{Tail recursion\label{subsec:Tail-recursion}} \lstinline!length! to achieve tail recursion, we define \lstinline!lengthT! as a nested function inside the code of \lstinline!length!: \begin{lstlisting} -import scala.annotation.tailrec - def length[A](xs: Seq[A]): Int = { @tailrec def lengthT(s: Seq[A], res: Int): Int = { if (s.isEmpty) res @@ -1690,18 +1690,18 @@ \subsubsection{Statement \label{subsec:Statement-fromdigitsT-equals-fromdigits}\ \subsection{Implementing general aggregation (\texttt{foldLeft})\label{subsec:implementing-general-aggregation-foldleft}} -An \textbf{aggregation}\index{aggregation} converts a sequence of -values into a single value. In general, the type of the result may -be different from the type of sequence elements. To describe that -general situation, we introduce type parameters \lstinline!A! and -\lstinline!B!, so that the input sequence is of type \lstinline!Seq[A]! 
+As a rule, an \textbf{aggregation}\index{aggregation} computes a +single value from a sequence of values. In general, the type of the +result may be different from the type of sequence elements. To describe +that general situation, we introduce type parameters \lstinline!A! +and \lstinline!B!, so that the input sequence is of type \lstinline!Seq[A]! and the aggregated value is of type \lstinline!B!. Then an inductive definition of any aggregation function \lstinline!f: Seq[A] => B! looks like this: \begin{itemize} \item (Base case.) For an empty sequence, we have \lstinline!f(Seq()) = b0!, where \lstinline!b0: B! is a given value. -\item (Inductive step.) Assuming that \lstinline!f(xs) = b! is already +\item (Inductive step.) Assuming that \lstinline!b = f(xs)! is already computed, we define \lstinline!f(xs :+ x) = g(x, b)!, where \lstinline!g! is a given function, \lstinline!g: (A, B) => B!. \end{itemize} @@ -1718,7 +1718,7 @@ \subsection{Implementing general aggregation (\texttt{foldLeft})\label{subsec:im \begin{lstlisting} def f[A, B](s: Seq[A], b: B, g: (A, B) => B): B = if (s.isEmpty) b - else g(s.last, f(s.init, b, g) + else g(s.last, f(s.init, b, g)) \end{lstlisting} However, this implementation is not tail-recursive. Applying \lstinline!f! to a sequence of, say, three elements, \lstinline!Seq(x, y, z)!, @@ -2267,12 +2267,12 @@ \subsubsection{Exercise \label{subsec:Exercise-2.2-foldleft-6}\ref{subsec:Exerci \end{lstlisting} -\section{Converting a single value into a sequence\label{sec:ch2Converting-a-single}} +\section{Generating a sequence from a single value\label{sec:ch2Converting-a-single}} An aggregation converts (\textsf{``}folds\textsf{''}) a sequence into a single value; -the opposite operation (\textsf{``}unfolding\textsf{''}) converts a single value into -a sequence. An example of this task is to compute the sequence of -decimal digits for a given integer: +the opposite operation (\textsf{``}unfolding\textsf{''}) builds a new sequence from +a single value and other needed information. An example is computing +the decimal digits of a given integer: \begin{lstlisting} def digitsOf(x: Int): Seq[Int] = ??? diff --git a/sofp-src/tex/sofp-monads.tex b/sofp-src/tex/sofp-monads.tex index e964e9e57..f78b0f69a 100644 --- a/sofp-src/tex/sofp-monads.tex +++ b/sofp-src/tex/sofp-monads.tex @@ -19,7 +19,7 @@ \subsection{Motivation for semi-monads: Nested iteration} How can we translate into code a computation that contains nested iterations, such as: \begin{equation} -\sum_{i=1}^{n}\sum_{j=1}^{n}\sum_{k=1}^{n}\frac{1}{1+i+j+k}=\,?\label{eq:semimonads-numerical-example-1} +\sum_{i=1}^{n}\sum_{j=1}^{i}\sum_{k=1}^{j}\frac{1}{1+i+j+k}=\,?\label{eq:semimonads-numerical-example-1} \end{equation} Recall that a \textsf{``}flat\textsf{''} (non-nested) iteration is translated into the \lstinline!map! method applied to a sequence: @@ -43,40 +43,52 @@ \subsection{Motivation for semi-monads: Nested iteration} is computed as \lstinline!(1 to n).map { i => 1.0 / (1 + i) }!, holding the values $\frac{1}{1+i}$ for $i=1,...,n$, and only then the \lstinline!sum! function is applied to the sequence. This separation is useful because -it gives us full flexibility to transform or aggregate the sequence. +it gives programmers full flexibility to transform or to aggregate +the sequence. So, we will treat nested iterations in a similar way: first, compute a sequence of values that result from nested iterations, and then apply transformations or aggregations to that sequence. If we use nested \lstinline!map! 
operations, we will obtain a nested -data structure, e.g., a vector of vectors: +data structure, e.g., a vector of vectors. As an example, consider +this computation: +\[ +\sum_{i=1}^{4}\sum_{j=1}^{i}i*j\quad. +\] +Begin to implement this computation via this code: \begin{lstlisting} -scala> (1 to 5).map(i => (1 to i).map(j => i * j)) -res0: IndexedSeq[IndexedSeq[Int]] = Vector(Vector(1), Vector(2, 4), Vector(3, 6, 9), Vector(4, 8, 12, 16), Vector(5, 10, 15, 20, 25)) +scala> (1 to 4).map(i => (1 to i).map(j => i * j)) +res0: IndexedSeq[IndexedSeq[Int]] = Vector(Vector(1), Vector(2, 4), Vector(3, 6, 9), Vector(4, 8, 12, 16)) \end{lstlisting} -We need to \textsf{``}flatten\textsf{''} this nested structure into a simple, non-nested -sequence. The standard method for that is \lstinline!flatten!, and -its combination with \lstinline!map! can be replaced by \lstinline!flatMap!: +To compute the sum, we need to accumulate all the values from all +the nested lists. So, we need to \textsf{``}flatten\textsf{''} this nested structure +into a simple, non-nested sequence. The standard method for that is +\lstinline!flatten!, and its combination with \lstinline!map! can +be replaced by \lstinline!flatMap!: \begin{lstlisting} scala> (1 to 4).map(i => (1 to i).map(j => i * j)).flatten res1: IndexedSeq[Int] = Vector(1, 2, 4, 3, 6, 9, 4, 8, 12, 16) scala> (1 to 4).flatMap(i => (1 to i).map(j => i * j)) // Same result as above. res2: IndexedSeq[Int] = Vector(1, 2, 4, 3, 6, 9, 4, 8, 12, 16) + +scala> (1 to 4).flatMap(i => (1 to i).map(j => i * j)).sum +res3: Int = 65 \end{lstlisting} + To represent more nesting, we use more \lstinline!flatMap! operations. For example, to implement Eq.~(\ref{eq:semimonads-numerical-example-1}): \begin{lstlisting} def example(n: Int): Double = (1 to n).flatMap { i => - (1 to n).flatMap { j => - (1 to n).map { k => - 1.0 / (1.0 + i + j + k) } + (1 to i).flatMap { j => + (1 to j).map { k => + 1.0 / (1 + i + j + k) } } }.sum scala> example(10) -res3: Double = 63.20950497687006 +res4: Double = 14.250481740989049 \end{lstlisting} These examples show that converting nested iterations into a simple iteration means replacing all \lstinline!map! functions by \lstinline!flatMap! @@ -93,9 +105,9 @@ \subsection{Motivation for semi-monads: Nested iteration} \texttt{\textcolor{blue}{\footnotesize{}}} \begin{lstlisting} (for { i <- 1 to n - j <- 1 to n - k <- 1 to n - } yield 1.0 / (1.0 + i + j + k) + j <- 1 to i + k <- 1 to j + } yield 1.0 / (1 + i + j + k) ).sum \end{lstlisting} % @@ -104,9 +116,9 @@ \subsection{Motivation for semi-monads: Nested iteration} \texttt{\textcolor{blue}{\footnotesize{}}} \begin{lstlisting} (1 to n).flatMap { i => - (1 to n).flatMap { j => - (1 to n).map { k => - 1.0 / (1.0 + i + j + k) + (1 to i).flatMap { j => + (1 to j).map { k => + 1.0 / (1 + i + j + k) }}}.sum \end{lstlisting} % @@ -133,18 +145,16 @@ \subsection{Motivation for semi-monads: Nested iteration} \[ T=\left\{ \left.x+y+z~\right|~x\in P,\,y\in Q,\,z\in R,\,f(x,y,z)=0\,\right\} \quad. \] -Here, $P$, $Q$, $R$ are given sets of numbers, and the result is +Here $P$, $Q$, $R$ are given sets of numbers, and the result is a set $T$ of numbers obtained by adding some $x$ from $P$, some -$y$ from $Q$, and some $z$ from $R$ such that $f(x,y,z)=0$. A -direct implementation of this formula is the code shown at left. Here, -\lstinline!p!, \lstinline!q!, \lstinline!r! are given collections -(say, arrays) and the result \lstinline!t! is again an array. 
Just -like the mathematical formula\textsf{'}s result is a collection of some $x+y+z$ -values, the functor block\textsf{'}s result is a collection of values computed -after the \lstinline!yield! keyword. +$y$ from $Q$, and some $z$ from $R$ such that $f(x,y,z)=0$. An +implementation of this formula is the code shown above where \lstinline!p!, +\lstinline!q!, \lstinline!r! are given collections (say, arrays) +and the result \lstinline!t! is again an array that collects some +$x+y+z$ values. To develop more intuition about using functor blocks with multiple -left arrows, look at this code: +left arrows, look at this sample code: \noindent \texttt{\textcolor{blue}{\footnotesize{}}}% \begin{minipage}[c]{0.475\columnwidth}% @@ -177,13 +187,12 @@ \subsection{Motivation for semi-monads: Nested iteration} \vspace{0.2\baselineskip} One can imagine that each line (which we can read as \textsf{``}for all $i$ in $\left[1,...,m\right]$\textsf{''}, \textsf{``}for all $j$ in $\left[1,...,n\right]$\textsf{''}, -etc.) will produce an intermediate sequence of the same type. Each -next line continues the calculation from the previous intermediate -sequence. +etc.) produces an intermediate sequence of the same type. Each next +line continues the calculation from the previous intermediate sequence. If this intuition is correct, we should be able to refactor the code by cutting the calculation at any place and continuing in another -functor block, without changing the result value: +functor block: \noindent \texttt{\textcolor{blue}{\footnotesize{}}}% \begin{minipage}[c]{0.475\columnwidth}% @@ -192,7 +201,7 @@ \subsection{Motivation for semi-monads: Nested iteration} val result = for { i <- 1 to m j <- 1 to n -// We will cut the block here, making i and j available for further computations. +// Cut the functor block here, making i and j available for further computations. x = f(i, j) k <- 1 to p y = g(i, j, k) @@ -225,15 +234,16 @@ \subsection{Motivation for semi-monads: Nested iteration} \item Each \textsf{``}source line\textsf{''} computes an intermediate collection of the same type, so all values to the right of \lstinline!<-! must use \emph{the same} type constructor (or its subtypes). -\item The entire functor block\textsf{'}s result is again a collection using the -same type constructor. The result is \emph{not} the expression under -\lstinline!yield!; instead, it is a collection of those expressions. +\item The entire functor block\textsf{'}s result is again a collection that uses +the same type constructor. The result is \emph{not} the expression +under \lstinline!yield!; instead, the result is a collection of those +expressions. \end{itemize} -So far, we have been using sequences as the main type constructor. -However, functor blocks with several left arrows will work with any -other type constructor that has \lstinline!map! and \lstinline!flatMap! +So far, we have been using sequences as the collection type. However, +functor blocks with several left arrows will also work with any other +type constructor that has \lstinline!map! and \lstinline!flatMap! methods. In the next sections, we will see how to use functor blocks -with different type constructors. +with different type constructors. Functors having \lstinline!flatMap! methods are called \textbf{semi-monads} in\index{semi-monads} this book.\footnote{There is no single accepted name. The libraries \texttt{scalaz} and @@ -243,12 +253,12 @@ \subsection{Motivation for semi-monads: Nested iteration} Semi-monads with a \lstinline!pure! 
method (and obeying the appropriate laws) are called \textbf{monads}.\index{monads} This chapter will study semi-monads and monads in detail. For now, we note that the -functor block syntax does not require functors to have a \lstinline!pure! -method; it works just as well with semi-monads. +functor block syntax does not require a \lstinline!pure! method; +it works just as well with semi-monads. If a functor has a \lstinline!withFilter! method, Scala\textsf{'}s functor block will also support the \lstinline!if! operation (see Section~\ref{sec:Practical-uses-of-filterable-functors}). -So, the full functionality of functor blocks can be used with \emph{filterable +So, the full functionality of functor blocks is available for \emph{filterable semi-monads}. \subsection{List-like monads} @@ -386,8 +396,8 @@ \subsubsection{Example \label{subsec:Example-list-monads-4}\ref{subsec:Example-l Iterating over \lstinline!permutations(xsWithoutX)!, we obtain the permutations \lstinline!"bc"! and \lstinline!"cb"!. These permutations need to be concatenated with \lstinline!x == "a"!, yielding \lstinline!"abc"! -and \lstinline!"acb"!, which is the correct part of the final answer. -So, we write a nested iteration and concatenate the results: +and \lstinline!"acb"!, which is part of a correct answer. So, we +write a nested iteration and concatenate the results: \begin{lstlisting} def permutations(xs: Seq[String]): Seq[String] = for { x <- xs @@ -404,11 +414,12 @@ \subsubsection{Example \label{subsec:Example-list-monads-4}\ref{subsec:Example-l an empty list always makes the final result also an empty list. To fix this, add a base case: \begin{lstlisting} -def permutations(xs: Seq[String]): Seq[String] = if (xs.length == 1) xs else for { - x <- xs - xsWithoutX = xs.filter(_ != x) - rest <- permutations(xsWithoutX) -} yield x + rest +def permutations(xs: Seq[String]): Seq[String] = if (xs.length == 1) xs else + for { + x <- xs + xsWithoutX = xs.filter(_ != x) + rest <- permutations(xsWithoutX) + } yield x + rest scala> permutations(Seq("a", "b", "c", "d")) res1: Seq[String] = List(abcd, abdc, acbd, acdb, adbc, adcb, bacd, badc, bcad, bcda, bdac, bdca, cabd, cadb, cbad, cbda, cdab, cdba, dabc, dacb, dbac, dbca, dcab, dcba) @@ -479,11 +490,11 @@ \subsubsection{Example \label{subsec:Example-list-monads-5}\ref{subsec:Example-l \subparagraph{Solution} -The $8$ queens must be placed on an $8\times8$ chess board so that -no queen threatens any other queen. To make our work easier, we note -that each queen must be placed in a different row. So, it is sufficient -to find the column index for each queen. A solution is a sequence -of $8$ indices. +The $8$ queens must be placed on an $8\times8$ chess board in such +a way that no queen threatens any other queen. To make our work easier, +we note that each queen must be placed in a different row. So, it +is sufficient to find the column index for each queen. A solution +is a sequence of $8$ indices. Begin by iterating over all possible combinations of column indices: \begin{lstlisting} @@ -625,7 +636,7 @@ \subsubsection{Example \label{subsec:Example-list-monads-7}\ref{subsec:Example-l We begin by designing a data type to represent CNFs. Let the type parameter $A$ stand for the elementary expressions that we denoted -by \lstinline!a!, \lstinline!b!, \lstinline*!c*, etc., i.e., Boolean +by \lstinline!a!, \lstinline!b!, \lstinline*!c*, ..., i.e., Boolean formulas that contain no conjunctions or no disjunctions. 
Then we may represent a conjunction as a \lstinline!Set[A]!, and a disjunction of conjunctions as a \lstinline!Set[Set[A]]!. For instance, \lstinline!(a || b) && (c || d || e)! @@ -775,7 +786,7 @@ \subsubsection{Example \label{subsec:Example-list-monads-7}\ref{subsec:Example-l \begin{lstlisting} def dnf2cnf[A](dnf: DNF[A]): CNF[A] = simplifyCNF(toCNF(dnf)) def cnf2dnf[A](cnf: CNF[A]): DNF[A] = DNF(dnf2cnf(DNF(cnf.s)).s) - // Verify that dnf2cnf and cnf2dnf are inverses: + // Verify that dnf2cnf and cnf2dnf are inverses: scala> (dnf2cnf(cnf2dnf(cnf1)) == cnf1, cnf2dnf(dnf2cnf(dnf1)) == dnf1) res4: (Boolean, Boolean) = (true,true) \end{lstlisting} @@ -882,7 +893,7 @@ \subsubsection{Example \label{subsec:Example-matrix-products}\ref{subsec:Example pairs.sum } -scala> vectorMatrixProduct(Seq(3,4,5), matrix_T) +scala> vectorMatrixProduct(Seq(3, 4, 5), matrix_T) res1: Seq[Int] = Vector(26, 260) \end{lstlisting} @@ -972,8 +983,8 @@ \subsection{Pass/fail monads\label{subsec:Pass/fail-monads}} What would a \emph{nested} \textsf{``}iteration\textsf{''} over several \lstinline!Option! values do? When all of the \lstinline!Option! values are non-empty, the \textsf{``}iteration\textsf{''} will perform some computations using the wrapped -values. However, if even one of the \lstinline!Option! values happens -to be empty, the computed result will be an empty value: +values. However, if even just one of the \lstinline!Option! values +happens to be empty, the computed result will be empty: \begin{lstlisting} scala> for { x <- Some(123) @@ -1021,7 +1032,7 @@ \subsection{Pass/fail monads\label{subsec:Pass/fail-monads}} where a \textsf{``}safe integer\textsf{''} computation continues only as long as every result is a success; the chain of operations stops at the first failure. The code of Example~\ref{subsec:disj-Example-resultA} introduced -custom data type with hand-coded methods such as \lstinline!add!, +a custom data type with hand-coded methods such as \lstinline!add!, \lstinline!mul!, and \lstinline!div!. We can now implement equivalent functionality using functor blocks and a standard type \lstinline!Either[String, Int]!: \begin{lstlisting} @@ -1037,8 +1048,8 @@ \subsection{Pass/fail monads\label{subsec:Pass/fail-monads}} } yield z + 10 result: Result = Left("error: sqrt(-50)") \end{lstlisting} -The concise and readable code of \lstinline!val result! replaces -more verbose implementations such as: +This concise and readable code replaces verbose implementations such +as: \begin{lstlisting} val result: Result = previous match { case Left(error) => Left(error) @@ -1054,7 +1065,7 @@ \subsection{Pass/fail monads\label{subsec:Pass/fail-monads}} that may fail; the first failure is then returned as a value. \subsubsection{Example \label{subsec:Example-:chain-with-option}\ref{subsec:Example-:chain-with-option}: -chaining computations with \texttt{Option}\index{examples (with code)}} +chaining with \texttt{Option}\index{examples (with code)}} Some clients have placed some orders with some companies. The information is made available via Java system properties, for example: @@ -1103,7 +1114,7 @@ \subsubsection{Example \label{subsec:Example-:chain-with-option}\ref{subsec:Exam consider a task where we need to keep track of error information. \subsubsection{Example \label{subsec:Example-:chain-with-option-1}\ref{subsec:Example-:chain-with-option-1}: -chaining computations with \texttt{Try}} +chaining with \texttt{Try}} Three given functions $f$, $g$, $h$ all have Scala type \lstinline!Int => Int! 
but may throw exceptions. Given an integer $x$, compute $f(g(h(x)))$ @@ -1141,13 +1152,13 @@ \subsubsection{Example \label{subsec:Example-chaining-future}\ref{subsec:Example be run in parallel on another CPU thread. For this reason, \lstinline!Future!\textsf{'}s methods (such as \lstinline!map! and \lstinline!flatMap!) require an implicit \lstinline!ExecutionContext! argument, which provides -access to a JVM thread pool where computations will be scheduled. +access to a JVM thread pool where parallel computations will be scheduled. As soon as a \lstinline!Future! value is created, its computation is scheduled immediately. So, several \lstinline!Future! values may run their computations in parallel. Nevertheless, computations chained via \lstinline!flatMap! (or in a functor block) will run sequentially -if new values need to wait for previous values: +if new \lstinline!Future! values depend on previous results: \begin{lstlisting} import scala.concurrent.ExecutionContext.Implicits.global def longComputation(x: Double): Future[Double] = Future { ... } // A long computation. @@ -1167,11 +1178,10 @@ \subsubsection{Example \label{subsec:Example-chaining-future}\ref{subsec:Example calls in parallel, since each call depends on the result of the previous one. -Another possibility is that each \lstinline!longComputation(...)! -is independent of the results of the other computations. Then the -three \lstinline!Future! values may be created up front, and the -functor block code represents three \textsf{``}long computations\textsf{''} running -in parallel: +What if each \lstinline!longComputation(...)! were independent of +the results of the other computations? Then the three \lstinline!Future! +values may be created up front, and the functor block code will represent +three \textsf{``}long computations\textsf{''} running in parallel: \begin{lstlisting} val long1 = longComputation(10.0) val long2 = longComputation(50.0) @@ -1195,25 +1205,25 @@ \subsection{Tree-like semi-monads and monads\label{subsec:Tree-like-semimonads-a \text{BT}^{A}\triangleq A+\text{BT}^{A}\times\text{BT}^{A}. \] -To show that $\text{BT}^{A}$ is a functor, Statement~\ref{subsec:functor-Statement-functor-composition-1} +To show that $\text{BT}$ is a functor, Statement~\ref{subsec:functor-Statement-functor-composition-1} replaces the right-hand side $A+\text{BT}^{A}\times\text{BT}^{A}$ by an arbitrary \textsf{``}recursion scheme\textsf{''}\index{recursion scheme} $S^{A,\text{BT}^{A}}$ -and then shows that the recursive type constructor $L^{\bullet}$ -defined by $L^{A}\triangleq S^{A,L^{A}}$ is a functor. (The type -$\text{BT}^{A}$ is obtained with $S^{A,R}\triangleq A+R\times R$.) -As we will see, the type constructor $L^{A}$ will be a semi-monad -or a monad with certain choices of $S^{\bullet,\bullet}$. - -For lists, nested iteration goes over inner lists contained in an -outer list. How does nested iteration work for a tree-shaped collection? -An iteration over a tree enumerates the values at the \emph{leaves} -of a tree. So, a tree analog of nested iteration implies that each -leaf of an outer tree contains an inner tree. A \lstinline!flatMap! -function must concatenate all nested trees into a single \textsf{``}flattened\textsf{''} -tree. +and then shows that the recursive type constructor $L$ defined by +$L^{A}\triangleq S^{A,L^{A}}$ is a functor. (The type constructor +$\text{BT}$ is obtained with $S^{A,R}\triangleq A+R\times R$.) 
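For concreteness, the binary tree type and its recursion scheme may be sketched in Scala as follows (a minimal illustration; the names \lstinline!BT!, \lstinline!Leaf!, \lstinline!Branch!, and \lstinline!S! are chosen only for this sketch):
\begin{lstlisting}
sealed trait BT[A]                       // BT^A = A + BT^A x BT^A.
final case class Leaf[A](a: A) extends BT[A]
final case class Branch[A](left: BT[A], right: BT[A]) extends BT[A]

sealed trait S[A, R]                     // The recursion scheme S^{A,R} = A + R x R.
final case class SLeaf[A, R](a: A) extends S[A, R]
final case class SBranch[A, R](left: R, right: R) extends S[A, R]
\end{lstlisting}
Replacing the type parameter \lstinline!R! in \lstinline!S[A, R]! by the recursive type itself (as in $L^{A}\triangleq S^{A,L^{A}}$) recovers the type \lstinline!BT[A]!.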
As +we will see, the type constructor $L$ will be a semi-monad or a monad +with certain choices of $S$. + +For lists, nested iteration goes over inner lists stored in an outer +list. How does nested iteration work for a tree-shaped collection? +An iteration over a value of type $\text{BT}^{A}$ enumerates the +values of type $A$ stored at the \emph{leaves} of a tree. So, a \textsf{``}nested +tree\textsf{''} means that each leaf of an outer tree contains an inner tree. +To convert a nested tree into a single \textsf{``}flattened\textsf{''} tree, we just +need to graft a subtree in place of a leaf. Let us implement the \lstinline!flatMap! method for the binary tree -$BT^{\bullet}$ in that way. It is convenient to define an equivalent +$\text{BT}$ in that way. It is convenient to define an equivalent curried function (denoted by \textsf{``}$\text{flm}$\textsf{''}) with type signature: \[ \text{flm}^{A,B}:(A\rightarrow\text{BT}^{B})\rightarrow\text{BT}^{A}\rightarrow\text{BT}^{B}\quad. @@ -1238,24 +1248,23 @@ \subsection{Tree-like semi-monads and monads\label{subsec:Tree-like-semimonads-a \] To visualize how the \lstinline!flatMap! method operates on binary -trees, let us compute \lstinline!tree1.flatMap(f)!, where we take -\lstinline!tree1 = !{\scriptsize{} \Tree[ [ $a_1$ ] [ [ $a_2$ ] [ $a_3$ ] ] ] } +trees, let us compute \lstinline!flm(f)(tree1)!, where \lstinline!tree1 = !{\scriptsize{} \Tree[ [ $a_1$ ] [ [ $a_2$ ] [ $a_3$ ] ] ] } and a function $f:A\rightarrow\text{BT}^{B}$ that has $f(a_{1})=${\scriptsize{} \Tree[ [ $b_0$ ] [ $b_1$ ] ] }, $f(a_{2})=b_{2}$, and $f(a_{3})=${\scriptsize{} \Tree[ [ $b_3$ ] [ $b_4$ ] ]\relax}. (Here $a_{i}$ for $i=1,2,3$ are some values of type $A$ and $b_{i}$ for $i=0,...,4$ are some values of type $B$.) Evaluating the code -of \lstinline!flatMap!, we find \lstinline!tree1.flatMap(f)! ={\scriptsize{} \Tree[ [ [ $b_0$ ] [ $b_1$ ] ] [ [ $b_2$ ] [ [ $b_3$ ] [ $b_4$ ] ] ] ]\relax}. +of \lstinline!flm!, we find \lstinline!flm(f)(tree1)! ={\scriptsize{} \Tree[ [ [ $b_0$ ] [ $b_1$ ] ] [ [ $b_2$ ] [ [ $b_3$ ] [ $b_4$ ] ] ] ]\relax}. So, we see that \lstinline!flatMap! works by grafting a subtree into every \lstinline!Leaf! of a given tree: A leaf is replaced by a new -tree in line~6 in the code of \lstinline!flatMap!. That code can -be generalized to the recursive type $\text{PT}^{A}$ (representing +tree in line~6 in the code of \lstinline!flm!. That code can be +generalized to a recursive type constructor $\text{PT}$ (representing a \textsf{``}tree with $P$-shaped branches\textsf{''}) defined by: \[ \text{PT}^{A}\triangleq A+P^{\text{PT}^{A}}\quad, \] -for any given functor $P$. The disjunctive part $A+\bbnum 0$ is -replaced by a new tree: +where $P$ is any given functor. The disjunctive part $A+\bbnum 0\,$ +is replaced by a new tree: \begin{lstlisting} sealed abstract class PT[P[_] : Functor, A] // Need an `abstract class` due to implicits. final case class Leaf[P[_] : Functor, A](x: A) extends PT[P, A] @@ -1263,7 +1272,7 @@ \subsection{Tree-like semi-monads and monads\label{subsec:Tree-like-semimonads-a def flm[P[_]: Functor, A, B](f: A => PT[P, B]): PT[P, A] => PT[P, B] = { case Leaf(x) => f(x) // Here f(x) has type PT, which could be a Leaf or a Branch. - case Branch(p) => Branch(p.map(t => flm(f)(t)) // Conceptually, Branch(p.map(flm(f))). + case Branch(p) => Branch(p.map(t => flm(f)(t)) // Equivalently, this is Branch(p.map(flm(f))). 
} \end{lstlisting} The same function is written in the code notation as: @@ -1277,8 +1286,8 @@ \subsection{Tree-like semi-monads and monads\label{subsec:Tree-like-semimonads-a We can also implement \lstinline!flatMap! for more general type constructors $L$ defined by $L^{A}\triangleq P^{A}+P^{L^{A}}$ for some functor -$P$. Such $L^{A}$ can be visualized as trees with $P$-shaped branches -and $P$-shaped leaves. +$P$. Values of type $L^{A}$ can be visualized as trees with $P$-shaped +branches and $P$-shaped leaves. \begin{lstlisting} sealed abstract class PLPT[P[_] : Functor, A] final case class Leaf[P[_] : Functor, A](px: P[A]) extends PLPT[P, A] @@ -1309,7 +1318,7 @@ \subsubsection{Example \label{subsec:Example-monad-branching-properties}\ref{sub Implement the \lstinline!flatMap! operation for a tree of configuration properties of the form: -\begin{lstlisting} +\begin{lstlisting}[language=bash] url: http://server:8000 users: user: @@ -1326,8 +1335,7 @@ \subsubsection{Example \label{subsec:Example-monad-branching-properties}\ref{sub The code for this data structure must support any number of simple properties or branches with \lstinline!String! labels. A suitable structure is a tree with $P$-shaped leaves and $P$-shaped branches, -where the functor $P$ is defined as $P^{A}\triangleq\text{List}^{\text{String}\times A}$. -Implement the tree type: +where we define $P^{A}\triangleq\text{List}^{\text{String}\times A}$: \begin{lstlisting} sealed trait PropTree[A] // Introduce the type parameter A for the values of properties. final case class Simple[A](props: List[(String, A)]) extends PropTree[A] @@ -1423,7 +1431,8 @@ \subsubsection{Example \label{subsec:Example-monad-substitution-language}\ref{su The code of the \lstinline!map! method is: \begin{lstlisting} -def map[B](f: A => B): Term[B] = this match { // This code must be within `trait Term[A]`. +// This code must be within `trait Term[A]`. +def map[B](f: A => B): Term[B] = this match { case Const(value) => Const(value) case Var(name) => Var(f(name)) case Plus(t1, t2) => Plus(t1 map f, t2 map f) @@ -1433,7 +1442,8 @@ \subsubsection{Example \label{subsec:Example-monad-substitution-language}\ref{su The code of \lstinline!flatMap! replaces variables by new trees, leaving everything else unchanged: \begin{lstlisting} -def flatMap[B](f: A => Term[B]): Term[B] = this match { // This code must be within `trait Term[A]`. +// This code must be within `trait Term[A]`. +def flatMap[B](f: A => Term[B]): Term[B] = this match { case Const(value) => Const(value) case Var(name) => f(name) case Plus(t1, t2) => Plus(t1 flatMap f, t2 flatMap f) @@ -1486,23 +1496,23 @@ \subsection{The \texttt{Reader} monad\label{subsec:The-Reader-monad}} \index{monads!Reader monad@\texttt{Reader} monad}This chapter started with the list-like monads whose \lstinline!flatMap! method is motivated by the requirements of nested iteration. We then looked at tree-like -monads, which generalize nested list iterations to tree grafting. -It turns out that the \lstinline!flatMap! method can be generalized -to many other type constructors that are useful for various programming -tasks not limited to nested iteration. - -A general (semi)monad type constructor $L^{A}$ no longer represents -a collection of data items of type $A$. Instead, we regard $L^{A}$ -informally as a value of type $A$ wrapped in a special \textsf{``}computational -effect\textsf{''}. 
We view \textsf{``}computations with an $L$-effect\textsf{''} as functions -of type $A\rightarrow L^{B}$ (as in \lstinline!flatMap!\textsf{'}s argument -type). In this view, different monads \textemdash{} such as list-like, -pass/fail, or tree-like \textemdash{} implement different kinds of -effects. An ordinary function of type $A\rightarrow B$ is a computation -with a \textsf{``}trivial effect\textsf{''}. +monads whose \lstinline!flatMap! methods work via tree grafting. +It turns out that the \lstinline!flatMap! method can be supported +by many other type constructors useful for various programming tasks +not necessarily related to nested iteration. + +A general (semi)monad type constructor $L$ no longer represents a +collection of data items. Instead, we regard $L^{A}$ informally as +a value of type $A$ wrapped in a special \textsf{``}computational effect\textsf{''}. +We view \textsf{``}computations with an $L$-effect\textsf{''} as functions of type +$A\rightarrow L^{B}$ (as in \lstinline!flatMap!\textsf{'}s argument type). +In this view, different monads \textemdash{} such as list-like, pass/fail, +or tree-like \textemdash{} implement different kinds of effects. An +ordinary function of type $A\rightarrow B$ is a computation with +a \textsf{``}trivial effect\textsf{''}. In this sense, monadic effects are \emph{not} side effects.\index{side effect} -Functions of type $A\rightarrow L^{B}$ can be referentially transparent\index{referential transparency} +Functions of type $A\rightarrow L^{B}$ are referentially transparent\index{referential transparency} and behave as values.\index{value-like behavior} Informally, an \textsf{``}$L$-effect\textsf{''} describes the information computed by a function of type $A\rightarrow L^{B}$ in addition to a value of type $B$. To make the vague idea of \textsf{``}effect\textsf{''} @@ -1519,17 +1529,18 @@ \subsection{The \texttt{Reader} monad\label{subsec:The-Reader-monad}} will have type $A\times Z\rightarrow B$ instead of $A\rightarrow B$. It remains to rewrite the type $A\times Z\rightarrow B$ in the form $A\rightarrow L^{B}$ with a suitable choice of a type constructor -$L^{\bullet}$. By currying, we obtain an equivalent type: +$L$. By currying, we obtain an equivalent type: \[ (A\times Z\rightarrow B)\cong(A\rightarrow Z\rightarrow B)\quad, \] which has the form $A\rightarrow L^{B}$ if we define $L^{A}\triangleq Z\rightarrow A$. This type constructor is called the \lstinline!Reader! monad\index{monads!Reader monad@\texttt{Reader} monad} and is denoted by $\text{Read}^{Z,A}\triangleq Z\rightarrow A$. The -Scala definition is \lstinline!type Reader[Z, A] = Z => A!. +Scala definition is: \lstinline!type Reader[Z, A] = Z => A!. Fully parametric implementations of \lstinline!map! and \lstinline!flatMap! -directly follow from their type signatures: +for \lstinline!Reader! can be derived unambiguously from their type +signatures: \begin{lstlisting}[mathescape=true] def map[A, B](r: Z => A)(f: A => B): (Z => B) = r andThen f // Example $\color{dkgreen} \ref{subsec:ch-solvedExample-5}$. def flatMap[A, B](r: Z => A)(f: A => Z => B): (Z => B) = { z => f(r(z))(z) } // Exercise $\color{dkgreen} \ref{subsec:ch-Exercise-5}$(c). @@ -1592,7 +1603,8 @@ \subsection{The \texttt{Reader} monad\label{subsec:The-Reader-monad}} def filterFiles(runSh: RunSh, files: String, patterns: String): String = runSh(s"grep -f $patterns", files)._2 def lineCounts(runSh: RunSh, files: String): Array[Int] = files.split("\n") // Array of file names. 
- .map { file => runSh("wc -l $file", "")._2.replaceAll("^ +", "").split(" ")(0).toInt } + .map { file => + runSh("wc -l $file", "")._2.replaceAll("^ +", "").split(" ")(0).toInt } \end{lstlisting} This code assumes that file names do not contain the newline character \lstinline!"\n"!. Use this code only as an illustration of a use @@ -1624,11 +1636,14 @@ \subsection{The \texttt{Reader} monad\label{subsec:The-Reader-monad}} choices of type parameters \lstinline!A!, \lstinline!B!: \begin{lstlisting} type Reader[Z, A] = Z => A -def listFilesR(dir: String): Reader[RunSh, String] = runSh => runSh(s"find $dir -type f", "")._2 -def filterFilesR(patterns: String): String => Reader[RunSh, String] = files => runSh => - runSh(s"grep -f $patterns", files)._2 +def listFilesR(dir: String): Reader[RunSh, String] = runSh => + runSh(s"find $dir -type f", "")._2 +def filterFilesR(patterns: String): String => Reader[RunSh, String] = files => + runSh => + runSh(s"grep -f $patterns", files)._2 def lineCountsR(files: String): Reader[RunSh, Array[Int]] = runSh => files.split("\n") - .map { file => runSh("wc -l $file", "")._2.replaceAll("^ +", "").split(" ")(0).toInt } + .map { file => + runSh("wc -l $file", "")._2.replaceAll("^ +", "").split(" ")(0).toInt } \end{lstlisting} This allows us to express \lstinline!getLineCount! as a combination of the three \textsf{``}procedures\textsf{''} by using the \lstinline!Reader! monad\textsf{'}s @@ -1759,27 +1774,28 @@ \subsection{The \texttt{Writer} monad} \begin{lstlisting} final case class Logs(begin: LocalDateTime, end: LocalDateTime, message: String) { def |+|(other: Logs): Logs = Logs(begin, other.end, message + "\n" + other.message) -} // For simplicity, we assume that timestamps will be monotonically increasing. +} // We assume that timestamps will be monotonically increasing. \end{lstlisting} The type \lstinline!Logs! is not a monoid because its binary operation discards some of the input data, so we cannot define an \textsf{``}empty\textsf{''} value satisfying the identity laws (see Eq.~(\ref{eq:identity-laws-of-monoid}) in Example~\ref{subsec:tc-Example-Monoids}). -We can now use the semi-monad \lstinline!Writer[A, Logs]!. Here are -some example computations: +We can now use the semi-monad \lstinline!Writer[A, Logs]!. Here is +some example code where we simulate long-running computations via +\lstinline!Thread.sleep!: \begin{lstlisting} type Logged[A] = Writer[A, Logs] -def log[A](message: String)(x: A): Logged[A] = { // Define this function for convenience. +def log[A](message: String)(x: A): Logged[A] = { // A helper function. val timestamp = LocalDateTime.now new Logged(x, Logs(timestamp, timestamp, message)) } -def compute[A](x: => A): A = { Thread.sleep(100L); x } // Simulate a long computation. +def compute[A](x: => A): A = { Thread.sleep(100L); x } scala> val result: Logged[Double] = for { - x <- log("begin with 3")(compute(3)) // The initial source type is `Logged[Int]`. + x <- log("begin with 3")(compute(3)) // The initial source type is `Logged[Int]`. y <- log("add 1")(compute(x + 1)) - z <- log("multiply by 2.0")(compute(y * 2.0)) // The type of result becomes `Logged[Double]`. + z <- log("multiply by 2.0")(compute(y * 2.0)) // The type of result becomes `Logged[Double]`. } yield z // The computation takes between 300ms and 400 ms. 
res0: Logged[Double] = Writer(8.0,Logs(2020-02-15T22:02:42.313,2020-02-15T22:02:42.484,begin with 3 add 1 @@ -1793,17 +1809,16 @@ \subsection{The \texttt{Writer} monad} \subsection{The \texttt{State} monad\label{subsec:The-State-monad}} -Heuristically, the \lstinline!Reader! monad $\text{Read}^{S,A}$ -is able to \textsf{``}read\textsf{''} values of type $S$, while the \lstinline!Writer! -monad $\text{Writer}^{A,S}$ may \textsf{``}write\textsf{''} values of type $S$, -in addition to computing the result of type $A$. The \lstinline!State! -monad, denoted by $\text{State}^{S,A}$, combines the functionality -of \lstinline!Reader! and \lstinline!Writer! in a special way: an -extra value of type $S$ is updated and automatically passed from -one computation to the next. +Heuristically, the effect of $\text{Reader}^{S,A}$ is to be able +to \textsf{``}read\textsf{''} values of type $S$, while the effect of $\text{Writer}^{A,S}$ +is to be able to \textsf{``}write\textsf{''} values of type $S$, in addition to computing +the result of type $A$. The \lstinline!State! monad, denoted by +$\text{State}^{S,A}$, combines the functionality of \lstinline!Reader! +and \lstinline!Writer! in a special way: an extra value of type $S$ +is updated and automatically passed from one computation to the next. To derive the required type constructor, consider a computation of -type $A\rightarrow B$ that additionally needs to read and to write +type $A\rightarrow B$ that additionally needs to read and to update a value of type $S$. Since the total input is a pair of $A$ and $S$, and the total output is a pair of $B$ and $S$, this kind of computation is represented by a function of type $A\times S\rightarrow B\times S$. @@ -1823,56 +1838,53 @@ \subsection{The \texttt{State} monad\label{subsec:The-State-monad}} passes the updated state value to the next computation: \begin{lstlisting} type State[S, A] = S => (A, S) -def flatMap[S, A, B](prev: State[S, A])(f: A => State[S, B]): State[S, B] = { s => - val (a, newState) = prev(s) // Compute result of type `A`, updating the state. - f(a)(newState) // Pass the updated state to the next computation. +def flatMap[S, A, B](p: State[S, A])(f: A => State[S, B]): State[S, B] = { s => + val (a, newState) = p(s) // Compute result of type `A` and the new state. + f(a)(newState) // Pass the new state to the next computation. } \end{lstlisting} -An example of using the \lstinline!State! monad is when implementing -a random number generator. A simple generator is the \textbf{Lehmer\textsf{'}s +An example of using the \lstinline!State! monad is for implementing +pseudo-random number generators. A simple such generator is the \textbf{Lehmer\textsf{'}s algorithm}\index{Lehmer\textsf{'}s algorithm},\footnote{See \texttt{\href{https://en.wikipedia.org/wiki/Lehmer_random_number_generator}{https://en.wikipedia.org/wiki/Lehmer\_random\_number\_generator}}} which generates integer sequences $x_{n}$ defined by: \[ x_{n+1}\triangleq\left(48271*x_{n}\right)\%\,(2^{31}-1)\quad,\quad\quad1\leq x_{n}\leq2^{31}-2\quad,\quad\quad n=0,1,2,... \] The \textsf{``}updating\textsf{''} function for this sequence, $x_{n+1}=\text{lehmer}\,(x_{n})$, -can be implemented as: +can be written as: \begin{lstlisting} def lehmer(x: Long): Long = (x * 48271L) % ((1L << 31) - 1L) \end{lstlisting} -In many applications, one needs uniformly distributed floating-point -numbers in the interval $\left[0,1\right]$. 
To produce such numbers, -let us define a helper function: -\begin{lstlisting} -def uniform(x: Long): Double = (x - 1L).toDouble / ((1L << 31) - 3L) // Enforce the interval [0, 1]. -\end{lstlisting} - -To use the uniform generator, we need to provide an initial value -$x_{0}$ (the \textsf{``}seed\textsf{''}) and then call the function \lstinline!lehmer! + To use the random number generator, we need to provide an initial +value $x_{0}$ (the \textsf{``}seed\textsf{''}) and then call the function \lstinline!lehmer! repeatedly on successive values. The code would look like this: \begin{lstlisting} val s0 = 123456789L // A "seed" value. val s1 = lehmer(s0) -val r1 = uniform(s1) -... // Use pseudo-random value r1. +... // Use pseudo-random value s1. val s2 = lehmer(s1) -val r2 = uniform(s2) -... // Use pseudo-random value r2. +... // Use pseudo-random value s2. val s3 = lehmer(s2) // And so on. \end{lstlisting} -We need to keep track of the generator\textsf{'}s state values \lstinline!s1!, -\lstinline!s2!, ..., that are not directly needed for other computations. -This \textsf{``}book\nobreakdash-keeping\textsf{''} is error-prone since we might -reuse a previous generator state by mistake. The \lstinline!State! -monad keeps track of the updated state values automatically and correctly. -This comes at a cost: we need to convert all computations into \lstinline!State!-typed -monadic programs. +We need to keep track of the generator\textsf{'}s successive state values \lstinline!s1!, +\lstinline!s2!, etc., and never reuse older values. This \textsf{``}book\nobreakdash-keeping\textsf{''} +is error-prone since we might by mistake reuse a previous generator +state. The \lstinline!State! monad keeps track of the updated state +values automatically and correctly. This comes at a cost: we need +to convert all computations into \lstinline!State!-typed monadic +programs. -As a simple example, consider the task of generating uniformly distributed -floating-point numbers in the interval $\left[0,1\right]$. We need -to maintain the generator state while computing the result. The floating-point -generator is implemented as a monadic value of type \lstinline!State[Long, Double]!: +As an example, consider the task of generating uniformly distributed +floating-point numbers in the interval $\left[0,1\right]$. To produce +such numbers, define a helper function: +\begin{lstlisting} +def uniform(x: Long): Double = (x - 1L).toDouble / ((1L << 31) - 3L) // Enforce the interval [0, 1]. +\end{lstlisting} + +We need to maintain the generator state while computing the result. +The floating-point generator is implemented as a monadic value of +type \lstinline!State[Long, Double]!: \begin{lstlisting} val rngUniform: State[Long, Double] = { oldState => val result = uniform(oldState) @@ -1919,7 +1931,7 @@ \subsection{The eager/lazy evaluation monad\label{subsec:The-eager-lazy-evaluati The first of these monads is called \lstinline!Eval!, and its task is to encapsulate lazy and eager evaluations into a single type. A value of type \lstinline!Eval[A]! can be eager (available now) or -lazy (available later). Values of these sub-types can be combined +lazy (available later). Values of these sub-types should be combined with correct logic: for instance, a combination of eager and lazy values automatically becomes lazy. @@ -1992,9 +2004,8 @@ \subsection{The continuation monad\label{subsec:The-continuation-monad}} \end{lstlisting} To make the pattern more clear, replace the constant \lstinline!10! by a function \lstinline!pure! 
with a callback argument: -\begin{lstlisting} -def pure(x: Int)(callback: Int => Unit): Unit = - callback(x) +\begin{lstlisting}[numbers=left] +def pure(x: Int)(callback: Int => Unit): Unit = callback(x) def result(callback: Int => Unit): Unit = pure(10) { x => @@ -2010,9 +2021,9 @@ \subsection{The continuation monad\label{subsec:The-continuation-monad}} it as the last curried argument. The final result of the calculation is not returned by the function \lstinline!result! but is available only as the bound variable \lstinline!z! in a deeply nested function -scope where \lstinline!callback(z)! is run. If we need to extend -this program with some more calculation steps, we would have to add -extra code within the deepest-nested scope of \lstinline!result!, +scope where \lstinline!callback(z)! is run (line 7 above). If we +need to extend this program with some more calculation steps, we would +have to add extra code within the deepest-nested scope of \lstinline!result!, or we could pass a \lstinline!callback! argument that contains further steps. @@ -2061,7 +2072,7 @@ \subsection{The continuation monad\label{subsec:The-continuation-monad}} It is sometimes helpful if the callback returns a more informative value than \lstinline!Unit!. For instance, that value could show error information or give access to processes that were scheduled -concurrently. So, we generalize the type constructor $L^{A}$ to $\left(A\rightarrow R\right)\rightarrow R$, +concurrently. So, we generalize the type constructor $L$ to $\left(A\rightarrow R\right)\rightarrow R$, where $R$ is a fixed \textsf{``}result\textsf{''} type. This type constructor is called the \textbf{continuation monad}\index{monads!continuation monad (Cont)@continuation monad (\texttt{Cont})} and is denoted by $\text{Cont}^{R,A}\triangleq\left(A\rightarrow R\right)\rightarrow R$. @@ -2123,10 +2134,10 @@ \subsection{The continuation monad\label{subsec:The-continuation-monad}} \subsubsection{Example \label{subsec:Example-continuation-monad-computation-cost}\ref{subsec:Example-continuation-monad-computation-cost}\index{examples (with code)}} Suppose that each arithmetic operation, such as \lstinline!add3! -or \lstinline!mult4!, has a certain cost, which is a value of a monoid -type $W$. Use the monad \lstinline!Cont[W, A]! to implement computations -with a cost. The total cost must add up automatically when computations -are chained using \lstinline!flatMap!. +or \lstinline!mult4!, has a certain cost represented by a value of +a monoid type $W$. Use the monad \lstinline!Cont[W, A]! to implement +computations with a cost. The total cost must add up automatically +when computations are chained using \lstinline!flatMap!. \subparagraph{Solution} @@ -2164,7 +2175,8 @@ \subsubsection{Example \label{subsec:Example-continuation-monad-computation-cost time. Instead, we can easily implement a function that adds a given cost to any given monadic operation: \begin{lstlisting} -def addCost[A](c: Cont[W, A], cost: W): Cont[W, A] = { callback => c(callback) |+| cost } +def addCost[A](c: Cont[W, A], cost: W): Cont[W, A] = { callback => + c(callback) |+| cost } \end{lstlisting} @@ -2292,7 +2304,7 @@ \subsubsection{Exercise \label{subsec:Exercise-monads-p1-1}\ref{subsec:Exercise- \subsubsection{Exercise \label{subsec:Exercise-monads-p1-2}\ref{subsec:Exercise-monads-p1-2}} -Solve the $n$-queens problem on an $3\times3\times3$ cube. +Solve the $n$-queens problem on a $3\times3\times3$ cube. 
\subsubsection{Exercise \label{subsec:Exercise-monads-p1-4}\ref{subsec:Exercise-monads-p1-4}} @@ -2327,7 +2339,7 @@ \subsubsection{Exercise \label{subsec:Exercise-monads-p1-7}\ref{subsec:Exercise- Find the largest prime number below $1000$ via a simple sieve of Eratosthenes.\footnote{See \texttt{\href{https://en.wikipedia.org/wiki/Sieve_of_Eratosthenes}{https://en.wikipedia.org/wiki/Sieve\_of\_Eratosthenes}}} -Use the \lstinline!State[S, Int]! monad with \lstinline!S = Array[Boolean]!. +Use the monad type \lstinline!State[S, A]! with \lstinline!S = Array[Boolean]!. \begin{comment} in this part of the tutorial I will talk about Mona\textsf{'}s and semi Mona\textsf{'}s @@ -4110,12 +4122,12 @@ \subsection{Motivating the semi-monad laws} functor block programs with multiple source lines. What properties do we intuitively expect such programs to have? -When functor blocks describe iterations over data collections, a source -line \lstinline!x <- c! means that the value of \lstinline!x! iterates +In a functor block that iterates over data collections, a source line +\lstinline!x <- c! means that the value of \lstinline!x! iterates over items in the collection \lstinline!c!. An assignment line \lstinline!y = f(x)! means that we define a local variable \lstinline!y! to equal the -expression \lstinline!f(x)!. We expect to get the same result by -iterating over a collection \lstinline!c! whose values \lstinline!x! +expression \lstinline!f(x)!. We expect to get the same result if +\lstinline!y! iterates over a collection whose values \lstinline!x! were replaced by \lstinline!f(x)!, i.e., by iterating over \lstinline!c.map(f)!. It means that the following two code fragments should always give the same results: @@ -4222,9 +4234,9 @@ \subsection{Motivating the semi-monad laws} method. Let us now formulate these requirements as equations (or \textsf{``}laws\textsf{''}). For brevity, we will denote the \lstinline!flatMap! method for a -semi-monad \lstinline!S[_]! by \textsf{``}$\text{flm}$\textsf{''}. We will write -$\text{flm}_{S}$ when we want to indicate explicitly the type constructor -($S$) being used. The type signature is: +semi-monad \lstinline!S! by \textsf{``}$\text{flm}$\textsf{''}. We will write $\text{flm}_{S}$ +when we want to indicate explicitly the type constructor ($S$) being +used. The type signature is: \begin{lstlisting} def flm[A, B](f: A => S[B]): S[A] => S[B] \end{lstlisting} @@ -4244,17 +4256,17 @@ \subsection{Motivating the semi-monad laws} by omitting the argument \lstinline!c!: \begin{equation} -\text{flm}\,(f^{:A\rightarrow B}\bef g^{:B\rightarrow S^{C}})=f^{\uparrow S}\bef\text{flm}\,(g)\quad.\label{eq:left-naturality-law-flatMap} +\text{flm}^{A,C}(f^{:A\rightarrow B}\bef g^{:B\rightarrow S^{C}})=f^{\uparrow S}\bef\text{flm}^{B,C}(g)\quad.\label{eq:left-naturality-law-flatMap} \end{equation} \noindent This equation holds for arbitrary $f^{:A\rightarrow B}$ and $g^{:B\rightarrow S^{C}}$. This is a \textsf{``}\textbf{left naturality} law\textsf{''}\index{naturality law!of flatMap@of \texttt{flatMap}} of \lstinline!flatMap! since it exchanges the order of lifted functions to the \emph{left} -of \lstinline!flatMap!. More precisely, we may call this equation -the naturality law of \lstinline!flatMap[A, B]! \textsf{``}with respect to -\lstinline!A!\textsf{''} since $f^{\uparrow S}$ acts on the type parameter -\lstinline!A!. +of \lstinline!flatMap!. We also call this equation the naturality +law of \lstinline!flatMap[A, C]! 
\textsf{``}with respect to \lstinline!A!\textsf{''} +since the law operates on the type parameter \lstinline!A! while +\lstinline!C! is unchanged. The second law holds for arbitrary $f^{:A\rightarrow S^{B}}$ and $g^{:B\rightarrow C}$: @@ -4268,11 +4280,11 @@ \subsection{Motivating the semi-monad laws} \] \begin{equation} -\text{flm}\,(f^{:A\rightarrow S^{B}}\bef g^{\uparrow S})=\text{flm}\,(f)\bef g^{\uparrow S}\quad.\label{eq:right-naturality-law-flatMap} +\text{flm}^{A,C}(f^{:A\rightarrow S^{B}}\bef g^{\uparrow S})=\text{flm}^{A,B}(f)\bef g^{\uparrow S}\quad.\label{eq:right-naturality-law-flatMap} \end{equation} -\noindent This is a \index{naturality law!of flatMap@of \texttt{flatMap}}\textbf{right -naturality} law or \textsf{``}naturality with respect to \lstinline!B!\textsf{''} +\noindent This is the \index{naturality law!of flatMap@of \texttt{flatMap}}\textbf{right +naturality} law or the \textsf{``}naturality law with respect to \lstinline!B!\textsf{''} of \lstinline!flatMap[A, B]!. It manipulates a lifted function $g^{\uparrow S}$ to the right of \lstinline!flatMap!, acting on the type parameter \lstinline!B!. @@ -4287,7 +4299,7 @@ \subsection{Motivating the semi-monad laws} } \] \begin{equation} -\text{flm}\,\big(f^{:A\rightarrow S^{B}}\bef\text{flm}\,(g^{:B\rightarrow S^{C}})\big)=\text{flm}\left(f\right)\bef\text{flm}\left(g\right)\quad.\label{eq:associativity-law-flatMap} +\text{flm}^{A,C}\big(f^{:A\rightarrow S^{B}}\bef\text{flm}^{B,C}(g^{:B\rightarrow S^{C}})\big)=\text{flm}^{A,B}(f)\bef\text{flm}^{B,C}(g)\quad.\label{eq:associativity-law-flatMap} \end{equation} This equation is called the \textbf{associativity law}\index{associativity law!of flatMap@of \texttt{flatMap}} of \lstinline!flatMap!, for reasons we will explain later. @@ -4302,22 +4314,22 @@ \subsection{Motivating the semi-monad laws} At this point, the three laws of semi-monads may appear complicated and hard to understand and to verify. In the next subsections, we will derive a shorter and clearer formulation of those laws. For now, -let us define a \lstinline!Semi-monad! typeclass\index{typeclass!Semi-monad@\texttt{Semi-monad}} +let us define a \lstinline!Semimonad! typeclass\index{typeclass!Semi-monad@\texttt{Semi-monad}} and test the laws using the \lstinline!scalacheck! library:\index{scalacheck library@\texttt{scalacheck} library}\index{verifying laws with scalacheck@verifying laws with \texttt{scalacheck}} \begin{lstlisting} -abstract class Semi-monad[F[_]: Functor] { +abstract class Semimonad[F[_]: Functor] { def flatMap[A, B](fa: F[A])(f: A => F[B]): F[B] } -implicit class Semi-monadOps[F[_]: Semi-monad, A](fa: F[A]) { // Define flatMap as an extension method. - def flatMap[B](f: A => F[B]): F[B] = implicitly[Semi-monad[F]].flatMap(fa)(f) +implicit class SemimonadOps[F[_]: Semimonad, A](fa: F[A]) { // Define flatMap as an extension method. + def flatMap[B](f: A => F[B]): F[B] = implicitly[Semimonad[F]].flatMap(fa)(f) } -def checkSemi-monadLaws[F[_], A, B, C]()(implicit ff: Semi-monad[F], // Use the `Arbitrary` typeclass - fa: Arbitrary[F[A]], ab: Arbitrary[A => F[B]], bc: Arbitrary[B => F[C]]) = { // from `scalacheck`. - forAll { (f: A => F[B], g: B => F[C], fa: F[A]) => // Associativity law of flatMap. +def checkSemimonadLaws[F[_], A, B, C]()(implicit ff: Semimonad[F], // Use the `Arbitrary` typeclass from `scalacheck`: + fa: Arbitrary[F[A]], ab: Arbitrary[A => F[B]], bc: Arbitrary[B => F[C]]) = { + forAll { (f: A => F[B], g: B => F[C], fa: F[A]) => // Associativity law of flatMap. 
fa.flatMap(x => f(x).flatMap(g)) shouldEqual fa.flatMap(f).flatMap(g) } -} // Assuming that a Semi-monad instance was defined for Seq[_], check the laws with specific A, B, C. -checkSemi-monadLaws[Seq, Int, String, Double]() +} // Assuming that a Semimonad instance was defined for Seq, check the laws +checkSemimonadLaws[Seq, Int, String, Double]() // with specific A, B, C. \end{lstlisting} @@ -4325,12 +4337,12 @@ \subsection{The laws of \texttt{flatten}} In Section~\ref{subsec:Simplifying-the-filtering-laws-deflate} we simplified the laws of the \lstinline!filter! operation by passing -to a simpler \lstinline!deflate! function. We then showed that these -two functions are equivalent if certain laws are assumed to hold for -\lstinline!filter!. We will now derive a similar relationship between -the methods \lstinline!flatMap! and \lstinline!flatten!. We will -see that \lstinline!flatten! has fewer laws, and that its laws are -simpler to verify. +to a simpler \lstinline!deflate! function. We then showed that \lstinline!filter! +and \lstinline!deflate! are equivalent if certain laws are assumed +to hold for \lstinline!filter!. We will now derive a similar relationship +between the methods \lstinline!flatMap! and \lstinline!flatten!. +We will see that \lstinline!flatten! (denoted by \textsf{``}$\text{ftn}$\textsf{''}) +has fewer laws, and that its laws are simpler to verify. By definition, \lstinline!flatMap! is expressed as a composition of \lstinline!map! and \lstinline!flatten!: @@ -4372,8 +4384,8 @@ \subsubsection{Statement \label{subsec:Statement-flatten-equivalent-to-flatMap}\ \textbf{(1)} Start with an arbitrary function $p:S^{S^{A}}\rightarrow S^{A}$ and define $q(f)\triangleq f^{\uparrow S}\bef p$ (as we would define \lstinline!flatMap! via \lstinline!flatten!). Then define a new -function $p^{\prime}\triangleq q\left(\text{id}\right)$. Prove that -$p^{\prime}=p$: +function $p^{\prime}\triangleq q\left(\text{id}\right)$. To verify +that $p^{\prime}=p$, write: \[ p^{\prime}=q\left(\text{id}\right)=\gunderline{\text{id}^{\uparrow S}}\bef p=\text{id}\bef p=p\quad. \] @@ -4387,8 +4399,8 @@ \subsubsection{Statement \label{subsec:Statement-flatten-equivalent-to-flatMap}\ \textbf{(2)} Start with an arbitrary function $q:\forall(A,B).\,(A\rightarrow S^{B})\rightarrow S^{A}\rightarrow S^{B}$ that satisfies Eq.~(\ref{eq:left-naturality-law-flatMap}) and define $p\triangleq q\left(\text{id}\right)$. Then define a new function -$q^{\prime}(f)\triangleq f^{\uparrow S}\bef\text{ftn}$. Prove that -$q^{\prime}(f)=q(f)$ for any $f^{:A\rightarrow S^{B}}$: +$q^{\prime}(f)\triangleq f^{\uparrow S}\bef p$. To verify that $q^{\prime}(f)=q(f)$ +for any $f^{:A\rightarrow S^{B}}$: \begin{align*} {\color{greenunder}\text{expect to equal }q(f):}\quad & q^{\prime}(f)=f^{\uparrow S}\bef p=f^{\uparrow S}\bef q\left(\text{id}\right)\\ {\color{greenunder}\text{left naturality law of }q:}\quad & =q(f\bef\text{id})=q(f)\quad. @@ -4400,12 +4412,13 @@ \subsubsection{Statement \label{subsec:Statement-flatten-has-2-laws}\ref{subsec: If a \lstinline!flatMap! function satisfies the three laws~(\ref{eq:left-naturality-law-flatMap})\textendash (\ref{eq:associativity-law-flatMap}), the corresponding \lstinline!flatten! 
function\index{naturality law!of flatten@of \lstinline!flatten!} defined as $\text{ftn}\triangleq\text{flm}\left(\text{id}\right)$ -satisfies its \emph{two} laws, with an arbitrary $f^{:A\rightarrow B}$: +satisfies the following \emph{two} laws (for an arbitrary $f^{:A\rightarrow B}$): \begin{align} -{\color{greenunder}\text{naturality law of }\text{ftn}:}\quad & f^{\uparrow S\uparrow S}\bef\text{ftn}=\text{ftn}\bef f^{\uparrow S}\quad,\label{eq:naturality-law-of-flatten}\\ -{\color{greenunder}\text{associativity law of }\text{ftn}:}\quad & \text{ftn}^{\uparrow S}\bef\text{ftn}=\text{ftn}\bef\text{ftn}\quad.\label{eq:associativity-law-of-flatten} +{\color{greenunder}\text{naturality law of \texttt{flatten}}:}\quad & f^{\uparrow S\uparrow S}\bef\text{ftn}=\text{ftn}\bef f^{\uparrow S}\quad,\label{eq:naturality-law-of-flatten}\\ +{\color{greenunder}\text{associativity law of \texttt{flatten}}:}\quad & \text{ftn}^{\uparrow S}\bef\text{ftn}=\text{ftn}\bef\text{ftn}\quad.\label{eq:associativity-law-of-flatten} \end{align} -The following type diagrams illustrate these laws: +The following type diagrams show the type parameters used in these +laws: \vspace{-1.5\baselineskip} \begin{minipage}[t]{0.45\columnwidth}% @@ -4502,12 +4515,13 @@ \subsubsection{Example \label{subsec:Example-flatten-verify-for-monad}\ref{subse The standard Scala types \lstinline!Either! and \lstinline!Try! are examples of the monad $F^{A}\triangleq Z+A$, where $Z$ is a -fixed type. Show that this monad satisfies the associativity law. +fixed type. Show that this monad satisfies the associativity law, +assuming a fully parametric implementation. \subparagraph{Solution} -The type signature of \lstinline!flatten! is $\text{ftn}:Z+\left(Z+A\right)\rightarrow Z+A$, -and its code is: +The type signature of \lstinline!flatten! is $Z+\left(Z+A\right)\rightarrow Z+A$; +the code is: \begin{lstlisting} def flatten[A]: Either[Z, Either[Z, A]] => Either[Z, A] = { case Left(z) => Left(z) @@ -4523,17 +4537,15 @@ \subsubsection{Example \label{subsec:Example-flatten-verify-for-monad}\ref{subse A & \bbnum 0 & \text{id} \end{array}\quad. \] +Both sides of the associativity law are fully parametric functions +with the type signature $Z+Z+Z+A\rightarrow Z+A$. This type signature +has \emph{only one} fully parametric implementation: since it is not +possible to produce values of unknown types $A$ and $Z$ from scratch, +an implementation of $Z+Z+Z+A\rightarrow Z+A$ must return $Z+\bbnum 0$ +when the input contains a value of type $Z$; otherwise it must return +$\bbnum 0+A$. So, both sides of the law must have the same code. -Since \lstinline!flatten! is fully parametric, both sides of the -associativity law are fully parametric functions with the type signature -$Z+Z+Z+A\rightarrow Z+A$. This type signature has \emph{only one} -fully parametric implementation: since it is not possible to produce -values of unknown types $A$ and $Z$ from scratch, an implementation -of $Z+Z+Z+A\rightarrow Z+A$ must return $Z+\bbnum 0$ when the input -contains a value of type $Z$; otherwise it must return $\bbnum 0+A$. -So, both sides of the law must have the same code. - -To make this argument rigorous, we may use the Curry-Howard correspondence +To make this argument rigorous, we could use the Curry-Howard correspondence and the LJT algorithm (see Section~\ref{app:The-LJT-algorithm}). Instead, let us verify the associativity law by an explicit derivation. First, we need to lift \lstinline!flatten! to the functor $F$. 
The @@ -4551,8 +4563,8 @@ \subsubsection{Example \label{subsec:Example-flatten-verify-for-monad}\ref{subse A & \bbnum 0 & \bbnum 0 & \text{id} \end{array}\quad. \] -For comparison, the Scala code for $\text{ftn}^{\uparrow F}$ (had -we needed to write it) would look like this: +For comparison, the Scala code for $\text{ftn}^{\uparrow F}$ looks +like this: \begin{lstlisting} def fmapFlatten[A]: Either[Z, Either[Z, Either[Z, A]]] => Either[Z, Either[Z, A]] = { case Left(z) => Left(z) @@ -4660,16 +4672,16 @@ \subsubsection{Example \label{subsec:Example-flatten-verify-for-monad-1-1}\ref{s Flattening that result gives a list of all values $x_{ij}$, $y_{ij}$, ..., in the order they appear in $p$: \[ -p\triangleright\text{ftn}^{\uparrow\text{List}}\triangleright\text{ftn}=\left[x_{11},x_{12},...,x_{21},x_{22},...,y_{11},y_{12},...,y_{21},y_{22},...,...\right]\quad. +p\triangleright\text{ftn}^{\uparrow\text{List}}\triangleright\text{ftn}^{A}=\left[x_{11},x_{12},...,x_{21},x_{22},...,y_{11},y_{12},...,y_{21},y_{22},...,...\right]\quad. \] -Applying $\text{ftn}^{\text{List}^{A}}$ to $p$ will flatten the -outer lists: +Applying $\text{ftn}^{\text{List}^{A}}$ to $p$ will first flatten +the outer lists: \[ p\triangleright\text{ftn}^{\text{List}^{A}}=\left[\left[x_{11},x_{12},...\right],\left[x_{21},x_{22},...\right],...,\left[y_{11},y_{12},...\right],\left[y_{21},y_{22},...\right],...\right]\quad. \] -Flattening that value results in: +Flattening that value again will give: \[ -p\triangleright\text{ftn}^{\text{List}^{A}}\triangleright\text{ftn}=\left[x_{11},x_{12},...,x_{21},x_{22},...,y_{11},y_{12},...,y_{21},y_{22},...,...\right]\quad. +p\triangleright\text{ftn}^{\text{List}^{A}}\triangleright\text{ftn}^{A}=\left[x_{11},x_{12},...,x_{21},x_{22},...,y_{11},y_{12},...,y_{21},y_{22},...,...\right]\quad. \] This is exactly the same as the result of $p\triangleright\text{ftn}^{\uparrow\text{List}}\triangleright\text{ftn}$: namely, the list of all values in the order they appear in $p$. @@ -4756,7 +4768,7 @@ \subsubsection{Example \label{subsec:Example-flatten-verify-for-monad-1-1}\ref{s & =\gunderline{\left(1+\bbnum 0\right)\pplus}\big(t\triangleright\overline{\text{ftn}^{\text{List}^{A}}}\bef\text{ftn}^{A}\big)\\ {\color{greenunder}\text{concatenate with empty list}:}\quad & =t\triangleright\overline{\text{ftn}^{\text{List}^{A}}}\triangleright\text{ftn}^{A}\quad. \end{align*} -Substituting this value $h$ into $\text{ftn}^{\uparrow\text{List}}\bef\text{ftn}^{A}$, +Substituting the same value $h$ into $\text{ftn}^{\uparrow\text{List}}\bef\text{ftn}^{A}$, we get: \[ \left(\bbnum 0+\left(1+\bbnum 0\right)\times t\right)\triangleright\text{ftn}^{\uparrow\text{List}}\bef\text{ftn}^{A}=t\triangleright\overline{\text{ftn}^{\overline{\uparrow\text{List}}}}\triangleright\overline{\text{ftn}}\quad. @@ -4767,14 +4779,14 @@ \subsubsection{Example \label{subsec:Example-flatten-verify-for-monad-1-1}\ref{s \] This holds by the inductive assumption. 
-It remains to examine the second possibility, $h=\bbnum 0+g\times k$: +It remains to examine the second possibility ($h=\bbnum 0+g\times k$): \begin{align*} {\color{greenunder}\text{with }h=\bbnum 0+g\times k:}\quad & \left(\bbnum 0+\left(\bbnum 0+g\times k\right)\times t\right)\triangleright\text{ftn}^{\text{List}^{A}}\bef\text{ftn}^{A}\\ & =\big(\left(\bbnum 0+g\times k\right)\pplus\big(t\triangleright\overline{\text{ftn}^{\text{List}^{A}}}\big)\big)\triangleright\text{ftn}^{A}\\ {\color{greenunder}\text{code of }\pplus:}\quad & =\big(\bbnum 0+g\times\big(k\pplus\big(t\triangleright\overline{\text{ftn}^{\text{List}^{A}}}\big)\big)\big)\triangleright\text{ftn}^{A}\\ {\color{greenunder}\text{code of }\text{ftn}^{A}:}\quad & =g\pplus\big(k\pplus\big(t\triangleright\overline{\text{ftn}^{\text{List}^{A}}}\big)\big)\triangleright\overline{\text{ftn}}\\ {\color{greenunder}\text{Exercise~\ref{subsec:Exercise-flatten-concat-distributive-law}}:}\quad & =\gunderline{g\pplus\big(k\triangleright\overline{\text{ftn}}\big)}\pplus\big(t\triangleright\overline{\text{ftn}^{\text{List}^{A}}}\triangleright\overline{\text{ftn}}\big)\\ - & =\left(\bbnum 0+g\times k\right)\triangleright\overline{\text{ftn}}\pplus\big(\gunderline{t\triangleright\overline{\text{ftn}^{\text{List}^{A}}}\triangleright\overline{\text{ftn}}}\big)\\ + & =\left(\bbnum 0+g\times k\right)\triangleright\overline{\text{ftn}}\,\pplus\big(\gunderline{t\triangleright\overline{\text{ftn}^{\text{List}^{A}}}\triangleright\overline{\text{ftn}}}\big)\\ {\color{greenunder}\text{inductive assumption}:}\quad & =\text{ftn}\,(h)\pplus\big(t\triangleright\overline{\text{ftn}^{\uparrow\text{List}}}\triangleright\overline{\text{ftn}}\big)\quad. \end{align*} This is the same as the result of substituting $\bbnum 0+h\times t$ @@ -4817,8 +4829,7 @@ \subsubsection{Example \label{subsec:Example-flatten-verify-for-monad-2}\ref{sub \[ \text{ftn}^{\uparrow F}=\left(\left(a\times w_{1}\right)\times w_{2}\right)\times w_{3}\rightarrow\left(\left(\left(a\times w_{1}\right)\times w_{2}\right)\triangleright\text{ftn}\right)\times w_{3}=\left(a\times\left(w_{1}\oplus w_{2}\right)\right)\times w_{3}\quad. \] -To verify the associativity law, it is convenient to substitute a -value $\left(\left(a\times w_{1}\right)\times w_{2}\right)\times w_{3}$ +To verify the associativity law, substitute a value $\left(\left(a\times w_{1}\right)\times w_{2}\right)\times w_{3}$ of type $F^{F^{F^{A}}}$ into both sides of the law: \begin{align*} & \left(\left(\left(a\times w_{1}\right)\times w_{2}\right)\times w_{3}\right)\triangleright\text{ftn}^{\uparrow F}\triangleright\text{ftn}=\left(\left(a\times\left(w_{1}\oplus w_{2}\right)\right)\times w_{3}\right)\triangleright\text{ftn}\\ @@ -4826,8 +4837,8 @@ \subsubsection{Example \label{subsec:Example-flatten-verify-for-monad-2}\ref{sub & \left(\left(\left(a\times w_{1}\right)\times w_{2}\right)\times w_{3}\right)\triangleright\text{ftn}^{F^{A}}\triangleright\text{ftn}=\left(\left(a\times w_{1}\right)\times\left(w_{2}\oplus w_{3}\right)\right)\triangleright\text{ftn}\\ & \quad=a\times\left(w_{1}\oplus\left(w_{2}\oplus w_{3}\right)\right)\quad. \end{align*} -The operation $\oplus$ is associative since $W$ is a semigroup. -So, both sides of the law are equal. +The operation $\oplus$ is associative because, by assumption, $W$ +is a lawful semigroup. So, both sides of the law are equal. 
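To complement the symbolic derivation, here is a small, self-contained Scala check of the associativity law on a sample value. It uses the tuple encoding $F^{A}\cong A\times W$ with \lstinline!W = String!, where string concatenation plays the role of the semigroup operation $\oplus$ (the names \lstinline!ftn! and \lstinline!fmap! are chosen only for this sketch):
\begin{lstlisting}
type F[A] = (A, String)  // W = String; the semigroup operation is concatenation.
def ftn[A]: F[F[A]] => F[A] = { case ((a, w1), w2) => (a, w1 + w2) }
def fmap[A, B](f: A => B): F[A] => F[B] = { case (a, w) => (f(a), w) }

val p: F[F[F[Int]]] = (((123, "w1"), "w2"), "w3")
val lhs = ftn[Int](fmap(ftn[Int])(p))  // Flatten the two inner layers first (ftn lifted via fmap), then ftn.
val rhs = ftn[Int](ftn[F[Int]](p))     // Flatten the two outer layers first, then ftn.
assert(lhs == rhs)                     // Both sides equal (123, "w1w2w3").
\end{lstlisting}
A property-based test with \lstinline!scalacheck! (similar to the law checks shown earlier in this chapter) could exercise many random values, although such tests complement rather than replace the symbolic proof.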
\subsubsection{Example \label{subsec:Example-flatten-verify-for-monad-4}\ref{subsec:Example-flatten-verify-for-monad-4}} @@ -4851,16 +4862,16 @@ \subsubsection{Example \label{subsec:Example-flatten-verify-for-monad-4}\ref{sub Had the code not exchanged the order of $w_{1}$ and $w_{2}$, the law would have held. -\subsection{From semi-monads to monads: Motivating the identity laws} +\subsection{From semi-monads to monads: The identity laws} Semi-monads are heuristically viewed as values with a special \textsf{``}computational effect\textsf{''}. Semi-monad-valued computations can be composed using the -\lstinline!flatMap! method, which will \textsf{``}merge\textsf{''} the effects associatively. -It is generally useful to be able to create values with an \textsf{``}empty -effect\textsf{''}, such that merging the empty effect leaves other effects -unchanged. A monad $M$ is a semi-monad that has a method for creating -values with \textsf{``}empty effect\textsf{''}. That method is called \lstinline!pure! -(notation $\text{pu}_{M}$): +\lstinline!flatMap! method, which will \textsf{``}merge\textsf{''} the effects. It +is generally useful to be able to create values with an \textsf{``}empty effect\textsf{''}, +such that merging the empty effect leaves other effects unchanged. +A monad $M$ is a semi-monad that has a method for creating values +with \textsf{``}empty effect\textsf{''}. That method is called \lstinline!pure! (notation +$\text{pu}_{M}$): \begin{wrapfigure}{l}{0.475\columnwidth}% \vspace{-0.4\baselineskip} @@ -4877,12 +4888,12 @@ \subsection{From semi-monads to monads: Motivating the identity laws} To get intuition about the properties of a vaguely defined \textsf{``}empty effect\textsf{''}, again consider nested iteration over arrays. The \textsf{``}empty -effect\textsf{''} is an array containing \emph{one} element, because an iteration -of such an array goes over a single value, which is equivalent to -no iteration. In a functor block, this intuition says that a source -line with an \textsf{``}empty effect\textsf{''}, \lstinline!y <- pure(x)!, should -be equivalent to just \lstinline!y = x!. This line must occur either -before or after another source line. So, we need to examine two situations: +effect\textsf{''} is an array containing \emph{one} element. An iteration +over such an array will just need to process that single value. In +a functor block, this intuition says that a source line with an \textsf{``}empty +effect\textsf{''}, \lstinline!y <- pure(x)!, should be equivalent to just +\lstinline!y = x! with no iteration. This line may occur either before +or after another source line. So, we need to examine two situations: first, when an empty effect comes before another source line: \noindent \texttt{\textcolor{blue}{\footnotesize{}}}% @@ -4892,8 +4903,8 @@ \subsection{From semi-monads to monads: Motivating the identity laws} result1 = for { ... // Some code, then: y <- pure(x) // Empty effect, x:A - z <- p(y) // p: A => M[B] - // Same as z <- pure(x).flatMap(p) + z <- g(y) // g: A => M[B] + // Same as z <- pure(x).flatMap(g) \end{lstlisting} % \end{minipage}\texttt{\textcolor{blue}{\footnotesize{}\hspace*{\fill}}}% @@ -4903,8 +4914,8 @@ \subsection{From semi-monads to monads: Motivating the identity laws} result2 = for { ... 
// Some code, then: y = x // x: A - z <- p(y) // p: A => M[B] - // Same as z <- p(x) + z <- g(y) // g: A => M[B] + // Same as z <- g(x) \end{lstlisting} % \end{minipage}{\footnotesize\par} @@ -4948,7 +4959,7 @@ \subsection{From semi-monads to monads: Motivating the identity laws} Then the equality of \lstinline!result1! and \lstinline!result2! gives the law: \begin{lstlisting} -g.flatMap(pure) == g +p.flatMap(pure) == p \end{lstlisting} \begin{align} {\color{greenunder}\text{right identity law of }M:}\quad & \text{flm}_{M}(\text{pu}_{M})=\text{id}^{:M^{A}\rightarrow M^{A}}\quad.\label{eq:monad-right-identity-law-for-flatMap} @@ -4965,8 +4976,8 @@ \subsection{From semi-monads to monads: Motivating the identity laws} forAll { (x: A, g: A => F[B]) => mf.pure(x).flatMap(g) shouldEqual g(x) // Left identity law. } - forAll { (fa: F[A]) => - fa.flatMap(mf.pure[A]) shouldEqual fa // Right identity law. + forAll { (p: F[A]) => + p.flatMap(mf.pure[A]) shouldEqual p // Right identity law. } } \end{lstlisting} @@ -4976,8 +4987,8 @@ \subsection{From semi-monads to monads: Motivating the identity laws} So, we could say that a monad is a pointed semi-monad whose \lstinline!pure! method obeys the two identity laws~(\ref{eq:monad-left-identity-law-for-flatMap})\textendash (\ref{eq:monad-right-identity-law-for-flatMap}). Although the \lstinline!pure! method can be replaced by a simpler -\textsf{``}wrapped unit\textsf{''} value ($\text{wu}_{M}$), derivations turn out -to be easier when using \lstinline!pure!. +\textsf{``}wrapped unit\textsf{''} value ($\text{wu}_{M}$), derivations are easier +when using \lstinline!pure!. The \lstinline!Pointed! typeclass requires the \lstinline!pure! method to satisfy the naturality law~(\ref{eq:naturality-law-of-pure}). @@ -4991,9 +5002,9 @@ \subsection{From semi-monads to monads: Motivating the identity laws} \subsection{The monad identity laws in terms of \texttt{pure} and \texttt{flatten}} -Since the laws of semi-monads are simpler when formulated via the -\lstinline!flatten! method, let us convert the identity laws to that -form. We use the code for \lstinline!flatMap! in terms of \lstinline!flatten!: +The laws of semi-monads are simpler when formulated via \lstinline!flatten!, +so let us convert the identity laws to that form. We use the code +for \lstinline!flatMap! in terms of \lstinline!flatten!: \[ \text{flm}_{M}(f^{:A\rightarrow M^{B}})=f^{\uparrow M}\bef\text{ftn}_{M}\quad. \] @@ -5016,9 +5027,9 @@ \subsection{The monad identity laws in terms of \texttt{pure} and \texttt{flatte \vspace{-0.5\baselineskip} \noindent This is the \textbf{left identity law} of \lstinline!flatten!. -Conversely, if Eq.~(\ref{eq:left-identity-law-for-flatten}) holds, -we can compose both sides with an arbitrary function $f^{:A\rightarrow M^{B}}$ -and recover the left identity law of \lstinline!flatMap! (Exercise~\ref{subsec:Exercise-1-monads-2}). +If Eq.~(\ref{eq:left-identity-law-for-flatten}) holds, we can compose +both sides with an arbitrary function $f^{:A\rightarrow M^{B}}$ and +recover the left identity law of \lstinline!flatMap! (Exercise~\ref{subsec:Exercise-1-monads-2}). The \index{identity laws!of pure and flatten@of \texttt{pure} and \texttt{flatten}}\textbf{right identity law} of \lstinline!flatten! 
is written as: @@ -5031,8 +5042,7 @@ \subsection{The monad identity laws in terms of \texttt{pure} and \texttt{flatte & \text{flm}_{M}(\text{pu}_{M})=\text{pu}_{M}^{\uparrow M}\bef\text{ftn}_{M}\overset{!}{=}\text{id}\quad.\label{eq:right-identity-law-for-flatten} \end{align} -In the next section, we will see reasons why these laws have their -names. +In the next section, we will give reasons for the names of these laws. \subsection{Monad laws in terms of Kleisli functions} @@ -5042,14 +5052,14 @@ \subsection{Monad laws in terms of Kleisli functions} when deriving the laws of filterable functors using the \lstinline!liftOpt! method. At that point, $M$ was the simple \lstinline!Option! monad. We found that functions of type $A\rightarrow\bbnum 1+B$ can be composed -using the Kleisli composition denoted by $\diamond_{_{\text{Opt}}}$ +using the Kleisli composition operator denoted by $\diamond_{_{\text{Opt}}}$ (see page~\pageref{kleisli-composition}). Later, Section~\ref{subsec:Generalizing-the-laws-of-liftings-kleisli-functions} stated the general properties of Kleisli composition. We will now show that the Kleisli composition gives a useful way of formulating -the laws of a monad. +the monad laws. The Kleisli composition\index{Kleisli composition} operation for -a monad $M$, denoted $\diamond_{_{M}}$, is a function with type +a monad $M$, denoted by $\diamond_{_{M}}$, is a function with type signature: \[ \diamond_{_{M}}:(A\rightarrow M^{B})\rightarrow(B\rightarrow M^{C})\rightarrow A\rightarrow M^{C}\quad. @@ -5087,34 +5097,36 @@ \subsection{Monad laws in terms of Kleisli functions} \subsubsection{Statement \label{subsec:Statement-identity-laws-for-kleisli}\ref{subsec:Statement-identity-laws-for-kleisli}} For a lawful monad $M$, the Kleisli composition $\diamond_{_{M}}$ -satisfies the identity laws: +satisfies the identity laws: for any $f^{:A\rightarrow M^{B}}$, \begin{align} -{\color{greenunder}\text{left identity law of }\diamond_{_{M}}:}\quad & \text{pu}_{M}\diamond_{_{M}}f=f\quad,\quad\forall f^{:A\rightarrow M^{B}}\quad,\label{eq:kleisli-left-identity-law}\\ -{\color{greenunder}\text{right identity law of }\diamond_{_{M}}:}\quad & f\diamond_{_{M}}\text{pu}_{M}=f\quad,\quad\forall f^{:A\rightarrow M^{B}}\quad.\label{eq:kleisli-right-identity-law} +{\color{greenunder}\text{left identity law of }\diamond_{_{M}}:}\quad & \text{pu}_{M}\diamond_{_{M}}f=f\quad,\label{eq:kleisli-left-identity-law}\\ +{\color{greenunder}\text{right identity law of }\diamond_{_{M}}:}\quad & f\diamond_{_{M}}\text{pu}_{M}=f\quad.\label{eq:kleisli-right-identity-law} \end{align} \subparagraph{Proof} We may assume that Eqs.~(\ref{eq:monad-left-identity-law-for-flatMap})\textendash (\ref{eq:monad-right-identity-law-for-flatMap}) -hold. Using the definition~(\ref{eq:def-of-kleisli-composition-for-monad-via-flatMap}), -we find: +hold. 
Using definition~(\ref{eq:def-of-kleisli-composition-for-monad-via-flatMap}),
+we directly verify the identity laws:
 \begin{align*}
-{\color{greenunder}\text{left identity law of }\diamond_{_{M}},\text{ should equal }f:}\quad & \text{pu}_{M}\diamond_{_{M}}f=\gunderline{\text{pu}_{M}\bef\text{flm}_{M}}(f)\\
+{\color{greenunder}\text{left identity law of }\diamond_{_{M}}:}\quad & \text{pu}_{M}\diamond_{_{M}}f=\gunderline{\text{pu}_{M}\bef\text{flm}_{M}}(f)\\
 {\color{greenunder}\text{use Eq.~(\ref{eq:monad-left-identity-law-for-flatMap})}:}\quad & \quad=f\quad,\\
-{\color{greenunder}\text{right identity law of }\diamond_{_{M}},\text{ should equal }f:}\quad & f\diamond_{_{M}}\text{pu}_{M}=f\bef\gunderline{\text{flm}_{M}(\text{pu}_{M})}\\
+{\color{greenunder}\text{right identity law of }\diamond_{_{M}}:}\quad & f\diamond_{_{M}}\text{pu}_{M}=f\bef\gunderline{\text{flm}_{M}(\text{pu}_{M})}\\
 {\color{greenunder}\text{use Eq.~(\ref{eq:monad-right-identity-law-for-flatMap})}:}\quad & \quad=f\bef\text{id}=f\quad.
 \end{align*}
+$\square$
 
 The following statement and the identity law~(\ref{eq:monad-right-identity-law-for-flatMap})
 show that \lstinline!flatMap! can be viewed as a \textsf{``}lifting\textsf{''}:
 \[
-\text{flm}_{M}:(A\rightarrow M^{B})\rightarrow(M^{A}\rightarrow M^{B})\quad,
+\text{flm}_{M}:(A\rightarrow M^{B})\rightarrow(M^{A}\rightarrow M^{B})\quad.
 \]
-from Kleisli functions $A\rightarrow M^{B}$ to $M$-lifted functions
-$M^{A}\rightarrow M^{B}$, except that Kleisli functions must be composed
-using $\diamond_{_{M}}$, while $\text{pu}_{M}$ plays the role of
-the Kleisli-identity function.
+It transforms Kleisli functions $A\rightarrow M^{B}$ into $M$-lifted
+functions $M^{A}\rightarrow M^{B}$. Keep in mind that Kleisli
+functions must be composed using $\diamond_{_{M}}$, and that the
+function $\text{pu}_{M}$ plays the role of the identity function
+among the Kleisli functions.
 
 \subsubsection{Statement \label{subsec:Statement-flatMap-lifting-composition-law-for-kleisli}\ref{subsec:Statement-flatMap-lifting-composition-law-for-kleisli}}
 
@@ -5168,7 +5180,7 @@ \subsubsection{Statement \label{subsec:Statement-associativity-law-for-kleisli}\
 similar to the identity and associativity properties of the function
 composition $f\bef g$ except for using $\text{pu}_{M}$ instead of
 the identity function.\footnote{It means that Kleisli functions satisfy the properties of morphisms
-of a category; see Section~\ref{subsec:Motivation-for-using-category-theory}.}
+in a category; see Section~\ref{subsec:Motivation-for-using-category-theory}.}
 
 Since the Kleisli composition describes the chaining of consecutive
 lines in functor blocks, its associativity means that multiple lines
@@ -5180,7 +5192,8 @@ \subsubsection{Statement \label{subsec:Statement-associativity-law-for-kleisli}\
 t <- h(z)
 } yield t
 \end{lstlisting}
-corresponds to this Kleisli composition:
+corresponds to this Kleisli composition $f\diamond_{_{_{M}}}g\diamond_{_{_{M}}}h$,
+or more verbosely:
 \[
 (x\rightarrow f(x))\diamond_{_{_{M}}}(y\rightarrow g(y))\diamond_{_{_{M}}}(z\rightarrow h(z))\quad.
 \]
@@ -5189,9 +5202,9 @@ \subsubsection{Statement \label{subsec:Statement-associativity-law-for-kleisli}\
 or lines 3 and 4 are chained before prepending line 2.
 
 We will now prove that the Kleisli composition with its laws is equivalent
-to \lstinline!flatMap! 
with \emph{its} laws. One may equally well +use the Kleisli composition instead of \lstinline!flatMap! when formulating +the requirements for a functor $M$ to be a monad. \subsubsection{Statement \label{subsec:Statement-equivalence-kleisli-composition-and-flatMap}\ref{subsec:Statement-equivalence-kleisli-composition-and-flatMap}} @@ -5205,8 +5218,7 @@ \subsubsection{Statement \label{subsec:Statement-equivalence-kleisli-composition \begin{align} {\color{greenunder}\text{left naturality of }\diamond_{_{M}}:}\quad & (f^{:A\rightarrow B}\bef g^{:B\rightarrow M^{C}}\big)\diamond_{_{M}}h^{:C\rightarrow M^{D}}=f\bef\big(g\diamond_{_{M}}h\big)\quad.\label{eq:left-naturality-of-kleisli-composition} \end{align} -Note that this law makes parentheses unnecessary in the expression -$f\bef g\diamond_{_{M}}h$.\index{Kleisli composition!with function composition} +This law makes parentheses unnecessary in the expression $f\bef g\diamond_{_{M}}h$.\index{Kleisli composition!with function composition} \subparagraph{Proof} @@ -5227,7 +5239,7 @@ \subsubsection{Statement \label{subsec:Statement-equivalence-kleisli-composition When $\diamond_{_{M}}$ is defined via $\text{flm}_{M}$, the left naturality law~(\ref{eq:left-naturality-of-kleisli-composition}) -will hold because \textsf{``}$\bef$\textsf{''} is associative, +will hold because \textsf{``}$\bef$\textsf{''} is associative: \begin{align*} & (f\bef g)\diamond_{_{M}}h=(f\bef g)\bef\text{flm}_{M}(h)=f\bef(g\bef\text{flm}_{M}(h))=f\bef(g\diamond_{_{M}}h)\quad. \end{align*} @@ -5239,6 +5251,7 @@ \subsubsection{Statement \label{subsec:Statement-equivalence-kleisli-composition \begin{align*} {\color{greenunder}\text{use Eq.~(\ref{eq:express-kleisli-composition-via-flatMap-and-back})}:}\quad & \text{flm}_{M}^{\prime}(f)=\text{id}^{M^{A}}\diamond_{_{M}}f=\gunderline{\text{id}}\bef\text{flm}_{M}(f)=\text{flm}_{M}(f)\quad. \end{align*} +$\square$ We have already derived the laws of Kleisli composition from the laws of \lstinline!flatMap!. We will now derive the converse statement. @@ -5261,18 +5274,17 @@ \subsubsection{Statement \label{subsec:Statement-equivalence-kleisli-laws-and-fl To derive the associativity law~(\ref{eq:associativity-law-flatMap}) of \lstinline!flatMap!, substitute the definition of $\text{flm}_{M}$ -into both sides: +into both sides of that law: \begin{align*} {\color{greenunder}\text{left-hand side}:}\quad & \text{flm}_{M}(f\bef\text{flm}_{M}(g))=\text{id}\diamond_{_{M}}(\gunderline{f\bef\text{id}}\diamond_{_{M}}g)=\text{id}\diamond_{_{M}}(f\diamond_{_{M}}g)\\ {\color{greenunder}\text{associativity law~(\ref{eq:kleisli-associativity-law})}:}\quad & \quad=(\text{id}\diamond_{_{M}}f)\diamond_{_{M}}g\quad,\\ {\color{greenunder}\text{right-hand side}:}\quad & \text{flm}_{M}(f)\bef\text{flm}_{M}(g)=(\text{id}\diamond_{_{M}}f)\,\gunderline{\bef(\text{id}}\diamond_{_{M}}g)\\ {\color{greenunder}\text{left naturality~(\ref{eq:left-naturality-of-kleisli-composition})}:}\quad & \quad=(\text{id}\diamond_{_{M}}f)\,\gunderline{\bef\text{id}}\diamond_{_{M}}g=(\text{id}\diamond_{_{M}}f)\diamond_{_{M}}g\quad. \end{align*} -Both sides of the law are now equal. +Both sides of the law are now equal. $\square$ The two naturality laws of \lstinline!flatMap! are equivalent to -the three naturality laws of $\diamond_{_{M}}$, but we omit those -derivations. +the three naturality laws of $\diamond_{_{M}}$. We omit those derivations. 
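
To illustrate this equivalence in code, the Kleisli composition may
be sketched via \lstinline!flatMap! as shown below. The \lstinline!Monad!
typeclass used here is a minimal definition assumed only for this
illustration (it is not a library interface):
\begin{lstlisting}
// A minimal Monad typeclass, assumed here only for illustration.
trait Monad[M[_]] {
  def pure[A](a: A): M[A]
  def flatMap[A, B](ma: M[A])(f: A => M[B]): M[B]
}
// Kleisli composition via flatMap:  kleisliCompose(f, g).apply(a) == f(a).flatMap(g)
def kleisliCompose[M[_], A, B, C](f: A => M[B], g: B => M[C])(implicit m: Monad[M]): A => M[C] =
  a => m.flatMap(f(a))(g)
// Conversely, flatMap is expressed by applying the Kleisli composition to an identity function:
def flatMapViaKleisli[M[_], A, B](ma: M[A])(f: A => M[B])(implicit m: Monad[M]): M[B] =
  kleisliCompose(identity[M[A]], f).apply(ma)

// Example with the Option monad:
implicit val optionMonad: Monad[Option] = new Monad[Option] {
  def pure[A](a: A): Option[A] = Some(a)
  def flatMap[A, B](ma: Option[A])(f: A => Option[B]): Option[B] = ma.flatMap(f)
}
val f: Int => Option[Int] = x => if (x == 0) None else Some(100 / x)
val g: Int => Option[Int] = y => Some(y + 1)
kleisliCompose(f, g).apply(5)                               // Some(21)
kleisliCompose(f, (y: Int) => optionMonad.pure(y)).apply(5) // Some(20): the right identity law.
\end{lstlisting}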
\subsection{Verifying the monad laws using Kleisli functions} @@ -5284,8 +5296,8 @@ \subsection{Verifying the monad laws using Kleisli functions} type parameters, while the corresponding law~(\ref{eq:associativity-law-of-flatten}) for \lstinline!flatten! has only one. For certain monads, however, a trick called \index{flipped@\textsf{``}flipped Kleisli\textsf{''} technique|textit}\textbf{flipped -Kleisli} makes direct proofs of laws much shorter. That trick applies -to monads of a function type, such as the continuation and the state +Kleisli} makes direct proofs of laws shorter. That trick applies to +monads of a function type, such as the continuation and the state monads. \subsubsection{Statement \label{subsec:Statement-continuation-monad-is-lawful}\ref{subsec:Statement-continuation-monad-is-lawful}} @@ -5304,7 +5316,7 @@ \subsubsection{Statement \label{subsec:Statement-continuation-monad-is-lawful}\r flipped Kleisli technique is to change the types of the Kleisli functions by flipping their two curried arguments. We obtain: \[ -\left(B\rightarrow R\right)\rightarrow A\rightarrow R\quad. +A\rightarrow\text{Cont}^{R,B}\cong\left(B\rightarrow R\right)\rightarrow A\rightarrow R\quad. \] This type looks like a function of the form $K^{B}\rightarrow K^{A}$, where we temporarily defined $K^{A}\triangleq A\rightarrow R$. The @@ -5386,7 +5398,7 @@ \subsubsection{Statement \label{subsec:Statement-state-monad-is-lawful}\ref{subs ordinary composition of those functions. Since we already know that the laws of identity and associativity hold for ordinary functions (Section~\ref{subsec:Laws-of-function-composition}), the proof is -finished. +finished. $\square$ For comparison, look at the type signatures of \lstinline!flatten! for the state and continuation monads: @@ -5395,8 +5407,8 @@ \subsubsection{Statement \label{subsec:Statement-state-monad-is-lawful}\ref{subs \text{ftn}_{\text{Cont}^{R,\bullet}} & :\left(\left(\left(\left(A\rightarrow R\right)\rightarrow R\right)\rightarrow R\right)\rightarrow R\right)\rightarrow\left(A\rightarrow R\right)\rightarrow R\quad. \end{align*} These type signatures are complicated and confusing to read. Direct -proofs of the monad laws for these functions are much longer than -the proofs of Statements~\ref{subsec:Statement-state-monad-is-lawful}\textendash \ref{subsec:Statement-continuation-monad-is-lawful}. +proofs of the monad laws for these functions are more complicated +than the proofs shown in Statements~\ref{subsec:Statement-state-monad-is-lawful}\textendash \ref{subsec:Statement-continuation-monad-is-lawful}. When a monad $M$ has a function type, the Kleisli function $A\rightarrow M^{B}$ has two curried arguments. Flipping or uncurrying those arguments @@ -5417,8 +5429,8 @@ \subsection{Constructions of semi-monads and monads\label{subsec:Structural-anal functor ($\text{Id}^{A}\triangleq A$), and the functor composition ($L^{A}\triangleq F^{G^{A}}$). -A constant functor $F^{A}\triangleq Z$ is a lawful semi-monad because -we can implement: +A constant functor $F^{A}\triangleq Z$ is a lawful semi-monad with +$\text{ftn}_{F}$ defined by: \[ \text{ftn}_{F}=\text{id}^{:Z\rightarrow Z}\quad. \] @@ -5455,8 +5467,8 @@ \subsection{Constructions of semi-monads and monads\label{subsec:Structural-anal \subsubsection{Statement \label{subsec:Statement-not-semimonad-1+r-a}\ref{subsec:Statement-not-semimonad-1+r-a}} The functor $L^{A}\triangleq Z+\left(R\rightarrow A\right)$, where -$R$ and $Z$ are fixed but arbitrary types, cannot have a \lstinline!flatten! -method. 
+$R$ and $Z$ are fixed but arbitrary types, cannot have a fully parametric +\lstinline!flatten! method. \subparagraph{Proof} @@ -5474,13 +5486,13 @@ \subsubsection{Statement \label{subsec:Statement-not-semimonad-1+r-a}\ref{subsec \paragraph{Products} -The product construction works for semi-monads as well as for monads. +The product construction works for semi-monads and for monads. \subsubsection{Statement \label{subsec:Statement-monad-semimonad-product}\ref{subsec:Statement-monad-semimonad-product}} -Given two semi-monads $F^{A}$ and $G^{A}$, the functor $L^{A}\triangleq F^{A}\times G^{A}$ -is a semi-monad. If both $F^{A}$ and $G^{A}$ are monads then $L^{A}$ -is also a monad. +Given two semi-monads $F$ and $G$, the functor $L^{A}\triangleq F^{A}\times G^{A}$ +is a semi-monad. If both $F$ and $G$ are monads then $L$ is also +a monad. \subparagraph{Proof} @@ -5524,7 +5536,7 @@ \subsubsection{Statement \label{subsec:Statement-monad-semimonad-product}\ref{su \text{ftn}_{F}^{\uparrow F}\bef\text{ftn}_{F}=\text{ftn}_{F}\bef\text{ftn}_{F}\quad. \] In order to use this law, we need to move the two functions $\text{ftn}_{F}$ -next to each other in the expressions +next to each other in the expressions: \[ \big(\text{ftn}_{L}^{\uparrow F}\bef\pi_{1}^{\uparrow F}\bef\text{ftn}_{F}\big)\quad\text{ and }\quad\big(\pi_{1}^{\uparrow F}\bef\text{ftn}_{F}\bef\pi_{1}^{\uparrow F}\bef\text{ftn}_{F}\big)\quad, \] @@ -5542,13 +5554,12 @@ \subsubsection{Statement \label{subsec:Statement-monad-semimonad-product}\ref{su \] So, both sides of the associativity law are equal. -Now we assume that $F$ and $G$ are monads with given \lstinline!pure! +Now we assume that $F$ and $G$ are lawful monads with given \lstinline!pure! methods $\text{pu}_{F}$ and $\text{pu}_{G}$. We define: \[ \text{pu}_{L}\triangleq a^{:A}\rightarrow\text{pu}_{F}(a)\times\text{pu}_{G}(a)=\Delta\bef(\text{pu}_{F}\boxtimes\text{pu}_{G})\quad. 
\] -Assuming that identity laws hold for $F$ and $G$, we can now verify -the identity laws for $L$: +To verify the identity laws for $L$: \begin{align*} {\color{greenunder}\text{left identity law of }L:}\quad & \text{pu}_{L}\bef\text{ftn}_{L}=\Delta\bef(\text{pu}_{F}\boxtimes\text{pu}_{G})\bef\big((\pi_{1}^{\uparrow F}\bef\text{ftn}_{F})\boxtimes(\pi_{2}^{\uparrow G}\bef\text{ftn}_{G})\big)\\ {\color{greenunder}\text{use Eq.~(\ref{eq:pair-product-composition-law})}:}\quad & \quad=\Delta\bef\big((\gunderline{\text{pu}_{F}\bef\pi_{1}^{\uparrow F}}\bef\text{ftn}_{F})\boxtimes(\gunderline{\text{pu}_{G}\bef\pi_{2}^{\uparrow G}}\bef\text{ftn}_{G})\big)\\ @@ -5562,19 +5573,18 @@ \subsubsection{Statement \label{subsec:Statement-monad-semimonad-product}\ref{su & \quad=\big(\big(\Delta\bef\gunderline{(\text{pu}_{F}\boxtimes\text{pu}_{G})\big)^{\uparrow F}\bef\pi_{1}^{\uparrow F}}\bef\text{ftn}_{F}\big)\boxtimes\big(\big(\Delta\bef\gunderline{(\text{pu}_{F}\boxtimes\text{pu}_{G})\big)^{\uparrow G}\bef\pi_{2}^{\uparrow G}}\bef\text{ftn}_{G}\big)\\ & \quad{\color{greenunder}\quad\text{projection laws~(\ref{eq:pair-product-left-projection-law}) and~(\ref{eq:pair-product-right-projection-law})}:}\quad\\ & \quad=\big((\gunderline{\Delta\bef\pi_{1}}\bef\text{pu}_{F})^{\uparrow F}\bef\text{ftn}_{F}\big)\boxtimes\big(\big(\gunderline{\Delta\bef\pi_{2}}\bef\text{pu}_{G}\big)^{\uparrow G}\bef\text{ftn}_{G}\big)\\ -\quad & \quad\quad\text{identity laws~(\ref{eq:pair-identity-law-left})}:\\ +\quad & \quad{\color{greenunder}\quad\text{identity laws~(\ref{eq:pair-identity-law-left})}:}\quad\\ & \quad=\big(\gunderline{\text{pu}_{F}^{\uparrow F}\bef\text{ftn}_{F}}\big)\boxtimes\big(\gunderline{\text{pu}_{G}^{\uparrow G}\bef\text{ftn}_{G}}\big)=\text{id}\boxtimes\text{id}=\text{id}\quad. \end{align*} Let us build some intuition about how the product of two monads works in practice. A simple example is the product of two identity monads, $L^{A}\triangleq A\times A$. This type constructor is a monad whose -\lstinline!flatten! function is defined by +\lstinline!flatten! function is defined by: \begin{lstlisting} type Pair[A] = (A, A) def flatten[A]: Pair[Pair[A]] => Pair[A] = { case ((a, b), (c, d)) => (a, d) } \end{lstlisting} -A sample calculation shows that \textsf{``}nested iterations\textsf{''} apply functions -element by element: +A \textsf{``}nested iteration\textsf{''} performs an element-by-element function application: \begin{lstlisting} final case class P[A](x: A, y: A) { def map[B](f: A => B): P[B] = P(f(x), f(y)) @@ -5670,8 +5680,8 @@ \subsubsection{Statement \label{subsec:Statement-semimonad-only-product-a-ga}\re The other possibility is to transform $F^{A\times F^{A}}$ to $F^{A}$ within the functor $F$: \begin{lstlisting} -def flatten2_L[A]: ((A, F[A]), F[(A, F[A])]) => (A, F[A]) = { case (afa, fafa) => - (afa._1, fafa.map(_._1)) +def flatten2_L[A]: ((A, F[A]), F[(A, F[A])]) => (A, F[A]) = { + case (afa, fafa) => (afa._1, fafa.map(_._1)) } \end{lstlisting} In the code notation, these alternative implementations may be written @@ -5717,31 +5727,29 @@ \subsubsection{Statement \label{subsec:Statement-semimonad-only-product-a-ga}\re \paragraph{Co-products} As a rule, the co-product of two monads ($F^{A}+G^{A}$) is not a -monad. For simple examples, see Exercise~\ref{subsec:Exercise-1-monads-6} -for $\bbnum 1+F^{A}$ (where $F^{A}\triangleq A\times A$) and Exercise~\ref{subsec:Exercise-1-monads-4} -for $M^{A}+M^{A}$ with an arbitrary monad $M$. 
An exception to that
-rule is a co-product with the \emph{identity} monad:
+monad. Examples are $\bbnum 1+A\times A$ (Exercise~\ref{subsec:Exercise-1-monads-6})
+and $M^{A}+M^{A}$ with an arbitrary monad $M$ (Exercise~\ref{subsec:Exercise-1-monads-4}).
+An exception to that rule is a co-product with the \emph{identity}
+monad:
 
 \subsubsection{Statement \label{subsec:Statement-co-product-with-identity-monad}\ref{subsec:Statement-co-product-with-identity-monad}}
 
-If $F$ is any monad, the functor $L^{A}\triangleq A+F^{A}$ is a
-monad. (The functor $L$ is called the \textbf{free pointed}\index{free pointed monad}\index{monads!free pointed}\index{free pointed functor}
+If $F$ is a monad, the functor $L^{A}\triangleq A+F^{A}$ is also
+a monad. (The functor $L$ is called the \textbf{free pointed}\index{free pointed monad}\index{monads!free pointed}\index{free pointed functor}
 \textbf{functor on} $F$, for reasons explained in Chapter~\ref{chap:Free-type-constructions}.)
 
 \subparagraph{Proof}
 
 We need to define the monad methods for $L$, for which we may use
 the \lstinline!pure! and \lstinline!flatten! methods of $F$. Begin
-with the \lstinline!flatten! method, which needs to have the type
-signature:
+with the \lstinline!flatten! method of $L$:
 \[
-\text{ftn}_{L}:L^{L^{A}}\rightarrow L^{A}=A+F^{A}+F^{A+F^{A}}\rightarrow A+F^{A}\quad.
+\text{ftn}_{L}:L^{L^{A}}\rightarrow L^{A}\quad,\quad\quad\text{or equivalently}:\quad\text{ftn}_{L}:A+F^{A}+F^{A+F^{A}}\rightarrow A+F^{A}\quad.
 \]
-Since we know nothing about the specific monad $F$, we cannot extract
+We know nothing about the specific monad $F$, so we cannot extract
 a value of type $A$ out of $F^{A}$. However, we can use $F$\textsf{'}s \lstinline!pure!
-method to create a value of type $F^{A}$ out of $A$. This allows
-us to convert $A+F^{A}$ into $F^{A}$ using the function we will
-denote $\gamma$:
+method for implementing a function we will denote by $\gamma$ that
+converts $A+F^{A}$ into $F^{A}$:
 \begin{lstlisting}
 type L[A] = Either[A, F[A]]
 def gamma[A]: L[A] => F[A] = {
@@ -5750,21 +5758,20 @@ \subsubsection{Statement \label{subsec:Statement-co-product-with-identity-monad}
 }
 \end{lstlisting}
 \[
-\gamma^{A}\triangleq\,\begin{array}{|c||c|}
+\gamma^{A}:L^{A}\rightarrow F^{A}\quad,\quad\quad\gamma^{A}\triangleq\,\begin{array}{|c||c|}
  & F^{A}\\
 \hline A & \text{pu}_{F}\\
 F^{A} & \text{id}
 \end{array}\quad.
 \]
 Lifting this function to $F$, we can convert $F^{A+F^{A}}$ into
-$F^{F^{A}}$ and finally into $F^{A}$ via $F$\textsf{'}s \lstinline!flatten!
-method:
+$F^{F^{A}}$ and finally into $F^{A}$:
 \begin{lstlisting}
 def flatten_L[A]: L[L[A]] => L[A] = {
   case Left(Left(a)) => Left(a)
   case Left(Right(fa)) => Right(fa)
   case Right(g) => Right(g.map(gamma).flatten)
-} // The last line equals `Right(g.flatMap(gamma))`.
+} // The last expression is equal to `Right(g.flatMap(gamma))`.
 \end{lstlisting}
 \[
 \text{ftn}_{L}\triangleq\,\begin{array}{|c||cc|}
@@ -5775,13 +5782,13 @@ \subsubsection{Statement \label{subsec:Statement-co-product-with-identity-monad}
 \end{array}\quad.
 \]
 
-Is there another implementation for $\text{ftn}_{L}$? We could have
+Is there another implementation of $\text{ftn}_{L}$? We could have
 replaced $A$ by $F^{A}$ using $\text{pu}_{F}$. However, that code
 would never return a result of type $A+\bbnum 0$, which makes it
-impossible to satisfy identity laws such as $\text{pu}_{F}\bef\text{ftn}_{F}=\text{id}$.
+impossible to satisfy the identity law $\text{pu}_{F}\bef\text{ftn}_{F}=\text{id}$.
 
 The \lstinline!pure! 
method for $L$ could be defined in two ways: -$\text{pu}_{L}\triangleq a^{:A}\rightarrow a+\bbnum 0$ or $\text{pu}_{L}\triangleq a\rightarrow\bbnum 0+\text{pu}_{F}(a)$. +$\text{pu}_{L}\triangleq a^{:A}\rightarrow a+\bbnum 0$ or $\text{pu}_{L}\triangleq a^{:A}\rightarrow\bbnum 0+\text{pu}_{F}(a)$. It turns out that only the first definition satisfies the monad $L$\textsf{'}s identity laws (Exercise~\ref{subsec:Exercise-1-monads-12}). @@ -5948,7 +5955,7 @@ \subsubsection{Statement \label{subsec:Statement-co-product-with-identity-monad} \begin{equation} \text{ftn}_{L}\bef\gamma\overset{?}{=}\gunderline{\gamma\bef\gamma^{\uparrow F}}\bef\text{ftn}_{F}=\gamma^{\uparrow L}\bef\gamma\bef\text{ftn}_{F}\quad,\label{eq:monad-construction-a+f-a-derivation1} \end{equation} -where in the last step we used the naturality law of $\gamma^{:L^{^{A}}\rightarrow F^{A}}$, +where in the last step we used the naturality law of $\gamma^{:L^{A}\rightarrow F^{A}}$, which is a natural transformation: \[ \gamma\bef f^{\uparrow F}=f^{\uparrow L}\bef\gamma\quad,\quad\text{for all }f^{:A\rightarrow B}\quad. @@ -6038,7 +6045,7 @@ \subsubsection{Statement \label{subsec:Statement-monad-construction-1}\ref{subse and define $\tilde{\diamond}_{_{L}}$ and $\tilde{\text{pu}}_{L}$ by: \[ -z\triangleright(f\tilde{\diamond}_{_{L}}g)\triangleq(z\triangleright f)\diamond_{_{F}}(z\triangleright g)\quad,\quad\quad z\triangleright\tilde{\text{pu}}_{L}\triangleq\text{pu}_{F}\quad. +z\triangleright(f\,\tilde{\diamond}_{_{L}}g)\triangleq(z\triangleright f)\diamond_{_{F}}(z\triangleright g)\quad,\quad\quad z\triangleright\tilde{\text{pu}}_{L}\triangleq\text{pu}_{F}\quad. \] For the left-hand side of the associativity law, we write: \[ @@ -6062,7 +6069,7 @@ \subsubsection{Statement \label{subsec:Statement-monad-construction-1}\ref{subse and $\tilde{\text{pu}}_{L}$ to the code of $L$\textsf{'}s \lstinline!flatMap! and \lstinline!pure!: \begin{lstlisting} -type L[A] = Z => F[A] // The type Z and a semi-monad F must be already defined. +type L[A] = Z => F[A] // A type Z and a semi-monad F must be already defined. def flatMap_L[A, B](la: L[A])(f: A => L[B]): L[B] = { z => la(z).flatMap(a => f(a)(z)) } def pure_L[A](a: A): L[A] = { _ => implicitly[Monad[F]].pure(a) } \end{lstlisting} @@ -6070,7 +6077,7 @@ \subsubsection{Statement \label{subsec:Statement-monad-construction-1}\ref{subse \subsubsection{Statement \label{subsec:Statement-monad-construction-2}\ref{subsec:Statement-monad-construction-2}} -For any contrafunctor $H^{A}$, the functor $L^{A}\triangleq H^{A}\rightarrow A$ +For any contrafunctor $H$, the functor $L^{A}\triangleq H^{A}\rightarrow A$ is a monad. \subparagraph{Proof} @@ -6082,7 +6089,7 @@ \subsubsection{Statement \label{subsec:Statement-monad-construction-2}\ref{subse and a $g^{:H^{C}\rightarrow B\rightarrow C}$ must have type $H^{C}\rightarrow A\rightarrow C$. To infer this function\textsf{'}s code, begin with a typed hole: \[ -f^{:H^{B}\rightarrow A\rightarrow B}\tilde{\diamond}_{_{L}}g^{:H^{C}\rightarrow B\rightarrow C}=k^{:H^{C}}\rightarrow\text{???}^{:A\rightarrow C}\quad. +f^{:H^{B}\rightarrow A\rightarrow B}\,\tilde{\diamond}_{_{L}}g^{:H^{C}\rightarrow B\rightarrow C}=k^{:H^{C}}\rightarrow\text{???}^{:A\rightarrow C}\quad. 
\] Looking at the available data, we notice that a value of type $A\rightarrow C$ will be found if we apply $f$ and $g$ to some arguments and then @@ -6104,7 +6111,7 @@ \subsubsection{Statement \label{subsec:Statement-monad-construction-2}\ref{subse Putting the entire code together and substituting an arbitrary value $k^{:H^{C}}$, we get: \begin{equation} -k^{:H^{C}}\triangleright\big(f^{:H^{B}\rightarrow A\rightarrow B}\tilde{\diamond}_{_{L}}g^{:H^{C}\rightarrow B\rightarrow C}\big)\triangleq f\big(k\triangleright(g(k))^{\downarrow H}\big)\bef g(k)\quad.\label{eq:def-of-Kleisli-for-exp-construction-h-a-a} +k^{:H^{C}}\triangleright\big(f^{:H^{B}\rightarrow A\rightarrow B}\,\tilde{\diamond}_{_{L}}g^{:H^{C}\rightarrow B\rightarrow C}\big)\triangleq f\big(k\triangleright(g(k))^{\downarrow H}\big)\bef g(k)\quad.\label{eq:def-of-Kleisli-for-exp-construction-h-a-a} \end{equation} The flipped \lstinline!pure! method ($\tilde{\text{pu}}_{L}$) is @@ -6178,9 +6185,9 @@ \subsubsection{Statement \label{subsec:Statement-examples-of-filterable-contrafu In each case, we need to define the function $\text{lift}_{G,H}$ and verify its laws. If $G$ is only a semi-monad, we will derive -the composition law of $\text{lift}_{G,H}$ assuming only the associativity +the composition law of $\text{lift}_{G,H}$ from the associativity law of $G$ (but not assuming any identity laws). For a full monad -$G$, we will derive the identity law of $\text{lift}_{G,H}$ by using +$G$, we will derive the identity law of $\text{lift}_{G,H}$ from the identity laws of $G$. \textbf{(a)} Since $H^{A}\triangleq Z$ is a constant contrafunctor, @@ -6679,10 +6686,12 @@ \subsubsection{Exercise \label{subsec:Exercise-1-monads-6}\ref{subsec:Exercise-1 \subsubsection{Exercise \label{subsec:Exercise-1-monads-7-not-a-monad}\ref{subsec:Exercise-1-monads-7-not-a-monad}} Consider the functor $D^{A}\triangleq\bbnum 1+A\times A$ (in Scala, -\lstinline!type D[A] = Option[(A, A)]!). Implement the \lstinline!flatten! -and \lstinline!pure! methods for $D$ in at least two different ways. -Show that some of the monad laws fail to hold for those implementations.\footnote{One can prove that $D^{A}\triangleq\bbnum 1+A\times A$ cannot be -a lawful monad. For details, see \texttt{\href{https://stackoverflow.com/questions/49742377/}{https://stackoverflow.com/questions/49742377/}}} +this is defined by \lstinline!type D[A] = Option[(A, A)]!). Implement +the \lstinline!flatten! and \lstinline!pure! methods for $D$ in +at least two different ways. Show that some of the monad laws fail +to hold for every implementation.\footnote{There are several implementations of \lstinline!pure! and \lstinline!flatten! +for $D^{A}\triangleq\bbnum 1+A\times A$, but \emph{none} of them +obey the monad laws. For details, see \texttt{\href{https://stackoverflow.com/questions/49742377/}{https://stackoverflow.com/questions/49742377/}}} \subsubsection{Exercise \label{subsec:Exercise-1-monads-8}\ref{subsec:Exercise-1-monads-8}} @@ -6725,8 +6734,8 @@ \subsubsection{Exercise \label{subsec:Exercise-1-monads-9-1-1}\ref{subsec:Exerci \subsubsection{Exercise \label{subsec:Exercise-monad-of-monoid-is-monoid}\ref{subsec:Exercise-monad-of-monoid-is-monoid}} -Show that \lstinline!M[W]! is a monoid if \lstinline!M[_]! is a -monad and \lstinline!W! is a monoid. +Show that \lstinline!M[W]! is a monoid if \lstinline!M! is a monad +and \lstinline!W! is a monoid. 
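
For concreteness, the monoid operations in question could be defined
as in the following sketch (the \lstinline!Monoid! and \lstinline!Monad!
traits are minimal definitions assumed only for this illustration);
verifying the monoid laws is the subject of this exercise:
\begin{lstlisting}
trait Monoid[W] { def empty: W; def combine(x: W, y: W): W }
trait Monad[M[_]] {
  def pure[A](a: A): M[A]
  def flatMap[A, B](ma: M[A])(f: A => M[B]): M[B]
}
// One possible monoid structure on M[W]:
def monoidOfMonad[M[_], W](implicit m: Monad[M], w: Monoid[W]): Monoid[M[W]] =
  new Monoid[M[W]] {
    def empty: M[W] = m.pure(w.empty)
    def combine(x: M[W], y: M[W]): M[W] =
      m.flatMap(x) { a => m.flatMap(y) { b => m.pure(w.combine(a, b)) } }
  }
\end{lstlisting}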
\subsubsection{Exercise \label{subsec:Exercise-1-monads-9-1}\ref{subsec:Exercise-1-monads-9-1}} @@ -6799,14 +6808,14 @@ \subsubsection{Exercise \label{subsec:Exercise-1-monads-16}\ref{subsec:Exercise- \textbf{(b)} A polynomial functor $F^{A}\triangleq p(A)$ when $p(x)$ is a polynomial of the form $p(x)=x^{n_{1}}+x^{n_{2}}+...+x^{n_{k}}$ -with some positive integers $n_{1}$, ..., $n_{k}$. For example, +with some distinct positive integers $n_{1}<... A) final case class State[S, A](run: S => (A, S)) final case class Cont[R, A](run: (A => R) => R) \end{lstlisting} -In Scala syntax, this makes monadic programs appear to have a method -called \lstinline!run!: +In Scala syntax, this makes monadic programs appear to have a \lstinline!run! +method: \begin{lstlisting} val s: State[S, A] = for { ... } yield { ... } // A monadic program in the State monad. val init: S = ??? // An initial state. @@ -7279,10 +7286,10 @@ \subsection{Monads, effects, and runners} only use runners $\theta^{A}$ that work for a specific type $A$, for example, for $A=$ \lstinline!Int!: \begin{lstlisting} -def runner: Option[Int] => Int = _.getOrElse(0) // For empty Option values, return a default. +def runner: Option[Int] => Int = _.getOrElse(0) // For empty Option values, return some default Int value. \end{lstlisting} Even if we restrict all types to \lstinline!Int!, this runner will -fail to obey the composition law: +fail the composition law: \begin{lstlisting} val m1: Option[Int] = None val m2: Int => Option[Int] = { x => Some(x + 1) } @@ -7323,15 +7330,16 @@ \subsection{Monads, effects, and runners} \subsection{Monads in category theory. Monad morphisms\label{subsec:Monads-in-category-theory-monad-morphisms}} -For any monad $M$, one defines a category, called the $M$-\index{Kleisli!category}Kleisli -category where objects are all types (\lstinline!Int!, \lstinline!String!, +For any monad $M$, one defines a category, which we call the $M$-\index{Kleisli!category}Kleisli +category, where objects are all types (\lstinline!Int!, \lstinline!String!, etc.) and morphisms between types $A$ and $B$ are Kleisli functions of type $A\rightarrow M^{B}$. -One axiom of a category requires us to have an identity morphism $A\rightarrow M^{A}$ -for every object $A$; this is the monad $M$\textsf{'}s \lstinline!pure! -method, $\text{pu}_{M}:A\rightarrow M^{A}$. Another axiom is the -associativity of morphism composition operation, which must combine +One axiom of a category requires us to have an identity morphism for +every object $A$. For the $M$-Kleisli category, this is the monad +$M$\textsf{'}s \lstinline!pure! method, $\text{pu}_{M}:A\rightarrow M^{A}$. +Another axiom is the associativity of morphism composition operation. +For the $M$-Kleisli category, the composition operation must combine functions of types $A\rightarrow M^{B}$ and $B\rightarrow M^{C}$ into a function of type $A\rightarrow M^{C}$. The Kleisli composition $\diamond_{_{M}}$ is precisely that operation, and its associativity @@ -7339,7 +7347,7 @@ \subsection{Monads in category theory. Monad morphisms\label{subsec:Monads-in-ca is equivalent to a law of \lstinline!flatMap! (Statements~\ref{subsec:Statement-associativity-law-for-kleisli}\textendash \ref{subsec:Statement-equivalence-kleisli-laws-and-flatMap-laws}). So, a functor $M$ is a monad if and only if the corresponding $M$-Kleisli -category is lawful. This is an concise way of formulating the monad +category is lawful. This is a concise way of formulating the monad laws. 
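
For readers who prefer code, this formulation can be sketched in Scala
as shown below for the \lstinline!Option! monad. The \lstinline!Category!
trait is a minimal definition assumed only for this illustration:
\begin{lstlisting}
// A minimal Category typeclass, assumed here only for illustration.
trait Category[Hom[_, _]] {
  def id[A]: Hom[A, A]
  def compose[A, B, C](f: Hom[A, B], g: Hom[B, C]): Hom[A, C]
}
// Morphisms of the Option-Kleisli category are functions of type A => Option[B].
type OptKleisli[A, B] = A => Option[B]
// The identity morphism is `pure`, and the composition is the Kleisli composition.
val optionKleisliCategory: Category[OptKleisli] = new Category[OptKleisli] {
  def id[A]: OptKleisli[A, A] = a => Some(a)
  def compose[A, B, C](f: OptKleisli[A, B], g: OptKleisli[B, C]): OptKleisli[A, C] =
    a => f(a).flatMap(g)
}
// The laws of this Category instance are equivalent to the monad laws of Option.
\end{lstlisting}
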
We have seen that, for some monads, proofs of the laws are easier @@ -7399,8 +7407,8 @@ \subsubsection{Definition \label{subsec:Definition-monad-morphism}\ref{subsec:De \text{flm}_{M}(f^{:A\rightarrow M^{B}})\bef\phi^{:M^{B}\rightarrow N^{B}}=\phi\bef\text{flm}_{N}(f\bef\phi)\quad.\label{eq:monad-morphism-composition-law-using-flatMap} \end{equation} -In terms of the Kleisli composition operations $\diamond_{_{M}}$ -and $\diamond_{_{N}}$, the composition law is: +In terms of the operations $\diamond_{_{M}}$ and $\diamond_{_{N}}$, +the composition law is: \[ (f^{:A\rightarrow M^{B}}\bef\phi^{:M^{B}\rightarrow N^{B}})\diamond_{_{N}}(g^{:B\rightarrow M^{C}}\bef\phi^{:M^{C}\rightarrow N^{C}})=(f\diamond_{_{M}}g)\bef\phi^{:M^{C}\rightarrow N^{C}}\quad. \] @@ -7432,7 +7440,7 @@ \subsubsection{Example \label{subsec:Example-monad-morphism-either-option}\ref{s Show that the function $\phi^{A}:Z+A\rightarrow\bbnum 1+A$ defined below is a monad morphism between the \lstinline!Either! and \lstinline!Option! -monads. The implementation of $\phi$ is: +monads. The code of $\phi$ is: \begin{lstlisting} def toOption[Z, A]: Either[Z, A] => Option[A] = { case Left(z) => None @@ -7619,7 +7627,7 @@ \subsubsection{Statement \label{subsec:Statement-flatMap-formulation-of-monad-mo \textbf{(b)} Substitute the definitions $\text{flm}_{M}(f)=f^{\uparrow M}\bef\text{ftn}_{M}$ and $\text{flm}_{N}(f)=f^{\uparrow N}\bef\text{ftn}_{N}$ into Eq.~(\ref{eq:monad-morphism-composition-law-using-flatMap}): \begin{align*} - & f^{\uparrow M}\bef\text{ftn}_{M}\bef\phi=\gunderline{\phi\bef(f\bef\phi)^{\uparrow N}}\bef\text{ftn}_{N}\quad.\\ + & f^{\uparrow M}\bef\text{ftn}_{M}\bef\phi=\gunderline{\phi\bef(f\bef\phi)^{\uparrow N}}\bef\text{ftn}_{N}\\ {\color{greenunder}\text{use Eq.~(\ref{eq:monad-morphism-naturality-law})}:}\quad & =\gunderline{(f\bef\phi)^{\uparrow M}}\bef\phi\bef\text{ftn}_{N}=f^{\uparrow M}\bef\phi^{\uparrow M}\bef\phi\bef\text{ftn}_{M}\quad. \end{align*} This equality holds for any $f$, in particular with $f=\text{id}$, @@ -7652,7 +7660,7 @@ \subsubsection{Statement \label{subsec:Statement-pure-M-is-monad-morphism}\ref{s For any monad $M$, the method $\text{pu}_{M}:A\rightarrow M^{A}$ is a monad morphism $\text{pu}_{M}:\text{Id}\leadsto M$ between the -identity monad and $M$. +identity monad ($\text{Id}$) and $M$. \subparagraph{Proof} @@ -7679,7 +7687,7 @@ \subsubsection{Exercise \label{subsec:Exercise-fmap-is-not-monadic-morphism}\ref f:\left(Z\rightarrow A\right)\rightarrow M^{A}\quad,\quad\quad f\,(q^{:Z\rightarrow A})\triangleq m_{0}\triangleright q^{\uparrow M}\quad. \] Prove that $f$ is \emph{not} a monad morphism from the \lstinline!Reader! -monad $R^{A}\triangleq Z\rightarrow A$ to the monad $M^{A}$, despite +monad $R^{A}\triangleq Z\rightarrow A$ to the monad $M$, despite having the correct type signature. \textbf{(b)} Under the same assumptions, consider the function $\phi$ @@ -7688,15 +7696,37 @@ \subsubsection{Exercise \label{subsec:Exercise-fmap-is-not-monadic-morphism}\ref \phi:(Z\rightarrow M^{A})\rightarrow M^{A}\quad,\quad\quad\phi\,(q^{:Z\rightarrow M^{A}})\triangleq m_{0}\triangleright\text{flm}_{M}(q)\quad. \] Show that $\phi$ is \emph{not} a monad morphism from the monad $Q^{A}\triangleq Z\rightarrow M^{A}$ -to $M^{A}$. +to $M$. \subsubsection{Exercise \label{subsec:Exercise-fmap-is-not-monadic-morphism-1}\ref{subsec:Exercise-fmap-is-not-monadic-morphism-1}} Show that \lstinline!List!\textsf{'}s \lstinline!headOption! 
method viewed -as a function of type $\forall A.\:\text{List}^{A}\rightarrow\bbnum 1+A$ +as a function of type $\forall A.\,\text{List}^{A}\rightarrow\bbnum 1+A$ is a natural transformation but \emph{not} a monad morphism between the monads \lstinline!List! and \lstinline!Option!. +\subsubsection{Exercise \label{subsec:Exercise-reasoning-1-4-1}\ref{subsec:Exercise-reasoning-1-4-1}} + +Assume given functors $F$, $G$, $K$, $L$ and a natural transformation +$\phi:F^{A}\rightarrow G^{A}$. + +\textbf{(a)} Prove that $\phi^{\uparrow K}:K^{F^{A}}\rightarrow K^{G^{A}}$ +is also a natural transformation. + +\textbf{(b)} Given another natural transformation $\psi:K^{A}\rightarrow L^{A}$, +prove that the pair product of $\phi$ and $\psi$, that is, $\phi\boxtimes\psi:F^{A}\times K^{A}\rightarrow G^{A}\times L^{A}$, +as well as the pair co-product $\phi\boxplus\psi:F^{A}+K^{A}\rightarrow G^{A}+L^{A}$, +are also natural transformations. The \textbf{pair co-product}\index{pair co-product of functions|textit} +of two functions $\phi$ and $\psi$ is defined by: +\[ +(\phi\boxplus\psi):F^{A}+K^{A}\rightarrow G^{A}+L^{A}\quad,\quad\quad\phi\boxplus\psi\triangleq\,\begin{array}{|c||cc|} + & G^{A} & L^{A}\\ +\hline F^{A} & \phi & \bbnum 0\\ +K^{A} & \bbnum 0 & \psi +\end{array}\quad. +\] + + \subsection{Constructions of polynomial monads\label{subsec:Constructions-of-polynomial-monads}} \textsf{``}Polynomial monads\textsf{''} are polynomial functors that have lawful monad @@ -7705,22 +7735,21 @@ \subsection{Constructions of polynomial monads\label{subsec:Constructions-of-pol \begin{enumerate} \item Start with $F^{A}\triangleq Z+W\times A$, which is a monad (semi-monad) when $W$ is a monoid (semigroup). -\item Given a polynomial monad $F^{A}$, create the monad $L^{A}\triangleq A+F^{A}$. -\item Given two polynomial monads $F^{A}$ and $G^{A}$, create the monad -$L^{A}\triangleq F^{A}\times G^{A}$. -\item Given a polynomial monad $F^{A}$, create the monad $L^{A}\triangleq F^{Z+W\times A}$ +\item Given a polynomial monad $F$, create the monad $L^{A}\triangleq A+F^{A}$. +\item Given two polynomial monads $F$ and $G$, create the monad $L^{A}\triangleq F^{A}\times G^{A}$. +\item Given a polynomial monad $F$, create the monad $L^{A}\triangleq F^{Z+W\times A}$ (see Section~\ref{sec:transformers-linear-monads}). \end{enumerate} -It is an open question (see Problem~\ref{par:Problem-monads}) that +It is an open question (see Problem~\ref{par:Problem-monads}) whether these are the only constructions available for polynomial monads. -If the conjecture is true, we can create an algorithm that recognizes -whether a given polynomial functor can be made into a monad by suitable -definitions of \lstinline!flatten! and \lstinline!pure!. +If that is true, one could create an algorithm that recognizes whether +a given polynomial functor can be made into a monad by suitable definitions +of \lstinline!flatten! and \lstinline!pure!. As an example, consider the fact that the polynomial functor $F^{A}\triangleq\bbnum 1+A\times A$ cannot be made into a monad (Exercise~\ref{subsec:Exercise-1-monads-6}). -One can also show that $F^{A}$ cannot be obtained through the monad -constructions listed above. Indeed, the corresponding polynomial $f(x)=1+x^{2}$ +One can also show that $F$ cannot be obtained through the monad constructions +listed above. Indeed, the corresponding polynomial $f(x)=1+x^{2}$ does not contain any first powers of $x$. 
However, all constructions either start with a polynomial containing $x$, or add $x$, or take a product of two such polynomials. None of these operations could @@ -7785,8 +7814,8 @@ \subsection{Constructions of $M$-filterable functors and contrafunctors\label{su As in the case of ordinary filterable functors, it turns out that we must at the same time analyze $M$-filterable contrafunctors. -In the following constructions, we always assume that $M$ is a fixed, -lawful monad. +In the following constructions, we assume that $M$ is a fixed, lawful +monad. We omit the proofs of all following statements because they are fully analogous to the proofs of filterable functor and contrafunctor constructions @@ -7803,7 +7832,7 @@ \subsection{Constructions of $M$-filterable functors and contrafunctors\label{su The same statement shows that $F^{A}\triangleq M^{A}\rightarrow Z$ and $F^{A}\triangleq A\rightarrow M^{Z}$ are $M$-filterable contrafunctors. -The monad $M^{A}$ itself is $M$-filterable; $\text{lift}_{M,M}(f)\triangleq\text{flm}_{M}(f)$. +The monad $M$ itself is $M$-filterable; $\text{lift}_{M,M}(f)\triangleq\text{flm}_{M}(f)$. The identity functor is not $M$-filterable except when $M$ is the identity monad, $M^{A}=\text{Id}^{A}\triangleq A$. (However, with @@ -7818,17 +7847,17 @@ \subsection{Constructions of $M$-filterable functors and contrafunctors\label{su \paragraph{Products} -If $F^{A}$ and $G^{A}$ are $M$-filterable then $L^{A}\triangleq F^{A}\times G^{A}$ +If $F$ and $G$ are $M$-filterable then $L^{A}\triangleq F^{A}\times G^{A}$ is $M$-filterable (Statement~\ref{subsec:Statement-filterable-functor-product}). \paragraph{Co-products} -If $F^{A}$ and $G^{A}$ are $M$-filterable then $L^{A}\triangleq F^{A}+G^{A}$ +If $F$ and $G$ are $M$-filterable then $L^{A}\triangleq F^{A}+G^{A}$ is $M$-filterable (Statement~\ref{subsec:Statement-filterable-coproduct}). \paragraph{Function types} -If $F^{A}$ is an $M$-filterable functor and $G^{A}$ is an $M$-filterable +If $F$ is an $M$-filterable functor and $G$ is an $M$-filterable contrafunctor then $F^{A}\rightarrow G^{A}$ and $G^{A}\rightarrow F^{A}$ are $M$-filterable (contra)functors (Statements~\ref{subsec:Statement-filterable-function-type} and~\ref{subsec:Statement-function-type-exponential-filterable-contrafunctor}). @@ -7840,8 +7869,8 @@ \subsection{Constructions of $M$-filterable functors and contrafunctors\label{su \paragraph{Recursive types} If $S^{A,R}$ is a bifunctor that is $M$-filterable with respect -to $A$, the recursive functor $F^{A}$ defined by the type equation -$F^{A}\triangleq S^{A,F^{A}}$ is $M$-filterable (Statement~\ref{subsec:Statement-filterable-recursive-type-1}). +to $A$, the recursive functor $F$ defined by the type equation $F^{A}\triangleq S^{A,F^{A}}$ +is $M$-filterable (Statement~\ref{subsec:Statement-filterable-recursive-type-1}). 
If $S^{A,R}$ is a profunctor\index{profunctor} contravariant in $A$ and covariant in $R$, and additionally $S^{\bullet,R}$ is $M$-filterable @@ -8995,19 +9024,19 @@ \subsection{Constructions of $M$-filterable functors and contrafunctors\label{su if ting laws as we will see so what are the properties of this closely operation so let\textsf{'}s reformulate the laws of flat map in terms of the class the operation a class decomposition a diamond so the formulation -becomes a very elegant set of laws so that left and right identity -laws are like this so pure composed with F is if F composed with pure -is f now here F must be one of these functions in now it\textsf{'}s obvious -why they're called left and right identity loss pure is identity and -this is exactly like a binary operation in a mono ed which has left -identity right identity associative eighty law is written like this -which is very concise and it follows directly from flm law because -they're phalam law all you need to do is you write the FLN law which -is this the that equals that and you prepend it with some function -f arbitrary function f and then you rewrite this by definition F followed -by flatmap is the Dimond operation so that becomes directly the left-hand -side from here and the right-hand side from here now in written in -this way the laws are very suggestive of a monrad so these laws express +becomes an elegant set of laws so that left and right identity laws +are like this so pure composed with F is if F composed with pure is +f now here F must be one of these functions in now it\textsf{'}s obvious why +they're called left and right identity loss pure is identity and this +is exactly like a binary operation in a mono ed which has left identity +right identity associative eighty law is written like this which is +very concise and it follows directly from flm law because they're +phalam law all you need to do is you write the FLN law which is this +the that equals that and you prepend it with some function f arbitrary +function f and then you rewrite this by definition F followed by flatmap +is the Dimond operation so that becomes directly the left-hand side +from here and the right-hand side from here now in written in this +way the laws are very suggestive of a monrad so these laws express amyloid of functions where the binary operation is the diamond composition or the classic composition the functions must be all Class C functions so they must all have the as twisted type a to s be for some a and diff --git a/sofp-src/tex/sofp-preface.tex b/sofp-src/tex/sofp-preface.tex index 69cdbb36e..6b9e5ee06 100644 --- a/sofp-src/tex/sofp-preface.tex +++ b/sofp-src/tex/sofp-preface.tex @@ -12,8 +12,8 @@ Readers will need to learn some difficult concepts through prolonged mental concentration and effort. \end{comment} -The book assumes a certain amount of mathematical experience, at about -the level of undergraduate algebra or calculus, as well as some experience +The book assumes a certain amount of mathematical experience (at about +the level of undergraduate algebra or calculus) as well as some experience writing code in general-purpose programming languages. The vision of this book is to explain the mathematical theory that @@ -29,13 +29,11 @@ on page~\pageref{chap:Appendix-Notations}) and terminology (Appendix~\ref{chap:Appendix-Glossary-of-terms} on page~\pageref{chap:Appendix-Glossary-of-terms}). The presentation is self-contained, defining and explaining all required techniques, -notations, and Scala features. 
Although the code examples are in Scala, -the material in this book also applies to many other functional programming -languages. - -All concepts and techniques are illustrated by examples and explained -as simply as possible (\textsf{``}but not simpler\textsf{''}, as Einstein said). Exercises -should be attempted after absorbing the preceding material. +notations, and Scala features. All code examples have been tested +to work but are intended only for explanation and illustration. As +a rule, the code is not optimized for performance. Although the code +examples are in Scala, the material in this book also applies to many +other functional programming languages. A software engineer needs to learn only those few fragments of mathematical theory that answer questions arising in the programming practice. @@ -61,22 +59,15 @@ The first part of the book introduces functional programming. Readers already familiar with functional programming could skim the glossary (Appendix~\ref{chap:Appendix-Glossary-of-terms} on page~\pageref{chap:Appendix-Glossary-of-terms}) -for unfamiliar terminology and then start reading Chapter~\ref{chap:5-Curry-Howard}. - -Chapters~\ref{chap:5-Curry-Howard}\textendash \ref{chap:Functors,-contrafunctors,-and} -begin using the code notation, such as Eq.~(\ref{eq:f-functor-exponential-def-of-fmap}). -If that notation still appears hard to follow after going through -Chapters~\ref{chap:5-Curry-Howard}\textendash \ref{chap:Functors,-contrafunctors,-and}, -readers will benefit from working through Chapter~\ref{chap:Reasoning-about-code}, -which summarizes the code notation more systematically and clarifies -it with additional examples. +for unfamiliar terminology and then start reading Chapter~\ref{chap:5-Curry-Howard}. -All code examples have been tested to work but are intended only for -explanation and illustration. As a rule, the code is not optimized -for performance. - -The author thanks Joseph Kim and Jim Kleck for doing some of the exercises -and reporting some errors in earlier versions of this book. The author +Participation in the meetup \textsf{``}San Francisco Types, Theorems, and +Programming Languages\textsf{''}\footnote{\texttt{\href{https://www.meetup.com/sf-types-theorems-and-programming-languages/}{https://www.meetup.com/sf-types-theorems-and-programming-languages/}}} +initially motivated the author to begin working on this book. Thanks +are due to Adrian King, Hew Wolff, Peter Vanderbilt, and Young-il +Choo for inspiration and support in that meetup. The author appreciates +the work of Joseph Kim and Jim Kleck who did many of the exercises +and reported some errors in earlier versions of this book. The author also thanks Bill Venners for many helpful comments on the draft, and Harald Gliebe, Andreas R\"ohler, and Philip Schwarz for contributing corrections to the text via \texttt{github}. The author is grateful @@ -109,10 +100,10 @@ res0: Int = 3628800 \end{lstlisting} \item In the introductory chapters, type expressions and code examples are -written in the syntax of Scala. Starting from Chapters~\ref{chap:Higher-order-functions}\textendash \ref{chap:5-Curry-Howard}, -the book introduces a mathematical notation for types: for example, -the Scala type expression \lstinline!((A, B)) => Option[A]! is written -as $A\times B\rightarrow\bbnum 1+A$. Chapters~\ref{chap:Higher-order-functions}\textendash \ref{chap:Reasoning-about-code} +written in the Scala syntax. 
In Chapters~\ref{chap:Higher-order-functions}\textendash \ref{chap:5-Curry-Howard}, +the book introduces a mathematical notation for types: e.g., the Scala +type expression \lstinline!((A, B)) => Option[A]! is written as $A\times B\rightarrow\bbnum 1+A$. +Chapters~\ref{chap:Higher-order-functions}\textendash \ref{chap:Reasoning-about-code} also develop a more concise notation for code. For example, the functor composition law (in Scala: \lstinline!_.map(f).map(g) == _.map(f andThen g)!) is written in the code notation as: @@ -124,7 +115,11 @@ denotes the function $f$ lifted to the functor $L$ and replaces Scala\textsf{'}s syntax \lstinline!x.map(f)! where \lstinline!x! is of type \lstinline!L[A]!. The symbol $\bef$ denotes the forward composition -of functions (Scala\textsf{'}s method \lstinline!andThen!). Appendix~\ref{chap:Appendix-Notations} +of functions (Scala\textsf{'}s method \lstinline!andThen!). If the notation +still appears hard to follow after going through Chapters~\ref{chap:5-Curry-Howard}\textendash \ref{chap:Functors,-contrafunctors,-and}, +readers will benefit from working through Chapter~\ref{chap:Reasoning-about-code}, +which summarizes the code notation more systematically and clarifies +it with additional examples. Appendix~\ref{chap:Appendix-Notations} on page~\pageref{chap:Appendix-Notations} summarizes this book\textsf{'}s notation for types and code. \item Frequently used methods of standard typeclasses, such as Scala\textsf{'}s \lstinline!flatten!, diff --git a/sofp-src/tex/sofp-reasoning.tex b/sofp-src/tex/sofp-reasoning.tex index 48eb763f2..418dfe4f6 100644 --- a/sofp-src/tex/sofp-reasoning.tex +++ b/sofp-src/tex/sofp-reasoning.tex @@ -90,6 +90,8 @@ \subsection{The nine constructions of fully parametric code} Scala example: \begin{lstlisting} def f[A](x: A): Int = 123 + // Or equivalently: +def f[A]: A => Int = { _ => 123 } \end{lstlisting} Code notation: \[ @@ -118,7 +120,7 @@ \subsection{The nine constructions of fully parametric code} that may use \lstinline!x! as a free variable\index{free variable} (i.e., a variable that should be defined outside that expression). E.g., the expression \lstinline!123 + x + x! uses \lstinline!x! -as a free variable because \lstinline!123 + x + x! only makes sense +as a free variable: indeed, \lstinline!123 + x + x! makes sense only if \lstinline!x! is already defined outside that expression. The Scala code for the corresponding nameless function is: \begin{lstlisting} @@ -138,7 +140,8 @@ \subsection{The nine constructions of fully parametric code} inside \lstinline!expr!, e.g., to \lstinline!{ z => z }!. The resulting code is written in Scala as: \begin{lstlisting} -{ x: Int => { z: Int => z } } +{ x: Int => { z: Int => z } } // Or equivalently: +(x: Int) => (z: Int) => z \end{lstlisting} Code notation: \[ @@ -149,7 +152,7 @@ \subsection{The nine constructions of fully parametric code} \paragraph{4) Use a function} If a function is already defined, we can use it by applying it to -an argument. Scala example: +an argument. A Scala example: \begin{lstlisting} val f = { x: Int => 123 + x + x } f(100) // Evaluates to 323. @@ -172,7 +175,7 @@ \subsection{The nine constructions of fully parametric code} Given a tuple \lstinline!p == (a, b)!, we can extract each of the values via \lstinline!p._1! and \lstinline!p._2!. The corresponding code notation is $p\triangleright\pi_{1}$ and $p\triangleright\pi_{2}$. 
-The auxiliary functions $\pi_{i}$ (where $i=1,2,...$) may be used +The standard functions $\pi_{i}$ (where $i=1,2,...$) may be used for tuples of any size. Example code defining these functions: \begin{lstlisting} def pi_1[A, B]: ((A, B)) => A = { case (a, b) => a } // Same as p => p._1 @@ -183,8 +186,8 @@ \subsection{The nine constructions of fully parametric code} \pi_{1}^{A,B} & \triangleq a^{:A}\times b^{:B}\rightarrow a\quad,\\ \pi_{2}^{A,B} & \triangleq a^{:A}\times b^{:B}\rightarrow b\quad. \end{align*} -The notation $a\times b$ is used in an \emph{argument} of a function -to destructure a tuple. +We use the notation $a\times b$ in an \emph{argument} of a function +to destructure tuples. \paragraph{7) Create a disjunctive value} @@ -217,23 +220,23 @@ \subsection{The nine constructions of fully parametric code} is that the notation $\bbnum 0+\bbnum 0+x$ is similar to a row vector, $\,\begin{array}{|ccc|} \bbnum 0 & \bbnum 0 & x\end{array}$~, which is well adapted to the matrix notation for \textsf{``}disjunctive -functions\textsf{''}, which we will explain next. +functions\textsf{''}. \paragraph{8) Use a disjunctive value} Once created, disjunctive values can be used as arguments of pattern-matching expressions (Scala\textsf{'}s \lstinline!match!/\lstinline!case! syntax). Recall that functions that take a disjunctive value as an argument -(called \textsf{``}\index{disjunctive functions}\textbf{disjunctive functions}\textsf{''}) +(\textsf{``}\index{disjunctive functions}\textbf{disjunctive functions}\textsf{''}) may be also written in Scala \emph{without} the \lstinline!match! -keyword. Scala example: +keyword. A Scala example: \begin{lstlisting} val compute: Option[Int] => Option[Int] = { case None => Some(100) case Some(x) => Some(x / 2) } \end{lstlisting} -The code notation for this disjunctive function is modeled after that +The code notation for this disjunctive function is modeled after the Scala code: \[ \text{compute}^{:\bbnum 1+\text{Int}\rightarrow\bbnum 1+\text{Int}}\triangleq\,\begin{array}{|c||cc|} @@ -247,12 +250,13 @@ \subsection{The nine constructions of fully parametric code} written in the matrix notation\index{matrix notation}\index{disjunctive type!matrix notation}. Each row of a matrix corresponds to a part of the disjunctive type -matched by one of the \lstinline!case! expressions. In this example, -the disjunctive type \lstinline!Option[Int]! has two parts: the named -unit \lstinline!None! (denoted by $\bbnum 1$) and the case class -\lstinline!Some[Int]!, which is equivalent to the type \lstinline!Int!. -So, the matrix has two rows labeled $\bbnum 1$ and $\text{Int}$, -showing that the function\textsf{'}s argument type is $\bbnum 1+\text{Int}$. +matched by one of the \lstinline!case! expressions. The column to +the left of the double line shows the corresponding disjunctive subtypes. +In this example, the disjunctive type \lstinline!Option[Int]! has +two parts: the named unit \lstinline!None! (denoted by $\bbnum 1$) +and the case class \lstinline!Some[Int]!, which is equivalent to +the type \lstinline!Int!. So, the matrix has two rows labeled $\bbnum 1$ +and $\text{Int}$, showing that the function\textsf{'}s argument type is $\bbnum 1+\text{Int}$. The columns of the matrix correspond to the parts of the disjunctive type \emph{returned} by the function. In this example, the return @@ -261,14 +265,15 @@ \subsection{The nine constructions of fully parametric code} If the return type is not disjunctive, the matrix will have one column. What are the matrix elements? 
The idea of the matrix notation is to -translate the \lstinline!case! expressions line by line from the -Scala code. Look at the first \lstinline!case! line as if it were -a standalone partial function: +translate the \lstinline!case! expressions line by line. Look at +the first \lstinline!case! line as if it were a standalone partial +function: \begin{lstlisting} { case None => Some(100) } \end{lstlisting} Since \lstinline!None! is a named unit and is denoted by $1$, this -function is written in the code notation as $1\rightarrow\bbnum 0^{:\bbnum 1}+100^{:\text{Int}}$. +function is written in the code notation as $1\rightarrow\bbnum 0^{:\bbnum 1}+100^{:\text{Int}}$, +or more concisely as $\_\rightarrow\bbnum 0+100$. The second line is written in the form of a partial function as: \begin{lstlisting} @@ -277,15 +282,15 @@ \subsection{The nine constructions of fully parametric code} The pattern variable on the left side is \lstinline!x! and has type \lstinline!Int!, so we denote that function by $x^{:\text{Int}}\rightarrow\bbnum 0^{:\bbnum 1}+(x/2)^{:\text{Int}}$. -To obtain the matrix notation, we write the two partial functions -in the two rows: +To obtain the matrix code notation for \lstinline!compute!, we may +begin by writing the two partial functions as two rows of a matrix: \begin{lstlisting} val compute: Option[Int] => Option[Int] = { case None => Some(100) case Some(x) => Some(x / 2) } \end{lstlisting} -Code notation: +The code notation is: \[ \text{compute}^{:\bbnum 1+\text{Int}\rightarrow\bbnum 1+\text{Int}}\triangleq\,\begin{array}{|c||c|} & \bbnum 1+\text{Int}\\ @@ -293,13 +298,13 @@ \subsection{The nine constructions of fully parametric code} \text{Int} & x\rightarrow\bbnum 0+\frac{x}{2} \end{array}\quad. \] -This is already a valid matrix notation for the function $f$. So -far, the matrix has two rows and one column. However, we notice that -each row\textsf{'}s return value is \emph{known} to be in a specific part of -the disjunctive type $\bbnum 1+\text{Int}$ (in this example, both -rows return values of type $\bbnum 0+\text{Int}$). So, we can split -the column into two columns and obtain a clearer and more useful notation -for this function: +This is already a valid matrix notation for the function \lstinline!compute!. +So far, the matrix has two rows and one column. Then we notice that +each row\textsf{'}s return value is \emph{known} to be in a specific subtype +of the disjunctive type $\bbnum 1+\text{Int}$; in this example, both +rows return values of the subtype $\bbnum 0+\text{Int}$. So, we split +the column into two columns labeled \textsf{``}$\bbnum 1$\textsf{''} and \textsf{``}$\text{Int}$\textsf{''}. +This gives a more useful code notation for \lstinline!compute!: \[ \text{compute}^{:\bbnum 1+\text{Int}\rightarrow\bbnum 1+\text{Int}}\triangleq\,\begin{array}{|c||cc|} & \bbnum 1 & \text{Int}\\ @@ -307,10 +312,16 @@ \subsection{The nine constructions of fully parametric code} \text{Int} & \bbnum 0 & x^{:\text{Int}}\rightarrow\frac{x}{2} \end{array}\quad. \] -The void type\index{void type!in matrix notation} $\bbnum 0$ is -written symbolically to indicate that the disjunctive part in that -column is not returned. In this way, the matrix displays the parts -of disjunctive types that are being returned. +The void type\index{void type!in matrix notation} ($\bbnum 0$) is +written in the first column to indicate that the disjunctive part +in that column is not returned. There is no confusion with other columns +because the type $\bbnum 0$ has no values. 
In this way, the matrix +clearly displays the parts of disjunctive types that are being returned +in each case. + +Because only one part of a disjunctive type can ever be returned, +a row can have at most one non-void value. That value will be in the +column corresponding to the part being returned. Partial functions are expressed in the matrix notation by writing $\bbnum 0$ in the missing rows: @@ -352,15 +363,12 @@ \subsection{The nine constructions of fully parametric code} It is convenient to put the argument $p$ to the \emph{left} of the disjunctive function, resembling the Scala syntax \lstinline!p match {...}!. -Because only one part of a disjunctive type can ever be returned, -a row can have at most one non-void value. That value will be in the -column corresponding to the part being returned. - -The matrix notation allows us to compute such function applications -directly. We view the disjunctive value $\bbnum 0+64^{:\text{Int}}$ -as a \textsf{``}row vector\textsf{''} $\,\begin{array}{|cc|} -\bbnum 0 & 64\end{array}$~, written with a single left line to distinguish it from a function -matrix. Calculations use the standard rules of a vector-matrix product: +Let us see how to compute function applications in the matrix notation. +We view the disjunctive value $\bbnum 0+64^{:\text{Int}}$ as a \textsf{``}row +vector\textsf{''} $\,\begin{array}{|cc|} +\bbnum 0 & 64\end{array}$~. Vectors are written with a single line at left, to distinguish +them from function matrices. Calculations use the standard rules of +a vector-matrix product: \begin{align*} & (\bbnum 0+64)\triangleright\,\begin{array}{||cc|} \bbnum 0 & \_\rightarrow100\\ @@ -372,24 +380,22 @@ \subsection{The nine constructions of fully parametric code} \end{array}\\ & =\,\begin{array}{|cc|} \bbnum 0 & 64\triangleright(x\rightarrow\frac{x}{2})\end{array}\,=\,\begin{array}{|cc|} -\bbnum 0 & 32\end{array}\,=(\bbnum 0+32)\quad. +\bbnum 0 & 32\end{array}\,=\bbnum 0^{:\bbnum 1}+32^{:\text{Int}}\quad. \end{align*} Instead of the multiplication of matrix elements as it would be done -in matrix algebra, we use the pipe ($\triangleright$) operation, -and we drop any terms containing $\bbnum 0$. (We omitted type annotations -here, because we already checked that the types match.) +in linear algebra, we use the pipe ($\triangleright$) operation, +and we drop any terms containing $\bbnum 0$. (Type annotations are +omitted because we already checked that the types match.) \paragraph{9) Use a recursive call} The last construction is to call a function recursively within its own definition. This construction was not shown in Section~\ref{subsec:Short-notation-for-eight-code-constructions} because the constructive propositional logic (which was the main focus -in that chapter) cannot represent a recursively defined value. However, +of that chapter) cannot represent recursively defined values. However, this limitation of propositional logic means only that we do not have -an algorithm for \emph{automatic} derivation of recursive code. (Similarly, -no algorithm can automatically derive code that involves type constructors -with known methods.) Nevertheless, those derivations can be performed -by hand. +an algorithm for \emph{automatic} derivation of recursive code. Those +derivations can be performed by hand. Recursive code is used often, and we need to get some experience reasoning about it. 
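For example, the familiar \lstinline!map! function for \lstinline!List! may be written as fully parametric code with an explicit recursive call (a simple illustration of this construction):
\begin{lstlisting}
def map[A, B](f: A => B): List[A] => List[B] = {
  case Nil          => Nil
  case head :: tail => f(head) :: map(f)(tail)  // Recursive call.
}
\end{lstlisting}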
In derivations, this book denotes recursive calls by an @@ -464,11 +470,12 @@ \subsection{Function composition and the pipe notation} \] A useful tool for calculations is the \textbf{pipe}\index{pipe notation}\index{\$@$\triangleright$-notation!see \textsf{``}pipe notation\textsf{''}} -operation, $x\triangleright f$, which places the argument ($x$) -to the \emph{left} of a function ($f$). It is then natural to apply -further functions at \emph{right}, for example $(x\triangleright f)\triangleright g$ -meaning $g(f(x))$. In Scala, methods such as \lstinline!map! and -\lstinline!filter! are often combined in this way: +operation, $x\triangleright f$, which means just the same as $f(x)$ +but places the argument ($x$) to the \emph{left} of a function ($f$). +It is then natural to apply further functions at \emph{right}, for +example, $x\triangleright f\triangleright g$ meaning $g(f(x))$. +In Scala, methods such as \lstinline!map! and \lstinline!filter! +are often combined in this way: \begin{lstlisting} x.map(f).filter(p) \end{lstlisting} @@ -477,7 +484,7 @@ \subsection{Function composition and the pipe notation} x\triangleright\text{fmap}\,(f)\triangleright\text{filt}\,(p)\quad. \] -To enable this common usage, the $\triangleright$ operation is defined +To enable this common usage, the operation $\triangleright$ is defined to group towards the left. So, the parentheses in $(x\triangleright f)\triangleright g=x\triangleright f\triangleright g$ are not needed.\index{pipe notation!operator precedence} @@ -489,7 +496,7 @@ \subsection{Function composition and the pipe notation} Such formulas are needed often, so we introduce the convention that the pipe operation ($\triangleright$) groups weaker than the composition operation ($\bef$).\index{pipe notation!operator precedence} We -can then omit more parentheses: $x\triangleright(f\bef g)=x\triangleright f\bef g$. +can then omit the parentheses: $x\triangleright(f\bef g)=x\triangleright f\bef g$. Another common simplification occurs with function compositions of the form: @@ -522,7 +529,7 @@ \subsection{Function composition and the pipe notation} \begin{align*} & \text{compute}\bef\text{get}=\,\begin{array}{|c||cc|} & \bbnum 1 & \text{Int}\\ -\hline \bbnum 1 & \bbnum 0 & 1\rightarrow100\\ +\hline \bbnum 1 & \bbnum 0 & \_\rightarrow100\\ \text{Int} & \bbnum 0 & x\rightarrow\frac{x}{2} \end{array}\,\bef\,\begin{array}{|c||c|} & \text{Int}\\ @@ -531,18 +538,19 @@ \subsection{Function composition and the pipe notation} \end{array}\\ & \quad=\,\begin{array}{|c||c|} & \text{Int}\\ -\hline \bbnum 1 & (1\rightarrow100)\bef\text{id}\\ +\hline \bbnum 1 & (\_\rightarrow100)\bef\text{id}\\ \text{Int} & (x\rightarrow\frac{x}{2})\bef\text{id} -\end{array}=\,\begin{array}{|c||c|} +\end{array}\,=\,\begin{array}{|c||c|} & \text{Int}\\ -\hline \bbnum 1 & 1\rightarrow100\\ +\hline \bbnum 1 & \_\rightarrow100\\ \text{Int} & x\rightarrow\frac{x}{2} \end{array}\quad. \end{align*} -In this computation, we used the composition ($\bef$) instead of -the \textsf{``}multiplication\textsf{''} of matrix elements. +In such computations, we use the standard rules of matrix multiplication +but apply the composition ($\bef$) instead of the multiplication +of matrix elements. -Why does the rule for matrix multiplication work for function compositions? +Why do the matrix multiplication rules work for function compositions? The reason is the equivalence $x\triangleright f\triangleright g=x\triangleright f\bef g$. 
We have defined the matrix form of functions to work with the \textsf{``}row-vector\textsf{''} form of disjunctive types, i.e., for the computation $x\triangleright f$ @@ -552,13 +560,12 @@ \subsection{Function composition and the pipe notation} The standard rules of matrix multiplication make it associative. So, the result of $x\triangleright f\triangleright g$ is the same as the result of piping $x$ into the matrix product of $f$ and $g$. -Therefore, the matrix product of $f$ and $g$ must yield the function +Therefore, the matrix product of $f$ and $g$ will yield the function $f\bef g$. -A \textsf{``}non-disjunctive\textsf{''} function (i.e., one not taking or returning -disjunctive types) may be written as a $1\times1$ matrix, so its -composition with disjunctive functions can be computed via the same -rules. +A function that does not take or return disjunctive types may be written +as a $1\times1$ matrix, so its composition with disjunctive functions +can be computed via the same rules. \subsection{Functor and contrafunctor liftings} @@ -579,9 +586,9 @@ \subsection{Functor and contrafunctor liftings} lifting. We may also split a lifted composition into a composition of liftings. -The lifting notation helps us recognize that these steps are possible -just by looking at the formula. Of course, we still need to find a -useful sequence of steps in a given derivation or proof. +The lifting notation helps us recognize that those steps are possible +more easily, just by looking at the formula. Of course, we still need +to find a useful sequence of steps in a given derivation or proof. \section{Derivation techniques} @@ -592,11 +599,10 @@ \subsection{Standard functions for working with products} the last two functions are unlikely to be frequently used in practical programming.) -We already saw the definition and the implementation of the functions -$\pi_{1}$ and $\pi_{2}$. +We already saw the definition and the code for the functions $\pi_{1}$ +and $\pi_{2}$. -The \textsf{``}diagonal\textsf{''} function $\Delta$ is a right inverse for $\pi_{1}$ -and $\pi_{2}$: +The \textsf{``}diagonal\textsf{''} function ($\Delta$) is defined by: \begin{lstlisting} def delta[A]: A => (A, A) = { x => (x, x) } \end{lstlisting} @@ -607,32 +613,32 @@ \subsection{Standard functions for working with products} It is clear that extracting any part of a pair \lstinline!delta(x) == (x, x)! will give back the original \lstinline!x!. This property can be written -as an equation or a \textsf{``}law\textsf{''}: +via equations or \textsf{``}laws\textsf{''}: \begin{lstlisting} -delta(x)._1 == x +delta(x)._1 == x and delta(x)._2 == x \end{lstlisting} Code notation: \[ -\pi_{1}(\Delta(x))=x\quad. +\pi_{1}(\Delta(x))=x\quad,\quad\quad\pi_{2}(\Delta(x))=x\quad. \] -We can transform this law into a point-free equation by first using +We can transform these laws into point-free equations. First, use the pipe notation: \[ \pi_{1}(\Delta(x))=(\Delta(x))\triangleright\pi_{1}=x\triangleright\Delta\triangleright\pi_{1}=x\triangleright\Delta\bef\pi_{1}\quad, \] which gives the equation $x\triangleright\Delta\bef\pi_{1}=x=x\triangleright\text{id}$. -Now we omit \textsf{``}$x\,\triangleright$\textsf{''} and obtain a point-free equation: +Then we omit \textsf{``}$x\,\triangleright$\textsf{''} and obtain a point-free equation: \begin{align} {\color{greenunder}\Delta\text{ is a right inverse of }\pi_{1}:}\quad & \Delta\bef\pi_{1}=\text{id}\quad.\label{eq:pair-identity-law-left} \end{align} -The same property holds for $\pi_{2}$. 
+The same property holds for $\pi_{2}$, namely: $\Delta\bef\pi_{2}=\text{id}$. The \index{pair product of functions|textit}\textbf{pair product} operation $f\boxtimes g$ is defined for any functions $f$ and $g$ by: \begin{lstlisting} -def pairProduct[A,B,P,Q](f: A => P, g: B => Q): ((A, B)) => (P, Q) = { +def pairProduct[A, B, P, Q](f: A => P, g: B => Q): ((A, B)) => (P, Q) = { case (a, b) => (f(a), g(b)) } \end{lstlisting} @@ -648,7 +654,7 @@ \subsection{Standard functions for working with products} \end{align} An equivalent way of defining $f\boxtimes g$ is via this Scala code: \begin{lstlisting} -def pairProduct[A,B,P,Q](f: A => P, g: B => Q)(p: (A, B)): (P, Q) = +def pairProduct[A, B, P, Q](f: A => P, g: B => Q)(p: (A, B)): (P, Q) = (f(p._1), g(p._2)) \end{lstlisting} \[ @@ -665,8 +671,8 @@ \subsection{Standard functions for working with products} \subsection{Deriving laws for functions with known implementations\label{subsec:Deriving-laws-for-functions-}} -The task is to prove a given law (an equation) for a function whose -code is known. An example of such an equation is the \index{naturality law!of the function Delta@of the function $\Delta$}naturality +We will often need to prove a given law (an equation) for a function +whose code is known. An example of such an equation is the \index{naturality law!of the function Delta@of the function $\Delta$}naturality law of $\Delta$, which states that for any function $f^{:A\rightarrow B}$ we have: \begin{equation} @@ -682,12 +688,12 @@ \subsection{Deriving laws for functions with known implementations\label{subsec: must take arguments of type $B$ and thus returns a value of type $B\times B$. We see that the left-hand side must be a function of type $A\rightarrow B\times B$. So, the $\Delta$ in the right-hand -side must take arguments of type $A$. It then returns a value of +side must take an argument of type $A$. It then returns a value of type $A\times A$, which is consumed by $f\boxtimes f$. In this way, we see that all types match. We can put the resulting types into a type diagram and write the law with type annotations: \[ -\xymatrix{\xyScaleY{1.6pc}\xyScaleX{4.0pc}A\ar[d]\sb(0.45){f}\ar[r]\sb(0.45){\Delta^{A}} & A\times A\ar[d]\sp(0.45){f\boxtimes f}\\ +\xymatrix{\xyScaleY{1.6pc}\xyScaleX{4.0pc}A\ar[d]\sb(0.45){f}\ar[r]\sp(0.45){\Delta^{A}} & A\times A\ar[d]\sp(0.45){f\boxtimes f}\\ B\ar[r]\sp(0.45){\Delta^{B}} & B\times B } \] @@ -697,11 +703,10 @@ \subsection{Deriving laws for functions with known implementations\label{subsec: \noindent To prove the law, we need to use the known code of the function $\Delta$. We substitute that code into the left-hand side of the -law and into the right-hand side of the law, hoping to transform these +law and into the right-hand side of the law, hoping to transform those two expressions until they are the same. -We will now perform this computation in the Scala syntax and in the -code notation: +We perform this derivation in the Scala syntax and in the code notation: \begin{lstlisting} x.pipe(f andThen delta) == (f(x)).pipe { a => (a, a) } @@ -710,7 +715,6 @@ \subsection{Deriving laws for functions with known implementations\label{subsec: == (x, x).pipe { case (a, b) => (f(a), f(b)) } == (f(x), f(x)) // Right-hand side. 
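// As a quick check, both sides may be evaluated on sample values (this assumes
// `delta` and `pairProduct` as defined above and `pipe` from scala.util.chaining):
val f: Int => Int = _ * 2
assert(10.pipe(f andThen delta[Int]) == 10.pipe(delta[Int] andThen pairProduct(f, f)))  // Both sides are (20, 20).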
\end{lstlisting} -In the code notation: \begin{align*} & x\triangleright f\bef\Delta=f(x)\,\gunderline{\triangleright\,(b}\rightarrow b\times b)=f(x)\times f(x)\quad.\\ & \gunderline{x\triangleright\Delta}\,\bef(f\boxtimes f)=(x\times x)\gunderline{\,\triangleright\,(a\times b}\rightarrow f(a)\times f(b))=f(x)\times f(x)\quad. @@ -823,7 +827,7 @@ \subsection{Working with disjunctive types in matrix notation\label{subsec:Worki With this definition, we can formulate a law of \lstinline!merge! called the \textsf{``}naturality law\index{naturality law!of merge@of \texttt{merge}}\textsf{''}: \[ -\xymatrix{\xyScaleY{1.6pc}\xyScaleX{4.0pc}A+A\ar[d]\sb(0.45){f^{\uparrow E}}\ar[r]\sb(0.55){\text{merge}^{A}} & A\ar[d]\sp(0.45){f}\\ +\xymatrix{\xyScaleY{1.6pc}\xyScaleX{4.0pc}A+A\ar[d]\sb(0.45){f^{\uparrow E}}\ar[r]\sp(0.55){\text{merge}^{A}} & A\ar[d]\sp(0.45){f}\\ B+B\ar[r]\sp(0.55){\text{merge}^{B}} & B } \] @@ -867,7 +871,7 @@ \subsection{Working with disjunctive types in matrix notation\label{subsec:Worki various disjunctive types. As an example, let us verify the \textbf{associativity law}\index{associativity law!of merge@of \texttt{merge}} of \lstinline!merge!: \[ -\xymatrix{\xyScaleY{1.5pc}\xyScaleX{4.5pc}E^{A+A}\ar[d]\sp(0.45){\text{merge}^{\uparrow E}}\ar[r]\sp(0.55){\text{merge}^{A+A}} & A+A\ar[d]\sb(0.5){\text{merge}^{A}}\\ +\xymatrix{\xyScaleY{1.5pc}\xyScaleX{4.5pc}E^{A+A}\ar[d]\sb(0.45){\text{merge}^{\uparrow E}}\ar[r]\sp(0.55){\text{merge}^{A+A}} & A+A\ar[d]\sp(0.45){\text{merge}^{A}}\\ E^{A}\ar[r]\sb(0.55){\text{merge}^{A}} & A } \] @@ -922,8 +926,8 @@ \subsection{Working with disjunctive types in matrix notation\label{subsec:Worki \end{array}\quad. \] We cannot proceed with matrix composition because the dimensions of -the matrices do not match. To compute further, we need to expand the -rows and the columns of the first matrix: +the matrices do not match. We need to expand the rows and the columns +of the first matrix. Then we can finish the proof of the law: \[ \begin{array}{|c||c|} & A+A\\ @@ -951,8 +955,8 @@ \subsection{Working with disjunctive types in matrix notation\label{subsec:Worki A & \text{id} \end{array}\quad. \] -This proves the law and also helps visualize how various types are -transformed by \lstinline!merge!. +The matrix notation helps visualize how various types are transformed +by \lstinline!merge!. In some cases, we cannot fully split the rows or the columns of a matrix. For instance, if we are calculating with an arbitrary function @@ -971,7 +975,7 @@ \subsection{Working with disjunctive types in matrix notation\label{subsec:Worki \] The single column of this matrix remains unsplit. Either that column will remain unsplit throughout the derivation, or additional information -about $f$, $g$, or $h$ will allow us to split the column. +about $f$, $g$, or $h$ will allow us to split that column. 
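To connect these matrix calculations with executable code, here is a quick check of the naturality law of \lstinline!merge! on sample values. (This is only an illustration; the helper \lstinline!fmapE! stands for the lifting to the functor $E^{A}\triangleq A+A$, and possible Scala code for \lstinline!merge! is written out directly.)
\begin{lstlisting}
def merge[A]: Either[A, A] => A = { case Left(a) => a; case Right(a) => a }
def fmapE[A, B](f: A => B): Either[A, A] => Either[B, B] = {
  case Left(a)  => Left(f(a))
  case Right(a) => Right(f(a))
}
val f: Int => Int = _ * 10
assert(merge[Int](fmapE(f)(Right(5))) == f(merge[Int](Right(5))))  // Both sides equal 50.
assert(merge[Int](fmapE(f)(Left(3))) == f(merge[Int](Left(3))))    // Both sides equal 30.
\end{lstlisting}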
Finally, there are two tricks that complement the matrix intuition and may sometimes simplify a disjunctive function.\footnote{These tricks are adapted from Section~2.8 of the book \textsf{``}Program @@ -981,8 +985,8 @@ \subsection{Working with disjunctive types in matrix notation\label{subsec:Worki \paragraph{Ignored arguments} If all rows of the disjunctive function ignore their arguments and -always return the same results, we may collapse all rows into one, -as shown in this example: +if all rows return the same result, we may collapse all the rows into +one, as shown in this example: \begin{lstlisting} def same[A]: Either[A, Option[A]] => Option[A] = { case Left(a) => None @@ -1000,7 +1004,7 @@ \subsection{Working with disjunctive types in matrix notation\label{subsec:Worki \end{array}=\,\begin{array}{|c||cc|} & \bbnum 1 & A\\ \hline A+\bbnum 1+A & \_\rightarrow1 & \bbnum 0 -\end{array}\quad. +\end{array}\,=\_\rightarrow1+\bbnum 0^{:A}\quad. \] There is a more general formula for arbitrary functions $f^{:X\rightarrow C}$ containing this code: @@ -1011,21 +1015,20 @@ \subsection{Working with disjunctive types in matrix notation\label{subsec:Worki B & \_\rightarrow f(x) \end{array}\,=x^{:X}\rightarrow f(x)=f\quad. \end{align*} -In this case, we can completely collapse the matrix, getting an ordinary -(non-disjunctive) function. +The code matrix is replaced by an ordinary (non-disjunctive) function. \paragraph{Simplification of diagonal pair products} Consider the pair product of two disjunctive functions such as $f^{:A+B\rightarrow R}$ and $g^{:P+Q\rightarrow S}$. Computing $f\boxtimes g$ in the matrix notation requires, in general, to split the rows and the columns of -the matrices because the type of $f\boxtimes g$ is: +the matrices because the input type of $f\boxtimes g$ is disjunctive: \begin{align*} f\boxtimes g & :(A+B)\times(P+Q)\rightarrow R\times S\\ & \cong A\times P+A\times Q+B\times P+B\times Q\rightarrow R\times S\quad. \end{align*} -So, the pair product of two $2\times1$ matrices must be written \emph{in -general} as a $4\times1$ matrix: +So, \emph{in general} the pair product $f\boxtimes g$ must be written +as a $4\times1$ matrix: \[ \text{if }f\triangleq\,\begin{array}{|c||c|} & R\\ @@ -1043,8 +1046,7 @@ \subsection{Working with disjunctive types in matrix notation\label{subsec:Worki B\times Q & f_{2}\boxtimes g_{2} \end{array}\quad. \] - -A simplification trick exists when the pair product is composed with +A simplification trick exists when $f\boxtimes g$ is composed with the diagonal function $\Delta$: \[ \Delta\bef(f\boxtimes g)=\Delta^{:A+B\rightarrow(A+B)\times(A+B)}\bef(f^{:A+B\rightarrow R}\boxtimes g^{:A+B\rightarrow S})=p\rightarrow f(p)\times g(p)\quad. @@ -1068,9 +1070,9 @@ \subsection{Working with disjunctive types in matrix notation\label{subsec:Worki B & \Delta\bef(f_{2}\boxtimes g_{2}) \end{array}\quad. \] -The rules of matrix multiplication do not help in deriving this law. -So, we use a more basic approach: show that both sides are equal when -applied to arbitrary values $p$ of type $A+B$: +The rules of matrix multiplication do not help in deriving this law +directly. 
So, we use a more basic approach: show that both sides are +equal when applied to arbitrary values $p$ of type $A+B$: \[ p^{:A+B}\triangleright\Delta\bef(f\boxtimes g)=f(p)\times g(p)\overset{?}{=}p\triangleright\,\begin{array}{|c||c|} & R\times S\\ @@ -1119,11 +1121,11 @@ \subsection{Derivations involving unknown functions with laws} As an example, let us derive the property that $L^{A}\triangleq A\times F^{A}$ is a functor if $F$ is known to be a functor. We are in the situation where we only know that the function $\text{fmap}_{F}$ exists and -satisfies the functor law, but we do not know the code of $\text{fmap}_{F}$. +satisfies the functor laws, but we do not know the code of $\text{fmap}_{F}$. Let us discover the derivation step by step. First, we need to define $\text{fmap}_{L}$. We use the lifting notation -$^{\uparrow F}$ and write, for any $f^{:A\rightarrow B}$: +$^{\uparrow F}$ and write: \begin{lstlisting} def fmap_L[A, B](f: A => B): ((A, F[A])) => (B, F[B]) = { case (a, p) => (f(a), p.map(f)) @@ -1160,10 +1162,10 @@ \subsection{Derivations involving unknown functions with laws} \begin{equation} f^{\uparrow F}\bef g^{\uparrow F}=(f\bef g)^{\uparrow F}\quad.\label{eq:composition-law-F-derivation1} \end{equation} -We could use this law only if we somehow bring $f^{\uparrow F}$ and -$g^{\uparrow F}$ together in the formula. The only way forward is -to compute the function composition of the two functions whose code -we \emph{do} have: +We could make use of this law only if we somehow brought $f^{\uparrow F}$ +and $g^{\uparrow F}$ together in the formula. The only way forward +is to compute the function composition of the two functions whose +code we \emph{do} have: \begin{align*} & \big(a\times p\rightarrow f(a)\times f^{\uparrow F}(p)\big)\bef\big(b\times q\rightarrow g(b)\times g^{\uparrow F}(q)\big)\\ & =a\times p\rightarrow g(f(a))\times g^{\uparrow F}(f^{\uparrow F}(p))\quad. @@ -1200,28 +1202,6 @@ \subsection{Derivations involving unknown functions with laws} \subsection{Exercises\index{exercises}} -\subsubsection{Exercise \label{subsec:Exercise-reasoning-1-4-1}\ref{subsec:Exercise-reasoning-1-4-1}} - -Assume given functors $F$, $G$, $K$, $L$ and a natural transformation -$\phi:F^{A}\rightarrow G^{A}$. - -\textbf{(a)} Prove that $\phi^{\uparrow K}:K^{F^{A}}\rightarrow K^{G^{A}}$ -is also a natural transformation. - -\textbf{(b)} Given another natural transformation $\psi:K^{A}\rightarrow L^{A}$, -prove that the pair product of $\phi$ and $\psi$, that is, $\phi\boxtimes\psi:F^{A}\times K^{A}\rightarrow G^{A}\times L^{A}$, -as well as the pair co-product $\phi\boxplus\psi:F^{A}+K^{A}\rightarrow G^{A}+L^{A}$, -are also natural transformations. The \textbf{pair co-product}\index{pair co-product of functions|textit} -of two functions $\phi$ and $\psi$ is defined by: -\[ -(\phi\boxplus\psi):F^{A}+K^{A}\rightarrow G^{A}+L^{A}\quad,\quad\quad\phi\boxplus\psi\triangleq\,\begin{array}{|c||cc|} - & G^{A} & L^{A}\\ -\hline F^{A} & \phi & \bbnum 0\\ -K^{A} & \bbnum 0 & \psi -\end{array}\quad. -\] - - \subsubsection{Exercise \label{subsec:Exercise-reasoning-1-4}\ref{subsec:Exercise-reasoning-1-4}} Show using matrix calculations that $\text{swap}\bef\text{swap}=\text{id}$, @@ -1249,7 +1229,7 @@ \subsubsection{Exercise \label{subsec:Exercise-reasoning-1-1}\ref{subsec:Exercis \[ f^{\uparrow F}\bef\Delta=\Delta\bef f^{\uparrow L}\quad. \] -Write out all types in this law and draw a type diagram. +Write out all types in that law and draw a type diagram. 
\subsubsection{Exercise \label{subsec:Exercise-reasoning-1-5}\ref{subsec:Exercise-reasoning-1-5}} @@ -1266,11 +1246,11 @@ \subsubsection{Exercise \label{subsec:Exercise-reasoning-1-5}\ref{subsec:Exercis Implement that function and prove that it satisfies the \textsf{``}naturality law\textsf{''}: for any $f^{:A\rightarrow B}$, \[ -(\text{id}\boxtimes f)\bef\text{two}=\text{two}\bef f^{\uparrow E}\quad, +(\text{id}\boxtimes f)\bef\text{two}=\text{two}\bef f^{\uparrow E}\quad. \] -where $E^{A}\triangleq A+A$ is the functor whose lifting $^{\uparrow E}$ +Here $E^{A}\triangleq A+A$ is the functor whose lifting ($...^{\uparrow E}$) was defined in Section~\ref{subsec:Working-with-disjunctive-functions}. -Write out the types in this law and draw a type diagram. +Write out the types in that naturality law and draw a type diagram. \subsubsection{Exercise \label{subsec:Exercise-reasoning-1}\ref{subsec:Exercise-reasoning-1}} @@ -1293,7 +1273,7 @@ \subsubsection{Exercise \label{subsec:Exercise-reasoning-1-2}\ref{subsec:Exercis \subsubsection{Exercise \label{subsec:Exercise-reasoning-1-3}\ref{subsec:Exercise-reasoning-1-3}} -Consider the functor $L^{A}$ defined as: +Consider the functor $L$ defined as: \[ L^{A}\triangleq\text{Int}\times\text{Int}+A\quad. \] diff --git a/sofp-src/tex/sofp-summary.tex b/sofp-src/tex/sofp-summary.tex index cc1d99435..5e32d268c 100644 --- a/sofp-src/tex/sofp-summary.tex +++ b/sofp-src/tex/sofp-summary.tex @@ -437,7 +437,7 @@ \subsubsection{Exercise \label{par:Exercise-additional-13}\ref{par:Exercise-addi If $M^{\bullet}$ is a commutative monad and $W$ is a commutative monoid then the monoid $M^{W}$ is commutative. -\subsubsection{Exercise{*}{*} \label{par:Exercise-additional-14}\ref{par:Exercise-additional-14}} +\subsubsection{Exercise \label{par:Exercise-additional-14}\ref{par:Exercise-additional-14}} Define a type constructor \lstinline!Triang[A]! representing \textsf{``}triangular matrices\textsf{''} with elements of type \lstinline!A!. Example values $t_{1}$, @@ -478,7 +478,7 @@ \subsubsection{Exercise \label{par:Exercise-additional-15}\ref{par:Exercise-addi \end{lstlisting} From that, prove the type equivalence $\forall A.\,((A\rightarrow A)\rightarrow A)\rightarrow A\cong\bbnum 1$. -\subsubsection{Exercise{*}{*} \label{par:Problem-Peirce-law}\ref{par:Problem-Peirce-law}} +\subsubsection{Exercise \label{par:Problem-Peirce-law}\ref{par:Problem-Peirce-law}} Consider the functor $F^{R}$ defined by: \[ @@ -493,8 +493,7 @@ \subsubsection{Exercise{*}{*} \label{par:Problem-Peirce-law}\ref{par:Problem-Pei that the corresponding type $\forall R.\,F^{R}$ should be void, and it is: $\forall R.\,F^{R}=\forall R.\,R=\bbnum 0$. - -\subsubsection{Exercise{*}{*} \label{par:Problem-Peirce-law-1}\ref{par:Problem-Peirce-law-1}} +\subsubsection{Exercise \label{par:Problem-Peirce-law-1}\ref{par:Problem-Peirce-law-1}} Consider the profunctor $F^{R,S}$ defined by: \[ @@ -514,7 +513,7 @@ \subsubsection{Exercise{*}{*} \label{par:Problem-Peirce-law-1}\ref{par:Problem-P Show that $G^{R}\cong R$ and $F^{R,S}\cong R\rightarrow S$. -\subsubsection{Exercise{*} \label{par:Problem-Peirce-law-2}\ref{par:Problem-Peirce-law-2}} +\subsubsection{Exercise \label{par:Problem-Peirce-law-2}\ref{par:Problem-Peirce-law-2}} Prove the following type equivalences (assuming fixed types $P$, $Q$, ...): @@ -546,7 +545,7 @@ \subsubsection{Exercise \label{par:Exercise-additional-16-2}\ref{par:Exercise-ad Show that $F^{R,S}\cong\bbnum 0$ unless we set $R=S=\bbnum 0$, in which case $F^{\bbnum 0,\bbnum 0}\cong\bbnum 1$. 
-\subsubsection{Exercise{*} \label{par:Exercise-additional-16}\ref{par:Exercise-additional-16}} +\subsubsection{Exercise \label{par:Exercise-additional-16}\ref{par:Exercise-additional-16}} Define a monad transformer $T_{\text{Cod}_{F}^{L}}^{M,A}$ for the composed codensity monad (Exercise~\ref{subsec:Exercise-combined-codensity-monad}) @@ -554,7 +553,7 @@ \subsubsection{Exercise{*} \label{par:Exercise-additional-16}\ref{par:Exercise-a arbitrary but fixed monad), $M$ (a foreign monad), and $A$ (the value type). Find out which laws hold for that transformer. -\subsubsection{Exercise{*}{*} \label{par:Exercise-additional-16-1}\ref{par:Exercise-additional-16-1}} +\subsubsection{Exercise \label{par:Exercise-additional-16-1}\ref{par:Exercise-additional-16-1}} Consider the (non-covariant) type constructor $G^{A}\triangleq A\rightarrow A$.\footnote{See \texttt{\href{https://stackoverflow.com/questions/72490608/}{https://stackoverflow.com/questions/72490608/}} for discussion about monads having multiple transformers.} @@ -600,7 +599,7 @@ \subsubsection{Exercise{*}{*} \label{par:Exercise-additional-16-1}\ref{par:Exerc for the \lstinline!List! monad. Show that this transformer is not equivalent to the transformers defined in \textbf{(b)}, \textbf{(c)}. -\subsubsection{Exercise{*}{*} \label{par:Exercise-additional-17}\ref{par:Exercise-additional-17}} +\subsubsection{Exercise \label{par:Exercise-additional-17}\ref{par:Exercise-additional-17}} \textbf{(a)} For any fixed type $Z$, functor $F$ and lawful monad $P$, show that $L^{A}\triangleq F^{A\rightarrow P^{Z}}\rightarrow P^{A}$ @@ -758,17 +757,17 @@ \subsubsection{Problem \label{par:Problem-monads-5-2-1}\ref{par:Problem-monads-5 R\times\text{List}^{R} & h\times t\rightarrow h\oplus_{R}\overline{\text{reduce}}\,(t) \end{array}\quad. \] -We can similarly implement a base runner (\lstinline!brun!) for the -transformer $T_{\text{List}}$ if we restrict its usage to \emph{monoid} -types $R$. The function \lstinline!brun! with the type signature -$M^{L^{R}}\rightarrow M^{R}$ aggregates all elements of the effectful -list into a single value of type $M^{R}$ (which is also a monoid -type): +We can similarly implement a special base runner (\lstinline!brunE!) +for the transformer $T_{\text{List}}$ if we restrict its usage to +\emph{monoid} types $R$. The function \lstinline!brunE! with the +type signature $M^{L^{R}}\rightarrow M^{R}$ aggregates all elements +of the effectful list into a single value of type $M^{R}$ (which +is also a monoid type): \[ -\text{brun}:M^{L^{R}}\rightarrow M^{R}\quad,\quad\quad\text{brun}\triangleq\text{flm}_{M}\bigg(\,\begin{array}{|c||c|} +\text{brunE}:M^{L^{R}}\rightarrow M^{R}\quad,\quad\quad\text{brunE}\triangleq\text{flm}_{M}\bigg(\,\begin{array}{|c||c|} & M^{R}\\ \hline \bbnum 1 & 1\rightarrow\text{pu}_{M}(e_{R})\\ -R\times M^{L^{R}} & h\times t\rightarrow\text{pu}_{M}(h)\oplus_{M}\overline{\text{brun}}\,(t) +R\times M^{L^{R}} & h\times t\rightarrow\text{pu}_{M}(h)\oplus_{M}\overline{\text{brunE}}\,(t) \end{array}\,\bigg)\quad. \] Here, we use the binary operation $\oplus_{M}$ of the monoid $M^{R}$, @@ -777,19 +776,19 @@ \subsubsection{Problem \label{par:Problem-monads-5-2-1}\ref{par:Problem-monads-5 p^{:M^{R}}\oplus_{M}q^{:M^{R}}\triangleq p\triangleright\text{flm}_{M}\big(u^{:R}\rightarrow q\triangleright(v^{:R}\rightarrow u\oplus_{R}v)^{\uparrow M}\big)\quad. \] -\textbf{(a)} Is \lstinline!brun! a monoid morphism $T^{A}\rightarrow A$? +\textbf{(a)} Is \lstinline!brunE! a monoid morphism $T^{A}\rightarrow A$? 
(Note that $T^{A}$ is a monoid since $T$ is a lawful monad.) The -monoid morphism identity law holds for \lstinline!brun!. Does the +monoid morphism identity law holds for \lstinline!brunE!. Does the composition law hold? -\textbf{(b)} Do the monad morphism laws of \lstinline!brun! hold -when restricted to a monoid type $A$? +\textbf{(b)} Do the monad morphism laws of \lstinline!brunE! hold +when restricted to monoid types $A$? \begin{align*} -{\color{greenunder}\text{for all monoid types }A:}\quad & a^{:A}\triangleright\text{pu}_{T}\bef\text{brun}=a^{:A}\triangleright\text{pu}_{M}\quad,\\ -{\color{greenunder}\text{composition law}:}\quad & p^{:T^{T^{A}}}\triangleright\text{ftn}_{T}\bef\text{brun }=p^{:T^{T^{A}}}\triangleright\text{brun}\bef\text{brun}^{\uparrow M}\bef\text{ftn}_{M}\quad. +{\color{greenunder}\text{for all monoid types }A:}\quad & a^{:A}\triangleright\text{pu}_{T}\bef\text{brunE}=a^{:A}\triangleright\text{pu}_{M}\quad,\\ +{\color{greenunder}\text{composition law}:}\quad & p^{:T^{T^{A}}}\triangleright\text{ftn}_{T}\bef\text{brunE }=p^{:T^{T^{A}}}\triangleright\text{brunE}\bef\text{brunE}^{\uparrow M}\bef\text{ftn}_{M}\quad. \end{align*} (If so, Exercise~\ref{subsec:Exercise-traversables-10-1} would show -that \lstinline!brun! is also a \emph{monoid} morphism $M^{L^{A}}\rightarrow M^{A}$.) +that \lstinline!brunE! is also a \emph{monoid} morphism $M^{L^{A}}\rightarrow M^{A}$.) \begin{comment} Failed attempts to verify the composition law: diff --git a/sofp-src/tex/sofp-transformers.tex b/sofp-src/tex/sofp-transformers.tex index b46d1e782..2be999a28 100644 --- a/sofp-src/tex/sofp-transformers.tex +++ b/sofp-src/tex/sofp-transformers.tex @@ -53,11 +53,10 @@ \subsection{Combining monadic effects via functor composition} } } yield tOpt \end{lstlisting} -The type constructor forces us to use pattern matching with nested -functor blocks, since that is the only way of getting access to values -of type \lstinline!A! within \lstinline!Future[Option[A]]!. The -code is repetitive and deeply nested, which makes it hard to read -and to change. +We are forced to use pattern matching with nested functor blocks, +since that is the only way of getting access to values of type \lstinline!A! +within \lstinline!Future[Option[A]]!. The code is repetitive and +deeply nested, which makes it hard to read and to change. The first step towards solving the problem is to rewrite this monadic program as a direct chain of computations depending on the results @@ -275,8 +274,8 @@ \subsubsection{Example \label{subsec:Example-state-monad-composition-fails-with- \subparagraph{Solution} -There is no lawful \lstinline!flatMap! function for the type constructor -\lstinline!Option[State[S, A]]!: +The type constructor \lstinline!Option[State[S, A]]! has no lawful +\lstinline!flatMap! function: \begin{lstlisting} type OpSt[A] = Option[S => (A, S)] // Here, the fixed type S must be already defined. def flatMap[A, B](fa: OpSt[A])(f: A => OpSt[B]): OpSt[B] = fa match { @@ -504,7 +503,7 @@ \subsection{Monad transformers for standard monads\label{subsec:Monad-transforme The first task is to obtain the code of monad methods and lifts for the transformers in Table~\ref{tab:Known-monad-transformers}. -The type constructors for monad transformers are conventionally named +The type constructors for monad transformers are usually named as \lstinline!ReaderT!, \lstinline!EitherT!, etc. 
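Before working through each transformer, it may help to see the general shape of such a definition in Scala. The following sketch is only for illustration (the name \lstinline!OptT! is hypothetical, and the code assumes the \lstinline!cats! library\textsf{'}s \lstinline!Monad! typeclass and syntax, as in the other code examples of this chapter). It layers an \lstinline!Option! effect \emph{inside} a foreign monad \lstinline!M!, which admits a lawful \lstinline!flatMap! even though the opposite nesting \lstinline!Option[State[S, A]]! did not:
\begin{lstlisting}
import cats.Monad
import cats.syntax.all._

final case class OptT[M[_], A](value: M[Option[A]]) {
  def flatMap[B](f: A => OptT[M, B])(implicit m: Monad[M]): OptT[M, B] =
    OptT(value.flatMap {
      case None    => m.pure(None: Option[B])
      case Some(a) => f(a).value
    })
  def map[B](f: A => B)(implicit m: Monad[M]): OptT[M, B] =
    flatMap(a => OptT(m.pure(Some(f(a)): Option[B])))
}
\end{lstlisting}
The transformers considered below follow a similar overall pattern, adapted to the structure of each base monad.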
\paragraph{The \texttt{ReaderT} transformer} @@ -533,8 +532,8 @@ \subsection{Monad transformers for standard monads\label{subsec:Monad-transforme \end{lstlisting} The lifts are written in the code notation as: \begin{align*} -{\color{greenunder}\text{foreign lift}:}\quad & \text{flift}:M^{A}\rightarrow T_{\text{Reader}}^{M,A}\quad, & \text{flift}\,(m^{:M^{A}})\triangleq\_^{:R}\rightarrow m=\text{pu}_{\text{Reader}}(m)\quad,\quad~\\ -{\color{greenunder}\text{base lift}:}\quad & \text{blift}:(R\rightarrow A)\rightarrow T_{\text{Reader}}^{M,A}\quad, & \text{blift}\,(t^{:R\rightarrow A})\triangleq r^{:R}\rightarrow\text{pu}_{M}(t(r))=t\bef\text{pu}_{M}\quad. +{\color{greenunder}\text{foreign lift}:}\quad & \text{flift}:M^{A}\rightarrow T_{\text{Reader}}^{M,A}\quad,\quad\quad\text{flift}\,(m^{:M^{A}})\triangleq\_^{:R}\rightarrow m=\text{pu}_{\text{Reader}}(m)\quad,\\ +{\color{greenunder}\text{base lift}:}\quad & \text{blift}:(R\rightarrow A)\rightarrow T_{\text{Reader}}^{M,A}\quad,\quad\quad\text{blift}\,(t^{:R\rightarrow A})\triangleq t\bef\text{pu}_{M}\quad. \end{align*} We have seen in Section~\ref{subsec:The-Reader-monad} that getting @@ -593,10 +592,11 @@ \subsection{Monad transformers for standard monads\label{subsec:Monad-transforme Now the type is well adapted to using both $M$\textsf{'}s and \lstinline!Writer!\textsf{'}s flatten methods: \begin{align*} -\text{ftn}_{T}\big(t^{:M^{M^{A\times W}\times W}}\big) & =t\triangleright(m^{:M^{A\times W}}\times w^{:W}\rightarrow m\triangleright(p^{:A\times W}\rightarrow p\times w)^{\uparrow M})^{\uparrow M}\triangleright\text{ftn}_{M}\triangleright(\text{ftn}_{\text{Writer}})^{\uparrow M}\quad,\\ -\text{ftn}_{T} & =(m^{:M^{A\times W}}\times w^{:W}\rightarrow m\triangleright(p^{:A\times W}\rightarrow p\times w)^{\uparrow M})^{\uparrow M}\bef\text{ftn}_{M}\bef(\text{ftn}_{\text{Writer}})^{\uparrow M}\\ - & =\text{flm}_{M}(m\times w\rightarrow m\triangleright(p\rightarrow p\times w)^{\uparrow M}\bef\text{ftn}_{\text{Writer}}^{\uparrow M})\\ - & =\text{flm}_{M}(m\times w\rightarrow m\triangleright(a\times w_{2}\rightarrow a\times(w\oplus w_{2})))\quad. + & \text{ftn}_{T}\big(t^{:M^{M^{A\times W}\times W}}\big)\\ + & \quad=t\triangleright(m^{:M^{A\times W}}\times w^{:W}\rightarrow m\triangleright(p^{:A\times W}\rightarrow p\times w)^{\uparrow M})^{\uparrow M}\triangleright\text{ftn}_{M}\triangleright(\text{ftn}_{\text{Writer}})^{\uparrow M}\quad,\\ + & \text{ftn}_{T}=(m^{:M^{A\times W}}\times w^{:W}\rightarrow m\triangleright(p^{:A\times W}\rightarrow p\times w)^{\uparrow M})^{\uparrow M}\bef\text{ftn}_{M}\bef(\text{ftn}_{\text{Writer}})^{\uparrow M}\\ + & \quad=\text{flm}_{M}(m\times w\rightarrow m\triangleright(p\rightarrow p\times w)^{\uparrow M}\bef\text{ftn}_{\text{Writer}}^{\uparrow M})\\ + & \quad=\text{flm}_{M}(m\times w\rightarrow m\triangleright(a\times w_{2}\rightarrow a\times(w\oplus w_{2})))\quad. \end{align*} Translating this formula to Scala, we obtain the code of \lstinline!flatMap!: @@ -685,7 +685,7 @@ \subsection{Monad transformers for standard monads\label{subsec:Monad-transforme def flatMap[B](f: A => ListT[M, B]): ListT[M, B] = ListT( t.flatMap { // Here we need a function of type Option[(A, ListT)] => M[Option[(A, ListT)]]. case None => Monad[M].pure(None) - case Some((head, tail)) => comb(f(head), tail).value // Type is M[Option[(A, ListT)]]. + case Some((head, tail)) => comb(f(head), tail).value // The `.value` has type M[Option[(A, ListT)]]. 
}) } \end{lstlisting} @@ -697,13 +697,19 @@ \subsection{Monad transformers for standard monads\label{subsec:Monad-transforme \end{lstlisting} The lifts for \lstinline!ListT! are: \begin{lstlisting} -def flift[M[_]: Monad, A]: M[A] => ListT[M, A] = ListT(a => m.flatMap(ListT.pure(a).value)) +def flift[M[_]: Monad, A]: M[A] => ListT[M, A] = { m => + ListT(m.map(a => Some((a, ListT(Monad[M].pure(None)))))) +} def blift[M[_]: Monad, A]: List[A] => ListT[M, A] = - _.foldRight(Monad[M].pure(None)){ case (a, tail) => Monad[M].pure(Some((a, blift(tail)))) } + _.foldRight(Monad[M].pure(None)) { + case (a, tail) => Monad[M].pure(Some((a, blift(tail)))) + } \end{lstlisting} \begin{align*} -{\color{greenunder}\text{foreign lift}:}\quad & \text{flift}:M^{A}\rightarrow T_{\text{List}}^{M,A}\quad,\quad\quad m^{:M^{A}}\triangleright\text{flift}\triangleq m\triangleright(a\rightarrow\bbnum 0+a\times\text{pu}_{M}(1+\bbnum 0))^{\uparrow M}\quad,\\ -{\color{greenunder}\text{base lift}:}\quad & \text{blift}:\text{List}^{A}\rightarrow T_{\text{List}}^{M,A}\quad,\quad\quad\text{blift}\triangleq\,\begin{array}{|c||c|} +{\color{greenunder}\text{foreign lift}:}\quad & \text{flift}:M^{A}\rightarrow T_{\text{List}}^{M,A}\quad,\\ + & \text{flift}\triangleq(a\rightarrow\bbnum 0+a\times\text{pu}_{M}(1+\bbnum 0))^{\uparrow M}\quad,\\ +{\color{greenunder}\text{base lift}:}\quad & \text{blift}:\text{List}^{A}\rightarrow T_{\text{List}}^{M,A}\quad,\\ + & \text{blift}\triangleq\,\begin{array}{|c||c|} & M^{\bbnum 1+A\times T_{\text{List}}^{M,A}}\\ \hline \bbnum 1 & 1\rightarrow\text{pu}_{M}(1+\bbnum 0)\\ A\times\text{List}^{A} & a\times t\rightarrow\text{pu}_{M}(\bbnum 0+a\times\overline{\text{blift}}\,(t)) @@ -739,36 +745,38 @@ \subsection{Monad transformers for standard monads\label{subsec:Monad-transforme of an \textsf{``}infinite\textsf{''} collection that uses the \lstinline!Reader! monad as $M$: \begin{lstlisting} -def ascend(n: Int): ListT[Reader[Int, *], Int] = ListT(Lazy(k => Some((n, ascend(n + k))))) +def ascend(n: Int): ListT[Reader[Int, *], Int] = + ListT(Lazy(k => Some((n, ascend(n + k))))) \end{lstlisting} -Evaluating \lstinline!ascend(n)! gives a well-defined value representing -the sequence $\left[n,n+k,n+2k,...\right]$. The parameter $k$ is +Evaluating \lstinline!ascend(n)! gives a well-defined, finite value, +but that value contains functions evaluating the infinite sequence +$\left[n,n+k,n+2k,...\right]$ step by step. The parameter $k$ is passed via the \lstinline!Reader! monad\textsf{'}s dependency injection. It is clear that \lstinline!runListT! will not terminate when applied -to such a sequence. +to \lstinline!ascend(n)!. This example shows how to use \lstinline!ListT! for creating effectful streams of unbounded length. If we need to run the effects of $M$ -for an unbounded-length stream, the target should be a stream type -(i.e., an on-call or a lazy list) rather than an eagerly evaluated -\lstinline!List! type. +for an unbounded-length stream, the target should be an iterator type +(i.e., an on-call list) rather than an eagerly evaluated \lstinline!List! +type. Another way of running the effects of a \lstinline!ListT[M, A]! is -to assume that \lstinline!A! is a monoid type. For monoid types \lstinline!A!, +to assume that \lstinline!A! is a monoid. For monoid types \lstinline!A!, we can implement the type signature $\text{List}^{A}\rightarrow A$ -as a standard \lstinline!reduce! operation: +via the standard \lstinline!reduce! 
operation: \begin{lstlisting} def reduce[A: Monoid]: List[A] => A = { case Nil => Monoid[A].empty case head :: tail => head |+| reduce(tail) } \end{lstlisting} -Similar code gives a runner \lstinline!brun! that transforms \lstinline!ListT[M, A]! +Similar code gives a runner \lstinline!brunE! that transforms \lstinline!ListT[M, A]! into \lstinline!M[A]! for monoid types \lstinline!A!: \begin{lstlisting} -def brun[M[_]: Monad, A: Monoid](listT: ListT[M, A]): M[A] = listT.value.flatMap { +def brunE[M[_]: Monad, A: Monoid](listT: ListT[M, A]): M[A] = listT.value.flatMap { case None => Monad[M].pure(Monoid[A].empty) - case Some((head, tail)) => Monad[M].pure(head) |+| brun(tail) // Monoid M[A]. + case Some((head, tail)) => Monad[M].pure(head) |+| brunE(tail) // Using the Monoid instance for M[A]. } \end{lstlisting} For this code to work, we need a monoid instance for $M^{A}$ (see @@ -777,9 +785,8 @@ \subsection{Monad transformers for standard monads\label{subsec:Monad-transforme implicit def monoidMA[M[_]: Functor : Monad, A: Monoid]: Monoid[M[A]] = ... \end{lstlisting} However, it remains unknown\footnote{This book does not have a proof. See Problem~\ref{par:Problem-monads-5-2-1} -for more details.} whether this special version of \lstinline!brun! is a lawful monad -morphism $T_{\text{List}}^{M}\leadsto M$ or even just a monoid morphism -$T_{\text{List}}^{M,A}\rightarrow M^{A}$. +for more details.} whether \lstinline!brunE! is a lawful monad morphism $T_{\text{List}}^{M}\leadsto M$ +or even just a monoid morphism $T_{\text{List}}^{M,A}\rightarrow M^{A}$. \paragraph{The \texttt{StateT} transformer} @@ -824,12 +831,13 @@ \subsection{Monad transformers for standard monads\label{subsec:Monad-transforme \] The transformer \lstinline!StateT! does not have a general base runner -of the form $\text{brun}\,(\theta_{\text{State}})$: we cannot convert -an \emph{arbitrary} runner $\theta_{\text{State}}:\text{State}^{S,A}\rightarrow A$ -into a function of type $T_{\text{State}}^{M,A}\rightarrow M^{A}$. -Instead, we need to implement a specialized base runner that converts -$T_{\text{State}}^{M,A}$ into $M^{A}$ by running the \lstinline!State! -monad\textsf{'}s effects while keeping the effects of the foreign monad $M$: +that could be expressed in the form $\text{brun}\,(\theta_{\text{State}})$, +because the type signatures do not allow us to convert an \emph{arbitrary} +runner $\theta_{\text{State}}:\text{State}^{S,A}\rightarrow A$ into +a function of type $T_{\text{State}}^{M,A}\rightarrow M^{A}$. Instead, +we need to implement a specialized base runner that converts $T_{\text{State}}^{M,A}$ +into $M^{A}$ by running the \lstinline!State! monad\textsf{'}s effects while +keeping the effects of the foreign monad $M$: \begin{lstlisting} def brunStateT[M[_]: Functor, S, A](init: S)(t: StateT[M, S, A]): M[A] = t.run(init).map(_._1) \end{lstlisting} @@ -876,9 +884,9 @@ \subsection{Monad transformers for standard monads\label{subsec:Monad-transforme \text{flift}:M^{A}\rightarrow(A\rightarrow M^{R})\rightarrow M^{R}=\text{flm}_{M}\quad. \] However, implementing the \textsf{``}base lift\textsf{''} is impossible: the required -type signature +type signature, \[ -\text{blift}:(\left(A\rightarrow R\right)\rightarrow R)\rightarrow(A\rightarrow M^{R})\rightarrow M^{R} +\text{blift}:(\left(A\rightarrow R\right)\rightarrow R)\rightarrow(A\rightarrow M^{R})\rightarrow M^{R}\quad, \] has no implementation parametric in the monad $M$. 
The reason is that we cannot produce a value of type $R$ out of $M^{R}$ parametrically @@ -1068,20 +1076,20 @@ \subsection{Combining more than two monads: monad stacks\label{subsec:Combining- \begin{centering} \begin{tabular}{|c|c|c|c|c|} \hline -\textbf{\small{}Monad $L^{A}$} & \textbf{\small{}Monad $M^{A}$} & \textbf{\small{}Monad $T_{L}^{M}$} & \textbf{\small{}Monad $T_{M}^{L}$} & \textbf{\small{}Same?}\tabularnewline +\textbf{\small{}Monad $L^{A}$} & \textbf{\small{}Monad $M^{A}$} & \textbf{\small{}Monad $T_{L}^{M,A}$} & \textbf{\small{}Monad $T_{M}^{L,A}$} & \textbf{\small{}Same?}\tabularnewline \hline \hline -{\small{}}\lstinline!Reader[R, A]! & {\small{}}\lstinline!Either[E, A]! & {\small{}$T_{L}^{M,A}=R\rightarrow E+A$} & {\small{}$T_{M}^{L,A}=R\rightarrow E+A$} & {\small{}Yes}\tabularnewline +{\small{}}\lstinline!Reader[R, A]! & {\small{}}\lstinline!Either[E, A]! & {\small{}$R\rightarrow E+A$} & {\small{}$R\rightarrow E+A$} & {\small{}Yes}\tabularnewline \hline -{\small{}}\lstinline!Reader[R, A]! & {\small{}}\lstinline!Writer[W, A]! & {\small{}$T_{L}^{M,A}=R\rightarrow A\times W$} & {\small{}$T_{M}^{L,A}=R\rightarrow A\times W$} & {\small{}Yes}\tabularnewline +{\small{}}\lstinline!Reader[R, A]! & {\small{}}\lstinline!Writer[W, A]! & {\small{}$R\rightarrow A\times W$} & {\small{}$R\rightarrow A\times W$} & {\small{}Yes}\tabularnewline \hline -{\small{}}\lstinline!Reader[R, A]! & {\small{}}\lstinline!List[A]! & {\small{}$T_{L}^{M,A}=R\rightarrow\text{List}^{A}$} & {\small{}$T_{M}^{L,A}\triangleq R\rightarrow\bbnum 1+A\times T_{M}^{L,A}$} & {\small{}No}\tabularnewline +{\small{}}\lstinline!Reader[R, A]! & {\small{}}\lstinline!List[A]! & {\small{}$R\rightarrow\text{List}^{A}$} & {\small{}$R\rightarrow\bbnum 1+A\times T_{M}^{L,A}$} & {\small{}No}\tabularnewline \hline -{\small{}}\lstinline!Reader[R, A]! & {\small{}}\lstinline!State[S, A]! & {\small{}$T_{L}^{M,A}=R\rightarrow S\rightarrow A\times S$} & {\small{}$T_{M}^{L,A}=S\rightarrow R\rightarrow A\times S$} & {\small{}Yes}\tabularnewline +{\small{}}\lstinline!Reader[R, A]! & {\small{}}\lstinline!State[S, A]! & {\small{}$R\rightarrow S\rightarrow A\times S$} & {\small{}$S\rightarrow R\rightarrow A\times S$} & {\small{}Yes}\tabularnewline \hline -{\small{}}\lstinline!Either[E, A]! & {\small{}}\lstinline!State[S, A]! & {\small{}$T_{L}^{M,A}=S\rightarrow(E+A)\times S$} & {\small{}$T_{M}^{L,A}=S\rightarrow E+A\times S$} & {\small{}No}\tabularnewline +{\small{}}\lstinline!Either[E, A]! & {\small{}}\lstinline!State[S, A]! & {\small{}$S\rightarrow(E+A)\times S$} & {\small{}$S\rightarrow E+A\times S$} & {\small{}No}\tabularnewline \hline -{\small{}}\lstinline!Either[E, A]! & {\small{}}\lstinline!Cont[R, A]! & {\small{}$T_{L}^{M,A}=\left(E+A\rightarrow R\right)\rightarrow R$} & {\small{}$T_{M}^{L,A}=\left(A\rightarrow E+R\right)\rightarrow E+R$} & {\small{}No}\tabularnewline +{\small{}}\lstinline!Either[E, A]! & {\small{}}\lstinline!Cont[R, A]! 
& {\small{}$\left(E+A\rightarrow R\right)\rightarrow R$} & {\small{}$\left(A\rightarrow E+R\right)\rightarrow E+R$} & {\small{}No}\tabularnewline \hline \end{tabular} \par\end{centering} @@ -1321,9 +1329,9 @@ \subsubsection{Example \label{subsec:Example-monad-stack}\ref{subsec:Example-mon But side effects often cannot be undone once they are run, because changes in the external world may be irreversible.\footnote{For a discussion, see \texttt{\href{https://stackoverflow.com/questions/11792275/}{https://stackoverflow.com/questions/11792275/}}} -Throughout this chapter, we will build transformers for every exponential-polynomial -monad considered in this book (although some transformers will only -partially satisfy the required laws). +Throughout this chapter, we will build transformers for every monad +considered in this book (although some transformers will only partially +satisfy the required laws). \subsection{A typeclass for monad transformers} @@ -1388,15 +1396,16 @@ \subsection{Lifts and runners for monad stacks} foreign lifts, and the runners of the relevant transformers, such as: \[ -\text{flift}_{K}^{P}:P\leadsto(K\varangle P)\quad,\quad\text{blift}_{L}^{P}:L\leadsto(L\varangle P)\quad,\quad\text{frun}_{M}^{P,Q}(f^{:P\leadsto Q}):M\varangle P\leadsto M\varangle Q\quad,\quad\text{etc.,} -\] -for any monad $P$. Keep in mind that the transformer for $K\varangle L$ -is defined by stacking the transformers for $K$ and $L$. As an example, -let us compute the lifts $P\leadsto K\varangle L\varangle P$ and -$L\varangle M\leadsto L\varangle M\varangle P$: -\[ -\text{flift}_{K\varangle L}^{P}:P\leadsto K\varangle L\varangle P=\text{flift}_{L}^{P}\bef\text{flift}_{K}^{L\varangle P}\quad,\quad\text{blift}_{L\varangle M}^{P}:L\varangle M\leadsto L\varangle M\varangle P=\text{frun}_{L}^{M,M\varangle P}(\text{blift}_{M}^{P})\quad. +\text{flift}_{K}^{P}:P\leadsto(K\varangle P)\quad,\quad\text{blift}_{L}^{P}:L\leadsto(L\varangle P)\quad,\quad\text{frun}_{M}^{P,Q}(f^{:P\leadsto Q}):M\varangle P\leadsto M\varangle Q\quad, \] +and so on, for any monad $P$. Keep in mind that the transformer for +$K\varangle L$ is defined by stacking the transformers for $K$ and +$L$. As an example, let us compute the lifts $P\leadsto K\varangle L\varangle P$ +and $L\varangle M\leadsto L\varangle M\varangle P$: +\begin{align*} + & \text{flift}_{K\varangle L}^{P}:P\leadsto K\varangle L\varangle P=\text{flift}_{L}^{P}\bef\text{flift}_{K}^{L\varangle P}\quad,\\ + & \text{blift}_{L\varangle M}^{P}:L\varangle M\leadsto L\varangle M\varangle P=\text{frun}_{L}^{M,M\varangle P}(\text{blift}_{M}^{P})\quad. +\end{align*} Lifting a monad value from a given monad from the middle of the stack into the full monad stack can require composing several lifts. For @@ -1634,9 +1643,10 @@ \subsection{Constructing lifts via operation typeclasses (\textquotedblleft MTL- can be read or updated; the updated state value is automatically passed on to the next computation. We may define the functions \lstinline!get! and \lstinline!set! that work with the state value: -\[ -\text{get}:\text{State}^{S,S}\quad,\quad\quad\text{get}\triangleq s^{:S}\rightarrow s\times s\quad,\quad\quad\text{set}:S\rightarrow\text{State}^{S,\bbnum 1}\quad,\quad\quad\text{set}\triangleq s^{:S}\rightarrow\_^{:S}\rightarrow1\times s\quad. 
-\] +\begin{align*} + & \text{get}:\text{State}^{S,S}\quad,\quad\quad\text{get}\triangleq s^{:S}\rightarrow s\times s\quad,\\ + & \text{set}:S\rightarrow\text{State}^{S,\bbnum 1}\quad,\quad\quad\text{set}\triangleq s^{:S}\rightarrow\_^{:S}\rightarrow1\times s\quad. +\end{align*} \begin{lstlisting} for { @@ -1647,7 +1657,7 @@ \subsection{Constructing lifts via operation typeclasses (\textquotedblleft MTL- \end{lstlisting} A \lstinline!State!-monadic program can then be written as shown -at left, without referring to the type signature $S\rightarrow A\times S$. +above, without referring to the type signature $S\rightarrow A\times S$. The two functions \lstinline!get! and \lstinline!set! appear to be sufficient to manipulate the internal state in an arbitrary way. So, we regard these two functions as the \textsf{``}effectful operations\index{monads!effectful operations}\textsf{''} @@ -1694,9 +1704,9 @@ \subsection{Constructing lifts via operation typeclasses (\textquotedblleft MTL- monad. For instance, the refactored code may not apply pattern matching directly to a monad value of type $E+A$. -The second ingredient of the MTL-style programming involves \textsf{``}lifting\textsf{''} -the monad operations to arbitrary monad stacks. For example, consider -the \lstinline!State! monad with the operations $\text{get}:\text{State}^{S,S}$ +The second ingredient of the MTL-style programming is \textsf{``}lifting\textsf{''} +the monadic operations to arbitrary monad stacks. For example, consider +the \lstinline!State! monad and its operations $\text{get}:\text{State}^{S,S}$ and $\text{set}:S\rightarrow\text{State}^{S,\bbnum 1}$. For any monad stack $P$ containing the monad $\text{State}^{S,\bullet}$, we need to define the \textsf{``}lifted\textsf{''} operations $\text{get}_{P}:P^{S}$ and @@ -1935,9 +1945,10 @@ \subsection{Constructing lifts via operation typeclasses (\textquotedblleft MTL- \] Since the stack type is $P^{A}\triangleq T_{\text{State}}^{\text{Writer},A}=S\rightarrow W\times A\times S$, the code for $\text{clear}_{P}$ is: -\[ -\text{clear}_{P}:\left(S\rightarrow W\times A\times S\right)\rightarrow S\rightarrow W\times A\times S\quad,\quad\quad\text{clear}_{P}\triangleq p^{:S\rightarrow W\times A\times S}\rightarrow p\bef(\text{clear}\boxtimes\text{id}^{S})\quad. -\] +\begin{align*} + & \text{clear}_{P}:\left(S\rightarrow W\times A\times S\right)\rightarrow S\rightarrow W\times A\times S\quad,\\ + & \text{clear}_{P}\triangleq p^{:S\rightarrow W\times A\times S}\rightarrow p\bef(\text{clear}\boxtimes\text{id}^{S})\quad. 
+\end{align*} For the stack $P=\,$\lstinline!Writer!$\varangle$\lstinline!Cont!, or more verbosely: \[ @@ -2023,9 +2034,9 @@ \subsection{Motivation for the laws of lifts} \begin{minipage}[t]{0.48\columnwidth}% \textcolor{darkgray}{\footnotesize{}} \begin{lstlisting} - // Anywhere inside a for/yield: - y <- Monad[L].pure(x).up // Assume x: A - z <- f(y) // f: A => T[B] +// Anywhere inside a for/yield: + y <- Monad[L].pure(x).up // x: A + z <- f(y) // f: A => T[B] \end{lstlisting} % \end{minipage}\texttt{\textcolor{blue}{\footnotesize{}\hspace*{\fill} }}% @@ -2055,7 +2066,7 @@ \subsection{Motivation for the laws of lifts} \begin{minipage}[t]{0.48\columnwidth}% \textcolor{darkgray}{\footnotesize{}} \begin{lstlisting} - // Anywhere inside a for/yield: +// Anywhere inside a for/yield: x <- t // Assume t: T[A] y <- Monad[L].pure(x).up \end{lstlisting} @@ -2088,7 +2099,7 @@ \subsection{Motivation for the laws of lifts} \begin{minipage}[t]{0.48\columnwidth}% \textcolor{darkgray}{\footnotesize{}} \begin{lstlisting} - // Anywhere inside a for/yield: +// Anywhere inside a for/yield: x <- p.up // Assume p: L[A] y <- q(x).up // q: A => L[B] \end{lstlisting} @@ -2230,8 +2241,10 @@ \subsection{Motivation for the laws of runners} the types do not match. Compare the types of \lstinline!brun!, \lstinline!frun!, $\theta_{\text{State}}$, and $\theta_{M}^{\uparrow\text{State}}$: \begin{align*} - & \text{brun}:T_{\text{State}}^{M,A}\rightarrow M^{A}=(S\rightarrow M^{A\times S})\rightarrow M^{A}\quad\quad\text{vs.}\quad\theta_{\text{State}}:\left(S\rightarrow A\times S\right)\rightarrow A\quad,\\ - & \text{frun}:T_{\text{State}}^{M,A}\rightarrow\text{State}^{S,A}=(S\rightarrow M^{A\times S})\rightarrow S\rightarrow A\times S\quad\quad\text{vs.}\quad\theta_{M}^{\uparrow\text{State}}:(S\rightarrow M^{A}\times S)\rightarrow S\rightarrow A\times S\quad. + & \text{brun}:T_{\text{State}}^{M,A}\rightarrow M^{A}=(S\rightarrow M^{A\times S})\rightarrow M^{A}\\ + & \quad\quad\text{vs.}\quad\theta_{\text{State}}:\left(S\rightarrow A\times S\right)\rightarrow A\quad,\\ + & \text{frun}:T_{\text{State}}^{M,A}\rightarrow\text{State}^{S,A}=(S\rightarrow M^{A\times S})\rightarrow S\rightarrow A\times S\\ + & \quad\quad\text{vs.}\quad\theta_{M}^{\uparrow\text{State}}:(S\rightarrow M^{A}\times S)\rightarrow S\rightarrow A\times S\quad. \end{align*} We need to use new runners specially adapted to \lstinline!StateT!, as shown in Section~\ref{subsec:Monad-transformers-for-standard-monads}. 
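To make the comparison of types concrete, here is a minimal Scala sketch (our own encoding
and names, assuming a cats-style \lstinline!Functor! typeclass) of the \lstinline!State!
transformer together with runners whose types do match it:
\begin{lstlisting}
import cats.Functor

// The State transformer in the encoding S => M[(A, S)] used above.
type StateT[M[_], S, A] = S => M[(A, S)]

// Base runner: supply an initial state and keep the value inside the foreign monad M.
def brunStateT[M[_]: Functor, S, A](init: S)(t: StateT[M, S, A]): M[A] =
  Functor[M].map(t(init))(_._1)

// Foreign runner: apply a runner of M under the transformer, obtaining a plain State value.
def frunStateT[M[_], S, A](theta: M[(A, S)] => (A, S))(t: StateT[M, S, A]): S => (A, S) =
  s => theta(t(s))
\end{lstlisting}
In this sketch, \lstinline!brunStateT! needs an initial state value and \lstinline!frunStateT!
needs a runner of the foreign monad $M$.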
@@ -2247,7 +2260,8 @@ \subsection{Motivation for the laws of runners} \begin{align*} {\color{greenunder}\text{left-hand side}:}\quad & t\triangleright\text{frun}_{\text{State}}(\theta_{M})\bef\theta_{\text{State}}(i)=t\triangleright(t\rightarrow t\bef\theta_{M})\bef(t\rightarrow i\triangleright t\triangleright\pi_{1})\\ {\color{greenunder}\text{compute composition}:}\quad & \quad=i\triangleright(t\bef\theta_{M})\triangleright\pi_{1}=i\triangleright t\bef\theta_{M}\bef\pi_{1}\quad,\\ -{\color{greenunder}\text{left-hand side}:}\quad & t\triangleright\text{brun}_{\text{State}}(i)\bef\theta_{M}=t\triangleright(t\rightarrow i\triangleright t\triangleright\pi_{1}^{\uparrow M})\triangleright\theta_{M}=i\triangleright t\bef\gunderline{\pi_{1}^{\uparrow M}\bef\theta_{M}}\\ +{\color{greenunder}\text{left-hand side}:}\quad & t\triangleright\text{brun}_{\text{State}}(i)\bef\theta_{M}=t\triangleright(t\rightarrow i\triangleright t\triangleright\pi_{1}^{\uparrow M})\triangleright\theta_{M}\\ + & \quad=i\triangleright t\bef\gunderline{\pi_{1}^{\uparrow M}\bef\theta_{M}}\\ {\color{greenunder}\text{naturality law of }\theta_{M}:}\quad & \quad=i\triangleright t\bef\theta_{M}\bef\pi_{1}\quad. \end{align*} Both sides are now equal, which proves the commutativity of runners @@ -2362,9 +2376,10 @@ \subsection{Motivation for the laws of runners} must be created from a value $a^{:A}$ via $\text{pu}_{L}$. The only way to obtain a value $a^{:A}$ is by applying a runner $\theta_{M}$ to the value $m:M^{A}$. So, we motivate a \textsf{``}purity\textsf{''} law: -\[ -m\triangleright\text{flift}_{L}\triangleright\text{frun}_{L}(\theta_{M})=m\triangleright\theta_{M}\triangleright\text{pu}_{L}\quad,\quad\text{or equivalently:}\quad\quad\text{flift}_{L}\bef\text{frun}_{L}(\theta_{M})=\theta_{M}\bef\text{pu}_{L}\quad. -\] +\begin{align*} + & m\triangleright\text{flift}_{L}\triangleright\text{frun}_{L}(\theta_{M})=m\triangleright\theta_{M}\triangleright\text{pu}_{L}\quad,\\ +{\color{greenunder}\text{or equivalently}:}\quad & \text{flift}_{L}\bef\text{frun}_{L}(\theta_{M})=\theta_{M}\bef\text{pu}_{L}\quad. +\end{align*} An analogous purity law\index{purity laws!of monad transformer runners} for \lstinline!blift! and \lstinline!brun! is: \[ @@ -2599,9 +2614,10 @@ \subsection{Simplifying the laws of lifts and runners via category theory\label{ The definition of $T$ already specifies functions \lstinline!flift!, \lstinline!frun!, and \lstinline!brun! as the natural transformations: -\[ -\text{flift}_{T}^{M}:\text{Id}^{M}\leadsto T^{M}\quad,\quad\quad\text{brun}_{T}^{M}:T^{M}\leadsto\text{Id}^{M}\quad,\quad\quad\text{frun}_{T}^{M,N}:(M\leadsto N)\rightarrow T^{M}\leadsto T^{N}\quad. -\] +\begin{align*} + & \text{flift}_{T}^{M}:\text{Id}^{M}\leadsto T^{M}\quad,\quad\quad\text{brun}_{T}^{M}:T^{M}\leadsto\text{Id}^{M}\quad,\\ + & \text{frun}_{T}^{M,N}:(M\leadsto N)\rightarrow T^{M}\leadsto T^{N}\quad. +\end{align*} The functor laws and the monadic naturality laws for those transformations will then enforce the 18 laws of monad transformers. In this way, a single definition expresses all monad transformer laws. @@ -2846,14 +2862,15 @@ \subsection{Examples of failure to define a general monad transformer\label{subs we will obtain Eq.~(\ref{eq:definition-of-monoidal-convolution}). 
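For readers who prefer code, one possible Scala encoding of the Day convolution is sketched
below (the trait and field names are ours); the existential type parameters $P$ and $Q$
become abstract type members:
\begin{lstlisting}
// (L Day M)[A]  =  exists P, Q. ((P, Q) => A, L[P], M[Q])
trait Day[L[_], M[_], A] {
  type P
  type Q
  def f: (P, Q) => A   // How to combine the two wrapped values into an A.
  def lp: L[P]         // A value wrapped by the first functor.
  def mq: M[Q]         // A value wrapped by the second functor.
}
\end{lstlisting}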
The Day convolution is associative\footnote{\texttt{\href{https://en.wikipedia.org/wiki/Day_convolution}{https://en.wikipedia.org/wiki/Day\_convolution}}} -and always produces a functor since Eq.~(\ref{eq:definition-of-monoidal-convolution}) +and always produces a new functor out of two functors, since Eq.~(\ref{eq:definition-of-monoidal-convolution}) is covariant in $A$. An example where the Day convolution fails to produce a monad transformer is $L^{A}\triangleq1+A$ and $M^{A}\triangleq R\rightarrow A$. We can compute the functor $L\star M$ explicitly: \begin{align*} & \left(L\star M\right)^{A}\\ {\color{greenunder}\text{definitions of }L,M,\star:}\quad & \cong\exists P.\,\exists Q.\,\gunderline{\left(P\times Q\rightarrow A\right)}\times\left(\bbnum 1+P\right)\times\left(R\rightarrow Q\right)\\ -{\color{greenunder}\text{curry the arguments, move a quantifier}:}\quad & \cong\exists P.\left(\bbnum 1+P\right)\times\gunderline{\exists Q.\left(Q\rightarrow P\rightarrow A\right)\times\left(R\rightarrow Q\right)}\\ + & \text{curry the arguments, move a quantifier}:\quad\\ + & \cong\exists P.\left(\bbnum 1+P\right)\times\gunderline{\exists Q.\left(Q\rightarrow P\rightarrow A\right)\times\left(R\rightarrow Q\right)}\\ {\color{greenunder}\text{co-Yoneda identity with }\exists Q:}\quad & \cong\exists P.\left(\bbnum 1+P\right)\times\left(\gunderline{R\rightarrow P}\rightarrow A\right)\\ {\color{greenunder}\text{swap curried arguments}:}\quad & \cong\exists P.\left(\bbnum 1+P\right)\times\left(P\rightarrow R\rightarrow A\right)\\ {\color{greenunder}\text{co-Yoneda identity with }\exists P:}\quad & \cong\bbnum 1+\left(R\rightarrow A\right)\quad. @@ -2994,7 +3011,7 @@ \subsection{Stacking two monads. Proofs\label{subsec:Stacking-two-monads}} $A$). This way of composition is called \textbf{stacking}\index{monad transformers!stacking} the monad transformers. -In Scala code, this \textsf{``}stacking\textsf{''} construction is written as +In Scala code, this \textsf{``}stacking\textsf{''} construction is written as: \begin{lstlisting} type RT[M, A] = PT[QT[M, *], A] \end{lstlisting} @@ -3164,7 +3181,8 @@ \subsection{Stacking two monads. Proofs\label{subsec:Stacking-two-monads}} The monadic naturality of $\text{brun}_{R}$ is verified similarly, assuming the same law for $\text{brun}_{P}$ and $\text{brun}_{Q}$: \begin{align*} -{\color{greenunder}\text{expect }\text{brun}_{R}(\theta)\bef\phi:}\quad & \text{frun}_{R}(\phi)\bef\text{brun}_{R}(\theta)=\gunderline{\text{frun}_{P}(\text{frun}_{Q}(\phi))\bef\text{brun}_{P}}\left(\text{blift}_{P}\bef\theta\right)\bef\text{brun}_{Q}\left(\text{flift}_{P}\bef\theta\right)\\ +{\color{greenunder}\text{expect }\text{brun}_{R}(\theta)\bef\phi:}\quad & \text{frun}_{R}(\phi)\bef\text{brun}_{R}(\theta)\\ + & =\gunderline{\text{frun}_{P}(\text{frun}_{Q}(\phi))\bef\text{brun}_{P}}\left(\text{blift}_{P}\bef\theta\right)\bef\text{brun}_{Q}\left(\text{flift}_{P}\bef\theta\right)\\ {\color{greenunder}\text{same law for }\text{brun}_{P}:}\quad & =\text{brun}_{P}\left(\text{blift}_{P}\bef\theta\right)\bef\gunderline{\text{frun}_{Q}(\phi)\bef\text{brun}_{Q}}\left(\text{flift}_{P}\bef\theta\right)\\ {\color{greenunder}\text{same law for }\text{brun}_{Q}:}\quad & =\text{brun}_{P}\left(\text{blift}_{P}\bef\theta\right)\bef\text{brun}_{Q}\left(\text{flift}_{P}\bef\theta\right)\bef\phi=\text{brun}_{R}(\theta)\bef\phi\quad. \end{align*} @@ -3684,7 +3702,7 @@ \subsubsection{Statement \label{subsec:Statement-swap-equivalence-to-flatten}\re function will be equal to the \lstinline!swap! 
function given initially. \textbf{(b)} Given an implementation of $\text{ftn}_{T}$ satisfying -the \textsf{``}compatibility laws\textsf{''}~(\ref{eq:ftn-first-compatibility-law})\textendash (\ref{eq:ftn-second-compatibility-law}), +the \textsf{``}compatibility laws\textsf{''} given in Eqs.~(\ref{eq:ftn-first-compatibility-law})\textendash (\ref{eq:ftn-second-compatibility-law}), we define \lstinline!swap! via Eq.~(\ref{eq:define-swap-via-flatten}) and then substitute that \lstinline!swap! into Eq.~(\ref{eq:define-flatten-via-swap}) to define a new \lstinline!flatten! function. The new \lstinline!flatten! @@ -3995,7 +4013,7 @@ \subsubsection{Statement \label{subsec:Statement-monad-transformer-runner-laws-f must be monad morphisms, i.e., the identity and composition laws must hold: \begin{align*} - & \text{pu}_{L\circ M}\bef\text{frun}\,(\phi)=\text{pu}_{L\circ N}\quad,\quad\quad\text{ftn}_{L\circ M}\bef\text{frun}\,(\phi)=\left(\text{frun}\,(\phi)\right)^{\uparrow M\uparrow L}\bef\text{frun}\,(\phi)\bef\text{ftn}_{L\circ N}\quad,\\ + & \text{pu}_{L\circ M}\bef\text{frun}\,(\phi)=\text{pu}_{L\circ N}\quad,\quad\text{ftn}_{L\circ M}\bef\text{frun}\,(\phi)=\left(\text{frun}\,(\phi)\right)^{\uparrow M\uparrow L}\bef\text{frun}\,(\phi)\bef\text{ftn}_{L\circ N}\quad,\\ & \text{pu}_{L\circ M}\bef\text{brun}\,(\theta)=\text{pu}_{M}\quad,\quad\quad\text{ftn}_{L\circ M}\bef\text{brun}\,(\theta)=\left(\text{brun}\,(\theta)\right)^{\uparrow M\uparrow L}\bef\text{brun}\,(\theta)\bef\text{ftn}_{M}\quad. \end{align*} To derive these laws, we may use the identity and composition laws @@ -4075,9 +4093,10 @@ \subsubsection{Statement \label{subsec:Statement-monad-transformer-runner-laws-f Finally, we need to check the monadic naturality laws of \lstinline!flift! and \lstinline!brun!: -\[ -\phi^{:M\leadsto N}\bef\text{flift}_{L}^{N}=\text{flift}_{L}^{M}\bef(\phi^{:M\leadsto N})^{\uparrow L}\quad,\quad\quad(\phi^{:M\leadsto N})^{\uparrow L}\bef\text{brun}_{L}^{N}(\theta_{L})=\text{brun}_{L}^{M}(\theta_{L})\bef\phi^{:M\leadsto N}\quad. -\] +\begin{align*} + & \phi^{:M\leadsto N}\bef\text{flift}_{L}^{N}=\text{flift}_{L}^{M}\bef(\phi^{:M\leadsto N})^{\uparrow L}\quad,\\ + & (\phi^{:M\leadsto N})^{\uparrow L}\bef\text{brun}_{L}^{N}(\theta_{L})=\text{brun}_{L}^{M}(\theta_{L})\bef\phi^{:M\leadsto N}\quad. +\end{align*} To verify these laws, use the naturality laws of $\text{pu}_{L}$ and $\theta_{L}$: \begin{align*} @@ -4097,7 +4116,7 @@ \subsubsection{Statement \label{subsec:Statement-monad-transformer-runner-laws-f must be monad morphisms, i.e., the identity and composition laws must hold: \begin{align*} - & \text{pu}_{L\circ M}\bef\text{frun}\,(\phi)=\text{pu}_{N\circ M}\quad,\quad\quad\text{ftn}_{L\circ M}\bef\text{frun}\,(\phi)=\left(\text{frun}\,(\phi)\right)^{\uparrow M\uparrow L}\bef\text{frun}\,(\phi)\bef\text{ftn}_{N\circ M}\quad,\\ + & \text{pu}_{L\circ M}\bef\text{frun}\,(\phi)=\text{pu}_{N\circ M}\quad,\quad\text{ftn}_{L\circ M}\bef\text{frun}\,(\phi)=\left(\text{frun}\,(\phi)\right)^{\uparrow M\uparrow L}\bef\text{frun}\,(\phi)\bef\text{ftn}_{N\circ M}\quad,\\ & \text{pu}_{L\circ M}\bef\text{brun}\,(\theta)=\text{pu}_{L}\quad,\quad\quad\text{ftn}_{L\circ M}\bef\text{brun}\,(\theta)=\left(\text{brun}\,(\theta)\right)^{\uparrow M\uparrow L}\bef\text{brun}\,(\theta)\bef\text{ftn}_{L}\quad. \end{align*} The monadic naturality laws of \lstinline!swap! 
with respect to $\phi$ @@ -4153,7 +4172,7 @@ \subsubsection{Statement \label{subsec:Statement-monad-transformer-runner-laws-f {\color{greenunder}\text{definitions of }\text{ftn}_{L\circ M}\text{ and }\text{brun}:}\quad & =\text{sw}_{L,M}^{\uparrow L}\bef\text{ftn}_{L}\bef\gunderline{\text{ftn}_{M}^{\uparrow L}\bef\theta^{\uparrow L}}\\ {\color{greenunder}\text{composition law of }\theta:}\quad & =\text{sw}_{L,M}^{\uparrow L}\bef\gunderline{\text{ftn}_{L}\bef\left(\theta\bef\theta\right)^{\uparrow L}}=\gunderline{\text{sw}_{L,M}^{\uparrow L}\bef\left(\theta\bef\theta\right)^{\uparrow L\uparrow L}}\bef\text{ftn}_{L}\\ {\color{greenunder}\text{functor composition}:}\quad & =\gunderline{(\text{sw}_{L,M}\bef\theta^{\uparrow L})}^{\uparrow L}\bef\theta^{\uparrow L\uparrow L}\bef\text{ftn}_{L}\\ -{\color{greenunder}\text{monadic naturality law of }\text{sw}_{L,M}:}\quad & =\gunderline{\theta^{\uparrow L}}\bef\theta^{\uparrow L\uparrow L}\bef\text{ftn}_{L}\quad. +{\color{greenunder}\text{monadic naturality of }\text{sw}_{L,M}:}\quad & =\gunderline{\theta^{\uparrow L}}\bef\theta^{\uparrow L\uparrow L}\bef\text{ftn}_{L}\quad. \end{align*} The functor laws of \lstinline!frun! are: @@ -4255,7 +4274,7 @@ \section{Composed-inside transformers. Linear monads\label{sec:transformers-line For a linear monad $M$ and any foreign monad $L$, the functor composition $L\circ M$ is a monad. For example, the type constructor for the -\lstinline!OptionT! monad transformer can be written as +\lstinline!OptionT! monad transformer can be written as: \begin{lstlisting} type OptionT[L[_], A] = L[Option[A]] \end{lstlisting} @@ -4348,25 +4367,29 @@ \subsection{The laws of \texttt{swap}. Proofs} We need to show that $\text{pu}_{L}^{\uparrow M}\bef\text{sw}=\text{pu}_{L}$: \begin{align*} - & \text{pu}_{L}^{\uparrow M}\bef\text{sw}=\,\left\Vert \begin{array}{cc} + & \text{pu}_{L}^{\uparrow M}\bef\text{sw}\\ + & =\,\left\Vert \begin{array}{cc} \text{id} & \bbnum 0\\ \bbnum 0 & q\times a\rightarrow q\times\text{pu}_{L}(a) \end{array}\right|\,\bef\,\left\Vert \begin{array}{c} (x^{:P}\rightarrow x+\bbnum 0^{:Q\times A})\bef\text{pu}_{L}\\ q\times l\rightarrow l\triangleright(a^{:A}\rightarrow\bbnum 0^{:P}+q\times a)^{\uparrow L} \end{array}\right|\\ -{\color{greenunder}\text{composition}:}\quad & =\,\,\left\Vert \begin{array}{c} + & \text{composition}:\quad\\ + & =\,\,\left\Vert \begin{array}{c} (x^{:P}\rightarrow x+\bbnum 0^{:Q\times A})\bef\text{pu}_{L}\\ q\times a\rightarrow a\triangleright\gunderline{\text{pu}_{L}\bef(a^{:A}\rightarrow\bbnum 0^{:P}+q\times a)^{\uparrow L}} \end{array}\right|\\ -{\color{greenunder}\text{pu}_{L}\text{\textsf{'}s naturality}:}\quad & =\,\left\Vert \begin{array}{c} + & \text{pu}_{L}\text{\textsf{'}s naturality}:\quad\\ + & =\,\left\Vert \begin{array}{c} (x^{:P}\rightarrow x+\bbnum 0^{:Q\times A})\bef\text{pu}_{L}\\ (q\times a\rightarrow\bbnum 0^{:P}+q\times a)\bef\text{pu}_{L} \end{array}\right|\,=\,\begin{array}{|c||c|} P & x^{:P}\rightarrow x+\bbnum 0^{:Q\times A}\\ Q\times A & q\times a\rightarrow\bbnum 0^{:P}+q\times a \end{array}\,\bef\text{pu}_{L}\\ -{\color{greenunder}\text{matrix notation}:}\quad & =\text{id}\bef\text{pu}_{L}=\text{pu}_{L}\quad. + & \text{matrix notation}:\quad\\ + & =\text{id}\bef\text{pu}_{L}=\text{pu}_{L}\quad. \end{align*} @@ -4374,7 +4397,8 @@ \subsection{The laws of \texttt{swap}. 
Proofs} We need to show that $\text{pu}_{M}\bef\text{sw}=\text{pu}_{M}^{\uparrow L}$: \begin{align*} - & \text{pu}_{M}\bef\text{sw}=\,\left\Vert \begin{array}{cc} + & \text{pu}_{M}\bef\text{sw}\\ + & =\,\left\Vert \begin{array}{cc} \bbnum 0 & l^{:L^{A}}\rightarrow q_{0}\times l\end{array}\right|\,\bef\,\left\Vert \begin{array}{c} (x^{:P}\rightarrow x+\bbnum 0^{:Q\times A})\bef\text{pu}_{L}\\ q\times l\rightarrow l\triangleright(x^{:A}\rightarrow\bbnum 0^{:P}+q\times x)^{\uparrow L} @@ -4418,7 +4442,8 @@ \subsection{The laws of \texttt{swap}. Proofs} {\color{greenunder}\text{definition of }\text{sw}:}\quad & =x\rightarrow(x+\bbnum 0)\triangleright\,\left\Vert \begin{array}{c} (x^{:P}\rightarrow x+\bbnum 0^{:Q\times A})\bef\text{pu}_{L}\\ q\times l\rightarrow l\triangleright(a\rightarrow\bbnum 0^{:P}+q\times a)^{\uparrow L} -\end{array}\right|\,=(x^{:P}\rightarrow x+\bbnum 0^{:Q\times A})\bef\text{pu}_{L}\quad. +\end{array}\right|\\ + & =(x^{:P}\rightarrow x+\bbnum 0^{:Q\times A})\bef\text{pu}_{L}\quad. \end{align*} This equals the upper expression in Eq.~(\ref{eq:l-interchange-derivation1}). Simplify the lower expression: @@ -4449,14 +4474,15 @@ \subsection{The laws of \texttt{swap}. Proofs} The left-hand side is written using the matrices for $\text{ftn}_{M}$ and $\text{sw}$: \begin{align} - & \text{ftn}_{M}\bef\text{sw}=\,\left\Vert \begin{array}{cc} + & \text{ftn}_{M}\bef\text{sw}\nonumber \\ + & =\,\left\Vert \begin{array}{cc} \text{id} & \bbnum 0\\ q\times p\rightarrow p & \bbnum 0\\ \bbnum 0 & q_{1}\times q_{2}\times a\rightarrow\left(q_{1}\oplus q_{2}\right)\times a \end{array}\right|\,\bef\,\left\Vert \begin{array}{c} (x^{:P}\rightarrow x+\bbnum 0)\bef\text{pu}_{L}\\ q\times l\rightarrow l\triangleright(x\rightarrow\bbnum 0+q\times x)^{\uparrow L} -\end{array}\right|\nonumber \\ +\end{array}\right|\\ & =\,\,\left\Vert \begin{array}{c} (x^{:P}\rightarrow x+\bbnum 0)\bef\text{pu}_{L}\\ \left(q\times p\rightarrow p+\bbnum 0\right)\bef\text{pu}_{L}\\ @@ -4482,7 +4508,8 @@ \subsection{The laws of \texttt{swap}. Proofs} \end{align*} Then compute the composition $\text{sw}^{\uparrow M}\bef\text{sw}$: \begin{align*} - & \text{sw}^{\uparrow M}\bef\text{sw}=\,\left\Vert \begin{array}{cc} + & \text{sw}^{\uparrow M}\bef\text{sw}\\ + & =\,\left\Vert \begin{array}{cc} \text{id} & \bbnum 0\\ \bbnum 0 & q\times p\rightarrow q\times\text{pu}_{L}\left(p+\bbnum 0\right)\\ \bbnum 0 & q_{1}\times q_{2}\times l\rightarrow q_{1}\times(l\triangleright(x\rightarrow\bbnum 0+q_{2}\times x)^{\uparrow L}) @@ -4494,9 +4521,7 @@ \subsection{The laws of \texttt{swap}. Proofs} (x^{:P}\rightarrow x+\bbnum 0)\bef\text{pu}_{L}\\ q\times p\rightarrow\left(p+\bbnum 0\right)\triangleright\text{pu}_{L}\bef(x^{:M^{A}}\rightarrow\bbnum 0^{:P}+q\times x)^{\uparrow L}\\ q_{1}\times q_{2}\times l\rightarrow l\triangleright(x^{:M^{A}}\rightarrow\bbnum 0^{:P}+q_{1}\times x)^{\uparrow L}\bef(x\rightarrow\bbnum 0+q_{2}\times x)^{\uparrow L} -\end{array}\right| -\end{align*} -\begin{align*} +\end{array}\right|\\ & =\,\left\Vert \begin{array}{c} (x^{:P}\rightarrow x+\bbnum 0)\bef\text{pu}_{L}\\ q\times p\rightarrow(\bbnum 0^{:P}+q\times\left(p+\bbnum 0\right))\triangleright\text{pu}_{L}\\ @@ -4511,17 +4536,20 @@ \subsection{The laws of \texttt{swap}. 
Proofs} (q\times p\rightarrow\bbnum 0^{:P}+q\times\left(p+\bbnum 0\right))\bef\gunderline{\text{pu}_{L}\bef\text{ftn}_{M}^{\uparrow L}}\\ q_{1}\times q_{2}\times l\rightarrow l\triangleright(x^{:M^{A}}\rightarrow\bbnum 0+q_{1}\times(\bbnum 0+q_{2}\times x))^{\uparrow L}\bef\text{ftn}_{M}^{\uparrow L} \end{array}\right|\\ -{\color{greenunder}\text{naturality law of }\text{pu}_{L}:}\quad & =\,\,\left\Vert \begin{array}{c} + & \text{naturality law of }\text{pu}_{L}:\quad\\ + & =\,\,\left\Vert \begin{array}{c} (x^{:P}\rightarrow x+\bbnum 0)\bef\text{ftn}_{M}\bef\text{pu}_{L}\\ (q\times p\rightarrow\bbnum 0^{:P}+q\times\left(p+\bbnum 0\right))\bef\text{ftn}_{M}\bef\text{pu}_{L}\\ q_{1}\times q_{2}\times l\rightarrow l\triangleright(x^{:M^{A}}\rightarrow\gunderline{\text{ftn}_{M}\left(\bbnum 0+q_{1}\times(\bbnum 0+q_{2}\times x)\right)})^{\uparrow L} \end{array}\right|\\ -{\color{greenunder}\text{compute }\text{ftn}_{M}(...):}\quad & =\,\left\Vert \begin{array}{c} + & \text{compute }\text{ftn}_{M}(...):\quad\\ + & =\,\left\Vert \begin{array}{c} (x^{:P}\rightarrow\text{ftn}_{M}(x+\bbnum 0))\bef\text{pu}_{L}\\ (q\times p\rightarrow\gunderline{\text{ftn}_{M}(\bbnum 0^{:P}+q\times\left(p+\bbnum 0\right))})\bef\text{pu}_{L}\\ q_{1}\times q_{2}\times l\rightarrow l\triangleright(x^{:M^{A}}\rightarrow\bbnum 0+\left(q_{1}\oplus q_{2}\right)\times x)^{\uparrow L} \end{array}\right|\\ -{\color{greenunder}\text{compute }\text{ftn}_{M}(...):}\quad & =\,\,\left\Vert \begin{array}{c} + & \text{compute }\text{ftn}_{M}(...):\quad\\ + & =\,\,\left\Vert \begin{array}{c} (x^{:P}\rightarrow x+\bbnum 0)\bef\text{pu}_{L}\\ (q\times p\rightarrow p+\bbnum 0)\bef\text{pu}_{L}\\ q_{1}\times q_{2}\times l\rightarrow l\triangleright(x^{:M^{A}}\rightarrow\bbnum 0+\left(q_{1}\oplus q_{2}\right)\times x)^{\uparrow L} @@ -4569,14 +4597,16 @@ \subsection{The laws of \texttt{swap}. Proofs} \end{align*} The right-hand side is: \begin{align*} - & \phi^{\uparrow M}\bef\text{sw}_{N,M}=\,\left\Vert \begin{array}{cc} + & \phi^{\uparrow M}\bef\text{sw}_{N,M}\\ + & =\,\left\Vert \begin{array}{cc} \text{id} & \bbnum 0\\ \bbnum 0 & q\times l\rightarrow q\times\left(l\triangleright\phi\right) \end{array}\right|\,\bef\,\left\Vert \begin{array}{c} (x^{:P}\rightarrow x+\bbnum 0^{:Q\times A})\bef\text{pu}_{N}\\ q\times n\rightarrow n\triangleright(a\rightarrow\bbnum 0^{:P}+q\times a)^{\uparrow N} \end{array}\right|\\ -{\color{greenunder}\text{composition}:}\quad & =\,\,\left\Vert \begin{array}{c} + & \text{composition}:\quad\\ + & =\,\,\left\Vert \begin{array}{c} (x^{:P}\rightarrow x+\bbnum 0^{:Q\times A})\bef\text{pu}_{N}\\ q\times l\rightarrow l\triangleright\phi\bef(a\rightarrow\bbnum 0^{:P}+q\times a)^{\uparrow N} \end{array}\right|\quad. @@ -4805,10 +4835,11 @@ \subsubsection{Statement \label{subsec:Statement-composition-rigid-monads}\ref{s \subsubsection{Example \label{subsec:Example-rigid-composition-1}\ref{subsec:Example-rigid-composition-1}} -Consider the functor composition of the \lstinline!Sel! monad $R_{1}^{A}\triangleq\left(A\rightarrow Q\right)\rightarrow A$ -and the \lstinline!Reader! monad $R_{2}^{A}\triangleq Z\rightarrow A$: +Consider the \lstinline!Sel! monad $R_{1}^{A}\triangleq\left(A\rightarrow Q\right)\rightarrow A$ +and the \lstinline!Reader! monad $R_{2}^{A}\triangleq Z\rightarrow A$. +The functor composition of $R_{1}$ and $R_{2}$ is: \[ -P^{A}\triangleq((Z\rightarrow A)\rightarrow Q)\rightarrow Z\rightarrow A\quad. +P^{A}\triangleq R_{1}\circ R_{2}=((Z\rightarrow A)\rightarrow Q)\rightarrow Z\rightarrow A\quad. 
\] It follows from Statement~\ref{subsec:Statement-composition-rigid-monads} that the functor $P$ is a rigid monad; so $P$\textsf{'}s transformer is of @@ -4904,7 +4935,8 @@ \subsubsection{Statement \label{subsec:Statement-choice-monad-definition-of-flm} \begin{align} & \text{sw}_{R,M}:M^{R^{A}}\rightarrow R^{M^{A}}\quad,\quad\quad\text{sw}_{R,M}\triangleq m^{:M^{R^{A}}}\rightarrow q^{:H^{M^{A}}}\rightarrow m\triangleright(r\rightarrow q\triangleright\text{pu}_{M}^{\downarrow H}\bef r)^{\uparrow M}\quad,\label{eq:rigid-monad-short-formula-for-swap}\\ & \text{pu}_{T}:A\rightarrow H^{M^{A}}\rightarrow M^{A}\quad,\quad\quad\text{pu}_{T}\triangleq\text{pu}_{M}\bef\text{pu}_{R}=a^{:A}\rightarrow\_^{:H^{M^{A}}}\rightarrow\text{pu}_{M}(a)\quad,\nonumber \\ - & \text{ftn}_{T}\triangleq t^{:T^{T^{A}}}\rightarrow k^{:H^{M^{A}}}\rightarrow k\triangleright\big(t\triangleright(\text{flm}_{M}(r^{:R^{M^{A}}}\rightarrow r(k)))^{\uparrow R}\big)=\text{flm}_{R}\left(t\rightarrow q\rightarrow t\triangleright\text{flm}_{M}(r\rightarrow r(q))\right)\quad,\nonumber \\ + & \text{ftn}_{T}\triangleq t^{:T^{T^{A}}}\rightarrow k^{:H^{M^{A}}}\rightarrow k\triangleright\big(t\triangleright(\text{flm}_{M}(r^{:R^{M^{A}}}\rightarrow r(k)))^{\uparrow R}\big)\nonumber \\ + & \quad=\text{flm}_{R}\left(t\rightarrow q\rightarrow t\triangleright\text{flm}_{M}(r\rightarrow r(q))\right)\quad,\\ & \text{flm}_{T}(f)=\text{flm}_{R}\big(y\rightarrow q\rightarrow y\triangleright\text{flm}_{M}(x\rightarrow q\triangleright(x\triangleright f))\big)\label{eq:rigid-monad-def-flm-t-via-flm-r}\\ & \quad=t^{:R^{M^{A}}}\rightarrow q^{:H^{M^{B}}}\rightarrow q\triangleright\big(t\triangleright\big(\text{flm}_{M}(x^{:A}\rightarrow q\triangleright(x\triangleright f))\big)^{\uparrow R}\big)\quad.\label{eq:rigid-monad-flm-T-def} \end{align} @@ -4949,9 +4981,10 @@ \subsubsection{Statement \label{subsec:Statement-choice-monad-definition-of-flm} \text{sw}_{R,M}\triangleq m^{:M^{H^{A}\rightarrow A}}\rightarrow q^{:H^{M^{A}}}\rightarrow m\triangleright\big(r^{:H^{A}\rightarrow A}\rightarrow r(q\triangleright\text{pu}_{M}^{\downarrow H})\big)^{\uparrow M}\quad. \] Rewrite the formula in the $\triangleright$-notation to obtain Eq.~(\ref{eq:rigid-monad-short-formula-for-swap}): -\[ -\text{sw}_{R,M}:M^{R^{A}}\rightarrow R^{M^{A}}\quad,\quad\quad\text{sw}_{R,M}\triangleq m^{:M^{R^{A}}}\rightarrow q^{:H^{M^{A}}}\rightarrow m\triangleright(r^{:H^{A}\rightarrow A}\rightarrow q\triangleright\text{pu}_{M}^{\downarrow H}\bef r)^{\uparrow M}\quad. -\] +\begin{align*} + & \text{sw}_{R,M}:M^{R^{A}}\rightarrow R^{M^{A}}\quad,\\ + & \text{sw}_{R,M}\triangleq m^{:M^{R^{A}}}\rightarrow q^{:H^{M^{A}}}\rightarrow m\triangleright(r^{:H^{A}\rightarrow A}\rightarrow q\triangleright\text{pu}_{M}^{\downarrow H}\bef r)^{\uparrow M}\quad. 
+\end{align*} Equivalently, we may write the code of $\text{sw}_{R,M}$ as: \begin{equation} q\triangleright\big(m\triangleright\text{sw}_{R,M}\big)=m\triangleright(r\rightarrow q\triangleright\text{pu}_{M}^{\downarrow H}\bef r)^{\uparrow M}\quad.\label{eq:rigid-monad-choice-swap-short} @@ -4988,7 +5021,8 @@ \subsubsection{Statement \label{subsec:Statement-choice-monad-definition-of-flm} \begin{align*} & f\diamond_{_{R}}g=a^{:A}\rightarrow k^{:H^{C}}\rightarrow a\triangleright\tilde{f}\big(k\triangleright(\tilde{g}(k))^{\downarrow H}\big)\,\gunderline{\bef}\,\tilde{g}(k)\\ {\color{greenunder}\triangleright\text{-notation}:}\quad & =a\rightarrow k\rightarrow a\triangleright\gunderline{\tilde{f}}\big(k\triangleright(\tilde{g}(k))^{\downarrow H}\big)\triangleright\tilde{g}(k)\\ - & =a\rightarrow k\rightarrow f(a)(k\,\gunderline{\triangleright\,(\tilde{g}(k)})^{\downarrow H})\,\gunderline{\triangleright\,\tilde{g}(k)}=a\rightarrow k\rightarrow g\big(f(a)(k\triangleright(b\rightarrow g(b)(k))^{\downarrow H})\big)(k)\quad. + & =a\rightarrow k\rightarrow f(a)(k\,\gunderline{\triangleright\,(\tilde{g}(k)})^{\downarrow H})\,\gunderline{\triangleright\,\tilde{g}(k)}\\ + & =a\rightarrow k\rightarrow g\big(f(a)(k\triangleright(b\rightarrow g(b)(k))^{\downarrow H})\big)(k)\quad. \end{align*} This code can be shortened by defining a helper function $\gamma(g,k)$, also denoted $\gamma_{g,k}$ for brevity: @@ -5161,7 +5195,8 @@ \subsubsection{Statement \label{subsec:Statement-choice-monad-swap}\ref{subsec:S We first apply the left-hand side of the law~(\ref{eq:swap-law-3-formulation-R-M}) to $m$ and $q$: \begin{align*} - & (\text{ftn}_{R}^{\uparrow M}\bef\text{sw})(m)(q)=q\triangleright\big(m\triangleright\text{ftn}_{R}^{\uparrow M}\gunderline{\bef}\text{sw}\big)=q\triangleright\big(m\triangleright\text{ftn}_{R}^{\uparrow M}\triangleright\gunderline{\text{sw}}\big)\\ + & (\text{ftn}_{R}^{\uparrow M}\bef\text{sw})(m)(q)=q\triangleright\big(m\triangleright\text{ftn}_{R}^{\uparrow M}\gunderline{\bef}\text{sw}\big)\\ + & =q\triangleright\big(m\triangleright\text{ftn}_{R}^{\uparrow M}\triangleright\gunderline{\text{sw}}\big)\\ {\color{greenunder}\text{use Eq.~(\ref{eq:rigid-monad-choice-swap-short})}:}\quad & =m\triangleright\gunderline{\text{ftn}_{R}^{\uparrow M}\bef(r\rightarrow q\triangleright\text{pu}_{M}^{\downarrow H}\bef r)^{\uparrow M}}\\ {\color{greenunder}\text{composition law of }M:}\quad & =m\triangleright\big(\text{ftn}_{R}\bef(r\rightarrow q\triangleright\text{pu}_{M}^{\downarrow H}\bef r)\big)^{\uparrow M}\quad. 
\end{align*} @@ -5205,7 +5240,8 @@ \subsubsection{Statement \label{subsec:Statement-choice-monad-swap}\ref{subsec:S & q\triangleright\big(q\triangleright\big(\text{sw}\bef(x\rightarrow q\triangleright x)\big)^{\downarrow H}\triangleright\gunderline{\text{sw}(m)\bef\text{sw}}\big)\\ {\color{greenunder}\text{use Eq.~(\ref{eq:swap-law-3-derivation-1})}:}\quad & =m\triangleright\big(r\rightarrow q\triangleright\text{pu}_{M}^{\downarrow H}\bef(q\triangleright\gunderline{\big(\text{sw}\bef(x\rightarrow q\triangleright x)\big)^{\downarrow H}\triangleright\text{pu}_{M}^{\downarrow H}}\bef r)\big)^{\uparrow M}\\ {\color{greenunder}\text{composition under }^{\downarrow H}:}\quad & =m\triangleright\big(r\rightarrow q\triangleright\text{pu}_{M}^{\downarrow H}\bef(q\triangleright\big(\gunderline{\text{pu}_{M}\bef\text{sw}}\bef(x\rightarrow q\triangleright x)\big)^{\downarrow H}\bef r)\big)^{\uparrow M}\\ -{\color{greenunder}\text{outer identity law of }\text{sw}:}\quad & =m\triangleright\big(r\rightarrow q\triangleright\text{pu}_{M}^{\downarrow H}\bef(q\triangleright\big(\gunderline{\text{pu}_{M}^{\uparrow R}\bef(x\rightarrow q\triangleright x)}\big)^{\downarrow H}\bef r)\big)^{\uparrow M}\\ + & \text{outer identity law of }\text{sw}:\quad\\ + & =m\triangleright\big(r\rightarrow q\triangleright\text{pu}_{M}^{\downarrow H}\bef(q\triangleright\big(\gunderline{\text{pu}_{M}^{\uparrow R}\bef(x\rightarrow q\triangleright x)}\big)^{\downarrow H}\bef r)\big)^{\uparrow M}\\ {\color{greenunder}\text{compute composition}:}\quad & =m\triangleright\big(r\rightarrow q\triangleright\text{pu}_{M}^{\downarrow H}\bef(q\triangleright\big(x\rightarrow q\triangleright\text{pu}_{M}^{\uparrow R}(x)\big)^{\downarrow H}\bef r)\big)^{\uparrow M}\quad. \end{align*} We arrived at the same expression as the left-hand side of the law. 
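For concreteness, the function $\text{sw}_{R,M}$ used in these derivations may be sketched
in Scala (our own encoding, assuming cats-style \lstinline!Monad! and \lstinline!Contravariant!
typeclasses and the representation \lstinline!R[A] = H[A] => A! with a contravariant \lstinline!H!):
\begin{lstlisting}
import cats.{Contravariant, Monad}

// sw : M[R[A]] => R[M[A]]  where  R[A] = H[A] => A.
def swap[M[_], H[_], A](m: M[H[A] => A])(implicit
    M: Monad[M], H: Contravariant[H]): H[M[A]] => M[A] =
  q => M.map(m) { r =>                       // Lift over M: for each r: H[A] => A,
    r(H.contramap(q)((a: A) => M.pure(a)))   // apply r to q contramapped with pure.
  }
\end{lstlisting}
Here the contramapped argument \lstinline!H.contramap(q)(...)! plays the role of
$q\triangleright\text{pu}_{M}^{\downarrow H}$ in the code notation.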
@@ -5225,7 +5261,8 @@ \subsubsection{Statement \label{subsec:Statement-choice-monad-swap}\ref{subsec:S {\color{greenunder}\text{definition of }^{\uparrow R}:}\quad & =q\triangleright\big(\text{ftn}_{M}^{\downarrow H}\gunderline{\bef}(m\triangleright\text{sw}^{\uparrow M}\triangleright\text{sw})\gunderline{\bef}\text{ftn}_{M}\big)\nonumber \\ {\color{greenunder}\triangleright\text{-notation}:}\quad & =\big(\gunderline{q\triangleright\text{ftn}_{M}^{\downarrow H}}\triangleright(\gunderline{m\triangleright\text{sw}^{\uparrow M}}\triangleright\gunderline{\text{sw}})\big)\triangleright\text{ftn}_{M}\nonumber \\ {\color{greenunder}\text{use Eq.~(\ref{eq:rigid-monad-choice-swap-short})}:}\quad & =\big(m\triangleright\gunderline{\text{sw}^{\uparrow M}}\triangleright\big(r\rightarrow q\triangleright\gunderline{\text{ftn}_{M}^{\downarrow H}\triangleright\text{pu}_{M}^{\downarrow H}}\bef r\gunderline{\big)^{\uparrow M}}\big)\triangleright\text{ftn}_{M}\nonumber \\ -{\color{greenunder}\text{composition under }^{\downarrow H}\text{ and }^{\uparrow M}:}\quad & =m\triangleright\big(\text{sw}\bef\big(r\rightarrow q\triangleright(\gunderline{\text{pu}_{M}\bef\text{ftn}_{M}})^{\downarrow H}\bef r\big)\big)^{\uparrow M}\bef\text{ftn}_{M}\nonumber \\ + & \text{composition under }^{\downarrow H}\text{ and }^{\uparrow M}:\quad\nonumber \\ + & =m\triangleright\big(\text{sw}\bef\big(r\rightarrow q\triangleright(\gunderline{\text{pu}_{M}\bef\text{ftn}_{M}})^{\downarrow H}\bef r\big)\big)^{\uparrow M}\bef\text{ftn}_{M}\\ {\color{greenunder}\text{left identity law of }M:}\quad & =m\triangleright\big(\text{sw}\bef(r\rightarrow q\triangleright r)\big)^{\uparrow M}\bef\text{ftn}_{M}\quad.\label{eq:rigid-monad-1-swap-law-4-derivation-5} \end{align} Let us simplify the sub-expression $\text{sw}\bef(r\rightarrow q\triangleright r)$ @@ -5237,7 +5274,8 @@ \subsubsection{Statement \label{subsec:Statement-choice-monad-swap}\ref{subsec:S Substituting this expression into Eq.~(\ref{eq:rigid-monad-1-swap-law-4-derivation-5}), we get: \begin{align*} - & m\triangleright\big(\gunderline{\text{sw}\bef(r\rightarrow q\triangleright r)}\big)^{\uparrow M}\bef\text{ftn}_{M}=m\triangleright\big(r\rightarrow q\triangleright\text{pu}_{M}^{\downarrow H}\bef r\gunderline{\big)^{\uparrow M\uparrow M}\bef\text{ftn}_{M}}\\ + & m\triangleright\big(\gunderline{\text{sw}\bef(r\rightarrow q\triangleright r)}\big)^{\uparrow M}\bef\text{ftn}_{M}\\ + & =m\triangleright\big(r\rightarrow q\triangleright\text{pu}_{M}^{\downarrow H}\bef r\gunderline{\big)^{\uparrow M\uparrow M}\bef\text{ftn}_{M}}\\ {\color{greenunder}\text{naturality law of }\text{ftn}_{M}:}\quad & =m\triangleright\text{ftn}_{M}\bef\big(r\rightarrow q\triangleright\text{pu}_{M}^{\downarrow H}\bef r\big)^{\uparrow M}\quad. 
\end{align*} @@ -5446,7 +5484,8 @@ \subsubsection{Statement \label{subsec:Statement-product-of-monad-transformers}\ The functor laws hold due to the properties of the pair product: \begin{align*} {\color{greenunder}\text{expect to equal }\text{id}:}\quad & \text{frun}_{L}(\text{id})=\text{frun}_{G}(\text{id})\boxtimes\text{frun}_{H}(\text{id})=\text{id}\boxtimes\text{id}=\text{id}\quad,\\ -{\color{greenunder}\text{expect to equal }\text{frun}_{L}(\phi\bef\chi):}\quad & \text{frun}_{L}(\phi)\bef\text{frun}_{L}(\chi)=\big(\gunderline{\text{frun}_{G}(\phi)\bef\text{frun}_{G}(\chi)}\big)\boxtimes\big(\gunderline{\text{frun}_{H}(\phi)\bef\text{frun}_{H}(\chi)}\big)\\ +{\color{greenunder}\text{expect to equal }\text{frun}_{L}(\phi\bef\chi):}\quad & \text{frun}_{L}(\phi)\bef\text{frun}_{L}(\chi)\\ + & \quad=\big(\gunderline{\text{frun}_{G}(\phi)\bef\text{frun}_{G}(\chi)}\big)\boxtimes\big(\gunderline{\text{frun}_{H}(\phi)\bef\text{frun}_{H}(\chi)}\big)\\ {\color{greenunder}\text{functor laws of }\text{frun}_{G},\,\text{frun}_{H}:}\quad & \quad=\text{frun}_{G}(\phi\bef\chi)\boxtimes\text{frun}_{H}(\phi\bef\chi)=\text{frun}_{L}(\phi\bef\chi)\quad. \end{align*} @@ -5459,10 +5498,10 @@ \subsubsection{Statement \label{subsec:Statement-product-of-monad-transformers}\ \[ \text{brun}_{L}:T_{G}^{M}\times T_{H}^{M}\leadsto M\quad,\quad\quad\text{brun}_{L}\triangleq\pi_{1}\bef\text{brun}_{G}\quad. \] -It is a monad morphism because it is a composition of a projection -(Statement~\ref{subsec:Statement-projection-is-monadic-morphism}) -and a monad morphism $\text{brun}_{G}$. Function composition preserves -monad morphisms (Statement~\ref{subsec:Statement-monadic-morphism-composition}). +It is a monad morphism because it is a function composition of two +monad morphisms: $\text{brun}_{G}$ and a projection function $\pi_{1}$ +(which is a monad morphism by Statement~\ref{subsec:Statement-projection-is-monadic-morphism}). +Function composition preserves monad morphisms (Statement~\ref{subsec:Statement-monadic-morphism-composition}). The nondegeneracy law holds because it holds for $\text{brun}_{G}$: \[ @@ -5501,7 +5540,6 @@ \subsubsection{Statement \label{subsec:Statement-product-of-monad-transformers}\ & =\Delta\bef(\gunderline{\text{flift}_{G}\bef\text{frun}_{G}}(\phi))\boxtimes(\gunderline{\text{flift}_{H}\bef\text{frun}_{H}(\phi)})=\Delta\bef(\gunderline{\phi}\bef\text{flift}_{G})\boxtimes(\gunderline{\phi}\bef\text{flift}_{H})\\ & =\gunderline{\Delta\bef(\phi\boxtimes\phi)}\bef(\text{flift}_{G}\boxtimes\text{flift}_{H})=\phi\bef\gunderline{\Delta\bef(\text{flift}_{G}\boxtimes\text{flift}_{H})}=\phi\bef\text{flift}_{L}\quad. 
\end{align*} - To verify the monadic naturality law of an information-losing definition of $\text{brun}_{L}$: \begin{align*} @@ -5617,12 +5655,13 @@ \subsubsection{Statement \label{subsec:Statement-free-pointed-monad-transformer} & M^{A+N^{A}}\\ \hline A & \text{pu}_{M}\bef(a\rightarrow a+\bbnum 0^{:N^{A}})^{\uparrow M}\\ N^{A} & \text{pu}_{M}^{\uparrow N}\bef(t\rightarrow\bbnum 0^{:A}+\text{merge}\,(t))\bef\text{pu}_{M} -\end{array}\,=\,\begin{array}{|c||c|} +\end{array}\\ + & =\,\,\begin{array}{|c||c|} & M^{A+N^{A}}\\ \hline A & (a\rightarrow a+\bbnum 0^{:N^{A}})\bef\text{pu}_{M}\\ N^{A} & (n\rightarrow\bbnum 0^{:A}+\text{merge}\,(n\triangleright\text{pu}_{M}^{\uparrow N}))\bef\text{pu}_{M} \end{array}\\ - & =\,\,\begin{array}{|c||c|} + & =\,\begin{array}{|c||c|} & A+N^{A}\\ \hline A & a\rightarrow a+\bbnum 0^{:N^{A}}\\ N^{A} & n\rightarrow\bbnum 0^{:A}+n\triangleright\text{pu}_{M}^{\uparrow N}\bef\text{merge} @@ -5695,14 +5734,16 @@ \subsubsection{Statement \label{subsec:Statement-free-pointed-monad-transformer} to show that the second rows of the matrices are also equal: \begin{align*} & (t\rightarrow\bbnum 0+\text{merge}\,(t))\bef\text{sw}\overset{?}{=}\text{ftn}_{M}^{\uparrow N}\bef(t\rightarrow\bbnum 0+\text{merge}\,(t))\bef\text{pu}_{M}\quad,\\ -{\color{greenunder}\text{apply both sides to }t:}\quad & (\bbnum 0+t\triangleright\text{merge})\,\gunderline{\triangleright\,\text{sw}}\overset{?}{=}\big(\bbnum 0+t\triangleright\text{ftn}_{M}^{\uparrow N}\triangleright\text{merge}\big)\triangleright\text{pu}_{M}\quad,\\ + & \text{apply both sides to }t:\quad\\ + & (\bbnum 0+t\triangleright\text{merge})\,\gunderline{\triangleright\,\text{sw}}\overset{?}{=}\big(\bbnum 0+t\triangleright\text{ftn}_{M}^{\uparrow N}\triangleright\text{merge}\big)\triangleright\text{pu}_{M}\quad,\\ {\color{greenunder}\text{apply }\text{sw}:}\quad & (\bbnum 0+t\triangleright\text{merge}\triangleright\text{merge})\triangleright\text{pu}_{M}\overset{?}{=}\big(\bbnum 0+t\triangleright\text{ftn}_{M}^{\uparrow N}\triangleright\text{merge}\big)\triangleright\text{pu}_{M}\quad. \end{align*} We will prove the last equality if we show that the function \lstinline!merge! has the following property: -\[ -\text{merge}\bef\text{merge}=\text{ftn}_{M}^{\uparrow N}\bef\text{merge}\quad,\quad\text{or}:\quad\text{flift}_{K}^{\uparrow N}\bef\text{ftn}_{N}\bef\text{flift}_{K}^{\uparrow N}\bef\text{ftn}_{N}=\text{ftn}_{M}^{\uparrow N}\bef\text{flift}_{K}^{\uparrow N}\bef\text{ftn}_{N}\quad. -\] +\begin{align*} + & \text{merge}\bef\text{merge}=\text{ftn}_{M}^{\uparrow N}\bef\text{merge}\quad,\\ +{\color{greenunder}\text{or}:}\quad & \text{flift}_{K}^{\uparrow N}\bef\text{ftn}_{N}\bef\text{flift}_{K}^{\uparrow N}\bef\text{ftn}_{N}=\text{ftn}_{M}^{\uparrow N}\bef\text{flift}_{K}^{\uparrow N}\bef\text{ftn}_{N}\quad. 
+\end{align*} \[ \xymatrix{\xyScaleY{1.8pc}\xyScaleX{3.5pc}N^{M^{M^{A}}}\ar[r]\sp(0.55){\text{merge}^{M^{A}}}\ar[d]\sb(0.45){\text{ftn}_{M}^{\uparrow N}} & N^{M^{A}}\ar[d]\sb(0.45){\text{merge}^{A}}\\ N^{M^{A}}\ar[r]\sp(0.55){\text{merge}^{A}} & N^{A} @@ -5720,8 +5761,10 @@ \subsubsection{Statement \label{subsec:Statement-free-pointed-monad-transformer} \begin{align*} {\color{greenunder}\text{expect to equal }\text{ftn}_{M}^{\uparrow N}\bef\text{merge}:}\quad & \text{merge}\bef\text{merge}=\text{flift}_{K}^{\uparrow N}\bef\gunderline{\text{ftn}_{N}\bef\text{flift}_{K}^{\uparrow N}}\bef\text{ftn}_{N}\\ {\color{greenunder}\text{naturality of }\text{ftn}_{N}:}\quad & =\text{flift}_{K}^{\uparrow N}\bef\text{flift}_{K}^{\uparrow N\uparrow N}\bef\gunderline{\text{ftn}_{N}\bef\text{ftn}_{N}}\\ -{\color{greenunder}\text{associativity law of }\text{ftn}_{N}:}\quad & =\gunderline{\text{flift}_{K}^{\uparrow N}\bef\text{flift}_{K}^{\uparrow N\uparrow N}\bef\text{ftn}_{N}^{\uparrow N}}\bef\text{ftn}_{N}=(\gunderline{\text{flift}_{K}\bef\text{flift}_{K}^{\uparrow N}\bef\text{ftn}_{N}})^{\uparrow N}\bef\text{ftn}_{N}\\ -{\color{greenunder}\text{lifting law~(\ref{eq:free-pointed-transformer-use-lifting-derivation1})}:}\quad & =\gunderline{(\text{ftn}_{M}\bef\text{flift}_{K})^{\uparrow N}}\bef\text{ftn}_{N}=\text{ftn}_{M}^{\uparrow N}\bef\gunderline{\text{flift}_{K}^{\uparrow N}\bef\text{ftn}_{N}}=\text{ftn}_{M}^{\uparrow N}\bef\text{merge}\quad. +{\color{greenunder}\text{associativity law of }\text{ftn}_{N}:}\quad & =\gunderline{\text{flift}_{K}^{\uparrow N}\bef\text{flift}_{K}^{\uparrow N\uparrow N}\bef\text{ftn}_{N}^{\uparrow N}}\bef\text{ftn}_{N}\\ + & =(\gunderline{\text{flift}_{K}\bef\text{flift}_{K}^{\uparrow N}\bef\text{ftn}_{N}})^{\uparrow N}\bef\text{ftn}_{N}\\ +{\color{greenunder}\text{lifting law~(\ref{eq:free-pointed-transformer-use-lifting-derivation1})}:}\quad & =\gunderline{(\text{ftn}_{M}\bef\text{flift}_{K})^{\uparrow N}}\bef\text{ftn}_{N}=\text{ftn}_{M}^{\uparrow N}\bef\gunderline{\text{flift}_{K}^{\uparrow N}\bef\text{ftn}_{N}}\\ + & =\text{ftn}_{M}^{\uparrow N}\bef\text{merge}\quad. \end{align*} This completes the proof of the outer interchange law of \lstinline!swap!. @@ -8871,7 +8914,7 @@ \subsubsection{Statement \label{subsec:Statement-generalized-search-monad}\ref{s such values, the Kleisli composition $\tilde{\diamond}_{_{T}}$ may be simplified: If we know that the functions $f^{:H^{B}\rightarrow A\rightarrow T_{L}^{M,B}}$ and $g^{:H^{C}\rightarrow A\rightarrow T_{L}^{M,C}}$ ignore their -first arguments, we may write +first arguments, we may write: \[ f\,\tilde{\diamond}_{_{T}}g=k^{:H^{C}}\rightarrow f(...)\diamond_{_{T_{L}}}g(...)=(\_^{:H^{C}})\rightarrow f(...)\diamond_{_{T_{L}}}g(...)\quad. \] diff --git a/sofp-src/tex/sofp-traversable.tex b/sofp-src/tex/sofp-traversable.tex index 2aa1d30e4..60107ac8e 100644 --- a/sofp-src/tex/sofp-traversable.tex +++ b/sofp-src/tex/sofp-traversable.tex @@ -14,9 +14,8 @@ \section{Motivation} in that style (\lstinline!map!, \lstinline!filter!, \lstinline!flatMap!, and \lstinline!zip!) and generalized them to many different type constructors. This chapter adopts the same approach to study and generalize -the \lstinline!reduce! method. In this way, we will obtain a complete -understanding of the \lstinline!map!/\lstinline!reduce! programming -style. +the \lstinline!reduce! method. In this way, we will conclude the +study of the \lstinline!map!/\lstinline!reduce! programming style. 
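As a reminder of the standard library's methods that this chapter generalizes, here is a
small example of aggregating a sequence with \lstinline!reduce! and with \lstinline!foldLeft!:
\begin{lstlisting}
scala> Seq(1, 2, 3, 4).reduce(_ + _)
res0: Int = 10

scala> Seq(1, 2, 3, 4).foldLeft(0)(_ + _)  // Same result, with an explicit initial value.
res1: Int = 10
\end{lstlisting}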
\subsection{From \texttt{reduce} and \texttt{foldLeft} to \texttt{foldMap} and \texttt{traverse}\label{subsec:From-reduce-and-foldleft-to-foldmap}} @@ -80,7 +79,7 @@ \subsection{From \texttt{reduce} and \texttt{foldLeft} to \texttt{foldMap} and \[ \text{foldMap}\,(\text{upd})(\text{xs})\left(b_{0}\right)\triangleq\text{foldLeft}\,(\text{xs})\left(b_{0}\right)(\text{upd})\quad. \] -Omitting the argument $b_{0}$, we may visualize the computation performed +Omitting the argument $b_{0}$, we visualize the computation performed by \lstinline!foldMap! as: \[ \text{foldMap}\,(\text{upd})\left(\left[x_{1},x_{2},x_{3}\right]\right)=\text{upd}\left(x_{1}\right)\oplus_{M}\text{upd}\left(x_{2}\right)\oplus_{M}\text{upd}\left(x_{3}\right)\quad. @@ -107,9 +106,9 @@ \subsection{From \texttt{reduce} and \texttt{foldLeft} to \texttt{foldMap} and and \lstinline!foldMap! are in fact equivalent. The type signature of \lstinline!foldMap! suggests that we could -replace \lstinline!Seq[_]! by an arbitrary type constructor \lstinline!L[_]!. -We call \textbf{foldable}\index{foldable functor} a type constructor -\lstinline!L[_]! for which the \lstinline!foldMap! method is defined: +replace \lstinline!Seq! by an arbitrary functor \lstinline!L!. We +call a functor \lstinline!L! \textbf{foldable}\index{foldable functor} +if a \lstinline!foldMap! method is defined: \begin{lstlisting} def foldMap_L[M: Monoid, A](f: A => M): L[A] => M \end{lstlisting} @@ -181,7 +180,8 @@ \subsection{The \texttt{traverse} operation\label{subsec:The-traverse-operation} reduces to that of \lstinline!foldMap!. Before studying the properties of \lstinline!traverse!, we will look -at some examples of its practical use. +at some examples of implementing that function for various type constructors +and using it in practice. \section{Practical use of folding and traversing operations} @@ -195,8 +195,8 @@ \subsubsection{Example \label{subsec:Example-traverse-for-1+a*a}\ref{subsec:Exam Implement \lstinline!traverse! for the type constructor $L$ defined by: -\begin{wrapfigure}{l}{0.5\columnwidth}% -\vspace{-1\baselineskip} +\begin{wrapfigure}{l}{0.4\columnwidth}% +\vspace{-0.5\baselineskip} \begin{lstlisting} type L[A] = Option[(A, A)] \end{lstlisting} @@ -216,7 +216,7 @@ \subsubsection{Example \label{subsec:Example-traverse-for-1+a*a}\ref{subsec:Exam \begin{lstlisting} def trav[A, B, F[_]: Applicative : Functor](f: A => F[B]): Option[(A, A)] => F[Option[(B, B)]] \end{lstlisting} -In the short type notation, this type signature is: +In the type notation, this type signature is written as: \[ \text{trav}_{L}:(A\rightarrow F^{B})\rightarrow(\bbnum 1+A\times A)\rightarrow F^{\bbnum 1+B\times B}\quad. \] @@ -237,7 +237,7 @@ \subsubsection{Example \label{subsec:Example-traverse-for-1+a*a}\ref{subsec:Exam to the required type $F^{\bbnum 1+B\times B}$: \begin{lstlisting} def trav[A, B, F[_]: Applicative : Functor](f: A => F[B]): Option[(A, A)] => F[Option[(B, B)]] = { - case None => Applicative[F].pure(None) // No other choice here. + case None => Applicative[F].pure(None) case Some((a1, a2)) => Applicative[F].zip(f(a1), f(a2)).map { case (b1, b2) => Some((b1, b2)) } } \end{lstlisting} @@ -431,7 +431,7 @@ \subsection{Aggregating tree-like data by folding. Breadth-first traversal\label \end{lstlisting} In the same way, we may implement \lstinline!toList: L[A] => List[A]! -for any foldable functor \lstinline!L[_]!. +for any foldable functor \lstinline!L!. The function \lstinline!toList! captures the requirement that a foldable functor \lstinline!L! 
must have a well-defined way of iterating over @@ -577,7 +577,7 @@ \subsection{Decorating a tree. I. Depth-first traversal\label{subsec:Decorating- \begin{lstlisting} val t2 = Branch(Leaf(8), Branch(Branch(Leaf(3), Leaf(5)), Leaf(4))) -scala> t2.map(x => x + 20) // Assuming a Functor instance for T2[_]. +scala> t2.map(x => x + 20) // Assuming a Functor instance for T2. res0: T2[Int] = Branch(Leaf(28), Branch(Branch(Leaf(23), Leaf(25)), Leaf(24))) \end{lstlisting} This transforms the tree {\tiny{}\Tree[ 8 [ [ 3 5 ] 4 ] ] } into @@ -605,8 +605,8 @@ \subsection{Decorating a tree. I. Depth-first traversal\label{subsec:Decorating- // Assume that we have defined Applicative and Functor instances for St. def computeIndex[A]: A => St[(A, Int)] = ??? // Define the "decoration" function. def zipWithIndexDF[A](tree: T2[A]): T2[(A, Int)] = { - val afterTraverse: St[T2[(A, Int)]] = trav[A, (A, Int), St](computeIndex)(tree) - afterTraverse.run(0)._1 // Run the State monad and get the result value. + val result: St[T2[(A, Int)]] = trav[A, (A, Int), St](computeIndex)(tree) + result.run(0)._1 // Run the State monad and get the result value. } \end{lstlisting} This will be a depth-first traversal if \lstinline!trav! is the function @@ -642,10 +642,10 @@ \subsection{Decorating a tree. II. Breadth-first traversal\label{subsec:Decorati \lstinline!T2[A]! merely needs to enumerate the values of type \lstinline!A! in the breadth-first order, but a \lstinline!traverse! function must return a value of type \lstinline!F[T2[B]]!. This requires us to -\emph{merge the effects} of an arbitrary applicative functor \lstinline!F[_]! +\emph{merge the effects} of an arbitrary applicative functor \lstinline!F! in the breadth-first order, while gathering the values of type \lstinline!B! into a tree structure (\lstinline!T2[B]!) wrapped under \lstinline!F!. -The function \lstinline!toListBFS! shown in the previous section +The function \lstinline!toListBFS! shown in Section~\ref{subsec:Aggregating-tree-like-data-bfs} is not sufficient for that purpose, because the tree structure cannot be reproduced if we only have a list of leaf values. Even the nested list computed by \lstinline!toList2! is not sufficient. We need additional @@ -743,7 +743,7 @@ \subsection{Decorating a tree. II. Breadth-first traversal\label{subsec:Decorati tree \lstinline!t2! whose left and right subtrees are \lstinline!l = !{\tiny{}\Tree[ 8 ]} and \lstinline!r =!{\tiny{}\Tree[ [ 3 5 ] 4 ]}. The descriptors of these subtrees are computed by recursive calls of \lstinline!t2ToTD!, -which (if implemented correctly) should yield the following: +which (if implemented correctly) should give this: \begin{lstlisting} t2ToTD(l) == Last(List(8)) // One leaf at level 0. @@ -887,8 +887,8 @@ \subsection{Decorating a tree. II. Breadth-first traversal\label{subsec:Decorati \end{lstlisting} The next step is to implement \lstinline!traverse! for the type constructor -\lstinline!TD[_]!, which makes \lstinline!TD[_]! into a traversable -functor. We will call its \lstinline!traverse! operation \textsf{``}\lstinline!travTD!\textsf{''} +\lstinline!TD!, which makes \lstinline!TD! into a traversable functor. +We will call its \lstinline!traverse! operation \textsf{``}\lstinline!travTD!\textsf{''} for clarity. Since \lstinline!TD[A]! is essentially a decorated list of lists, we will need \lstinline!List!\textsf{'}s \lstinline!traverse! (Example~\ref{subsec:Example-traversable-seq}), which we here denote by \lstinline!travList!. 
We will also need a @@ -928,8 +928,8 @@ \subsection{Decorating a tree. II. Breadth-first traversal\label{subsec:Decorati tree with the breadth-first traversal index: \begin{lstlisting} def zipWithIndexBF[A](tree: T2[A]): T2[(A, Int)] = { - val afterTraverse: St[T2[(A, Int)]] = travBF[A, (A, Int), St](computeIndex)(tree) - afterTraverse.run(0)._1 // Run the State monad and get the result value. + val result: St[T2[(A, Int)]] = travBF[A, (A, Int), St](computeIndex)(tree) + result.run(0)._1 // Run the State monad and get the result value. } scala> zipWithIndexBF(t2) @@ -969,7 +969,7 @@ \subsection{The \texttt{Traversable} typeclass. Implementing \texttt{scanLeft}} the state updates. This will allow us to implement \lstinline!scanLeft! automatically for every traversable functor. -Assume that \lstinline!L[_]! is a traversable functor whose \lstinline!trav! +Assume that \lstinline!L! is a traversable functor whose \lstinline!trav! function is available: \begin{lstlisting} def trav[A, B, F[_]: Applicative: Functor](f: A => F[B])(la: L[A]): F[L[B]] = ... @@ -1022,13 +1022,13 @@ \subsection{The \texttt{Traversable} typeclass. Implementing \texttt{scanLeft}} \subsection{Tasks that cannot be performed via \texttt{traverse}\label{subsec:Tasks-not-implementable-via-traverse}} The \lstinline!traverse! function is powerful since it can use an -arbitrary applicative functor \lstinline!F[_]!. However, some computations +arbitrary applicative functor \lstinline!F!. However, some computations are still not expressible via \lstinline!traverse! because they require information that \lstinline!traverse! cannot have. We will now look at two examples of this. -The first example is the depth labeling of a tree: each leaf gets -a value equal to its depth. For instance, the tree {\tiny{} \Tree[ 8 [ [ 3 5 ] 4 ] ] } +The first example is the \textsf{``}depth labeling\textsf{''} of a tree: each leaf +gets a value equal to its depth. For instance, the tree {\tiny{} \Tree[ 8 [ [ 3 5 ] 4 ] ] } becomes {\tiny{}}{\tiny{} \Tree[ (8,1) [ [ (3,3) (5,3) ] (4,2) ] ] } after depth labeling. This cannot be implemented via \lstinline!traverse! because it cannot detect nodes that have the same depth in the tree. @@ -1045,8 +1045,7 @@ \subsection{Tasks that cannot be performed via \texttt{traverse}\label{subsec:Ta label certain nodes with the same depth value but other nodes with a different depth value. -Depth labeling can be implemented as a special operation such as \lstinline!zipWithDepth! -for the tree type \lstinline!T2!: +Depth labeling can be implemented as a recursive function \lstinline!zipWithDepth!: \begin{lstlisting} def zipWithDepth[A](initial: Int = 0): T2[A] => T2[(A, Int)] = { case Leaf(a) => Leaf((a, initial)) @@ -1100,7 +1099,7 @@ \subsection{Recursion schemes. I. Folding operations\label{subsec:Recursion-sche \text{T2}^{A}\triangleq A+\text{T2}^{A}\times\text{T2}^{A}\quad. \] This type refers recursively to itself in two places. To express that, -define a bifunctor $S^{A,R}$: +define a bifunctor $S$ like this: \[ S^{A,R}\triangleq A+R\times R\quad. \] @@ -1112,11 +1111,11 @@ \subsection{Recursion schemes. I. Folding operations\label{subsec:Recursion-sche final case class T2[A](run: S[A, T2[A]]) \end{lstlisting} -The bifunctor\index{bifunctor} $S^{\bullet,\bullet}$ is called the -\textbf{recursion scheme}\index{recursion scheme} of the type \lstinline!T2!. -The recursion scheme describes the places where the recursive type -refers to itself in its definition. 
All the recursive uses correspond -to occurrences of the type parameter $R$ in $S^{A,R}$. +The bifunctor\index{bifunctor} $S$ is called the \textbf{recursion +scheme}\index{recursion scheme} of the type \lstinline!T2!. The +recursion scheme describes the places where the recursive type refers +to itself in its definition. All the recursive uses correspond to +occurrences of the type parameter $R$ in $S^{A,R}$. The folding operation \lstinline!foldMap! takes a function $f$ of type $A\rightarrow Z$ as a parameter: @@ -1130,7 +1129,7 @@ \subsection{Recursion schemes. I. Folding operations\label{subsec:Recursion-sche and make the folding operation \textsf{``}location-aware\textsf{''}, we need to change the type signature of $f$. To figure out the new type signature, let us look at the code of \lstinline!printLaTeXSubtree! (short notation -\textsf{``}\lstinline!pls!\textsf{''}) from the previous section: +\textsf{``}$\text{pls}$\textsf{''}) from the previous section: \[ \text{pls}\triangleq\,\begin{array}{|c||c|} & \text{String}\\ @@ -1201,21 +1200,20 @@ \subsection{Recursion schemes. I. Folding operations\label{subsec:Recursion-sche \end{lstlisting} It is important that the function $\text{fold}_{S}$ is parametric -in the recursion scheme $S^{\bullet,\bullet}$ and the result type -$Z$ (which is not required to be a monoid). Different recursion schemes -$S^{\bullet,\bullet}$ may be used to define lists, trees, and other -recursive data types. The same code of $\text{fold}_{S}$ will work -for all those data types, as long as we have the recursion scheme -$S^{\bullet,\bullet}$ and the corresponding lifting function (\lstinline!fmapR! -in the Scala code shown above, or $^{\uparrow S^{A,\bullet}}$ in -the code notation). +in the recursion scheme $S$ and the result type $Z$ (which is not +required to be a monoid). Different recursion schemes $S$ may be +used to define lists, trees, and other recursive data types. The same +code of $\text{fold}_{S}$ will work for all those data types, as +long as we have the recursion scheme $S$ and the corresponding lifting +function (\lstinline!fmapR! in the Scala code shown above, or $^{\uparrow S^{A,\bullet}}$ +in the code notation). To illustrate the general applicability of $\text{fold}_{S}$ to different data types, let us implement a \lstinline!printLaTeX! function for ordinary lists, for non-empty lists, and for rose trees (Section~\ref{subsec:Rose-trees}). Scala\textsf{'}s standard \lstinline!List! type and the corresponding recursion -scheme $S^{A,R}$ may be defined by: +scheme $S$ may be defined by: \[ \text{List}^{A}\triangleq\bbnum 1+A\times\text{List}^{A}\quad,\quad\quad S^{A,R}\triangleq\bbnum 1+A\times R\quad,\quad\quad\text{List}^{A}\triangleq S^{A,\text{List}^{A}}\quad. \] @@ -1231,17 +1229,17 @@ \subsection{Recursion schemes. I. Folding operations\label{subsec:Recursion-sche \[ \text{TreeN}^{A}\triangleq A+\text{NEL}^{\text{TreeN}^{A}}\quad,\quad\quad S^{A,R}\triangleq A+\text{NEL}^{R}\quad,\quad\quad\text{TreeN}^{A}\triangleq S^{A,\text{TreeN}^{A}}\quad. \] -For rose trees, the recursion scheme $S^{A,R}$ is itself a recursively +For rose trees, the recursion scheme $S$ is itself a recursively defined type because it uses the non-empty list (NEL). This is not -a problem: $S^{A,R}$ is still polynomial, which guarantees that any -value of type $S^{A,R}$ contains a finite number of elements of types -$A$ and $R$. So, any value of type \lstinline!TreeN[A]! 
will contain +a problem: $S$ is still polynomial, which guarantees that any value +of type $S^{A,R}$ contains a finite number of elements of types $A$ +and $R$. So, any value of type \lstinline!TreeN[A]! will contain a finite number of values of type $A$, assuring that the folding operation will terminate. To avoid repetitive code, let us define all three data types (\lstinline!List!, \lstinline!NEL!, and \lstinline!TreeN!) at once through a universal -recursive class \lstinline!Fix! that takes the recursion scheme $S^{\bullet,\bullet}$ +recursive class \lstinline!Fix! that takes the recursion scheme $S$ as a type parameter: \begin{lstlisting} type S1[A, R] = Option[(A, R)] // For List. @@ -1322,8 +1320,8 @@ \subsection{Recursion schemes. I. Folding operations\label{subsec:Recursion-sche every value of type $L^{A}$. That is, no values of type $L^{A}$ should cause an infinite loop in $\text{fold}_{S}$. A simple example where $\text{fold}_{S}$ enters an infinite loop is the recursion -scheme $S^{A,R}\triangleq A+(\bbnum 1\rightarrow R)$. Note that $S^{A,R}$ -is non-polynomial due to the function type $\bbnum 1\rightarrow R$, +scheme $S^{A,R}\triangleq A+(\bbnum 1\rightarrow R)$. This $S$ is +non-polynomial due to the function type $\bbnum 1\rightarrow R$, which delays the evaluation of a value of type $R$. This allows us to implement a well-defined, finite value \lstinline!x: L[A]! which refers to itself under the delayed evaluation: @@ -1341,15 +1339,16 @@ \subsection{Recursion schemes. I. Folding operations\label{subsec:Recursion-sche Trying to compute \lstinline!fold(f)(x)! with any \lstinline!f! will result in an infinite loop. -It seems that we need to restrict recursion schemes $S^{A,R}$ to -\emph{polynomial} bifunctors. Such $S^{A,R}$ will define recursive -polynomial functors $L^{A}$ that support no delayed evaluation of -stored values of type $A$. So, any value \lstinline!x! of type $L^{A}$ -will have to contain a finite number of values of type $A$, and \lstinline!fold(f)(x)! -is guaranteed to terminate for any terminating function $f:S^{A,Z}\rightarrow Z$. +It seems that we need to restrict recursion schemes $S$ to \emph{polynomial} +bifunctors. Such $S$ will define recursive polynomial data types +$L^{A}$ that are eager data structures (supporting no delayed evaluation +of stored values of type $A$). So, any value \lstinline!x! of type +$L^{A}$ will have to contain a finite number of values of type $A$, +and \lstinline!fold(f)(x)! is guaranteed to terminate for any terminating +function $f:S^{A,Z}\rightarrow Z$. -Rather than working with the general $\text{fold}_{S}$ function and -redefine all recursive types via \lstinline!Fix!, it is more convenient +Instead of working with the general function $\text{fold}_{S}$ and +defining all recursive types via \lstinline!Fix!, it is more convenient to implement and use specialized versions of $\text{fold}_{S}$ for already defined recursive types. The general implementation of $\text{fold}_{S}$ in Eq.~(\ref{eq:fold-via-recursion-scheme-1}) can be translated @@ -1358,8 +1357,8 @@ \subsection{Recursion schemes. I. Folding operations\label{subsec:Recursion-sche For instance, while the type \lstinline!TreeN[A]! is equivalent to \lstinline!Fix[S3[A, *]]! shown above, it is easier to work with -\lstinline!TreeN[A]!. The specialized version of $\text{fold}_{S}$ -for \lstinline!TreeN[A]! has the type signature: +\lstinline!TreeN!. The specialized version of $\text{fold}_{S}$ +for \lstinline!TreeN! 
has the type signature: \begin{lstlisting} def foldTreeN[A, Z](f: S3[A, Z] => Z): TreeN[A] => Z = ??? \end{lstlisting} @@ -1371,7 +1370,7 @@ \subsection{Recursion schemes. I. Folding operations\label{subsec:Recursion-sche case Branch(ts) => f(Right(ts.map(foldTreeN(f)))) } \end{lstlisting} -Then we can implement \lstinline!printLaTeX! for \lstinline!TreeN[A]! +Then we can implement \lstinline!printLaTeX! for \lstinline!TreeN! like this: \begin{lstlisting} def printLaTeX[A](tree: TreeN[A]): String = "\\Tree" + foldTreeN[A, String](toLaTeX3)(tree) @@ -1380,11 +1379,12 @@ \subsection{Recursion schemes. I. Folding operations\label{subsec:Recursion-sche Another simple example of an aggregation operation that cannot be expressed as a traversal is the task of determining the maximum branching number of a given rose tree. The function \lstinline!foldTreeN! now -allows us to implement that computation: +allows us to implement that: \begin{lstlisting} def maxBranching[A]: TreeN[A] => Int = foldTreeN[A, Int] { case Left(_) => 0 - case Right(nel) => math.max(nel.max, nel.length) // NEL must have `max` and `length` methods. + case Right(nel) => math.max(nel.max, nel.length) +// Assuming that we implemented `max` and `length` methods for NEL. } scala> maxBranching(x3) @@ -1398,7 +1398,7 @@ \subsection{Recursion schemes. II. Unfolding operations} operation is \textsf{``}unfolding\textsf{''}: converting a single value into a collection. By reversing the direction of certain function arrows in the type signature of $\text{fold}_{S}$, we can define a general \textsf{``}unfolding\textsf{''} -method that uses an arbitrary recursion scheme $S^{A,R}$ and an arbitrary +method that uses an arbitrary recursion scheme $S$ and an arbitrary function of type $Z\rightarrow S^{A,Z}$: \begin{equation} \text{unfold}_{S}:(Z\rightarrow S^{A,Z})\rightarrow Z\rightarrow L^{A}\quad,\quad\text{unfold}_{S}(f)\triangleq f\bef\overline{\text{unfold}_{S}(f)}^{\uparrow S^{A,\bullet}}\quad.\label{eq:unfold-via-recursion-scheme} @@ -1407,7 +1407,7 @@ \subsection{Recursion schemes. II. Unfolding operations} for sequences: starting from an initial value, a function is applied repeatedly to compute further elements of the sequence. The \lstinline!unfold! operation generalizes that computation to an arbitrary recursive type -$L^{A}$ whose recursion scheme $S^{A,R}$ is given. +constructor $L$ whose recursion scheme $S$ is given. To get more intuition, we look at some examples using \lstinline!unfold! with lists and binary trees. @@ -1431,7 +1431,7 @@ \subsubsection{Example \label{subsec:Example-unfold-list}\ref{subsec:Example-unf \subparagraph{Solution} -The recursion scheme for \lstinline!List[A]! is $S^{A,R}\triangleq\bbnum 1+A\times R$. +The recursion scheme for \lstinline!List! is $S^{A,R}\triangleq\bbnum 1+A\times R$. Let us specialize the code of \lstinline!unfold! from Eq.~(\ref{eq:unfold-via-recursion-scheme}) to the type \lstinline!List[A]!: \begin{lstlisting} @@ -1457,10 +1457,11 @@ \subsubsection{Example \label{subsec:Example-unfold-list}\ref{subsec:Example-unf $\bbnum 0+a\times z^{\prime}$ with some values $a$ and $z^{\prime}$. The value $a$ must be the new element of the list. The value $z^{\prime}$ will be passed to the next call of $f$. Since the next element must -be twice the previous one, we must have $a=z$ and $z^{\prime}=2*z$. +be twice the previous one, we must have $a=z$ and $z^{\prime}=z*2$. 
So, the code of $f$ is: \begin{lstlisting} -def f(n: Long): Long => Option[(Long, Long)] = { z => if (z >= n) None else Some((z, z * 2)) } +def f(n: Long): Long => Option[(Long, Long)] = + { z => if (z >= n) None else Some((z, z * 2)) } \end{lstlisting} Note that the code of $f$ is \emph{not} recursive, and the value $n$ is captured inside the nameless function returned by $f(n)$. @@ -1533,7 +1534,8 @@ \subsubsection{Example \label{subsec:Example-unfold-tree}\ref{subsec:Example-unf \end{lstlisting} This completes the implementation of \lstinline!fullBinaryTree!. -To test the resulting code, compute a full tree of depth $2$:~{\tiny{}\Tree[ [ 0 1 ] [ 2 3 ] ]} +To test the resulting code, compute a full tree of depth $2$, which +we expect to be~{\tiny{}\Tree[ [ 0 1 ] [ 2 3 ] ]}: \begin{lstlisting} scala> fullBinaryTree(2) res0: T2[Int] = Branch(Branch(Leaf(0), Leaf(1)), Branch(Leaf(2), Leaf(3))) @@ -1547,9 +1549,8 @@ \subsubsection{Example \label{subsec:Example-unfold-tree-evenodd}\ref{subsec:Exa trees of type \lstinline!T2[Int]! where the leaves have descending numbers from \lstinline!n! to \lstinline!0!, but all odd numbers are on the left and all even numbers on the right. For example, \lstinline!evenOdd(3)! -should generate the tree {\tiny{}\Tree[ 3 [ [ 1 0 ] 2 ] ]} , while -\lstinline!evenOdd(4)! should give the tree {\tiny{}\Tree[ [ 3 [ [ 1 0 ] 2 ] ] 4 ]} -. +should generate the tree {\tiny{}\Tree[ 3 [ [ 1 0 ] 2 ] ]} and \lstinline!evenOdd(4)! +should give the tree {\tiny{}\Tree[ [ 3 [ [ 1 0 ] 2 ] ] 4 ]} . \subparagraph{Solution} @@ -1600,8 +1601,10 @@ \subsubsection{Example \label{subsec:Example-unfold-tree-evenodd}\ref{subsec:Exa when \lstinline!startAt == 0!. So, the code of \lstinline!f! is: \begin{lstlisting} val f: Z => Either[Int, (Z, Z)] = { - case Z(n, false) if n > 0 && n % 2 == 0 => Right((Z(n - 1, false), Z(n, true))) - case Z(n, false) if n > 0 && n % 2 == 1 => Right((Z(n, true), Z(n - 1, false))) + case Z(n, false) if n > 0 && n % 2 == 0 => + Right((Z(n - 1, false), Z(n, true))) + case Z(n, false) if n > 0 && n % 2 == 1 => + Right((Z(n, true), Z(n - 1, false))) case Z(n, _) => Left(n) // Make a leaf when n == 0 or makeLeaf == true. } \end{lstlisting} @@ -1626,15 +1629,14 @@ \subsubsection{Example \label{subsec:Example-unfold-tree-evenodd}\ref{subsec:Exa is known as reasoning by \textbf{co-induction}\index{co-induction}. It is related to mathematical induction but is significantly different from the reasoning required to write the code for a folding operation -(which is directly modeled on induction). In co-induction, the base -cases are not at the beginning of the computation but \textsf{``}in the future\textsf{''}. -Note that \lstinline!unfold(f)(z)! will call itself whenever the -value $f(z)$ of type $S^{A,Z}$ contains additional values of type -$Z$. The programmer must carefully choose a suitable type $Z$ and -a suitable function $f$ such that \lstinline!unfold(f)(z)! stops -the recursion at the required places. For instance, if $S^{A,Z}\triangleq A+Z\times Z$, +(which is directly modeled on induction). Note that \lstinline!unfold(f)(z)! +will call itself whenever the value $f(z)$ of type $S^{A,Z}$ contains +additional values of type $Z$. The programmer must carefully choose +a suitable type $Z$ and a suitable function $f$ such that \lstinline!unfold(f)(z)! +stops the recursion at the required places. For instance, if $S^{A,Z}\triangleq A+Z\times Z$, the function $f$ must sometimes return a value of type $A+\bbnum 0$ -to stop the unfolding. +to stop the unfolding. 
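A minimal sketch of such a seed function (with names chosen here, not
taken from the preceding examples) counts down to zero and returns a
\lstinline!Left! value to stop the unfolding:
\begin{lstlisting}
// Assuming a specialized unfold for T2 with the type signature
//   def unfoldT2[A, Z](f: Z => Either[A, (Z, Z)]): Z => T2[A]
def stopAtZero: Int => Either[String, (Int, Int)] = {
  case 0 => Left("leaf")          // A value of type A + 0: the unfolding stops here.
  case n => Right((n - 1, n - 1)) // A value of type 0 + Z x Z: two new seed values.
}
// unfoldT2(stopAtZero)(2) would build a full binary tree of depth 2 with "leaf" at every leaf.
\end{lstlisting}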
One could say that the base cases in co-induction +are not at the beginning of the computation but \textsf{``}in the future\textsf{''}. Is the recursion guaranteed to stop while evaluating \lstinline!unfold!? The type signature $f:Z\rightarrow S^{A,Z}$ itself does not guarantee @@ -1642,18 +1644,15 @@ \subsubsection{Example \label{subsec:Example-unfold-tree-evenodd}\ref{subsec:Exa places. If $S^{A,Z}\triangleq A+Z\times Z$ and $f(z)$ always returns values of type $\bbnum 0+Z\times Z$ (for example, $f(z)\triangleq\bbnum 0+z\times z$), the unfolding operation \lstinline!unfold(f)! will enter an infinite -loop trying to constructing a tree of infinite size. This will, of -course, fail since data structures in a computer cannot have infinite -size. +loop trying to construct a tree of infinite size. This will, of course, +fail since data structures in a computer cannot have infinite size. Unfolding will always terminate if we use a data type that \emph{delays} the evaluation of its recursively defined parts. Those parts will be computed only on demand. To obtain further data, the code needs to call certain functions. Data types of this kind are sometimes called \textsf{``}infinite\textsf{''},\index{infinite data types} which is misleading since -no infinite amount of data is involved. A function may be called many -times and produce any number of result values, but it does not mean -that a function stores an infinite amount of data. +only a finite amount of data is ever stored in memory. As an example, consider the recursion scheme $S^{A,R}\triangleq A+(\bbnum 1\rightarrow R\times R)$. The corresponding data type $L^{A}\triangleq S^{A,L^{A}}$ is a binary @@ -1674,7 +1673,7 @@ \subsubsection{Example \label{subsec:Example-unfold-tree-evenodd}\ref{subsec:Exa case Left(a) => ULeaf(a) case Right(func) => UBranch { _ => // It is important to delay the evaluation of func(()). val (z1, z2) = func(()) // Force the evaluation of branches at this level. - (unfoldUT(f)(z1), unfoldUT(f)(z2)) // `unfold` will delay the evaluation of further branches. + (unfoldUT(f)(z1), unfoldUT(f)(z2)) // `unfoldUT` will delay the evaluation of further branches. } } @@ -1721,8 +1720,8 @@ \subsection{Recursion schemes. III. Traversing operations} is \lstinline!zipWithDepth!, which we implemented in Section~\ref{subsec:Tasks-not-implementable-via-traverse} through custom code. We will now implement \lstinline!zipWithDepth! via a more general traversal operation ($\text{trav}_{S}$) parameterized -by an arbitrary recursion scheme $S^{\bullet,\bullet}$ and an arbitrary -functor $F^{\bullet}$ (not necessarily applicative). +by an arbitrary recursion scheme $S$ and an arbitrary functor $F$ +(not necessarily applicative!). To figure out the type signature of $\text{trav}_{S}$, consider the relationship between \lstinline!foldMap!, $\text{fold}_{S}$, and @@ -1846,11 +1845,11 @@ \subsubsection{Exercise \label{subsec:Exercise-traversables-7-1}\ref{subsec:Exer \subsubsection{Exercise \label{subsec:Exercise-traversables-7}\ref{subsec:Exercise-traversables-7}} -For the binary tree \lstinline!T2[A]! (Section~\ref{subsec:Recursion-schemes.-folding}), +For the binary tree \lstinline!T2! (Section~\ref{subsec:Recursion-schemes.-folding}), implement a \lstinline!Traversable! instance for \emph{right-to-left} depth-first traversal order. Use that \lstinline!Traversable! instance -to implement \lstinline!zipWithIndex! for the type \lstinline!T2[A]!. -Verify that \lstinline!zipWithIndex! 
transforms the tree {\tiny{} \Tree[ [ 8 [ 3 5 ] ] 4 ] } +to implement \lstinline!zipWithIndex! for \lstinline!T2!. Verify +via tests that \lstinline!zipWithIndex! transforms the tree {\tiny{} \Tree[ [ 8 [ 3 5 ] ] 4 ] } into {\tiny{} \Tree[ [ (8,3) [ (3,2) (5,1) ] ] (4,0) ] } . \subsubsection{Exercise \label{subsec:Exercise-traversables-8}\ref{subsec:Exercise-traversables-8}} @@ -1870,7 +1869,7 @@ \subsubsection{Exercise \label{subsec:Exercise-traversables-8-1}\ref{subsec:Exer \subsubsection{Exercise \label{subsec:Exercise-traversables-12}\ref{subsec:Exercise-traversables-12}} Use the specialized version of $\text{fold}_{S}$ for the binary tree -\lstinline!T2[A]! (Section~\ref{subsec:Recursion-schemes.-folding}) +\lstinline!T2! (Section~\ref{subsec:Recursion-schemes.-folding}) to compute the maximum depth of a tree: \begin{lstlisting} def maxDepth[A](tree: T2[A]): Int = foldT2(???)(???) @@ -1882,8 +1881,8 @@ \subsubsection{Exercise \label{subsec:Exercise-traversables-12}\ref{subsec:Exerc \subsubsection{Exercise \label{subsec:Exercise-traversables-11}\ref{subsec:Exercise-traversables-11}} -For the data type \lstinline!T3[A]! defined in Exercise~\ref{subsec:Exercise-applicative-I-1-1}, -define a recursion scheme and implement a specialized version of \lstinline!unfold! +For the data type \lstinline!T3! defined in Exercise~\ref{subsec:Exercise-applicative-I-1-1}, +write a recursion scheme and implement a specialized version of \lstinline!unfold! as \lstinline!unfoldT3!. Using \lstinline!unfoldT3!, write a function that generates ternary trees of the form {\tiny{}}{\tiny{} \Tree[.3 0 [.2 0 1 0 ] 0 ] } starting from the given integer $n$ at the root. @@ -1894,7 +1893,8 @@ \section{Laws and structure} to choose simpler but equivalent versions of these operations. We have seen four methods that implement folding operations: \lstinline!foldLeft!, \lstinline!foldMap!, \lstinline!reduce!, and \lstinline!toList!. -With suitable naturality laws, these methods are equivalent. +It turns out that all those methods are equivalent when suitable naturality +laws hold. \subsection{Equivalence of \texttt{reduce}, \texttt{foldLeft}, \texttt{foldMap}, and \texttt{toList}. Monoid morphisms\label{subsec:Equivalence-of-foldLeft,foldMap,reduce,and-toList}} @@ -2026,15 +2026,15 @@ \subsubsection{Statement \label{subsec:Statement-foldleft-foldmap-equivalence}\r \textbf{(c)} The functions \lstinline!foldFn! and \lstinline!reduceE! are equivalent as long as \lstinline!foldFn! obeys the laws~(\ref{eq:foldFn-first-special-law}) and~(\ref{eq:foldFn-second-special-law}) shown below and \lstinline!reduceE! -satisfies the monoidal naturality law. +obeys the monoidal naturality law. We will show in Statement~\ref{subsec:relational-property-for-foldFn} below that the special laws~(\ref{eq:foldFn-first-special-law}) and~(\ref{eq:foldFn-second-special-law}) follow from parametricity. -So, these laws will hold automatically when the code of \lstinline!foldFn! -is fully parametric. However, formulating these special laws allows -us to prove the equivalence of \lstinline!foldFn! and \lstinline!reduceE! -without assuming parametricity. +So, those laws will hold automatically when the code of \lstinline!foldFn! +is fully parametric. Formulating those special laws allows us to prove +the equivalence of \lstinline!foldFn! and \lstinline!reduceE! without +assuming parametricity. 
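Before the proof, here is a small illustration of the general mechanism
behind such equivalences: a sketch (using Scala\textsf{'}s standard \lstinline!List!
instead of a general functor $L$, with names chosen for this example)
that expresses a \lstinline!foldLeft!-like operation through a \lstinline!foldMap!-like
operation by folding into the monoid of functions of type \lstinline!Z => Z!
under function composition:
\begin{lstlisting}
def foldMapList[A, M](xs: List[A])(f: A => M)(combine: (M, M) => M, empty: M): M =
  xs.map(f).foldRight(empty)(combine)

def foldLeftViaFoldMap[A, Z](xs: List[A])(init: Z)(update: (Z, A) => Z): Z = {
  val endo: Z => Z =
    foldMapList(xs)(a => (z: Z) => update(z, a))((g, h) => g andThen h, identity[Z])
  endo(init) // Apply the accumulated function to the initial value.
}
// Example: foldLeftViaFoldMap(List(1, 2, 3))(0)(_ + _) == List(1, 2, 3).foldLeft(0)(_ + _) == 6
\end{lstlisting}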
\subparagraph{Proof} @@ -2371,7 +2371,7 @@ \subsubsection{Statement \label{subsec:relational-property-for-foldFn}\ref{subse \subsubsection{Statement \label{subsec:Statement-reduceE-toList-equivalence}\ref{subsec:Statement-reduceE-toList-equivalence}} Functions \lstinline!reduceE! and \lstinline!toList! are equivalent -if expressed through each other as: +if expressed via each other as: \begin{align} & \text{toList}:L^{A}\rightarrow\text{List}^{A}\quad,\quad\quad\text{toList}=\text{pu}_{\text{List}}^{\uparrow L}\bef\text{reduceE}^{\text{List}^{A}}\quad,\label{eq:toList-via-reduceE}\\ & \text{reduceE}:L^{M}\rightarrow M\quad,\quad\quad\text{reduceE}=\text{toList}\bef\text{reduceList}\quad,\label{eq:reduceE-via-toList} @@ -2437,7 +2437,7 @@ \subsubsection{Statement \label{subsec:Statement-reduceE-toList-equivalence}\ref & =f(h)\oplus_{N}f(\overline{\text{reduceList}}\left(t\right))\\ {\color{greenunder}\text{monad morphism law}:}\quad & =f\big(h\oplus_{M}\overline{\text{reduceList}}\left(t\right)\big)\quad. \end{align*} -Using the monad morphism identity law ($f(e_{M})=e_{N}$), we find: +Using the monoid morphism identity law ($f(e_{M})=e_{N}$), we find: \begin{align*} & f^{\uparrow\text{List}}\bef\text{reduceList}=\,\begin{array}{|c||c|} & N\\ @@ -2507,7 +2507,7 @@ \subsubsection{Statement \label{subsec:Statement-reduceE-toList-equivalence}\ref \text{pu}_{\text{List}}^{\uparrow\text{List}}\bef\text{reduceList}=\text{id}^{:\text{List}^{M}\rightarrow\text{List}^{M}}\quad.\label{eq:identity-for-reduceList-and-pure} \end{equation} We use the definition of \lstinline!reduceList[M]! and set \lstinline!M! -to the monoid type \lstinline!List[A]!: +to the monoidal type \lstinline!List[A]!: \begin{align*} & \text{pu}_{\text{List}}^{\uparrow\text{List}}\bef\text{reduceList}^{\text{List}^{A}}=\,\begin{array}{|c||cc|} & \bbnum 1 & \text{List}^{M}\times\text{List}^{\text{List}^{M}}\\ @@ -2578,9 +2578,9 @@ \subsection{The missing laws of \texttt{foldMap} and \texttt{reduce}} \] This law describes the property that \lstinline!toList[A]! works in the same way for all types \lstinline!A!. Certainly, programmers -expect this property to hold. But the main intent of \lstinline!toList! +expect this property to hold. But the main purpose of \lstinline!toList! is to extract values of type $A$ out of $L^{A}$ and store them in -a list. Naturality laws do not express this intent. +a list. Naturality laws do not express that purpose. More precisely, programmers expect that for any \textsf{``}container\textsf{''} value \lstinline!p! of type \lstinline!L[A]!, the value \lstinline!toList(p)! @@ -2597,25 +2597,26 @@ \subsection{The missing laws of \texttt{foldMap} and \texttt{reduce}} stored in \lstinline!p!. Unfortunately, it seems to be impossible to express this property in the form of an equation satisfied by \lstinline!toList!. Often, a loss of information is prevented by imposing an identity -law. Can we formulate an identity law for \lstinline!toList!? Such -a law could state that \lstinline!toList(p)! should extract some -known values contained in \lstinline!p!. If $L$ were a pointed functor\index{pointed functor} +law. Can we formulate an identity law for \lstinline!toList! stating +that \lstinline!toList(p)! should extract some known values contained +in \lstinline!p!? If $L$ were a pointed functor\index{pointed functor} (see Section~\ref{subsec:Pointed-functors-motivation-equivalence}), we could use its \lstinline!pure! method to inject a known value \lstinline!x! into the container \lstinline!p! 
and then require \lstinline!toList! to extract the same value \lstinline!x!. But the definition of \lstinline!toList! does not require the functor -$L$ to be pointed. So, in general we cannot inject values into $L^{A}$ -in a way that is guaranteed to preserve information. This prevents -us from formulating an identity law for \lstinline!toList!. +$L$ to be pointed. So, in general we cannot inject values into a +data structure of type $L^{A}$ in a way that is guaranteed to preserve +information. This prevents us from formulating an identity law of +\lstinline!toList!. Another approach to finding laws is to look for type signatures in the form of a \textsf{``}lifting\textsf{''} that transforms a function of one type into a function of another type. We have summarized the methods of several standard typeclasses as \textsf{``}liftings\textsf{''} in Section~\ref{subsec:The-pattern-of-functorial-typeclasses}. The laws of a \textsf{``}lifting\textsf{''} are the functor laws (identity and composition). -Could we apply this approach to the folding operations? The type signature -of the \lstinline!foldMap! function is:\index{foldMap function@\texttt{foldMap} function} +Could we apply that approach to derive laws for folding operations? +The type signature of the \lstinline!foldMap! function is:\index{foldMap function@\texttt{foldMap} function} \[ \text{foldMap}:\left(A\rightarrow M\right)\rightarrow L^{A}\rightarrow M\quad. \] @@ -2624,32 +2625,30 @@ \subsection{The missing laws of \texttt{foldMap} and \texttt{reduce}} to impose the functor laws on those liftings. For instance, the functor composition law involves applying \lstinline!foldMap! to a composition of arguments. But the type signature $A\rightarrow M$ (where $M$ -is a fixed type) does not support composition since we cannot compose -$A\rightarrow M$ with $B\rightarrow M$. - -So, the \textsf{``}lifting\textsf{''} approach also fails to yield a suitable law -for folding operations. However, we will see below that lifting-like -laws may be imposed on the \lstinline!traverse! operation, whose -type signature is a generalization of that of \lstinline!foldMap!. -We will show that the laws of \lstinline!traverse! forbid information-losing -implementations. Since \lstinline!foldMap! can be derived from \lstinline!traverse!, -we may take the position that the only acceptable implementations -of \lstinline!foldMap! are those derived from a lawful \lstinline!traverse! -function. +is a fixed type) does not support composition: we cannot compose $A\rightarrow M$ +with $B\rightarrow M$. + +So, the \textsf{``}lifting\textsf{''} approach fails to give us laws for folding operations. +However, we will see below that lifting-like laws may be imposed on +\lstinline!traverse!, whose type signature is a generalization of +that of \lstinline!foldMap!. We will show that the laws of \lstinline!traverse! +forbid information-losing implementations. Since \lstinline!foldMap! +can be derived from \lstinline!traverse!, we may take the position +that the only acceptable implementations of \lstinline!foldMap! are +those derived from a lawful \lstinline!traverse! function. \subsection{All polynomial functors are foldable} It turns out that the lack of available laws does not prevent us from finding correct implementations of folding operations. The reason is that folding operations are available only for polynomial functors, -such as \lstinline!Option[A]! and \lstinline!List[A]! (note that -\lstinline!List! 
is a \emph{recursively} \emph{defined} polynomial -functor\index{polynomial functor!recursive}). Those functors represent -data structures that store a finite number of values of type \lstinline!A!. -It is clear what it means to extract \textsf{``}all values of type $A$\textsf{''} -from such data structures. To show that all polynomial functors are -foldable, we will define \lstinline!toList! inductively via structural -analysis of functor types. The definition will ensure that \lstinline!toList! +such as \lstinline!Option! and \lstinline!List! (note that \lstinline!List! +is a \emph{recursively} \emph{defined} polynomial functor\index{polynomial functor!recursive}). +Those functors represent data structures that store a finite number +of values. It is clear what it means to \textsf{``}extract all values\textsf{''} from +such data structures. To show that all polynomial functors are foldable, +we will define \lstinline!toList! inductively via structural analysis +of functor types. The definition will ensure that \lstinline!toList! correctly extracts the values stored by a given data structure. Let us first show that non-polynomial functors are \emph{not} foldable. @@ -2685,7 +2684,7 @@ \subsection{All polynomial functors are foldable} Other folding operations (\lstinline!foldLeft!, \lstinline!foldMap!, \lstinline!reduce!) can be derived from \lstinline!toList! (Section~\ref{subsec:Equivalence-of-foldLeft,foldMap,reduce,and-toList}). -Polynomial functors are built via the five standard type constructions +Polynomial functors are built via five standard type constructions (Table~\ref{subsec:Type-notation-and-standard-type-constructions} without the function types). Defining \lstinline!toList! for these constructions will provide an implementation of folding operations @@ -2701,7 +2700,7 @@ \subsection{All polynomial functors are foldable} \paragraph{Type parameter} The identity functor $L^{A}\triangleq A$ is viewed as a container -holding a single value of type $A$. So, we simply define $\text{toList}\triangleq\text{id}$. +holding a single value of type $A$. So, we simply define \lstinline!toList(x) = List(x)!. \paragraph{Products} @@ -2712,7 +2711,7 @@ \subsection{All polynomial functors are foldable} def toList_M[A]: ((K[A], L[A])) => List[A] = { case (p, q) => toList_K(p) ++ toList_L(q) } \end{lstlisting} \[ -\text{toList}_{M}:K^{A}\times L^{A}\rightarrow\text{List}^{A}\quad,\quad\quad\text{toList}_{M}\triangleq p^{:K^{A}}\times q^{:L^{A}}\rightarrow\text{toList}_{K}(p)\,\pplus\,\text{toList}_{L}(q)\quad. +\text{toList}_{M}:K^{A}\times L^{A}\rightarrow\text{List}^{A}\quad,\quad\text{toList}_{M}\triangleq p^{:K^{A}}\times q^{:L^{A}}\rightarrow\text{toList}_{K}(p)\,\pplus\,\text{toList}_{L}(q)\quad. \] This implementation contains an arbitrary choice: the values stored in $p$ are listed before the values stored in $q$. We could equally @@ -2747,9 +2746,9 @@ \subsection{All polynomial functors are foldable} We need to implement \lstinline!toList! for a functor $L$ defined recursively by $L^{A}\triangleq S^{A,L^{A}}$, where the recursion -scheme $S^{\bullet,\bullet}$ is a bifunctor that is itself foldable. -A bifunctor $S^{\bullet,\bullet}$ is \textbf{foldable}\index{foldable bifunctor} -if there is a \lstinline!toList! function with this type signature: +scheme $S$ is a bifunctor that is itself foldable. A bifunctor $S$ +is \textbf{foldable}\index{foldable bifunctor} if there is a \lstinline!toList! 
+function with this type signature: \[ \text{toList}_{S}:S^{A,A}\rightarrow\text{List}^{A}\quad. \] @@ -2763,11 +2762,10 @@ \subsection{All polynomial functors are foldable} \] This code first converts values of type $S^{A,L^{A}}$ into type $S^{A,\text{List}^{A}}$ by lifting \lstinline!toList! recursively to the second type parameter -of $S^{\bullet,\bullet}$. We then convert $S^{A,\text{List}^{A}}$ -into $S^{\text{List}^{A},\text{List}^{A}}$ by creating one-element -lists via $\text{pu}_{\text{List}}$. After that, all values of type -$A$ are extracted from $S^{\text{List}^{A},\text{List}^{A}}$ using -$\text{toList}_{S}$, which returns a nested $\text{List}^{\text{List}^{A}}$. +of $S$. We then convert $S^{A,\text{List}^{A}}$ into $S^{\text{List}^{A},\text{List}^{A}}$ +by creating one-element lists via $\text{pu}_{\text{List}}$. After +that, all values of type $A$ are extracted from $S^{\text{List}^{A},\text{List}^{A}}$ +using $\text{toList}_{S}$, which returns a nested $\text{List}^{\text{List}^{A}}$. The final \lstinline!flatten! operation reduces that to a $\text{List}^{A}$. This implements \lstinline!toList! for the five type constructions @@ -2792,13 +2790,13 @@ \subsection{Equivalence of \texttt{traverse} and \texttt{sequence}} \end{lstlisting} The function \lstinline!Future.sequence! is limited to sequence-like -data types such as \lstinline!L[A] = List[A]!. To generalize the -\lstinline!sequence! operation to other data types, we replace a -sequence-like type by an arbitrary traversable functor \lstinline!L[A]!. -We also replace the \lstinline!Future! type constructor by an arbitrary -applicative functor (since the \lstinline!traverse! operation accepts -one). It turns out that a function called \lstinline!sequence! can -then be defined via the \lstinline!traverse! operation and vice versa: +data types such as \lstinline!List!. To generalize the \lstinline!sequence! +operation to other data types, we replace a sequence-like type by +an arbitrary traversable functor \lstinline!L!. We also replace the +\lstinline!Future! type constructor by an arbitrary applicative functor +(since the \lstinline!traverse! operation accepts one). It turns +out that a function called \lstinline!sequence! can then be defined +via the \lstinline!traverse! operation and vice versa: \begin{lstlisting} def sequence[A, L[_]: Traversable, F[_]: Applicative : Functor]: L[F[A]] => F[L[A]] = _.traverse(id) def traverse[A, B, L[_]: Traversable, F[_]: Applicative : Functor](la: L[A])(f: A => F[B]): F[L[B]] = sequence(la.map(f)) @@ -2868,14 +2866,13 @@ \subsection{Laws of \texttt{traverse}} } \] -We also need a naturality law with respect to the parameter \lstinline!F!, +We also need a naturality law with respect to the parameter $F$, which is a type constructor required to be an applicative functor. This law expresses the requirement that \lstinline!traverse! may -not inspect the type of \lstinline!F! directly and make decisions -based on that type. The code of \lstinline!traverse! may only use -$F$\textsf{'}s applicative methods (\lstinline!wu! and \lstinline!zip!). -To formulate this requirement as an \textbf{applicative naturality -law}\index{applicative naturality law!of traverse@of \texttt{traverse}}, +not inspect the type of $F$ directly and make decisions based on +that type. The code of \lstinline!traverse! may only use $F$\textsf{'}s applicative +methods (\lstinline!wu! and \lstinline!zip!). 
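For example, here is a sketch of a \lstinline!traverse! for the functor
$L^{A}\triangleq A\times A$, written against a minimal \lstinline!Applicative!
interface defined here only for illustration (the method names follow
the code notation \lstinline!wu! and \lstinline!zip!, not necessarily
the Scala definitions used elsewhere in this chapter):
\begin{lstlisting}
trait Applicative[F[_]] {
  def wu: F[Unit]                               // The "wrapped unit" value.
  def zip[A, B](fa: F[A], fb: F[B]): F[(A, B)]
  def map[A, B](fa: F[A])(f: A => B): F[B]      // The functor method.
}

def travPair[A, B, F[_]](f: A => F[B])(la: (A, A))(implicit F: Applicative[F]): F[(B, B)] =
  F.zip(f(la._1), f(la._2)) // Only the method `zip` is used; the type F is never inspected.
\end{lstlisting}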
To formulate this +requirement as an \textbf{applicative naturality law}\index{applicative naturality law!of traverse@of \texttt{traverse}}, we write an equation similar to the second naturality law, except that the function $f$ will now map the applicative functor $F$ to another \emph{arbitrary} applicative functor $G$: @@ -2905,7 +2902,8 @@ \subsection{Laws of \texttt{traverse}} Natural transformations $f:F^{X}\rightarrow G^{X}$ that preserve the applicative methods of $F$ and $G$ are called \index{applicative morphism}\textbf{applicative -morphisms} (compare to monoid morphisms defined in Section~\ref{subsec:Equivalence-of-foldLeft,foldMap,reduce,and-toList} +morphisms} (first defined in Section~\ref{subsec:Applicative-morphisms}; +compare to monoid morphisms defined in Section~\ref{subsec:Equivalence-of-foldLeft,foldMap,reduce,and-toList} and monad morphisms defined in Section~\ref{subsec:Monads-in-category-theory-monad-morphisms}). The laws of applicative morphisms are:\index{identity laws!of applicative morphisms}\index{composition law!of applicative morphisms} \begin{align} @@ -2958,7 +2956,7 @@ \subsubsection{Example \label{subsec:Example-some-applicative-morphisms}\ref{sub \subsubsection{Example \label{subsec:Example-naturality-law-of-traverse}\ref{subsec:Example-naturality-law-of-traverse}} Verify the naturality law of \lstinline!traverse! for $L^{A}\triangleq A\times A$ -using an applicative morphism between applicative functors $G^{A}\triangleq\bbnum 1+A$ +using an applicative morphism between $G^{A}\triangleq\bbnum 1+A$ and $H^{A}\triangleq\bbnum 1+A\times A$. \subparagraph{Solution} @@ -3004,7 +3002,7 @@ \subsubsection{Example \label{subsec:Example-naturality-law-of-traverse}\ref{sub in order to obey the identity laws of applicative morphisms. Next, we verify that $g$ is an applicative morphism. To check the -identity law of $g$: +identity law: \[ \text{wu}_{G}\triangleright g=\,\begin{array}{|cc|} \bbnum 0 & 1\end{array}\,\triangleright\,\begin{array}{|c||cc|} @@ -3219,9 +3217,9 @@ \subsubsection{Statement \label{subsec:Statement-identity-law-traverse-simplifie \] \noindent The second \lstinline!traverse! operation needs to be lifted -to \lstinline!F! for the types to match. The result (of type $F^{G^{L^{C}}}$) -looks like a \lstinline!traverse! operation with respect to the functor -$F\circ G$. By Statement~\ref{subsec:Statement-applicative-composition}, +to $F$ for the types to match. The result (of type $F^{G^{L^{C}}}$) +looks like a result of a \lstinline!traverse! operation with respect +to the functor $F\circ G$. By Statement~\ref{subsec:Statement-applicative-composition}, the functor $F\circ G$ is applicative. So, we may apply a single \lstinline!traverse! operation using that functor and obtain: \noindent \begin{center} @@ -3313,9 +3311,9 @@ \subsubsection{Statement \label{subsec:Statement-identity-law-traverse-simplifie F((fa.run zip fb.run).map { case (sa, sb) => sa zip sb }) } -val f: Int => S[Int] = i => S(j => (i + j, i + j)) -val g: Int => S[Int] = f -val ff: Int => F[Int] = i => F(f(i).map(g)) +val f1: Int => S[Int] = i => S(j => (i + j, i + j)) +val f2: Int => S[Int] = f1 +val f1f2: Int => F[Int] = i => F(f1(i).map(f2)) val l: L[Int] = (1, 0) val result1: F[L[Int]] = trav2[Int, Int, F](f1f2)(l) @@ -3630,13 +3628,13 @@ \subsection{All polynomial functors are traversable\label{subsec:All-polynomial- \paragraph{Recursive types} Here $L^{A}\triangleq S^{A,L^{A}}$, with a recursion scheme given -by a bifunctor\index{bifunctor} $S^{A,R}$. 
In order to obtain the -traversable property of $L$, we will need to assume that $S$ is -traversable with respect to both its type parameters in a special -way, which we call \textsf{``}bitraversable\textsf{''}. It is not enough if $S^{A,R}$ -is traversable with respect to each type parameter separately. +by a bifunctor\index{bifunctor} $S$. In order to obtain the traversable +property of $L$, we will need to assume that $S$ is traversable +with respect to both its type parameters in a special way, which we +call \textsf{``}bitraversable\textsf{''}. It is not sufficient if $S$ is traversable +with respect to each type parameter separately. -A bifunctor $S^{A,B}$ is called \textbf{bitraversable}\index{bitraversable bifunctor} +A bifunctor $S$ is called \textbf{bitraversable}\index{bitraversable bifunctor} if it has a \lstinline!bisequence! method (denoted by $\text{seq2}_{S}$): \[ \text{seq2}_{S}^{F,A,B}:S^{F^{A},F^{B}}\rightarrow F^{S^{A,B}}\quad, @@ -3652,9 +3650,9 @@ \subsection{All polynomial functors are traversable\label{subsec:All-polynomial- Section~\ref{subsec:All-polynomial-bifunctors-are-bitraversable} will show that all polynomial bifunctors are bitraversable. So, we -are free to use any recursion scheme $S^{A,R}$ as long as $S$ is -a polynomial bifunctor. For now, we assume that a lawful $\text{seq2}_{S}$ -is available and define $\text{seq}_{L}$ as: +are free to use any polynomial bifunctor $S$ as a recursion scheme. +For now, we assume that a lawful $\text{seq2}_{S}$ is available and +define $\text{seq}_{L}$ as: \[ \text{seq}_{L}^{F,A}:S^{F^{A},L^{F^{A}}}\rightarrow F^{S^{A,L^{A}}}\quad,\quad\quad\text{seq}_{L}^{F,A}\triangleq\big(\overline{\text{seq}}_{L}^{F,A}\big)^{\uparrow S^{F^{A},\bullet}}\bef\text{seq2}_{S}^{F,A,L^{A}}\quad. \] @@ -4041,7 +4039,7 @@ \subsubsection{Exercise \label{subsec:Exercise-traversables-laws-1-1}\ref{subsec \subsubsection{Exercise \label{subsec:Exercise-traversables-5}\ref{subsec:Exercise-traversables-5}} Prove that $L^{A}\triangleq M^{N^{A}}$ is a lawful traversable if -$M^{\bullet}$ and $N^{\bullet}$ are traversable functors. +$M$ and $N$ are traversable functors. \subsubsection{Exercise \label{subsec:Exercise-traversables-4}\ref{subsec:Exercise-traversables-4}} @@ -4076,7 +4074,7 @@ \subsubsection{Exercise \label{subsec:Exercise-traversables-laws-2-1}\ref{subsec \subsubsection{Exercise \label{subsec:Exercise-traversables-10}\ref{subsec:Exercise-traversables-10}} -Given a \emph{monad} $M^{\bullet}$ and a monoid morphism $\phi:R\rightarrow S$ +Given a \emph{monad} $M$ and a monoid morphism $\phi:R\rightarrow S$ between some monoid types $R$ and $S$, prove that $\phi^{\uparrow M}:M^{R}\rightarrow M^{S}$ is also a monoid morphism. (The types $M^{R}$ and $M^{S}$ are monoids due to Exercise~\ref{subsec:Exercise-monad-of-monoid-is-monoid}). @@ -4173,9 +4171,9 @@ \subsection{The missing laws of \texttt{traverse} and \texttt{zipWithIndex}\labe Sections~\ref{subsec:Decorating-a-tree1}\textendash \ref{subsec:Decorating-a-tree-breadth-first-traversal} defined the method \lstinline!zipWithIndex! for certain choices of traversals over binary trees. How can we define \lstinline!zipWithIndex! -(denoted $\text{zwi}_{L}$ for brevity) for any traversable functor -$L$? We use $L$\textsf{'}s \lstinline!traverse! method ($\text{trav}_{L}^{F,A,B}$) -and chose the applicative functor $F$ as the \lstinline!State! monad +(denoted $\text{zwi}_{L}$) for any traversable functor $L$? We use +$L$\textsf{'}s \lstinline!traverse! 
method ($\text{trav}_{L}^{F,A,B}$) and +chose the applicative functor $F$ as the \lstinline!State! monad with the internal state of type \lstinline!Int!: \[ F^{A}\triangleq\text{State}^{\text{Int},A}\triangleq\text{Int}\rightarrow A\times\text{Int}\quad. @@ -4536,7 +4534,8 @@ \subsubsection{Statement \label{subsec:Statement-Bird-representation-theorem-for \subsubsection{Statement \label{subsec:Statement-polynomial-functors-Int-A}\ref{subsec:Statement-polynomial-functors-Int-A}} -For any traversable functor $L$, any type $A$, and any value $p^{:L^{A}}$: +For any traversable functor $L$, any non-void type $A$, and any +value $p^{:L^{A}}$: \textbf{(a)} There is a function $t_{p}:\text{Int}\rightarrow A$ such that \lstinline!zipWithIndex! satisfies: @@ -4562,7 +4561,7 @@ \subsubsection{Statement \label{subsec:Statement-polynomial-functors-Int-A}\ref{ p\triangleright\text{zwi}_{L}\triangleright\pi_{2}^{\uparrow L}=\big(1^{:\text{Int}}\times2^{:\text{Int}}\times...\times n^{:\text{Int}}\big)\triangleright\text{make}^{\text{Int}}\quad. \] Now we define $t_{p}$ as a (partial) function of type $\text{Int}\rightarrow A$ -such that +such that: \[ t_{p}(i)\triangleq a_{i}\quad,\quad i=1,2,...,n\quad. \] @@ -4597,15 +4596,15 @@ \subsection{Traversable contrafunctors and profunctors are not useful} may be traversable. To answer that question, we will try implementing a lawful \lstinline!sequence! function for contrafunctors and profunctors. -Suppose that $L^{A}$ is a contrafunctor. A \lstinline!sequence! -function (denoted by $\text{seq}_{L}$) with the type signature: +Suppose that $L$ is a contrafunctor. A \lstinline!sequence! function +(denoted by $\text{seq}_{L}$) with the type signature: \[ \text{seq}_{L}^{F,A}:L^{F^{A}}\rightarrow F^{L^{A}}\quad, \] is required for $L$ to be traversable. That function must obey the applicative naturality law, which ensures that $\text{seq}_{L}^{F,A}$ works in the same way for any applicative functor $F$. We note that -$\text{seq}_{L}$ is covariant in $F$ because $L^{\bullet}$ is contravariant. +$\text{seq}_{L}^{F,A}$ is covariant in $F$ because $L$ is contravariant. Since $\text{pu}_{F}:\text{Id}^{A}\rightarrow F^{A}$ is an applicative morphism (Example~\ref{subsec:Example-pure-is-applicative-morphism}), we may use this morphism to write the applicative naturality law: @@ -4637,8 +4636,8 @@ \subsection{Traversable contrafunctors and profunctors are not useful} \text{seq}_{L}^{F,A}:(F^{A}\rightarrow Z)\rightarrow F^{A\rightarrow Z}\quad,\quad\quad\text{seq}_{L}^{F,A}(f^{:F^{A}\rightarrow Z})=\text{pu}_{F}(a^{:A}\rightarrow f(\text{pu}_{F}(a)))\quad. \] The function $f$ will be never applied to nontrivial $F$-effects. -So, the function $\text{seq}_{L}$ will never obtain any information -that $f$ would return when applied to nontrivial $F$-effects. +So, $\text{seq}_{L}(f)$ could never use any information that $f$ +would return when applied to nontrivial $F$-effects. We see that contrafunctors are traversable in a way that is not practically useful. @@ -4665,17 +4664,16 @@ \subsection{Traversals for nested recursive types} (a perfect-shaped tree, Sections~\ref{subsec:Perfect-shaped-trees} and~\ref{subsec:Example-traversal-perfect-shaped-tree}) that cannot be defined in this way. 
The reason is that the recursive type equation -for a perfect-shaped binary tree\index{perfect-shaped tree} $\text{PT}^{A}$ +for a perfect-shaped binary tree\index{perfect-shaped tree} $\text{PT}$ is: \begin{equation} \text{PT}^{A}\triangleq A+\text{PT}^{A\times A}\quad.\label{eq:perfect-shaped-binary-tree-type-equation} \end{equation} This type equation is not of the form $\text{PT}^{A}\triangleq S^{A,\text{PT}^{A}}$ -because the recursive use of $\text{PT}^{\bullet}$ contains a nontrivial -type expression ($A\times A$) instead of just $A$. To express the -type equation~(\ref{eq:perfect-shaped-binary-tree-type-equation}) -via a recursion scheme, we may introduce an additional functor $P$ -and write: +because the recursive use of $\text{PT}$ contains a nontrivial type +expression ($A\times A$) instead of just $A$. To express the type +equation~(\ref{eq:perfect-shaped-binary-tree-type-equation}) via +a recursion scheme, we introduce an additional functor $P$ and write: \[ \text{PT}^{A}\triangleq S^{A,\text{PT}^{P^{A}}}\quad,\quad\quad P^{A}\triangleq A\times A\quad. \] @@ -4699,9 +4697,8 @@ \subsection{Traversals for nested recursive types} \subsubsection{Statement \label{subsec:Statement-nested-recursive-type-traversable}\ref{subsec:Statement-nested-recursive-type-traversable}} -Given a bitraversable bifunctor $S^{\bullet,\bullet}$ and a traversable -functor $P^{\bullet}$, define a nested recursive type constructor -$L^{\bullet}$ by: +Given a bitraversable bifunctor $S$ and a traversable functor $P$, +define a nested recursive type constructor $L$ by: \[ L^{A}\triangleq S^{A,L^{P^{A}}}\quad. \] @@ -4749,7 +4746,7 @@ \subsubsection{Statement \label{subsec:Statement-nested-recursive-type-traversab of lifted functions: \begin{align*} {\color{greenunder}\text{left-hand side}:}\quad & \big(\overline{\text{seq}}_{L}^{F,P^{G^{A}}}\big)^{\uparrow S}\bef\gunderline{\text{seq2}_{S}^{F,G^{A},L^{P^{G^{A}}}}\bef\big((\text{seq}_{P}^{G,A})^{\uparrow L}\bef\overline{\text{seq}}_{L}^{G,P^{A}}\big)^{\uparrow S\uparrow F}}\\ -{\color{greenunder}\text{naturality law of }\text{seq2}_{S}:}\quad & =\gunderline{\big(\overline{\text{seq}}_{L}^{F,P^{G^{A}}}\big)^{\uparrow S}\bef\big((\text{seq}_{P}^{G,A})^{\uparrow L}\bef\overline{\text{seq}}_{L}^{G,P^{A}}\big)^{\uparrow F\uparrow S}}\bef\text{seq2}_{S}^{F,G^{A},G^{L^{P^{A}}}}\\ +{\color{greenunder}\text{naturality of }\text{seq2}_{S}:}\quad & =\gunderline{\big(\overline{\text{seq}}_{L}^{F,P^{G^{A}}}\big)^{\uparrow S}\bef\big((\text{seq}_{P}^{G,A})^{\uparrow L}\bef\overline{\text{seq}}_{L}^{G,P^{A}}\big)^{\uparrow F\uparrow S}}\bef\text{seq2}_{S}^{F,G^{A},G^{L^{P^{A}}}}\\ {\color{greenunder}\text{composition under }^{\uparrow S}:}\quad & =\big(\gunderline{\overline{\text{seq}}_{L}^{F,P^{G^{A}}}\bef(\text{seq}_{P}^{G,A})^{\uparrow L\uparrow F}}\big)^{\uparrow S}\bef\big(\overline{\text{seq}}_{L}^{G,P^{A}}\big)^{\uparrow F\uparrow S}\bef\text{seq2}_{S}^{F,G^{A},G^{L^{P^{A}}}}\quad. \end{align*} It remains to show that: @@ -4760,9 +4757,9 @@ \subsubsection{Statement \label{subsec:Statement-nested-recursive-type-traversab under the lifting $(\dots)^{\uparrow S}$. 
$\square$ As an advanced example of a nested traversable functor, we will derive -a type for square-shaped matrices\footnote{This and other advanced examples of nested recursive types are explained -in the paper \textsf{``}Manufacturing datatypes\textsf{''} (1999) by R.~Hinze\index{Ralf Hinze}, -see \texttt{\href{https://www.cs.ox.ac.uk/ralf.hinze/publications/WAAAPL99a.ps.gz}{https://www.cs.ox.ac.uk/ralf.hinze/publications/WAAAPL99a.ps.gz}}} with elements of type $A$. For motivation, recall how Example~\ref{subsec:Example-matrix-products} +a type for square-shaped matrices\footnote{This and other advanced examples of designing and using nested recursive +types are explained in the paper \textsf{``}Manufacturing datatypes\textsf{''} (1999) +by R.~Hinze\index{Ralf Hinze}, see \texttt{\href{https://www.cs.ox.ac.uk/ralf.hinze/publications/WAAAPL99a.ps.gz}{https://www.cs.ox.ac.uk/ralf.hinze/publications/WAAAPL99a.ps.gz}}} with elements of type $A$. For motivation, recall how Example~\ref{subsec:Example-matrix-products} encoded square matrices via nested lists of type \lstinline!List[List[A]]!. However, a value of that type is not guaranteed to represent a matrix of a consistent shape. We would like to define a type \lstinline!Sq[A]! @@ -4786,12 +4783,12 @@ \subsubsection{Statement \label{subsec:Statement-nested-recursive-type-traversab The type \lstinline!Sq[A]! is equivalent to an infinite disjunction of types representing square matrices of every size ($1\times1$, -$2\times2$, and so on). To define an infinite disjunctive type in -a program of finite size, we need to use recursion at type level. -In a mathematical sense, this recursion will be induction on the size -of the matrix. So, let us introduce the size of the matrix as an extra -\emph{type parameter} $N$. It will be convenient to define \lstinline!SqSize[N, A]! -as the type of matrices of size \emph{at least} $N$. We intend $N$ +$2\times2$, and so on). To define an infinite disjunctive type, we +normally use recursion at type level. In a mathematical sense, this +recursion will be induction on the size of the matrix. So, let us +introduce the size of the matrix as an extra \emph{type parameter} +$N$. It will be convenient to define \lstinline!SqSize[N, A]! as +the type of matrices of size \emph{at least} $N$. We intend $N$ to be equivalent to one of the types $\bbnum 1$, $\bbnum 2$, etc. The base case ($N=\bbnum 1$) and the inductive step (from $N$ to @@ -4844,7 +4841,7 @@ \subsubsection{Statement \label{subsec:Statement-nested-recursive-type-traversab of type $F^{A\times A\times...\times A}$, which we will then need to convert to the type $F^{N\times N\rightarrow A}$. The only way of performing these computations is by enumerating all possible values -of type $N$.Note that the type \lstinline!Sq! sets the type parameter +of type $N$. Note that the type \lstinline!Sq! sets the type parameter \lstinline!N! in \lstinline!SqSize[N, A]! as \lstinline!N = Unit!. This forces the type parameters \lstinline!N! in all of the \lstinline!Next()! constructors to be \lstinline!Unit! wrapped in a number of \lstinline!Option! @@ -4998,7 +4995,7 @@ \subsubsection{Statement \label{subsec:Statement-nested-recursive-type-traversab res1: List[List[List[Int]]] = List(List(List(0, 1), List(2, 3)), List(List(10, 11), List(12, 13)), List(List(100, 101), List(102, 103))) \end{lstlisting} -The type \lstinline!Sq[A]! assures (at compile time) that all matrices +The type \lstinline!Sq! assures (at compile time) that all matrices have consistent shapes. 
However, it is hard to use because of complicated type parameters and deeply nested type constructors. To achieve good performance, square matrices and other tensor-like quantities are diff --git a/sofp-src/tex/sofp-typeclasses.tex b/sofp-src/tex/sofp-typeclasses.tex index 42cbad395..7652e7769 100644 --- a/sofp-src/tex/sofp-typeclasses.tex +++ b/sofp-src/tex/sofp-typeclasses.tex @@ -42,13 +42,12 @@ \subsection{Constraining type parameters} \begin{lstlisting} def inject[F[_], A, B](a: A, f: F[B]): F[(A, B)] = f.map(b => (a, b)) // Must have `f.map`. \end{lstlisting} -This function requires the type constructor \lstinline!F[_]! to have -a \lstinline!map! method, i.e., to be a functor. We can implement -\lstinline!inject! only if we constrain the parameter \lstinline!F! -to be a functor. +This function requires the type constructor \lstinline!F! to have +a \lstinline!map! method. We can implement \lstinline!inject! only +if we constrain the parameter \lstinline!F! to be a functor. -What would that constraint be like? Consider an ordinary function -with no type parameters, e.g.: +What would that constraint look like? For motivation, consider an +ordinary function with no type parameters, e.g.: \begin{lstlisting} def f(x: Int): Int = x + 1 \end{lstlisting} @@ -56,34 +55,38 @@ \subsection{Constraining type parameters} of the argument \lstinline!x! to be integer. It is a type error to apply \lstinline!f! to a non-integer argument. -Using a similar syntax for \emph{type} \emph{parameters}, we write -the type signatures for \lstinline!avg! and \lstinline!inject! as: +Scala supports a similar syntax for \emph{type} \emph{parameters}. +We may write the type signatures for \lstinline!avg! and \lstinline!inject! +as: \begin{lstlisting} def avg[T: Fractional](s: Seq[T]): T def inject[F[_]: Functor, A, B](a: A, f: F[B]): F[(A, B)] \end{lstlisting} -Scala uses the syntax \lstinline![T: Fractional]! to constrain the -type parameter \lstinline!T! to \textsf{``}fractional numeric\textsf{''} types. Similarly, -\lstinline![F[_]: Functor]! requires the type constructor \lstinline!F[_]! -to be a functor. Applying \lstinline!avg! or \lstinline!inject! -to types that do not obey those constraints will be a type error detected -at compile time. +The syntax \lstinline![T: Fractional]! constrains the type parameter +\lstinline!T! to \textsf{``}fractional numeric\textsf{''} types. The syntax \lstinline![F[_]: Functor]! +means that the type constructor \lstinline!F! must be a functor. +Applying \lstinline!avg! or \lstinline!inject! to types that do +not obey those constraints will be a type error detected at compile +time. (Here, \lstinline!Fractional! and \lstinline!Functor! are +custom types that need to be implemented in a special way, as we will +show.) In these examples, we are restricting a type parameter to a subset of possible types, because only types from that subset have certain properties that we need. A subset of types, together with the required -properties that those types must satisfy, is called a \textbf{typeclass}\index{typeclass}. +properties those types must satisfy, is called a \textbf{typeclass}\index{typeclass}. The syntax \lstinline![T: Fractional]! is a \textbf{typeclass constraint}\index{typeclass!constraint} that forces the type \lstinline!T! to belong to the typeclass \lstinline!Fractional!. This chapter focuses on defining and using typeclasses and on understanding their properties. 
We will see in detail how to implement typeclasses -in Scala and use the syntax such as \lstinline![T: Fractional]!. +in Scala and to enable the syntax \lstinline![T: Fractional]! and +\lstinline![F[_]: Functor]!. \subsection{Functions of types and values} The similarity between the type parameter \lstinline!T! and the argument -\lstinline!s! is clear in this type signature, +\lstinline!s! is clear in this type signature: \begin{lstlisting} def avg[T: Fractional](s: Seq[T]): T \end{lstlisting} @@ -96,13 +99,13 @@ \subsection{Functions of types and values} \} \end{comment} -We can view \lstinline!avg! as a function that takes \emph{two} parameters +We may view \lstinline!avg! as a function that takes \emph{two} parameters (a type \lstinline!T! and a value \lstinline!s!) and returns a value. -We can also view \lstinline!avg! as a function from a \emph{type} +We may also view \lstinline!avg! as a function from a \emph{type} \lstinline!T! to a \emph{value} of type \lstinline!Seq[T] => T!. -We may call functions of this kind \textbf{type-to-value}\index{type-to-value function} -functions (TVF). The syntax for TVFs supported in Scala 3 shows this -more clearly: +We call functions of this kind \textbf{type-to-value}\index{type-to-value function} +\textbf{functions} (TVF). The syntax for TVFs supported in Scala 3 +shows this more clearly: \begin{lstlisting} val avg: [T] => Seq[T] => T = ... // Scala 3 only. \end{lstlisting} @@ -113,10 +116,9 @@ \subsection{Functions of types and values} // Better to define as: def avg[T: Fractional](s: Seq[T]): T \end{lstlisting} -A type constructor such as \lstinline!Seq[_]! can be viewed as a -\index{type-to-type function}\textbf{type-to-type} function (TTF) -because it can take any type \lstinline!T! and return a new type -\lstinline!Seq[T]!. +A type constructor such as \lstinline!Seq! is a \index{type-to-type function}\textbf{type-to-type} +\textbf{function} (TTF) because it takes any type \lstinline!T! and +returns a new type \lstinline!Seq[T]!. Functions can map from values or from types and to values or to types, as this table shows: @@ -126,7 +128,7 @@ \subsection{Functions of types and values} \textbf{\small{}functions...} & \textbf{\small{}from value} & \textbf{\small{}from type}\tabularnewline \hline \hline -\textbf{\small{}to value} & {\small{}(VVF)~ }\lstinline!def f(x:Int):Int! & {\small{}(TVF)~ }\lstinline!def pure[A]: A => List[A]!\tabularnewline +\textbf{\small{}to value} & {\small{}(VVF)~ }\lstinline!def f(x: Int): Int! & {\small{}(TVF)~ }\lstinline!def pure[A]: A => List[A]!\tabularnewline \hline \textbf{\small{}to type} & {\small{}(VTF)~ dependent type} & {\small{}(TTF)~ }\lstinline!type MyData[A] = Either[Int, A]!\tabularnewline \hline @@ -135,7 +137,7 @@ \subsection{Functions of types and values} We have already seen examples of VVFs, TVFs, and TTFs. Value-to-type functions (VTFs) are known as \textbf{dependent} \textbf{types}\index{dependent type}, -meaning \textsf{``}types that depend on values\textsf{''}. An example in Scala: +meaning \textsf{``}types that depend on values\textsf{''}. An example in Scala is: \begin{lstlisting} val x = new { type T = Int } val y: x.T = 123 @@ -145,8 +147,8 @@ \subsection{Functions of types and values} \lstinline!x! defined in this code, the expression \lstinline!x.T! gives the type \lstinline!Int!. -We will not consider dependent types (VTFs) in this chapter because -typeclasses only require a combination of a TTF and a TVF. +We will not consider dependent types (VTFs) in this book. 
Typeclasses +only require a combination of a TTF and a TVF. \subsection{Partial functions of types and values} @@ -158,7 +160,7 @@ \subsection{Partial functions of types and values} functions}\index{partial type-to-value function} (PTVFs), to distinguish them from partial value-to-value functions (PVVFs) we saw before. -In some situations, partial functions are safe to use. For instance, +In most situations, partial functions are unsafe to use. For instance, the following partial function \lstinline!p!: \begin{lstlisting} def p: Either[Int, String] => Int = { case Left(x) => x - 1 } @@ -200,9 +202,9 @@ \subsection{Partial functions of types and values} that the first element exists: \begin{lstlisting} val xs: NonEmptyList[Int] = ... -val h = xs.head // .head is a total function for a NonEmptyList. +val h = xs.head // _.head is a total function for a NonEmptyList. \end{lstlisting} -In these cases, we achieve safety by making types more strictly constrained. +In these cases, we achieve safety by making types more constrained. Similarly, partial type-to-value functions (PTVFs) become safe to use if we impose suitable typeclass constraints on the type parameters. Typeclasses can be viewed as a systematic way of using PTVFs safely. @@ -211,9 +213,9 @@ \section{Implementing typeclasses} A typeclass constraint \lstinline![T: Fractional]! will generate a compile-time error when a function such as \lstinline!avg[T]! is -applied to an incorrectly chosen type parameter \lstinline!T!. If -the Scala library did not already implement the \lstinline!Fractional! -typeclass, how could we reproduce that functionality? +applied to an incorrectly chosen type \lstinline!T!. If the Scala +library did not already implement the \lstinline!Fractional! typeclass, +how could we reproduce that functionality? \subsection{Creating a partial type-to-value function (PTVF)} @@ -233,8 +235,10 @@ \subsection{Creating a partial type-to-value function (PTVF)} Values of type \lstinline!Frac[A]! can be created only if \lstinline!A = BigDecimal! or \lstinline!A = Double!. The keywords \lstinline!sealed! and \lstinline!final! guarantee that no further code could extend this definition and allow -us to create a value of type, say, \lstinline!Frac[String]! or \lstinline!Frac[Boolean]!. -The Scala compiler will not detect any errors in the following code: +us to create a value of type, say, \lstinline!Frac[String]! or \lstinline!Frac[Boolean]!. + +Note that the Scala compiler will \emph{not} detect any errors in +the following code: \begin{lstlisting} type T = Frac[String] type U = Frac[Boolean] @@ -256,16 +260,17 @@ \subsection{Creating a partial type-to-value function (PTVF)} in further computations. The type \lstinline!Frac[A]! is non-void (i.e., has values) only -for $A$ belonging to the set \{\lstinline!BigDecimal!, \lstinline!Double!\} -of types. We now need to define the function \lstinline!avg[T]! with -a type parameter $T$ constrained to that set of types, which is called -the \textbf{type domain}\index{type domain} of the PTVF \lstinline!avg[T]!. +for $A$ belonging to the set of types \{\lstinline!BigDecimal!, +\lstinline!Double!\}. We need to define the function \lstinline!avg[T]! +with a type parameter $T$ constrained to that set of types, which +is called the \textbf{type domain}\index{type domain} of the PTVF +\lstinline!avg[T]!. The type constraint $T\negthickspace\in\,$\{\lstinline!BigDecimal!, \lstinline!Double!\} is equivalent to the requirement that a value -of type $\text{Frac}^{T}$ should exist. 
So, we will implement the +of type \lstinline!Frac[T]! should exist. So, we will implement the type constraint if we include an \emph{additional argument} of type -$\text{Frac}^{T}$ into the type signature of \lstinline!avg!: +\lstinline!Frac[T]! into the type signature of \lstinline!avg!: \begin{lstlisting} def avg[T](s: Seq[T], frac: Frac[T]): T \end{lstlisting} @@ -274,17 +279,17 @@ \subsection{Creating a partial type-to-value function (PTVF)} of \lstinline!avg[T]!, we will be unable to use types \lstinline!T! for which \lstinline!Frac[T]! is void (i.e., has no values). -In the Scala compiler, a typeclass instance argument such as \lstinline!frac: Frac[T]! -is called an \textbf{evidence parameter}\index{evidence value} \index{typeclass!evidence parameter} +A typeclass instance argument such as \lstinline!frac: Frac[T]! is +called an \textbf{evidence parameter}\index{evidence value} \index{typeclass!evidence parameter} because it \textsf{``}provides evidence\textsf{''} that the type \lstinline!T! belongs -to the type domain of the typeclass. A typeclass instance value is -the same as an \textsf{``}evidence value\textsf{''} in this sense. For brevity, we -will say \textsf{``}instance value\textsf{''} or just \textsf{``}instance\textsf{''}. +to the type domain of the typeclass. Such evidence values are called +\textsf{``}typeclass instance values\textsf{''} or, for brevity, just \textsf{``}typeclass +instances\textsf{''}. In this way, we implemented the typeclass constraint for the PTVF \lstinline!avg[T]!. The main steps were: \begin{enumerate} -\item Define a type constructor \lstinline!Frac[_]!. +\item Define a type constructor \lstinline!Frac!. \item Make sure values of type \lstinline!Frac[A]! exist only when \lstinline!A = BigDecimal! or \lstinline!A = Double!. \item Pass a value of type \lstinline!Frac[T]! to the function \lstinline!avg[T]! @@ -303,7 +308,8 @@ \subsection{Creating a partial type-to-value function (PTVF)} \noindent This code creates a type constructor \lstinline!Frac! and makes values of type \lstinline!Frac[T]! available for chosen type parameters \lstinline!T!. In this way, we implemented the required -type domain. +type domain. (However, this code does not prevent us from adding more +types to that type domain later.) To write the code for \lstinline!avg[T]!, we need to be able to add numeric values and to divide by an integer value. More precisely, @@ -315,8 +321,8 @@ \subsection{Creating a partial type-to-value function (PTVF)} \end{lstlisting} Since \lstinline!avg[T]! now has an additional argument \lstinline!frac!, we may use that argument to hold the required functions. So, we redefine -\lstinline!Frac! as a named tuple (case class) containing the functions -\lstinline!add! and \lstinline!intdiv!: +\lstinline!Frac! as a case class containing the functions \lstinline!add! +and \lstinline!intdiv!: \begin{lstlisting} final case class Frac[T](add: (T, T) => T, intdiv: (T, Int) => T) \end{lstlisting} @@ -328,13 +334,13 @@ \subsection{Creating a partial type-to-value function (PTVF)} \end{lstlisting} With these definitions, implementing \lstinline!avg[T]! becomes straightforward: \begin{lstlisting} -def avg[T](s: Seq[T], frac: Frac[T]): T = { // Assuming `s` is a non-empty sequence. - val sum = s.reduce(frac.add) // Here, `reduce` would fail on an empty sequence `s`. - frac.intdiv(sum, s.length) // Compute `sum/length`. +def avg[T](s: Seq[T], frac: Frac[T]): T = { // Assuming `s` is non-empty. + val sum = s.reduce(frac.add) // `s.reduce` fails if `s` is empty! 
+ frac.intdiv(sum, s.length) // Compute `sum/length`. } \end{lstlisting} -To use this function, we need to pass an instance value corresponding -to the type \lstinline!T!: +To use this function, we need to pass a typeclass instance for the +type \lstinline!T!: \begin{lstlisting} scala> avg(Seq(1.0, 2.0, 3.0), fracD) // It will be a type error to use fracBD instead of fracD here. res0: Double = 2.0 @@ -352,10 +358,10 @@ \subsection{Creating a partial type-to-value function (PTVF)} To add another supported type \lstinline!T! to the type domain, we write one more line of code similar to \lstinline!val fracD = ...! -An equivalent implementation of the \lstinline!Frac! typeclass via -a \lstinline!trait! with methods requires this code: +An equivalent implementation of the \lstinline!Frac! typeclass can +be written by using a \lstinline!trait! with methods: \begin{lstlisting} -trait Frac[T] { // The trait is not `sealed`. +trait Frac[T] { // The trait should not be `sealed`. def add(x: T, y: T): T def intdiv(x: T, n: Int): T } @@ -391,9 +397,10 @@ \subsection{Creating a partial type-to-value function (PTVF)} \subsection{Scala\textsf{'}s \texttt{implicit} values} An \textbf{implicit }\index{implicit value} declaration is a feature -of Scala that makes values automatically available to any function -that declares an \textsf{``}implicit argument\textsf{''} of the same type. Scala\textsf{'}s -syntax for implicit values is: +of Scala that makes arguments automatically available to functions +that declare \textsf{``}implicit arguments\textsf{''}. + +Scala\textsf{'}s syntax for implicit values is: \begin{lstlisting} implicit val x: Int = 123 \end{lstlisting} @@ -422,8 +429,8 @@ \subsection{Scala\textsf{'}s \texttt{implicit} values} declared as \lstinline!implicit!, we can simply write \lstinline!implicitly[T]! with no arguments to apply that function. (The type parameter usually needs to be specified.) If no implicit value of type \lstinline!T! -is available, a compile-time error will occur. If an implicit value -of type \lstinline!T! is available in the current scope, \lstinline!implicitly[T]! +is available in the current scope, a compile-time error will occur. +If an implicit value of type \lstinline!T! is available, \lstinline!implicitly[T]! will return that value: \begin{lstlisting} implicit val s: String = "qqq" @@ -433,10 +440,11 @@ \subsection{Scala\textsf{'}s \texttt{implicit} values} \end{lstlisting} It is an error to declare more than one implicit value of the same -type in the same scope, because implicit arguments are specified by -type alone. The Scala compiler will not be able to set implicit arguments -of functions automatically when the function\textsf{'}s outer scope contains -more than one implicit value of a required type, as in this code: +type in the same scope, because implicit arguments are distinguished +by type alone. The Scala compiler will not be able to set implicit +arguments of functions automatically when the function\textsf{'}s outer scope +contains more than one implicit value of the same type, as in this +code: \begin{lstlisting} implicit val x: Int = 1 implicit val y: Int = 2 @@ -449,8 +457,8 @@ \subsection{Scala\textsf{'}s \texttt{implicit} values} implicitly[Int] ^ \end{lstlisting} -But it is not an error to declare several implicit arguments of the -\emph{same} type, e.g.: +But it is not an error to declare several implicit \emph{arguments} +of the same type, e.g.: \begin{lstlisting} def f(a: String)(implicit x: MyType, y: MyType) implicit val z: MyType = ??? 
@@ -458,7 +466,7 @@ \subsection{Scala\textsf{'}s \texttt{implicit} values} f("abc") // Same as f("abc")(z, z) since z is the unique implicit value of type MyType. \end{lstlisting} In the example above, the arguments \lstinline!x! and \lstinline!y! -will be set to the same value, \lstinline!z!. A compile-time error +will be set to the same value (\lstinline!z!). A compile-time error will occur if no \lstinline!implicit! value of type \lstinline!MyType! is visible in the current scope: \begin{lstlisting} @@ -479,7 +487,7 @@ \subsection{Implementing typeclasses by making instances \texttt{implicit} } values need to be written out less often. The example with the \lstinline!Frac! typeclass is implemented using -implicit values like this: +implicit values as: \begin{lstlisting} final case class Frac[T](add: (T, T) => T, intdiv: (T, Int) => T) implicit val fracBD = Frac[BigDecimal]( (x, y) => x + y, (x, n) => x / n ) @@ -488,8 +496,8 @@ \subsection{Implementing typeclasses by making instances \texttt{implicit} } To define the function \lstinline!avg[T]!, we declare an implicit argument as a \lstinline!Frac! typeclass evidence for \lstinline!T!: \begin{lstlisting} -def avg[T](s: Seq[T])(implicit frac: Frac[T]): T = { // Assuming `s` is a non-empty sequence. - val sum = s.reduce(frac.add) // Here, `reduce` would fail on an empty sequence `s`. +def avg[T](s: Seq[T])(implicit frac: Frac[T]): T = { + val sum = s.reduce(frac.add) frac.intdiv(sum, s.length) // Compute `sum/length`. } \end{lstlisting} @@ -503,21 +511,19 @@ \subsection{Implementing typeclasses by making instances \texttt{implicit} } res1: BigDecimal = 1.5 \end{lstlisting} -Scala\textsf{'}s \textsf{``}typeclass constraint\textsf{''} syntax is equivalent to implicit -evidence arguments: the code +Scala\textsf{'}s typeclass constraint syntax, such as this code: \begin{lstlisting} def f[A: Typeclass1, B: Typeclass2](args...) \end{lstlisting} -is equivalent to the longer code: +is equivalent to this longer code: \begin{lstlisting} def f[A, B](args...)(implicit t1: Typeclass1[A], t2: Typeclass2[B]) \end{lstlisting} The shorter code omits the names (\lstinline!t1!, \lstinline!t2!) -of the evidence values. Those values can be extracted via the standard -function \lstinline!implicitly! because all \lstinline!implicit! -arguments are automatically made available as implicit values in the -scope of a function\textsf{'}s body. The code of \lstinline!avg[T]! can then -be written as: +of the evidence values. Those values can be extracted using \lstinline!implicitly[...]!, +because all \lstinline!implicit! arguments are automatically made +available as implicit values in the scope of the function\textsf{'}s body. +The code of \lstinline!avg[T]! can then be rewritten as: \begin{lstlisting} def avg[T: Frac](s: Seq[T]): T = { val frac = implicitly[Frac[T]] @@ -532,13 +538,13 @@ \subsection{Implementing typeclasses by making instances \texttt{implicit} } be made available by using an \lstinline!import! declaration. In many cases, explicit \lstinline!import! declarations can be avoided. One way to avoid them is to declare the required implicit values within -the \textbf{companion object}\index{companion object} of the typeclass -(i.e., the Scala \lstinline!object! with the same name as the type -constructor): +the \textbf{companion object}\index{companion object} of the typeclass\textsf{'}s +constructor (i.e., a Scala \lstinline!object! 
with the same name +as the type constructor representing the typeclass): \begin{lstlisting} final case class Frac[T](add: (T, T) => T, intdiv: (T, Int) => T) -object Frac { // The companion object of `Frac[T]` creates some typeclass instances as `implicit`. +object Frac { // The companion object of `Frac[T]` creates some typeclass instances as `implicit` values. implicit val fracBD = Frac[BigDecimal]( (x, y) => x + y, (x, n) => x / n ) implicit val fracD = Frac[Double]( (x, y) => x + y, (x, n) => x / n ) } @@ -546,10 +552,10 @@ \subsection{Implementing typeclasses by making instances \texttt{implicit} } Whenever a function needs an implicit value of type \lstinline!Frac[T]! for a specific type \lstinline!T!, the Scala compiler will automatically look within the companion object of \lstinline!Frac! (as well as -within the companion object of the type \lstinline!T!) for any instances -declared there. So, the programmer\textsf{'}s code will not need to \lstinline!import! -those typeclass instances explicitly even if the companion object -is in a different module: +within the companion object of the given type \lstinline!T!) for +any instances declared there. So, the programmer\textsf{'}s code will not need +to \lstinline!import! those typeclass instances explicitly even if +the companion object is in a different module: \begin{lstlisting} scala> avg(Seq(1.0, 2.0, 3.0)) res0: Double = 2.0 @@ -560,13 +566,12 @@ \subsection{Extension methods} In Scala, function applications can use three kinds of syntax: \begin{enumerate} -\item The \textsf{``}function\textsf{''} syntax: arguments are to the right of the function -as in \lstinline!plus(x, y)! or \lstinline!plus(x)(y)!. +\item The \textsf{``}function\textsf{''} syntax: arguments are to the right of the function. +Examples are: \lstinline!plus(x, y)! and \lstinline!plus(x)(y)!. \item The \textsf{``}method\textsf{''} syntax: the first argument is to the left of the function, and all other arguments (if any) are placed to the right of the function, as in \lstinline!x.plus(y)! or \lstinline!xs.foldLeft(0)(updater)!. -\item The \textsf{``}infix method\textsf{''} syntax (only applies to functions with two -\emph{explicit} arguments): no dot character is used. For example, +\item The \textsf{``}infix method\textsf{''} syntax: no dot character is used. For example, \lstinline!x plus y!, or \lstinline!xs map {x => x + 1}!, or \lstinline!Set(1, 2, 3) contains 1!. \end{enumerate} The last two syntax features are often used when writing chains of @@ -685,11 +690,12 @@ \subsubsection{Example \label{subsec:tc-Example-metadata-extractors}\ref{subsec: \subparagraph{Solution} We will implement a typeclass \lstinline!HasMetadata! and declare -instances only for \lstinline!Data1!, \lstinline!Data2!, and \lstinline!Data3!. -The code for extracting the metadata will be contained within the -typeclass instances. Since the metadata extractors have types \lstinline!T => String! -and \lstinline!T => Long!, a simple solution is to define the typeclass -as a \lstinline!case class! containing these two functions: +instances for the types \lstinline!Data1!, \lstinline!Data2!, and +\lstinline!Data3!. The code for extracting the metadata will be contained +within the typeclass instances. Since the metadata extractors have +types \lstinline!T => String! and \lstinline!T => Long!, a simple +solution is to define the typeclass as a \lstinline!case class! 
containing +these two functions: \begin{lstlisting} final case class HasMetadata[T](getName: T => String, getCount: T => Long) \end{lstlisting} @@ -719,7 +725,7 @@ \subsubsection{Example \label{subsec:tc-Example-metadata-extractors}\ref{subsec: This code defines PTVFs \lstinline!getName! and \lstinline!getCount! with the type domain that contains the three types \lstinline!Data1!, \lstinline!Data2!, \lstinline!Data3!. In order to add a new type, -say \lstinline!Data4!, to the type domain, we will need to declare +say \lstinline!Data4!, to the type domain, we will need to create a new typeclass instance as an implicit value of type \lstinline!HasMetadata[Data4]!. New implicit values can be defined anywhere in the code, not necessarily within the companion object \lstinline!HasMetadata!. To avoid extra @@ -774,7 +780,7 @@ \subsubsection{Example \label{subsec:tc-Example-observability}\ref{subsec:tc-Exa to be one of the supported types of counters. The type signature and sample code: \begin{lstlisting} -def bump[C]()(...) = ??? +def bump[C](...)(implicit ...) = ??? val counter = Counter(...) val testCounter = TestCounter(...) @@ -878,10 +884,10 @@ \subsubsection{Example \label{subsec:tc-Example-Pointed-type}\ref{subsec:tc-Exam implicit def defaultFunc[A]: HasDefault[A => A] = HasDefault[A => A](identity) \end{lstlisting} -Types that have default values are also called \textbf{pointed}\index{pointed type}\index{types!pointed} -types. This book defines the typeclass \lstinline!Pointed! for pointed -\emph{functors} (Section~\ref{subsec:Pointed-functors-motivation-equivalence}) -rather than for pointed types. +Types with default values are also called \textbf{pointed}\index{pointed type}\index{types!pointed} +types. But this book defines the typeclass \lstinline!Pointed! for +pointed \emph{functors} (Section~\ref{subsec:Pointed-functors-motivation-equivalence}), +not for pointed types. \subsubsection{Example \label{subsec:tc-Example-Semigroups}\ref{subsec:tc-Example-Semigroups} (semigroups)} @@ -904,7 +910,8 @@ \subsubsection{Example \label{subsec:tc-Example-Semigroups}\ref{subsec:tc-Exampl A type \lstinline!T! with an associative binary operation is called a \textbf{semigroup}\index{semigroup}\index{typeclass!Semigroup@\texttt{Semigroup}}. The task in this example is to define the semigroup operation for -the types \lstinline!Int!, \lstinline!String!, and \lstinline!List[A]!. +the types \lstinline!Int!, \lstinline!String!, and \lstinline!List[A]! +by using a typeclass. \subparagraph{Solution} @@ -915,7 +922,7 @@ \subsubsection{Example \label{subsec:tc-Example-Semigroups}\ref{subsec:tc-Exampl final case class Semigroup[T](combine: (T, T) => T) \end{lstlisting} The typeclass instances for the supported types are defined using -a short syntax as: +a short syntax: \begin{lstlisting} object Semigroup { implicit val semigroupInt = Semigroup[Int](_ + _) @@ -1037,7 +1044,8 @@ \subsubsection{Example \label{subsec:tc-Example-semigroup-alternative-implementa implicit def semigroup1[T] = Semigroup[T]{ (x, y) => x } \end{lstlisting} Similarly, the definition $x\oplus y\triangleq y$ gives an associative -binary operation for a (different) semigroup. +binary operation for a (different) semigroup, based on the same type +\lstinline!T!. 
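+
+For instance, here is a quick sanity check (a minimal sketch that assumes
+the case class \lstinline!Semigroup! and the instance \lstinline!semigroup1!
+shown above; the name \lstinline!semigroup2! is chosen only for this illustration).
+Part~(b) below gives the symbolic proof of associativity.
+\begin{lstlisting}
+// The other trivial operation, combine(x, y) == y, as a separate non-implicit value.
+def semigroup2[T] = Semigroup[T]{ (x, y) => y }
+
+val s1 = semigroup1[Int] // The operation that keeps the left operand.
+
+scala> s1.combine(s1.combine(10, 20), 30) == s1.combine(10, s1.combine(20, 30)) // Both sides equal 10.
+res0: Boolean = true
+\end{lstlisting}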
\textbf{(b)} To verify the associativity law: \begin{align*} @@ -1152,8 +1160,8 @@ \subsubsection{Example \label{subsec:tc-Example-Monoids-1}\ref{subsec:tc-Example def monoidOf[T: Semigroup : HasDefault]: Monoid[T] = Monoid(implicitly[Semigroup[T]].combine, implicitly[HasDefault[T]].value) \end{lstlisting} -We can also define this function as an \lstinline!implicit!, so that -every type \lstinline!T! with a \lstinline!Semigroup! and \lstinline!HasDefault! +If this function is defined as \lstinline!implicit def!, every type +\lstinline!T! with a \lstinline!Semigroup! and \lstinline!HasDefault! instances will automatically receive a \lstinline!Monoid! typeclass instance as well. @@ -1185,28 +1193,6 @@ \subsubsection{Example \label{subsec:tc-Example-Monoids-1}\ref{subsec:tc-Example instances will not work, and a different \lstinline!Monoid! instance must be defined. -Are there alternative implementations of the \lstinline!Monoid! typeclass -instance given \lstinline!Semigroup! and \lstinline!HasDefault! -instances? The function \lstinline!monoidOf! needs to produce a value -of type $\left(T\times T\rightarrow T\right)\times T$ given values -of type $T\times T\rightarrow T$ and a value of type $T$: -\[ -\text{monoidOf}:\left(T\times T\rightarrow T\right)\times T\rightarrow\left(T\times T\rightarrow T\right)\times T\quad. -\] -When the type signature of \lstinline!monoidOf! is written in this -notation, it is clear that \lstinline!monoidOf! should be the identity -function; indeed, that is what our code translates to. Although there -are many other implementations of the same type signature, only the -code shown above will satisfy the monoid laws. An example of an unlawful -implementation is: -\begin{lstlisting} -def badMonoidOf[T](implicit ti1: Semigroup[T], ti2: HasDefault[T]): Monoid[T] = - Monoid((x, y) => ti1.combine(x, ti1.combine(x, y)), ti2.value) -\end{lstlisting} -This implementation defines the monoid operation as $x\oplus x\oplus y$ -instead of the correct definition $x\oplus y$. If we set $y=e_{T}$, -we will get $x\oplus x$ instead of $x$, violating one of the identity -laws. \subsection{Typeclasses for type constructors\label{subsec:Typeclasses-for-type-constructors}} @@ -1216,9 +1202,8 @@ \subsection{Typeclasses for type constructors\label{subsec:Typeclasses-for-type- \begin{lstlisting} def inject[F[_]: Functor, A, B](a: A, f: F[B]): F[(A, B)] = ??? \end{lstlisting} -The \lstinline!Functor! typeclass implementing this constraint will -use the syntax \lstinline!Functor[F[_]]! because the type parameter -\lstinline!F! is itself a type constructor. +The syntax \lstinline!F[_]! indicates that the type parameter \lstinline!F! +is itself a type constructor. What information needs to be wrapped by a typeclass instance? A functor \lstinline!F! must have a \lstinline!map! function with the standard @@ -1237,18 +1222,18 @@ \subsection{Typeclasses for type constructors\label{subsec:Typeclasses-for-type- final case class Functor[F[_]](map: $\forall(A,B).\,$F[A] => (A => B) => F[B]) // Not possible in Scala 2. \end{lstlisting} Scala 3 directly supports an argument type that \emph{itself} contains -type quantifiers such as $\forall(A,B)$. In Scala 2, we need to represent -such \textsf{``}nested\textsf{''} type quantifiers by writing a \lstinline!trait! -with a \lstinline!def! method:\index{typeclass!Functor@\texttt{Functor}} +type quantifiers such as $\forall(A,B)$. In Scala 2, we have to replace +nested type quantifiers by a \lstinline!trait! with a \lstinline!def! 
+method:\index{typeclass!Functor@\texttt{Functor}} \begin{lstlisting} trait Functor[F[_]] { def map[A, B](fa: F[A])(f: A => B): F[B] } \end{lstlisting} The type constructor \lstinline!Functor! has the type parameter \lstinline!F[_]!, -which must be itself a type constructor. For any type constructor -\lstinline!F!, a value of type \lstinline!Functor[F]! is a wrapper -for a value of type $\forall(A,B).\,F^{A}\rightarrow\left(A\rightarrow B\right)\rightarrow F^{B}$. +which is a type constructor. For any type constructor \lstinline!F!, +a value of type \lstinline!Functor[F]! is a wrapper for a value of +type $\forall(A,B).\,F^{A}\rightarrow\left(A\rightarrow B\right)\rightarrow F^{B}$. Values of type \lstinline!Functor! (i.e., typeclass instances) are implemented with the \textsf{``}\lstinline!new { ... }!\textsf{''} syntax: \begin{lstlisting} @@ -1274,19 +1259,19 @@ \subsection{Typeclasses for type constructors\label{subsec:Typeclasses-for-type- def inject[F[_]: Functor, A, B](a: A, f: F[B]): F[(A, B)] = f.map { b => (a, b) } -scala> inject("abc", Seq(1, 2, 3)) // An implicit Functor[Seq] must be in scope. +scala> inject("abc", Seq(1, 2, 3)) // Need an implicit Functor[Seq] here. res0: Seq[(String, Int)] = List(("abc", 1), ("abc", 2), ("abc", 3)) \end{lstlisting} - Just like the \lstinline!Monoid! typeclass, the code of the \lstinline!Functor! + Similarly to the \lstinline!Monoid! typeclass, the code of the \lstinline!Functor! typeclass does not enforce the functor laws on the implementation. -It is the programmer responsibility to verify that the laws hold. +It is the programmer\textsf{'}s responsibility to verify that the laws hold. One way of checking the laws is to use the \texttt{scalacheck} library\index{scalacheck library@\texttt{scalacheck} library}\index{verifying laws with scalacheck@verifying laws with \texttt{scalacheck}}\footnote{\texttt{\href{https://www.scalacheck.org}{https://www.scalacheck.org}}} that automatically runs random tests for the given assertions, trying to discover a set of values for which some assertion fails. Using the \lstinline!Functor! typeclass constraint, we can implement a function (in our terminology, a PTVF) that checks the functor laws -for \emph{any} given type constructor \lstinline!F[_]!: +for \emph{any} given type constructor \lstinline!F!: \begin{lstlisting} import org.scalacheck.Arbitrary // Necessary imports and definitions. import org.scalatest.prop.GeneratorDrivenPropertyChecks @@ -1304,12 +1289,12 @@ \subsection{Typeclasses for type constructors\label{subsec:Typeclasses-for-type- \end{lstlisting} The \texttt{scalacheck} library will substitute a large number of -random values into the given assertions, Note that the laws are being -tested only with a finite number of values and with type parameters -set to specific types. While it is useful to test laws with \texttt{scalacheck} +random values into the given assertions. But the laws will be tested +only with a finite number of values and with type parameters set to +specific types. While it is useful to test laws with \texttt{scalacheck} (we might find a bug), only a symbolic derivation provides a rigorous proof that the laws hold. One of the main themes of this book is to -show how to perform symbolic derivations efficiently. +show how to perform such symbolic derivations. 
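+
+Before moving on, here is a hand-written spot-check (a minimal sketch that
+assumes the \lstinline!Functor! trait defined above; the pair functor and
+its instance are chosen only for this illustration). We define a \lstinline!Functor!
+instance for the pair functor and test the identity and composition laws
+on one sample value:
+\begin{lstlisting}
+type Pair[A] = (A, A) // The pair functor.
+implicit val functorPair: Functor[Pair] = new Functor[Pair] {
+  def map[A, B](fa: Pair[A])(f: A => B): Pair[B] = (f(fa._1), f(fa._2))
+}
+
+val p: Pair[Int] = (1, 2)
+
+scala> functorPair.map(p)(x => x) == p // The identity law holds for this value.
+res0: Boolean = true
+
+scala> functorPair.map(functorPair.map(p)(_ + 1))(_ * 2) == functorPair.map(p)(x => (x + 1) * 2)
+res1: Boolean = true
+\end{lstlisting}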
\section{Deriving typeclass instances via structural analysis} @@ -1324,8 +1309,8 @@ \section{Deriving typeclass instances via structural analysis} \item Quickly decide whether a given type can have a typeclass instance of \lstinline!Monoid!, \lstinline!Semigroup!, etc. \item If so, derive the code for the new typeclass instance without guessing. -\item Have assurance that the required typeclass laws will hold for newly -constructed instances. +\item Assure that the required typeclass laws will hold for newly constructed +instances. \end{itemize} In the following sections, we will show how to use this approach for some simple typeclasses. @@ -1339,7 +1324,7 @@ \subsection{Extractors} \begin{lstlisting} final case class HasMetadata[T](getName: T => String, getCount: T => Long) \end{lstlisting} -In the type notation, this type constructor is written as +In the type notation, this type constructor is written as: \[ \text{HasMetadata}^{T}\triangleq(T\rightarrow\text{String})\times(T\rightarrow\text{Long})\quad. \] @@ -1350,7 +1335,7 @@ \subsection{Extractors} We may call this typeclass a \textsf{``}$Z$-extractor\index{extractor typeclass}\index{typeclass!Extractor@\texttt{Extractor}}\textsf{''} since types $T$ from its type domain permit us somehow to extract values of type $Z$. With a fixed type $Z$, we denote the typeclass -by +by: \[ \text{Extractor}^{T}\triangleq T\rightarrow Z\quad. \] @@ -1447,7 +1432,7 @@ \subsection{Extractors} instances, we conclude that any polynomial type expression has an \lstinline!Extractor! instance as long as every product type contains at least one $Z$ or another \lstinline!Extractor! type. For example, -the type expression +the type expression: \[ A\times Z+Z\times(P+Z\times Q)+B\times C\times Z \] @@ -1586,8 +1571,8 @@ \subsection{Extractors} \[ f^{:T\rightarrow Z}\rightarrow c^{:C}\times g^{:C\rightarrow Z+P\times T}\rightarrow g(c)\triangleright\text{???}^{:Z+P\times T\rightarrow Z}\quad. \] -The new typed hole has a function type. We can write the code in matrix -notation as: +The new typed hole has a function type and is filled using $f^{:T\rightarrow Z}$. +Then: \[ \text{extractorS}\triangleq f^{:T\rightarrow Z}\rightarrow c^{:C}\times g^{:C\rightarrow Z+P\times T}\rightarrow g(c)\triangleright\,\begin{array}{|c||c|} & Z\\ @@ -1619,7 +1604,7 @@ \subsection{Extractors} \begin{lstlisting} val t = TypeT((10, x => Right((true, TypeT((x * 2, y => Left("abc"))))))) -scala> extractorT.extract(t) // The recursive definition of `extractorT` terminates. +scala> extractorT.extract(t) // The recursion in extractorT terminates. res0: String = abc \end{lstlisting} @@ -1633,8 +1618,8 @@ \subsection{Extractors} def f(i: Int): Int = i + 1 def x: Int = f(x) -scala> x // Infinite loop: f(f(f(f(...))) -java.lang.StackOverflowError +scala> x // Infinite loop: f(f(f(f(...))) +java.lang.StackOverflowError: ... \end{lstlisting} The code for \lstinline!extractorT! works because \lstinline!extractorT! is a value of a \emph{function} type, and because the presence of @@ -1660,10 +1645,10 @@ \subsection{Extractors} def f: Int => Int = k(f) // This definition is invalid! scala> f // Infinite loop: k(k(k(k(...))) -java.lang.StackOverflowError +java.lang.StackOverflowError: ... scala> f(4) // Infinite loop: k(k(k(k(...)))(4) -java.lang.StackOverflowError +java.lang.StackOverflowError: ... \end{lstlisting} This code is clearly invalid. 
But if we expand the right-hand side of the recursive equation to: @@ -1675,10 +1660,10 @@ \subsection{Extractors} \begin{lstlisting}[mathescape=true] def f: Int => Int = { x => k(f)(x) } // This defines $\color{dkgreen} f(n) = 2^n $ for $\color{dkgreen}n \geq 0$. -scala> f // We can compute f without an infinite loop. +scala> f // We can compute f without an infinite loop. res0: Int => Int = -scala> f(4) // We can compute f(4) without an infinite loop. +scala> f(4) // We can compute f(4) without an infinite loop. res1: Int = 16 \end{lstlisting} The recursive use of $f$ now occurs \emph{within} a function body, @@ -1786,7 +1771,7 @@ \subsection{Equality comparison: The \texttt{Eq} typeclass\label{subsec:The-Eq-t Let us perform structural analysis for the \lstinline!Eq! typeclass, defining $\text{Eq}^{A}\triangleq A\times A\rightarrow\bbnum 2$. -The results (see Table~\ref{tab:Type-constructions-for-Eq} below) +The results (see Table~\ref{tab:Type-constructions-for-Eq} on page~\pageref{tab:Type-constructions-for-Eq}) will show which types can have a function that compares values for equality. @@ -1807,8 +1792,8 @@ \subsection{Equality comparison: The \texttt{Eq} typeclass\label{subsec:The-Eq-t for comparisons of $A$ and $B$ separately. It is important that the code for this construction assumes that both -types $A$ and $B$ have lawful \lstinline!Eq! instances and performs -the comparisons \lstinline!a1 === a2! and \lstinline!b1 === b2!. +types $A$ and $B$ have lawful \lstinline!Eq! instances, which are +used to perform the comparisons \lstinline!a1 === a2! and \lstinline!b1 === b2!. If the above code performed only, say, the comparison \lstinline!a1 === a2!, the resulting comparison operation would have violated the \index{identity laws!of equality}\textbf{identity law} of the equality operation (if $x=y$ then $f(x)=f(y)$ for any @@ -1874,8 +1859,8 @@ \subsection{Equality comparison: The \texttt{Eq} typeclass\label{subsec:The-Eq-t def equals[R, A: Eq](f: R => A, g: R => A): Boolean = f(r1) === g(r1) // This violates the identity law. \end{lstlisting} The above code defines a comparison operation that violates the identity -law: there are many functions \lstinline!f! and \lstinline!g! that -will give different results for arguments not equal to $r_{1}$. +law. There are many unequal functions \lstinline!f! and \lstinline!g! +that will give the same results when applied to $r_{1}$. Another way to see the problem is to write the type equivalence: \[ @@ -1886,7 +1871,7 @@ \subsection{Equality comparison: The \texttt{Eq} typeclass\label{subsec:The-Eq-t to define the equality operation for $R\rightarrow A$. If we have a chosen value of type $S$, e.g., if $S\cong\bbnum 1+T$, we will again reduce the situation to the product construction with the function -type $T\rightarrow A$. This process will end only if the type $R$ +type $T\rightarrow A$. This procedure will end only if the type $R$ has the form: \[ R\cong\bbnum 1+\bbnum 1+...+\bbnum 1\quad,\quad\quad R\rightarrow A\cong\bbnum 1+\bbnum 1+...+\bbnum 1\rightarrow A\cong A\times A\times...\times A\quad, @@ -1913,11 +1898,11 @@ \subsection{Equality comparison: The \texttt{Eq} typeclass\label{subsec:The-Eq-t However, this does not mean that equality between functions cannot be decided at all. \index{equality between functions}By definition, two functions (say, $f^{:A\rightarrow B}$ and $g^{:A\rightarrow B}$) -are equal ($f=g$) if for any $x^{:A}$ we have $f(x)=g(x)$. Functions -have no \lstinline!Eq! 
typeclass instances, so we cannot write code -that checks at run time whether $f=g$. As a rule, we need to use -\emph{\index{symbolic calculations@\emph{symbolic calculations}}symbolic -calculations} if we want to prove equality between functions. +are equal ($f=g$) if for any $x^{:A}$ we have $f(x)=g(x)$. As a +rule, we need to use \index{symbolic calculations}\emph{symbolic +calculations} if we want to prove equality between functions. We cannot +write code that tests at run time whether $f=g$, because functions +have no \lstinline!Eq! typeclass instances. \paragraph{Recursive types} @@ -1927,21 +1912,21 @@ \subsection{Equality comparison: The \texttt{Eq} typeclass\label{subsec:The-Eq-t Let us prove this rigorously. Consider a recursive polynomial type $T$ defined using a polynomial -functor $S^{\bullet}$: +functor $S$: \[ T\triangleq S^{T}\quad. \] -The functor $S^{\bullet}$ may use other fixed types that have \lstinline!Eq! +The functor $S$ may use other fixed types that have \lstinline!Eq! instances. To construct the typeclass instance for $T$, we first -implement a function \lstinline!eqS! of type: +implement a function \lstinline!eqS[A]! of type: \[ -\text{eqS}:\forall A.\,\text{Eq}^{A}\rightarrow\text{Eq}^{S^{A}}\quad. +\text{eqS}^{A}:\text{Eq}^{A}\rightarrow\text{Eq}^{S^{A}}\quad. \] This function produces an \lstinline!Eq! instance for $S^{A}$ using -\lstinline!Eq! instances of $A$ and of all other types that $S^{A}$ -depends on. The product and co-product constructions guarantee that -it is always possible to implement this function for a polynomial -functor $S^{\bullet}$. Then we define an \lstinline!Eq! instance +\lstinline!Eq! instances of $A$ and (possibly) of some other types +that $S^{A}$ depends on. The product and co-product constructions +guarantee that it is always possible to implement this function for +a polynomial functor $S$. Then we define an \lstinline!Eq! instance for $T$ recursively: \[ \text{eqT}:\text{Eq}^{T}\quad,\quad\quad\text{eqT}\triangleq\text{eqS}\,(\text{eqT})\quad. @@ -1963,14 +1948,14 @@ \subsection{Equality comparison: The \texttt{Eq} typeclass\label{subsec:The-Eq-t implicit val e1 = eqEither[Int, A] // Instance for $\color{dkgreen} \text{Int}+A $ implicit val e2 = eqPair[A, A] // Instance for $\color{dkgreen} A\times A $. implicit val e3 = eqPair[Int, (A, A)] // Instance for $\color{dkgreen} \text{Int}\times A\times A $. - eqEither[Either[Int, A], (Int, (A, A))] // Instance for $\color{dkgreen} \text{Int}+A+\text{Int}\times A\times A $. + eqEither[Either[Int, A], (Int, (A, A))] // For $\color{dkgreen} \text{Int}+A+\text{Int}\times A\times A $. } implicit def eqT: Eq[T] = Eq { case (T(s1), T(s2)) => eqS(eqT).equal(s1, s2) } \end{lstlisting} To test that the recursion terminates, define a value of type $T$ and call \lstinline!===!: \begin{lstlisting}[mathescape=true] -val t = T(Left(Right(T(Left(Left(10)))))) // $\color{dkgreen}t: \bbnum 0+(10^{\scriptscriptstyle:\text{Int}}+\bbnum 0+\bbnum 0)^{\scriptscriptstyle:T}+\bbnum 0$. +val t = T(Left(Right(T(Left(Left(10)))))) // $\color{dkgreen}t: \bbnum 0+(10^{\scriptscriptstyle:\text{Int}}+\bbnum 0+\bbnum 0)^{\scriptscriptstyle:T}+\bbnum 0$ scala> t === t res0: Boolean = true @@ -2050,7 +2035,8 @@ \subsection{Semigroups\label{subsec:Semigroups-constructions}} A type $T$ has an instance of \lstinline!Semigroup! when an associative binary operation of type $T\times T\rightarrow T$ is available. We will now apply structural analysis to this typeclass. 
The results -are shown in Table~\ref{tab:Type-constructions-for-semigroup}. +are shown in Table~\ref{tab:Type-constructions-for-semigroup} on +page~\pageref{tab:Type-constructions-for-semigroup}. \paragraph{Fixed types} @@ -2081,13 +2067,13 @@ \subsection{Semigroups\label{subsec:Semigroups-constructions}} If types $A$ and $B$ are semigroups, the product $A\times B$ also has a \lstinline!Semigroup! instance. That instance is computed by -a function we call \lstinline!semigroupPair!: +a function we call \lstinline!semigroupPair[A, B]!: \[ -\text{semigroupPair}:\forall(A,B).\,\text{Semigroup}^{A}\times\text{Semigroup}^{B}\rightarrow\text{Semigroup}^{A\times B}\quad. +\text{semigroupPair}^{A,B}:\text{Semigroup}^{A}\times\text{Semigroup}^{B}\rightarrow\text{Semigroup}^{A\times B}\quad. \] Writing out the type expressions, we get the type signature: \[ -\text{semigroupPair}:\forall(A,B).\,\left(A\times A\rightarrow A\right)\times\left(B\times B\rightarrow B\right)\rightarrow\left(A\times B\times A\times B\rightarrow A\times B\right)\quad. +\text{semigroupPair}^{A,B}:\left(A\times A\rightarrow A\right)\times\left(B\times B\rightarrow B\right)\rightarrow\left(A\times B\times A\times B\rightarrow A\times B\right)\quad. \] While this type signature can be implemented in a number of ways, we look for code that preserves information, in hopes of satisfying @@ -2366,7 +2352,7 @@ \subsection{Semigroups\label{subsec:Semigroups-constructions}} \textbf{\footnotesize{}Construction} & \textbf{\footnotesize{}Type signature to implement}\tabularnewline \hline \hline -{\footnotesize{}The }\lstinline!Unit!{\footnotesize{} type, or other +{\footnotesize{}The }\lstinline!Unit!{\footnotesize{} type or other fixed type $C$} & {\footnotesize{}$\text{Semigroup}^{C}$}\tabularnewline \hline {\footnotesize{}Product of semigroups $A$ and $B$} & {\footnotesize{}$\text{Semigroup}^{A}\times\text{Semigroup}^{B}\rightarrow\text{Semigroup}^{A\times B}$}\tabularnewline @@ -2394,7 +2380,8 @@ \subsection{Monoids\label{subsec:Monoids-constructions}} For the binary operation $A\times A\rightarrow A$, we can re-use the results of structural analysis for semigroups. Additionally, we will need to verify that the default value satisfies monoid\textsf{'}s identity -laws. The results are shown in Table~\ref{tab:Type-constructions-for-monoid}. +laws. The results are shown in Table~\ref{tab:Type-constructions-for-monoid} +on page~\pageref{tab:Type-constructions-for-monoid}. \paragraph{Fixed types} @@ -2408,7 +2395,7 @@ \subsection{Monoids\label{subsec:Monoids-constructions}} This construction works for semigroups but \emph{not} for monoids: the \textsf{``}trivial\textsf{''} semigroup operations $x\oplus y=x$ and $x\oplus y=y$ -are not compatible with monoid\textsf{'}s identity laws. (e.g., with the definition +are not compatible with monoid\textsf{'}s identity laws. (E.g., with the definition $x\oplus y=x$, no default value $e$ could possibly satisfy the left identity law $e\oplus y=y$ because $e\oplus y=e$ for all $y$). @@ -2442,7 +2429,7 @@ \subsection{Monoids\label{subsec:Monoids-constructions}} $S\times P$ where $S$ is a semigroup that has an \textbf{action on} $P$. The \textsf{``}action\textsf{''} is a function $\alpha:S\rightarrow P\rightarrow P$ such that $\alpha(s_{1})\bef\alpha(s_{2})=\alpha(s_{1}\oplus s_{2})$. -The operation is defined as +The operation is defined as: \[ (s_{1}\times p_{1})\oplus(s_{2}\times p_{2})=(s_{1}\oplus_{S}s_{2})\times\alpha(s_{2})(p_{1})\quad. 
\] @@ -2450,7 +2437,7 @@ \subsection{Monoids\label{subsec:Monoids-constructions}} and $\bbnum 2\times\left(\bbnum 1+A\right)$. This cannot be made into a monoid since the information about $p_{2}$ is discarded. The associativity law holds because the first part of the pair is associative -by assumption of semigroup $S$, and the second part of the pair is +by assumption of semigroup $S$, and the second part of the pair is: \[ \alpha(s_{2}\oplus_{S}s_{3})(p_{1})=\alpha(s_{3})(\alpha(s_{2})(p_{1}))\quad. \] @@ -2515,24 +2502,24 @@ \subsection{Monoids\label{subsec:Monoids-constructions}} The monoidal operation $\oplus$ is the forward function composition $f\bef g$, so the monoid laws for this operation are the identity and the associativity laws of function composition (see Section~\ref{subsec:Laws-of-function-composition}). -We could also use the backward function composition ($f\circ g$) -to define $f\oplus g$. +Alternatively, we could use the \emph{backward} function composition +($f\circ g$) to define a (different) operation $f\oplus g$. \paragraph{Recursive types} Can we define a \lstinline!Monoid! instance for a type $T$ defined -by $T\triangleq S^{T}$, where $S^{\bullet}$ is some type constructor? -As we have seen, products, co-products, and function type constructions +by $T\triangleq S^{T}$, where $S$ is some type constructor? As we +have seen, products, co-products, and function type constructions preserve monoids. For any type built up via these constructions from monoids, a \lstinline!Monoid! instance can be derived. These constructions cover all exponential-polynomial types. So, let us consider an exponential-polynomial -type constructor $S^{A}$ that contains a type parameter $A$, primitive -types, and other known monoid types. For such type constructors $S^{\bullet}$, +type expression $S^{A}$ that contains a type parameter $A$, primitive +types, and other known monoid types. For such type constructors $S$, we will always be able to implement a function \lstinline!monoidS! that derives a \lstinline!Monoid! instance for $S^{A}$ from a monoid instance for $A$: \[ -\text{monoidS}:\text{Monoid}^{A}\rightarrow\text{Monoid}^{S^{A}}\quad. +\text{monoidS}^{A}:\text{Monoid}^{A}\rightarrow\text{Monoid}^{S^{A}}\quad. \] A monoid instance for $T$ is then defined recursively by: \[ @@ -2556,8 +2543,8 @@ \subsection{Monoids\label{subsec:Monoids-constructions}} \lstinline!monoidEitherPreferB!, and \lstinline!monoidFunc!), we can now implement the recursive construction. -To illustrate how that works, consider the exponential-polynomial -type constructor $S^{\bullet}$ defined as: +To illustrate how that works, consider the following recursion scheme +$S$: \[ S^{A}\triangleq\left(\text{Int}+A\right)\times\text{Int}+\text{String}\times\left(A\rightarrow\left(A\rightarrow\text{Int}\right)\rightarrow A\right)\quad. \] @@ -2565,10 +2552,9 @@ \subsection{Monoids\label{subsec:Monoids-constructions}} type S[A] = Either[(Either[Int, A], Int), (String, A => (A => Int) => A)] \end{lstlisting} -It is clear that $S^{A}$ is built up from type constructions that -preserve monoids at each step. So, we expect that the recursive type -$T\triangleq S^{T}$ is a monoid. We first implement the function -\lstinline!monoidS!: +It is clear that $S$ is built up from type constructions that preserve +monoids at each step. So, we expect that the recursive type $T\triangleq S^{T}$ +is a monoid. 
We first implement the function \lstinline!monoidS!: \begin{lstlisting} def monoidS[A](implicit ti: Monoid[A]): Monoid[S[A]] = { implicit val m0 = monoidEitherPreferB[Int, A] @@ -2691,7 +2677,7 @@ \subsection{Monoids\label{subsec:Monoids-constructions}} $P$ and $Q$ are monoids and additionally $P$ has an \textbf{action on} $Q$. The \textsf{``}action\textsf{''} is a function $\alpha:P\rightarrow Q\rightarrow Q$ obeying the special composition law, $\alpha(p_{1})\bef\alpha(p_{2})=\alpha(p_{1}\oplus p_{2})$. -The monoidal operation is defined by +The monoidal operation is defined by: \[ (p_{1}\times q_{1})\oplus(p_{2}\times q_{2})=(p_{1}\oplus p_{2})\times(\alpha(p_{2})(q_{1})\oplus q_{2})\quad. \] @@ -2707,8 +2693,8 @@ \subsection{Monoids\label{subsec:Monoids-constructions}} \textbf{\footnotesize{}Construction} & \textbf{\footnotesize{}Type signature to implement}\tabularnewline \hline \hline -{\footnotesize{}The }\lstinline!Unit!{\footnotesize{} type, or primitive -types} & {\footnotesize{}$\text{Monoid}^{\bbnum 1}$, $\text{Monoid}^{\text{Int}}$, +{\footnotesize{}The }\lstinline!Unit!{\footnotesize{} type or other +primitive types} & {\footnotesize{}$\text{Monoid}^{\bbnum 1}$, $\text{Monoid}^{\text{Int}}$, etc.}\tabularnewline \hline {\footnotesize{}Product of monoids $A$ and $B$} & {\footnotesize{}$\text{Monoid}^{A}\times\text{Monoid}^{B}\rightarrow\text{Monoid}^{A\times B}$}\tabularnewline @@ -2755,10 +2741,10 @@ \subsection{Pointed functors: motivation and laws\label{subsec:Pointed-functors- of these type constructors has a method that \textsf{``}wraps\textsf{''} a given single value: \begin{lstlisting} -val x: Option[Int] = Some(10) // A non-empty option that holds a value. -val y: List[String] = List("abc") // A list that holds a single value. -val z: Try[Int] = Success(200) // A value computed without errors. -val f: Future[String] = Future.successful("OK") // A `Future` value that is already available. +val x: Option[Int] = Some(10) // A non-empty option that holds a value. +val y: List[String] = List("abc") // A list that holds a single value. +val z: Try[Int] = Success(200) // A value computed without errors. +val f: Future[String] = Future.successful("OK") // A ready `Future` value. \end{lstlisting} As we can see, \textsf{``}wrapping a single value\textsf{''} means a different thing for each of the type constructors. Although the relevant methods of @@ -2769,19 +2755,18 @@ \subsection{Pointed functors: motivation and laws\label{subsec:Pointed-functors- typeclass\index{typeclass!Pointed@\texttt{Pointed}} \lstinline!Pointed! via this code: \begin{lstlisting} -trait PointedF[F[_]] { def pure[A]: A => F[A] } +trait Pointed[F[_]] { def pure[A]: A => F[A] } \end{lstlisting} Now we can implement instances of the \lstinline!Pointed! typeclass for some functors: \begin{lstlisting} -implicit val pointedOption = new PointedF[Option] { def pure[A]: A => Option[A] = x => Some(x) } -implicit val pointedList = new PointedF[List] { def pure[A]: A => List[A] = x => List(x) } -implicit val pointedTry = new PointedF[Try] { def pure[A]: A => Try[A] = x => Success(x) } +implicit val pointedOption = new Pointed[Option] { def pure[A]: A => Option[A] = x => Some(x) } +implicit val pointedList = new Pointed[List] { def pure[A]: A => List[A] = x => List(x) } +implicit val pointedTry = new Pointed[Try] { def pure[A]: A => Try[A] = x => Success(x) } \end{lstlisting} The PTVF \lstinline!pure! 
can be defined and used like this: \begin{lstlisting} -def pure[F[_]: PointedF, A](x: A): F[A] = - implicitly[PointedF[F]].pure(x) +def pure[F[_]: Pointed, A](x: A): F[A] = implicitly[Pointed[F]].pure(x) scala> pure[Option, Int](123) res0: Option[Int] = Some(123) @@ -2808,12 +2793,12 @@ \subsection{Pointed functors: motivation and laws\label{subsec:Pointed-functors- \[ \text{pu}_{F}(x)\triangleright f^{\uparrow F}=\text{pu}_{F}(f(x))\quad. \] -In the $\triangleright$-notation, this law is $x\triangleright\text{pu}_{F}\triangleright f^{\uparrow F}=x\triangleright f\triangleright\text{pu}_{F}$ -or equivalently $x\triangleright\text{pu}_{F}\bef f^{\uparrow F}=x\triangleright f\bef\text{pu}_{F}$. +In the $\triangleright$-notation, this law is written as $x\triangleright\text{pu}_{F}\triangleright f^{\uparrow F}=x\triangleright f\triangleright\text{pu}_{F}$ +or equivalently as $x\triangleright\text{pu}_{F}\bef f^{\uparrow F}=x\triangleright f\bef\text{pu}_{F}$. Since both sides of the law are functions applied to an arbitrary value $x^{:A}$, we can omit $x$ and write: \begin{equation} -\text{pu}_{F}\bef f^{\uparrow F}=f\bef\text{pu}_{F}\quad.\label{eq:naturality-law-of-pure} +\text{for any }f^{:A\rightarrow B}:\quad\quad\text{pu}_{F}\bef f^{\uparrow F}=f\bef\text{pu}_{F}\quad.\label{eq:naturality-law-of-pure} \end{equation} \[ \xymatrix{\xyScaleY{2.0pc}\xyScaleX{4.0pc}A\ar[r]\sp(0.5){\text{pu}_{F}}\ar[d]\sb(0.45){f} & F^{A}\ar[d]\sp(0.45){f^{\uparrow F}}\\ @@ -2822,8 +2807,7 @@ \subsection{Pointed functors: motivation and laws\label{subsec:Pointed-functors- \] This motivates the following definition: A functor $F$ is \index{pointed functor|textit}\textbf{pointed} if there exists a function $\text{pu}_{F}:\forall A.\,A\rightarrow F^{A}$ -satisfying the naturality law~(\ref{eq:naturality-law-of-pure}) -for any function $f^{:A\rightarrow B}$. +satisfying the naturality law~(\ref{eq:naturality-law-of-pure}). It turns out that we can avoid checking the naturality law of pointed functors if we use a trick: reduce \lstinline!pure! to a simpler @@ -2833,8 +2817,8 @@ \subsection{Pointed functors: motivation and laws\label{subsec:Pointed-functors- are functions of type $A\rightarrow F^{B}$. The trick is to choose $A=\bbnum 1$ (the \lstinline!Unit! type) and $f^{:\bbnum 1\rightarrow B}\triangleq(\_\rightarrow b)$, a constant function returning some fixed value $b^{:B}$. Both sides -of the naturality law may then be applied to the unit value $1$ and -must evaluate to the same result: +of the naturality law may then be applied to the unit value ($1$) +and must evaluate to the same result: \[ 1\triangleright\text{pu}_{F}\triangleright(\_\rightarrow b)^{\uparrow F}=1\triangleright f\triangleright\text{pu}_{F}\quad. \] @@ -2846,11 +2830,11 @@ \subsection{Pointed functors: motivation and laws\label{subsec:Pointed-functors- all types $A,B$ and to any function $f^{:A\rightarrow B}$. Thus, Eq.~(\ref{eq:pu-via-wu}) must apply to an arbitrary value $b^{:B}$ for any type $B$. That formula expresses the function $\text{pu}_{F}$ -through one value $\text{pu}_{F}(1)$ of type $F^{\bbnum 1}$. This +through the value $\text{pu}_{F}(1)$ of type $F^{\bbnum 1}$. That value can be viewed as a \textsf{``}wrapped unit\index{wrapped@\textsf{``}wrapped unit\textsf{''} value}\textsf{''} value. 
-To perform the same derivation in Scala syntax, we may write +To perform the same derivation in the Scala syntax, we may write: \begin{lstlisting} val one: Unit = () val f: Unit => B = { _ => b } @@ -2904,10 +2888,9 @@ \subsection{Pointed functors: motivation and laws\label{subsec:Pointed-functors- \end{itemize} So, the types of $\text{pu}_{F}$ and $\text{wu}_{F}$ are\textbf{ equivalent}\index{type equivalence}: each one can be converted into -the other and back without loss of information (as long as we assume -the naturality law of $\text{pu}_{F}$). We may define a pointed functor -equivalently as a functor with a chosen value $\text{wu}_{F}$ of -type $F^{\bbnum 1}$. +the other and back without loss of information (as long as the naturality +law of $\text{pu}_{F}$ holds). We may define a pointed functor equivalently +as a functor with a chosen value $\text{wu}_{F}$ of type $F^{\bbnum 1}$. As an example, consider the functor $F^{A}\triangleq\bbnum 1+A\times A$. It is a pointed functor, but there are two possible instances of \lstinline!Pointed[F]!. @@ -2927,7 +2910,7 @@ \subsection{Pointed functors: motivation and laws\label{subsec:Pointed-functors- val wu2: F[Unit] = Some(((), ())) \end{lstlisting} \[ -\text{wu}_{1F}\triangleq\bbnum 1+\bbnum 0^{:\bbnum 1\times\bbnum 1}\quad,\quad\quad\text{wu}_{2F}\triangleq\bbnum 0+1\times1\quad. +\text{wu}_{1F}\triangleq1+\bbnum 0^{:\bbnum 1\times\bbnum 1}\quad,\quad\quad\text{wu}_{2F}\triangleq\bbnum 0+1\times1\quad. \] To check the equivalence between \lstinline!pure! and \lstinline!wu!, first note that: @@ -2990,12 +2973,12 @@ \subsection{Pointed functors: structural analysis\label{subsec:Pointed-functors: a list of length $2$: \lstinline!List((), ())!, and so on. Each of these choices gives a valid \lstinline!Pointed! instance for the \lstinline!List! functor. It is up to the programmer to choose the -\lstinline!Pointed! instance that will be useful for the application -at hand. In the case of \lstinline!List!, the standard choice \lstinline!wu == List(())! -and \lstinline!pure(x) = List(x)! is motivated by the usage of the -\lstinline!List! functor to represent a choice of possibilities, -e.g., in a search problem. Then the \textsf{``}pure\textsf{''} list represents a situation -with only one possibility. +\lstinline!Pointed! instance that will be useful in the application +at hand. In the case of \lstinline!List!, the standard definitions +\lstinline!wu == List(())! and \lstinline!pure(x) = List(x)! are +motivated by the usage of the \lstinline!List! functor to represent +a choice of possibilities, e.g., in a search problem. Then a \textsf{``}pure\textsf{''} +list represents a situation with only one possibility. \paragraph{Nameless type-to-type functions} @@ -3017,19 +3000,19 @@ \subsection{Pointed functors: structural analysis\label{subsec:Pointed-functors: However, the following declaration of the analogous function \lstinline!pointedPair! is invalid in Scala: \begin{lstlisting} -def pointedPair[F[_]: Pointed, G[_]: Pointed]: Pointed[L] // Does not work in Scala. +def pointedPair[F[_]: Pointed, G[_]: Pointed]: Pointed[L] // Does not work. \end{lstlisting} It is not possible to use the type alias \lstinline!L! within this function declaration, because the type alias needs to use the type parameters \lstinline!F! and \lstinline!G! that are defined only -within the type signature of the function. 
To achieve that, we would -need somehow to insert a new type alias declaration within the type -signature of \lstinline!pointedPair!, but the syntax of Scala does -not support that: +\emph{within} the type signature of the function. To achieve that, +we would need somehow to insert a new type alias declaration within +the type signature of \lstinline!pointedPair!, but the syntax of +Scala does not support that: \begin{lstlisting} -def pointedPair[F[_]: Pointed, G[_]: Pointed]: ( // Not a valid Scala syntax. - type L[A] = (F[A], G[A]) // Temporarily define a type constructor L, and now use it: - Pointed[L] ) +def pointedPair[F[_]: Pointed, G[_]: Pointed]: ( // Not a valid syntax: + type L[A] = (F[A], G[A]) // Temporarily define a type constructor L. + Pointed[L] ) // Use L here. \end{lstlisting} The return type is required to be \lstinline!Pointed[L]!, where \lstinline!L! needs to be a type expression that defines a type constructor, i.e., @@ -3048,11 +3031,11 @@ \subsection{Pointed functors: structural analysis\label{subsec:Pointed-functors: The special Scala plugin called the \index{kind@\textsf{``}kind projector\textsf{''} plugin}\textsf{``}kind projector\textsf{''}\footnote{\texttt{\href{https://github.com/typelevel/kind-projector}{https://github.com/typelevel/kind-projector}}} adds syntax for nameless type constructors. The syntax is similar -to defining a nameless function: for instance, the pair functor $F^{\bullet}\times G^{\bullet}$ +to defining a nameless function: for instance, the pair functor $F\times G$ is defined as \lstinline!Lambda[X => (F[X], G[X])]!. Such type expressions -can be understood as nameless\index{nameless type-to-type function} -type-to-type functions. When using the \textsf{``}kind projector\textsf{''} plugin, -the syntax for defining \lstinline!pointedPair! is: +represent nameless\index{nameless type-to-type function} type-to-type +functions. When using the \textsf{``}kind projector\textsf{''} plugin, the syntax +for defining \lstinline!pointedPair! is: \begin{lstlisting} def pointedPair[F[_]: Pointed, G[_]: Pointed]: Pointed[Lambda[X => (F[X], G[X])]] = ??? \end{lstlisting} @@ -3095,8 +3078,9 @@ \subsection{Pointed functors: structural analysis\label{subsec:Pointed-functors: method to $\text{wu}_{G}:G^{\bbnum 1}$ and obtain a value of type $F^{G^{\bbnum 1}}$. \begin{lstlisting} -def pointedFoG[F[_]: Pointed : Functor, G[_]: Pointed]: Pointed[Lambda[X => F[G[X]]]] - = Pointed[Lambda[X => F[G[X]]]](pure[F, G[Unit]](implicitly[Pointed[G]].wu)) +def pointedFoG[F[_]: Pointed : Functor, G[_]: Pointed] + : Pointed[Lambda[X => F[G[X]]]] + = Pointed[Lambda[X => F[G[X]]]](implicitly[Pointed[F]].pure[F, G[Unit]](implicitly[Pointed[G]].wu)) \end{lstlisting} The case when $F$ and $G$ are contrafunctors requires us to assume that $F$ belongs to the \textsf{``}pointed contrafunctor\textsf{''} typeclass (see @@ -3110,8 +3094,9 @@ \subsection{Pointed functors: structural analysis\label{subsec:Pointed-functors: In this way, we can create a value of type $F^{G^{\bbnum 1}}$. The contrafunctor $G$ does not need to be pointed. 
\begin{lstlisting} -def pointedCFoG[F[_]: Pointed : Contrafunctor, G[_]]: Pointed[Lambda[X => F[G[X]]]] = - Pointed[Lambda[X => F[G[X]]]](cpure[F, G[Unit]]) +def pointedCFoG[F[_]: Pointed : Contrafunctor, G[_]] + : Pointed[Lambda[X => F[G[X]]]] + = Pointed[Lambda[X => F[G[X]]]](cpure[F, G[Unit]]) \end{lstlisting} @@ -3123,8 +3108,9 @@ \subsection{Pointed functors: structural analysis\label{subsec:Pointed-functors: and we have values $\text{wu}_{F}:F^{\bbnum 1}$ and $\text{wu}_{G}:G^{\bbnum 1}$. It is clear that we must set $\text{wu}_{L}=\text{wu}_{F}\times\text{wu}_{G}$. \begin{lstlisting} -def pointedFxG[F[_]: Pointed, G[_]: Pointed]: Pointed[Lambda[X => (F[X],G[X])]] = - Pointed[Lambda[X => (F[X],G[X])]]((implicitly[Pointed[F]].wu, implicitly[Pointed[G]].wu)) +def pointedFxG[F[_]: Pointed, G[_]: Pointed] + : Pointed[Lambda[X => (F[X],G[X])]] + = Pointed[Lambda[X => (F[X],G[X])]]((implicitly[Pointed[F]].wu, implicitly[Pointed[G]].wu)) \end{lstlisting} @@ -3136,14 +3122,15 @@ \subsection{Pointed functors: structural analysis\label{subsec:Pointed-functors: $\text{wu}_{F}:F^{\bbnum 1}$ and $\text{wu}_{G}:G^{\bbnum 1}$. There are two choices, $\text{wu}_{L}=\text{wu}_{F}+\bbnum 0^{:G^{\bbnum 1}}$ and $\text{wu}_{L}=\bbnum 0^{:F^{\bbnum 1}}+\text{wu}_{G}$, both -making $L^{\bullet}$ a pointed functor. +making $L$ a pointed functor. -It is sufficient if just $F^{\bullet}$ is a pointed functor: $\text{wu}_{L}\triangleq\text{wu}_{F}+\bbnum 0^{:G^{\bbnum 1}}$ -is a \lstinline!Pointed! typeclass instance for $F^{\bullet}+G^{\bullet}$, -even if $G^{\bullet}$ is not pointed. +It is sufficient if just $F$ is a pointed functor: $\text{wu}_{L}\triangleq\text{wu}_{F}+\bbnum 0^{:G^{\bbnum 1}}$ +is a \lstinline!Pointed! typeclass instance for $F+G$, even if $G$ +is not pointed. \begin{lstlisting} -def pointedEitherFG[F[_]: Pointed, G[_]]: Pointed[Lambda[X => Either[F[X],G[X]]]] = - Pointed[Lambda[X => Either[F[X],G[X]]]](Left(implicitly[Pointed[F]].wu)) +def pointedEitherFG[F[_]: Pointed, G[_]] + : Pointed[Lambda[X => Either[F[X],G[X]]]] + = Pointed[Lambda[X => Either[F[X],G[X]]]](Left(implicitly[Pointed[F]].wu)) \end{lstlisting} @@ -3165,11 +3152,11 @@ \subsection{Pointed functors: structural analysis\label{subsec:Pointed-functors: \paragraph{Recursive types} The recursive construction for functors (see Statement~\ref{subsec:functor-Statement-functor-recursive}) -assumes a bifunctor $S^{\bullet,\bullet}$ and defines a recursive -functor $F^{\bullet}$ via the type equation $F^{A}\triangleq S^{A,F^{A}}$. -The functor $F^{\bullet}$ will be pointed if we can compute a value -$\text{wu}_{F}$ of type $F^{\bbnum 1}$. The type $F^{\bbnum 1}$ -is a recursive type defined via the type equation $F^{\bbnum 1}\triangleq S^{\bbnum 1,F^{\bbnum 1}}$. +assumes a bifunctor $S$ and defines a recursive functor $F$ via +the type equation $F^{A}\triangleq S^{A,F^{A}}$. The functor $F$ +will be pointed if we can compute a value $\text{wu}_{F}$ of type +$F^{\bbnum 1}$. The type $F^{\bbnum 1}$ is a recursive type defined +via the type equation $F^{\bbnum 1}\triangleq S^{\bbnum 1,F^{\bbnum 1}}$. If that type is not void, i.e., if there exists some value of that type, we will be able to define $\text{wu}_{F}$ as that value. @@ -3260,7 +3247,7 @@ \subsection{Pointed functors: structural analysis\label{subsec:Pointed-functors: \] The type $A$ is now an arbitrary and unknown type, so we cannot compute any values of $A$ or $\text{String}\times A\times A$ from scratch. 
-The function type $(\bbnum 1+A\rightarrow\text{Int})\rightarrow A\times\bbnum 1$ +Values of type $(\bbnum 1+A\rightarrow\text{Int})\rightarrow A\times\bbnum 1$ cannot be implemented because a value of type $A$ cannot be computed from a function $\bbnum 1+A\rightarrow\text{Int}$ that \emph{consumes} values of type $A$. So, $F^{A,B}$ is not pointed with respect to @@ -3277,15 +3264,15 @@ \subsection{Pointed functors: structural analysis\label{subsec:Pointed-functors: \hline {\footnotesize{}Identity functor} & {\footnotesize{}$\bbnum 1$}\tabularnewline \hline -{\footnotesize{}Composition of pointed (contra)functors} & {\footnotesize{}$\text{Pointed}^{F^{\bullet}}\times\text{Pointed}^{G^{\bullet}}\rightarrow\text{Pointed}^{F^{G^{\bullet}}}$}\tabularnewline +{\footnotesize{}Composition of pointed (contra)functors} & {\footnotesize{}$\text{Pointed}^{F}\times\text{Pointed}^{G}\rightarrow\text{Pointed}^{F\circ G}$}\tabularnewline \hline -{\footnotesize{}Product of pointed functors $F$ and $G$} & {\footnotesize{}$\text{Pointed}^{F^{\bullet}}\times\text{Pointed}^{G^{\bullet}}\rightarrow\text{Pointed}^{F^{\bullet}\times G^{\bullet}}$}\tabularnewline +{\footnotesize{}Product of pointed functors $F$ and $G$} & {\footnotesize{}$\text{Pointed}^{F}\times\text{Pointed}^{G}\rightarrow\text{Pointed}^{F\times G}$}\tabularnewline \hline -{\footnotesize{}Co-product of a pointed functor $F$ and any $G$} & {\footnotesize{}$\text{Pointed}^{F^{\bullet}}\times\text{Functor}^{G^{\bullet}}\rightarrow\text{Pointed}^{F^{\bullet}+G^{\bullet}}$}\tabularnewline +{\footnotesize{}Co-product of a pointed functor $F$ and any $G$} & {\footnotesize{}$\text{Pointed}^{F}\times\text{Functor}^{G}\rightarrow\text{Pointed}^{F+G}$}\tabularnewline \hline -{\footnotesize{}Function from any $C$ to a pointed $F$} & {\footnotesize{}$\text{Pointed}^{F^{\bullet}}\times\text{Contrafunctor}^{C^{\bullet}}\rightarrow\text{Pointed}^{C^{\bullet}\rightarrow F^{\bullet}}$}\tabularnewline +{\footnotesize{}Function from any $C$ to a pointed $F$} & {\footnotesize{}$\text{Pointed}^{F}\times\text{Contrafunctor}^{C}\rightarrow\text{Pointed}^{C\rightarrow F}$}\tabularnewline \hline -{\footnotesize{}Recursive type} & {\footnotesize{}$\text{Pointed}^{F^{\bullet}}\rightarrow\text{Pointed}^{S^{\bullet,F^{\bullet}}}$ +{\footnotesize{}Recursive type} & {\footnotesize{}$\text{Pointed}^{F}\rightarrow\text{Pointed}^{S^{\bullet,F}}$ where $F^{A}\triangleq S^{A,F^{A}}$}\tabularnewline \hline \end{tabular} @@ -3307,14 +3294,14 @@ \subsection{Co-pointed functors\label{subsec:Co-pointed-functors}} \] Functors having this operation are called \index{co-pointed functor}\textbf{co-pointed}. We may define the \lstinline!Copointed! typeclass\index{typeclass!Copointed@\texttt{Copointed}} -as +as: \begin{lstlisting} trait Copointed[F[_]] { def ex[A]: F[A] => A } def extract[F[_]: Copointed, A](f: F[A]): A = implicitly[Copointed[F]].ex(f) \end{lstlisting} The \lstinline!extract! function must be fully parametric and obey the \index{naturality law!of extract@of \texttt{extract}}\textbf{naturality -law} (compare to Eq.~(\ref{eq:naturality-law-of-pure})): +law} (compare with Eq.~(\ref{eq:naturality-law-of-pure})): \begin{equation} \text{ex}_{F}\bef f=f^{\uparrow F}\bef\text{ex}_{F}\quad.\label{eq:naturality-law-of-extract} \end{equation} @@ -3339,7 +3326,7 @@ \subsection{Co-pointed functors\label{subsec:Co-pointed-functors}} the \lstinline!Pointed! typeclass is equivalent to the type $F^{\bbnum 1}$. That provides a simpler form of the \lstinline!Pointed! typeclass. 
For co-pointed functors, there is no simpler form of the \lstinline!extract! -method. If we set $A=\bbnum 1$ and $f^{:\bbnum 1\rightarrow B}\triangleq(1\rightarrow b)$ +method. If we set $A=\bbnum 1$ and $f^{:\bbnum 1\rightarrow B}\triangleq(\_\rightarrow b)$ in the naturality law, both sides will become functions of type $F^{\bbnum 1}\rightarrow B$. But the type $F^{\bbnum 1}$ might be void, or a value of type $F^{\bbnum 1}$ may not be computable via fully parametric code. So, we cannot deduce @@ -3419,10 +3406,9 @@ \subsection{Co-pointed functors\label{subsec:Co-pointed-functors}} If functors $F$ or $G$ are co-pointed, we can implement a function of type $F^{A}\times G^{A}\rightarrow A$ by discarding $F^{A}$ (if $G$ is co-pointed) or by discarding $G^{A}$ (if $F$ is co-pointed). -The functor product $F^{\bullet}\times G^{\bullet}$ is then made -into a co-pointed functor. For instance, if $F$ is co-pointed, we -discard $G^{A}$, so the code for the \lstinline!extract! method -will be: +The functor product $F\times G$ is then made into a co-pointed functor. +For instance, if $F$ is co-pointed, we discard $G^{A}$, so the code +for the \lstinline!extract! method will be: \[ \text{ex}_{F\times G}\triangleq f^{:F^{A}}\times g^{:G^{A}}\rightarrow\text{ex}_{F}(f)=\pi_{1}\bef\text{ex}_{F}\quad, \] @@ -3519,7 +3505,7 @@ \subsection{Co-pointed functors\label{subsec:Co-pointed-functors}} assume additionally that $P$ is co-pointed and use its method $\text{ex}_{P}:P^{A}\rightarrow A$. Finally, we have: \begin{align} - & \text{ex}_{L}\triangleq h^{:C^{A}\rightarrow P^{A}}\rightarrow\text{ex}_{P}(h(\text{cpu}_{C}))\label{eq:def-of-ex-for-C-mapsto-P}\\ + & \text{ex}_{L}\triangleq h^{:C^{A}\rightarrow P^{A}}\rightarrow\text{ex}_{P}(h(\text{cpu}_{C}))\quad,\label{eq:def-of-ex-for-C-mapsto-P}\\ {\color{greenunder}\text{ or equivalently}:}\quad & h^{:C^{A}\rightarrow P^{A}}\triangleright\text{ex}_{L}=\text{cpu}_{C}\triangleright h\triangleright\text{ex}_{P}\quad.\nonumber \end{align} To verify the naturality law, we apply both sides to an arbitrary @@ -3530,11 +3516,12 @@ \subsection{Co-pointed functors\label{subsec:Co-pointed-functors}} {\color{greenunder}\text{definition of }f^{\uparrow L},\text{ see Eq.~(\ref{eq:f-functor-exponential-def-fmap-f-h})}:}\quad & =\text{cpu}_{C}\triangleright f^{\downarrow C}\bef h\bef\gunderline{f^{\uparrow P}\bef\text{ex}_{P}}\\ {\color{greenunder}\text{naturality law of }\text{ex}_{P}:}\quad & =\text{cpu}_{C}\triangleright f^{\downarrow C}\bef h\bef\text{ex}_{P}\bef f\quad. \end{align*} -We expect the last expression to equal this one: +We expect the last expression to equal $h\triangleright\text{ex}_{L}\bef f$, +which is rewritten as: \[ h\triangleright\text{ex}_{L}\bef f=\text{cpu}_{C}\triangleright h\triangleright\text{ex}_{P}\triangleright f=\text{cpu}_{C}\triangleright h\bef\text{ex}_{P}\bef f\quad. \] -This is possible only if $\text{cpu}_{C}\triangleright f^{\downarrow C}=\text{pu}_{C}$ +The equality is possible only if $\text{cpu}_{C}\triangleright f^{\downarrow C}=\text{cpu}_{C}$ for all $f$. This motivates us to assume that law as the \index{naturality law!of pure for contrafunctors@of \texttt{pure} for contrafunctors}\textbf{naturality law} of $\text{cpu}_{C}$ for pointed contrafunctors. With that last assumption, the naturality law of $\text{ex}_{L}$ is proved. 
The @@ -3543,7 +3530,7 @@ \subsection{Co-pointed functors\label{subsec:Co-pointed-functors}} def copointedFunc[C[_]: Pointed, P[_]: Copointed]: Copointed[Lambda[X => C[X] => P[X]]] = new Copointed[Lambda[X => C[X] => P[X]]] { def ex[A]: (C[A] => P[A]) => A = h => extract[P, A](h(cpure[C, A])) - } // In Scala 2.13: h => cpure[C, A] pipe h pipe extract[P, A], which corresponds to ${\color{dkgreen} h\rightarrow\text{pu}_{C}\triangleright h\triangleright\text{ex}_{P} }$ + } // In Scala 2.13: h => cpure[C, A] pipe h pipe extract[P, A], which corresponds to ${\color{dkgreen} h\rightarrow\text{cpu}_{C}\triangleright h\triangleright\text{ex}_{P} }$. \end{lstlisting} We will analyze pointed contrafunctors and their naturality law in Section~\ref{subsec:Pointed-contrafunctors}. @@ -3568,7 +3555,7 @@ \subsection{Co-pointed functors\label{subsec:Co-pointed-functors}} Assuming that $S$ is co-pointed, we can finally define $\text{ex}_{F}$ by recursion: \begin{align*} - & \text{ex}_{F}\triangleq s^{:S^{A,F^{A}}}\rightarrow s\triangleright\big(\text{bimap}_{S}(\text{id})(\text{ex}_{F})\big)\triangleright\text{ex}_{S}\\ + & \text{ex}_{F}\triangleq s^{:S^{A,F^{A}}}\rightarrow s\triangleright\big(\text{bimap}_{S}(\text{id})(\text{ex}_{F})\big)\triangleright\text{ex}_{S}\quad,\\ {\color{greenunder}\text{ or equivalently}:}\quad & \text{ex}_{F}\triangleq\text{bimap}_{S}(\text{id})(\text{ex}_{F})\bef\text{ex}_{S}\quad. \end{align*} To verify the naturality law of $\text{ex}_{F}$, we denote recursive @@ -3650,8 +3637,8 @@ \subsection{Co-pointed functors\label{subsec:Co-pointed-functors}} functor (with respect to $A$). It is clear that $A\times B$ is co-pointed with respect to $A$ since we have $\pi_{1}:A\times B\rightarrow A$. It remains to check that the contrafunctor $C^{A}\triangleq\bbnum 1+A\rightarrow\text{Int}$ -is pointed. A contrafunctor $C^{\bullet}$ is pointed if values of -type $C^{\bbnum 1}$ can be computed (see Section~\ref{subsec:Pointed-contrafunctors}); +is pointed. A contrafunctor $C$ is pointed if values of type $C^{\bbnum 1}$ +can be computed (see Section~\ref{subsec:Pointed-contrafunctors}); this requires us to compute a value of type $\bbnum 1+\bbnum 1\rightarrow\text{Int}$. One such value is \lstinline!{ _ => 0 }!, a constant function that always returns the integer \lstinline!0!. 
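For illustration, the resulting \lstinline!extract! method for this example can also be written out directly in Scala code (a sketch only: we encode the type $\bbnum 1+A$ as \lstinline!Option[A]! and fix the second type parameter to an arbitrary chosen type \lstinline!B0!):
\begin{lstlisting}
type B0 = String // An arbitrary fixed type standing for the type parameter B.
type F[A] = (Option[A] => Int) => (A, B0) // Encodes the type (1 + A -> Int) -> A x B0.
def ex[A]: F[A] => A = h => h(_ => 0)._1 // Apply h to the constant function, then take the first part of the pair.
\end{lstlisting}
This is just Eq.~(\ref{eq:def-of-ex-for-C-mapsto-P}) specialized to $\text{cpu}_{C}=(\_\rightarrow0)$ and $\text{ex}_{P}=\pi_{1}$.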
@@ -3668,15 +3655,15 @@ \subsection{Co-pointed functors\label{subsec:Co-pointed-functors}} \hline {\footnotesize{}Identity functor} & {\footnotesize{}$\text{id}:A\rightarrow A$}\tabularnewline \hline -{\footnotesize{}Composition of co-pointed functors} & {\footnotesize{}$\text{Copointed}^{F^{\bullet}}\times\text{Copointed}^{G^{\bullet}}\rightarrow\text{Copointed}^{F^{G^{\bullet}}}$}\tabularnewline +{\footnotesize{}Composition of co-pointed functors} & {\footnotesize{}$\text{Copointed}^{F}\times\text{Copointed}^{G}\rightarrow\text{Copointed}^{F\circ G}$}\tabularnewline \hline -{\footnotesize{}Product of co-pointed functor $F$ and any $G$} & {\footnotesize{}$\text{Copointed}^{F^{\bullet}}\times\text{Functor}^{G^{\bullet}}\rightarrow\text{Copointed}^{F^{\bullet}\times G^{\bullet}}$}\tabularnewline +{\footnotesize{}Product of co-pointed functor $F$ and any $G$} & {\footnotesize{}$\text{Copointed}^{F}\times\text{Functor}^{G}\rightarrow\text{Copointed}^{F\times G}$}\tabularnewline \hline -{\footnotesize{}Co-product of co-pointed functors $F$ and $G$} & {\footnotesize{}$\text{Copointed}^{F^{\bullet}}\times\text{Copointed}^{G^{\bullet}}\rightarrow\text{Copointed}^{F^{\bullet}+G^{\bullet}}$}\tabularnewline +{\footnotesize{}Co-product of co-pointed functors $F$ and $G$} & {\footnotesize{}$\text{Copointed}^{F}\times\text{Copointed}^{G}\rightarrow\text{Copointed}^{F+G}$}\tabularnewline \hline -{\footnotesize{}Function from pointed $C$ to co-pointed $F$} & {\footnotesize{}$\text{\ensuremath{\text{Pointed}^{C^{\bullet}}}}\times\text{Copointed}^{F^{\bullet}}\rightarrow\text{Copointed}^{C^{\bullet}\rightarrow F^{\bullet}}$}\tabularnewline +{\footnotesize{}Function from pointed $C$ to co-pointed $F$} & {\footnotesize{}$\text{\ensuremath{\text{Pointed}^{C}}}\times\text{Copointed}^{F}\rightarrow\text{Copointed}^{C\rightarrow F}$}\tabularnewline \hline -{\footnotesize{}Recursive type} & {\footnotesize{}$\text{Copointed}^{F^{\bullet}}\rightarrow\text{Copointed}^{S^{\bullet,F^{\bullet}}}$ +{\footnotesize{}Recursive type} & {\footnotesize{}$\text{Copointed}^{F}\rightarrow\text{Copointed}^{S^{\bullet,F}}$ where $F^{A}\triangleq S^{A,F^{A}}$}\tabularnewline \hline \end{tabular} @@ -3688,10 +3675,9 @@ \subsection{Co-pointed functors\label{subsec:Co-pointed-functors}} \subsection{Pointed contrafunctors\label{subsec:Pointed-contrafunctors}} In the previous section, the function-type construction required a -contrafunctor $C^{\bullet}$ to have a method $\text{cpu}_{C}$ of -type $\forall A.\,C^{A}$; we called such contrafunctors \textbf{pointed}. -We also needed to assume that the naturality law holds for all functions -$f^{:A\rightarrow B}$: +contrafunctor $C$ to have a method $\text{cpu}_{C}$ of type $\forall A.\,C^{A}$; +we called such contrafunctors \textbf{pointed}. 
We also needed to +assume that the naturality law holds for all functions $f^{:A\rightarrow B}$: \begin{align} & \text{cpu}_{C}\triangleright f^{\downarrow C}=\text{cpu}_{C}\quad,\label{eq:naturality-law-for-pure-for-contrafunctors}\\ {\color{greenunder}\text{or equivalently}:}\quad & \text{cmap}_{C}(f^{:A\rightarrow B})(\text{cpu}_{C}^{:C^{B}})=\text{cpu}_{C}^{:C^{A}}\quad.\nonumber @@ -3723,8 +3709,8 @@ \subsection{Pointed contrafunctors\label{subsec:Pointed-contrafunctors}} {\color{greenunder}\text{composition law of contrafunctor }C:}\quad & =\text{wu}_{C}\triangleright(\gunderline{f\bef(\_^{:B}\rightarrow1)})^{\downarrow C}\\ {\color{greenunder}\text{compute function composition}:}\quad & =\text{wu}_{C}\triangleright(\_^{:A}\rightarrow1)^{\downarrow C}=\text{cpu}_{C}^{A}\quad. \end{align*} -So, a pointed contrafunctor instance for $C^{\bullet}$ is just a -chosen value of type $C^{\bbnum 1}$. +So, a pointed contrafunctor instance for $C$ is just a chosen value +of type $C^{\bbnum 1}$. \begin{lstlisting} final case class Pointed[F[_]](wu: F[Unit]) def cpure[F[_]: Pointed : Contrafunctor, A]: F[A] = implicitly[Pointed[F]].wu.cmap(_ => ()) @@ -3745,27 +3731,28 @@ \subsection{Pointed contrafunctors\label{subsec:Pointed-contrafunctors}} \paragraph{Type parameters} Since the identity functor $\text{Id}^{A}\triangleq A$ is not a contrafunctor, -it remains to consider the functor compositions $C^{F^{A}}$ and $F^{C^{A}}$ -where $C^{\bullet}$ is a contrafunctor and $F^{\bullet}$ is a functor. +it remains to consider the functor compositions $C^{F^{A}}$ and $F^{C^{A}}$, +where $C$ is a contrafunctor and $F$ is a functor. If $C$ is pointed, we can always obtain a value $\text{cpu}_{C}$ of type $C^{A}$ for any type $A$, in particular for $A=F^{\bbnum 1}$ (whether or not a value of type $F^{\bbnum 1}$ can be computed). -So, $C^{F^{\bullet}}$ is a pointed contrafunctor whenever $C^{\bullet}$ -is one, for any (not necessarily pointed) functor $F$. +So, $C\circ F$ is a pointed contrafunctor whenever $C$ is one, for +\emph{any} (not necessarily pointed) functor $F$. \begin{lstlisting} -def pointedCoF[C[_]: Pointed: Contrafunctor, F[_]]: Pointed[Lambda[X => C[F[X]]]] = - Pointed[Lambda[X => C[F[X]]]](cpure[C, F[Unit]]) +def pointedCoF[C[_]: Pointed: Contrafunctor, F[_]] + : Pointed[Lambda[X => C[F[X]]]] + = Pointed[Lambda[X => C[F[X]]]](cpure[C, F[Unit]]) \end{lstlisting} Creating a value of type $F^{C^{\bbnum 1}}$ requires $F$ to have a \lstinline!pure! method that could be applied to a value of type $C^{\bbnum 1}$ to compute a value of type $F^{C^{\bbnum 1}}$. So, -$F^{C^{\bullet}}$ is pointed whenever both $C^{\bullet}$ and $F^{\bullet}$ -are pointed. +$F\circ C$ is pointed whenever both $C$ and $F$ are pointed. \begin{lstlisting} -def pointedFoC[C[_]: Pointed, F[_]: Pointed : Functor]: Pointed[Lambda[X => F[C[X]]]] = - Pointed[Lambda[X => F[C[X]]]](pure[F, C[Unit]](implicitly[Pointed[C]].wu)) +def pointedFoC[C[_]: Pointed, F[_]: Pointed : Functor] + : Pointed[Lambda[X => F[C[X]]]] + = Pointed[Lambda[X => F[C[X]]]](pure[F, C[Unit]](implicitly[Pointed[C]].wu)) \end{lstlisting} @@ -3774,26 +3761,25 @@ \subsection{Pointed contrafunctors\label{subsec:Pointed-contrafunctors}} The construction is the same as for pointed functors: If we have values of type $C^{\bbnum 1}$ and $D^{\bbnum 1}$, we can compute the pair $C^{\bbnum 1}\times D^{\bbnum 1}$. This makes the product contrafunctor -$L^{A}\triangleq C^{A}\times D^{A}$ pointed if both $C^{\bullet}$ -and $D^{\bullet}$ are pointed contrafunctors. 
+$L^{A}\triangleq C^{A}\times D^{A}$ pointed if both $C$ and $D$ +are pointed contrafunctors. \paragraph{Co-products} The construction is the same as for pointed functors: If at least -one of the contrafunctors $C^{\bullet}$ and $D^{\bullet}$ is pointed, -we can create a \lstinline!Pointed! instance for the co-product contrafunctor -$L^{A}\triangleq C^{A}+D^{A}$ as either $\text{wu}_{L}=\text{wu}_{C}+\bbnum 0^{:D^{\bbnum 1}}$ +one of the contrafunctors $C$ and $D$ is pointed, we can create +a \lstinline!Pointed! instance for the co-product contrafunctor $L^{A}\triangleq C^{A}+D^{A}$ +as either $\text{wu}_{L}=\text{wu}_{C}+\bbnum 0^{:D^{\bbnum 1}}$ or $\text{wu}_{L}=\bbnum 0^{:C^{\bbnum 1}}+\text{wu}_{D}$. \paragraph{Functions} The construction is $L^{A}\triangleq F^{A}\rightarrow C^{A}$, where -$C^{\bullet}$ is a contrafunctor and $F^{\bullet}$ is a functor. -To create a value $\text{wu}_{L}:L^{\bbnum 1}$ means to create a -function of type $F^{\bbnum 1}\rightarrow C^{\bbnum 1}$. That function -cannot use its argument of type $F^{\bbnum 1}$ for computing a value -$C^{\bbnum 1}$ since $F$ is an arbitrary functor. So, $\text{wu}_{L}$ -must be a constant function $(\_^{:F^{\bbnum 1}}\rightarrow\text{wu}_{C})$, +$C$ is a contrafunctor and $F$ is a functor. To create a value $\text{wu}_{L}:L^{\bbnum 1}$ +means to create a function of type $F^{\bbnum 1}\rightarrow C^{\bbnum 1}$. +That function cannot use its argument of type $F^{\bbnum 1}$ for +computing a value $C^{\bbnum 1}$ since $F$ is an arbitrary functor. +So, $\text{wu}_{L}$ must be a constant function $(\_^{:F^{\bbnum 1}}\rightarrow\text{wu}_{C})$, where we assumed that a value $\text{wu}_{C}:C^{\bbnum 1}$ is available. Thus, $F^{A}\rightarrow C^{A}$ is pointed when $C$ is a pointed contrafunctor and $F$ is any functor. 
@@ -3834,15 +3820,15 @@ \subsection{Pointed contrafunctors\label{subsec:Pointed-contrafunctors}} \hline {\footnotesize{}Constant functor; $Z$ is a fixed type} & {\footnotesize{}value of type $Z$}\tabularnewline \hline -{\footnotesize{}Composition of functors/contrafunctors} & {\footnotesize{}$\text{Pointed}^{F^{\bullet}}\times\text{Pointed}^{G^{\bullet}}\rightarrow\text{Pointed}^{F^{G^{\bullet}}}$}\tabularnewline +{\footnotesize{}Composition of functors/contrafunctors} & {\footnotesize{}$\text{Pointed}^{F}\times\text{Pointed}^{G}\rightarrow\text{Pointed}^{F\circ G}$}\tabularnewline \hline -{\footnotesize{}Product of contrafunctors $F$ and $G$} & {\footnotesize{}$\text{Pointed}^{F^{\bullet}}\times\text{Pointed}^{G^{\bullet}}\rightarrow\text{Pointed}^{F^{\bullet}\times G^{\bullet}}$}\tabularnewline +{\footnotesize{}Product of contrafunctors $F$ and $G$} & {\footnotesize{}$\text{Pointed}^{F}\times\text{Pointed}^{G}\rightarrow\text{Pointed}^{F\times G}$}\tabularnewline \hline -{\footnotesize{}Co-product of a pointed $F$ and any $G$} & {\footnotesize{}$\text{Pointed}^{F^{\bullet}}\times\text{Contrafunctor}^{G^{\bullet}}\rightarrow\text{Pointed}^{F^{\bullet}+G^{\bullet}}$}\tabularnewline +{\footnotesize{}Co-product of a pointed $F$ and any $G$} & {\footnotesize{}$\text{Pointed}^{F}\times\text{Contrafunctor}^{G}\rightarrow\text{Pointed}^{F+G}$}\tabularnewline \hline -{\footnotesize{}Function type, $F^{A}\rightarrow C^{A}$} & {\footnotesize{}$\text{Pointed}^{C^{\bullet}}\times\text{Functor}^{F^{\bullet}}\rightarrow\text{Pointed}^{F^{\bullet}\rightarrow C^{\bullet}}$}\tabularnewline +{\footnotesize{}Function type, $F^{A}\rightarrow C^{A}$} & {\footnotesize{}$\text{Pointed}^{C}\times\text{Functor}^{F}\rightarrow\text{Pointed}^{F\rightarrow C}$}\tabularnewline \hline -{\footnotesize{}Recursive type} & {\footnotesize{}$\text{Pointed}^{C^{\bullet}}\rightarrow\text{Pointed}^{S^{\bullet,C^{\bullet}}}$, +{\footnotesize{}Recursive type} & {\footnotesize{}$\text{Pointed}^{C}\rightarrow\text{Pointed}^{S^{\bullet,C}}$, where $C^{A}\triangleq S^{A,C^{A}}$}\tabularnewline \hline \end{tabular} @@ -3873,13 +3859,13 @@ \section{Summary} We may want to write code such as: \begin{lstlisting} type F[A] = (A => Int) => A // Define a type constructor. -implicit val functorF: Functor[F] = implement // Automatically implement typeclass instance for F. +implicit val functorF: Functor[F] = implement // Automatically implement a typeclass instance for F. implicit val pointedF: Pointed[F] = implement // Automatically use the function-type construction. \end{lstlisting} However, no currently available library provides such functionality. Also, typeclass instances are not always derived uniquely, as we have -seen in several cases (e.g., the co-product construction of monoids -or pointed functors). +seen in several cases (e.g., the co-product constructions for monoids +and for pointed functors). We will discuss how to combine typeclasses in Section~\ref{subsec:Inheritance-and-automatic-typeclass} below. @@ -3961,7 +3947,7 @@ \subsubsection{Example \label{subsec:tc-Example-1}\ref{subsec:tc-Example-1}} An implementation via a \lstinline!trait! requires longer code but brings no significant advantages: \begin{lstlisting} -trait HasBitsize[T] { def size: Int } // Declare the trait as `sealed` to prohibit further instances. +sealed trait HasBitsize[T] { def size: Int } // Declare the trait as `sealed` to prohibit further instances. 
object HasBitsize { implicit val bitsizeShort = new HasBitsize[Short]{ def size: Int = 16 } implicit val bitsizeInt = new HasBitsize[Int] { def size: Int = 32 } @@ -4042,7 +4028,7 @@ \subsubsection{Example \label{subsec:tc-Example-4}\ref{subsec:tc-Example-4} (a Each route may respond to one or more URL paths by evaluating a custom function. The task is to implement a \lstinline!combine! operation for routes. The combined route should respond to all paths that at -least one of the previous routes responds to: +least one of the routes responds to: \begin{lstlisting} // The types `Path` and `Response` are defined only as an illustration. The code uses those types as type parameters. type Path = String @@ -4094,7 +4080,7 @@ \subsubsection{Example \label{subsec:tc-Example-4}\ref{subsec:tc-Example-4} (a via a \lstinline!trait! with methods \lstinline!empty! and \lstinline!combine!. We can define that typeclass using our existing code: \begin{lstlisting} -import $ivy.`org.typelevel::cats-core:1.5.0`, cats.Monoid // Using `ammonite` for convenience. +import $ivy.`org.typelevel::cats-core:1.5.0`, cats.Monoid // Using `ammonite`. implicit val catsMonoidRoute: Monoid[Route] = new Monoid[Route] { def empty: Route = emptyRoute def combine(x: Route, y: Route): Route = combineRoutes(x, y) @@ -4243,8 +4229,8 @@ \subsubsection{Example \label{subsec:tc-Example-6}\ref{subsec:tc-Example-6}} \subsubsection{Example \label{subsec:tc-Example-7}\ref{subsec:tc-Example-7}} -Define a \lstinline!Functor! instance for recursive type constructor -$Q^{A}\triangleq\left(\text{Int}\rightarrow A\right)+\text{Int}+Q^{A}$. +Define a \lstinline!Functor! instance for the recursive type constructor +$Q$ defined by $Q^{A}\triangleq\left(\text{Int}\rightarrow A\right)+\text{Int}+Q^{A}$. \subparagraph{Solution} @@ -4292,11 +4278,11 @@ \subsubsection{Example \label{subsec:tc-Example-8}\ref{subsec:tc-Example-8}} } } \end{lstlisting} -We will now to rewrite this code by making \lstinline!F! and \lstinline!G! -into type parameters. To achieve that, we need to use the \textsf{``}kind -projector\index{kind@\textsf{``}kind projector\textsf{''} plugin}\textsf{''} plugin and replace -the type constructor \lstinline!L! by a nameless type function \lstinline!Lambda[X => Either[F[X], G[X]]]!. -The code becomes: +We now need to rewrite this code by making \lstinline!F! and \lstinline!G! +into type parameters. For that, we need to use the \textsf{``}kind projector\index{kind@\textsf{``}kind projector\textsf{''} plugin}\textsf{''} +plugin and replace the type constructor \lstinline!L! by a nameless +type function \lstinline!Lambda[X => Either[F[X], G[X]]]!. The code +becomes: \begin{lstlisting} implicit def functorEither[F[_], G[_]] = new Functor[Lambda[X => Either[F[X], G[X]]]] { type L[A] = Either[F[A], G[A]] // We may use F and G to define a type alias in this scope. @@ -4324,14 +4310,14 @@ \subsubsection{Example \label{subsec:tc-Example-10}\ref{subsec:tc-Example-10}} \subparagraph{Solution} -\textbf{(a)} We need to implement a function with type signature +\textbf{(a)} We need to implement a function with type signature: \[ \forall(A,B).\,C^{A}+C^{B}\rightarrow C^{A\times B}\quad. \] -Begin by looking at the types involved. We need to relate values $C^{A\times B}$, -$C^{A}$, and $C^{B}$; can we relate $A\times B$, $A$, and $B$? -There exist unique fully parametric functions $\pi_{1}$ and $\pi_{2}$ -of types $A\times B\rightarrow A$ and $A\times B\rightarrow B$. +Begin by looking at the types involved. 
We need to relate values of +types $C^{A\times B}$, $C^{A}$, and $C^{B}$; can we relate $A\times B$, +$A$, and $B$? There exist unique fully parametric functions $\pi_{1}$ +and $\pi_{2}$ of types $A\times B\rightarrow A$ and $A\times B\rightarrow B$. If we lift these functions to the contrafunctor $C$, we will get $\pi_{1}^{\downarrow C}:C^{A}\rightarrow C^{A\times B}$ and $\pi_{2}^{\downarrow C}:C^{B}\rightarrow C^{A\times B}$. The required type signature is then implemented via a \lstinline!match! @@ -4395,7 +4381,7 @@ \subsubsection{Example \label{subsec:tc-Example-10}\ref{subsec:tc-Example-10}} (p.map { case (a, b) => a }, p.map { case (a, b) => b }) // Shorter code: (p.map(_._1), p.map(_._2)) \end{lstlisting} -A shorter code for $f$ via the \textsf{``}diagonal\textsf{''} function $\Delta\triangleq(q^{:Q}\rightarrow q\times q)$ +A shorter formula for $f$ via the \textsf{``}diagonal\textsf{''} function $\Delta\triangleq(q^{:Q}\rightarrow q\times q)$ and the pair product $\boxtimes$ is: \[ f^{:F^{A\times B}\rightarrow F^{A}\times F^{B}}\triangleq\Delta\bef(\pi_{1}^{\uparrow F}\boxtimes\pi_{2}^{\uparrow F})\quad. @@ -4430,15 +4416,15 @@ \subsubsection{Example \label{subsec:tc-Example-10}\ref{subsec:tc-Example-10}} and we are required to produce a value of type $(P\rightarrow A\times B)+(Q\rightarrow A\times B)$. The resulting type signature is: \[ -(P\rightarrow A)\times(Q\rightarrow B)\rightarrow(P\rightarrow A\times B)+(Q\rightarrow A\times B) +(P\rightarrow A)\times(Q\rightarrow B)\rightarrow(P\rightarrow A\times B)+(Q\rightarrow A\times B)\quad. \] -and cannot be implemented by fully parametric code. Indeed, to return -a value of type $P\rightarrow A\times B$, we would need to produce -a pair of type $A\times B$ from a value of type $P$. But the given -arguments have types $P\rightarrow A$ and $Q\rightarrow B$, so producing -a pair $A\times B$ requires us to have also a value of type $Q$, -which we do not have. Similarly, we cannot return a value of type -$Q\rightarrow A\times B$. +This type cannot be implemented by fully parametric code. Indeed, +to return a value of type $P\rightarrow A\times B$, we would need +to produce a pair of type $A\times B$ from a value of type $P$. +But the given arguments have types $P\rightarrow A$ and $Q\rightarrow B$, +so producing a pair $A\times B$ requires us to have also a value +of type $Q$, which we do not have. Similarly, we cannot return a +value of type $Q\rightarrow A\times B$. We find that the function $g$ cannot be implemented for the functor $F^{A}\triangleq(P\rightarrow A)+(Q\rightarrow A)$. 
Functors $F$ @@ -4450,7 +4436,7 @@ \subsubsection{Example \label{subsec:tc-Example-10}\ref{subsec:tc-Example-10}} \subsubsection{Example \label{subsec:tc-Example-pointed-alternative}\ref{subsec:tc-Example-pointed-alternative}} -(R.~O\textsf{'}Connor\index{Russell O\textsf{'}Connor} \footnote{See \texttt{\href{https://mail.haskell.org/pipermail/haskell-cafe/2015-November/122357.html}{https://mail.haskell.org/pipermail/haskell-cafe/2015-November/122357.html}}}) +(R.~O\textsf{'}Connor\index{Russell O\textsf{'}Connor}\footnote{See \texttt{\href{https://mail.haskell.org/pipermail/haskell-cafe/2015-November/122357.html}{https://mail.haskell.org/pipermail/haskell-cafe/2015-November/122357.html}}}) Assume that a functor $F$ admits a function $p$ with type signature: \begin{lstlisting} def p[A, B, F[_]: Functor]: Either[A, F[B]] => F[Either[A, B]] @@ -4458,7 +4444,7 @@ \subsubsection{Example \label{subsec:tc-Example-pointed-alternative}\ref{subsec: \[ p^{A,B}:A+F^{B}\rightarrow F^{A+B}\quad, \] -additionally satisfying the special laws of identity and associativity: + satisfying the following special laws of identity and associativity: \[ p^{\bbnum 0,B}=(b^{:B}\rightarrow\bbnum 0+b)^{\uparrow F}\quad,\quad\quad p^{A+B,C}=\,\begin{array}{|c||cc|} & A & F^{B+C}\\ @@ -4489,8 +4475,7 @@ \subsubsection{Example \label{subsec:tc-Example-pointed-alternative}\ref{subsec: Conversely, assuming that $F$ is pointed, we use its $\text{pu}_{F}$ function to define $p$ as: \begin{lstlisting} -def p[F[_]: Functor : Pointed, A, B] - : Either[A, F[B]] => F[Either[A, B]] = { +def p[F[_]: Functor : Pointed, A, B]: Either[A, F[B]] => F[Either[A, B]] = { case Left(a) => pure[F, Either[A, B]](Left(a)) case Right(fb) => fb.map { b => Right[A, B](b) } } @@ -4516,11 +4501,11 @@ \subsubsection{Example \label{subsec:tc-Example-pointed-alternative}\ref{subsec: To verify the associativity law, we begin with its right-hand side since it is more complicated: \begin{align*} - & \begin{array}{|c||cc|} +\begin{array}{|c||cc|} & A & F^{B+C}\\ \hline A & \text{id} & \bbnum 0\\ B+F^{C} & \bbnum 0 & p^{B,C} -\end{array}\,\bef p^{A,B+C}=\,\begin{array}{|c||cc|} +\end{array}\,\bef p^{A,B+C} & =\,\begin{array}{|c||cc|} & A & F^{B+C}\\ \hline A & \text{id} & \bbnum 0\\ B+F^{C} & \bbnum 0 & p^{B,C} @@ -4528,15 +4513,13 @@ \subsubsection{Example \label{subsec:tc-Example-pointed-alternative}\ref{subsec: & F^{A+B+C}\\ \hline A & (a^{:A}\rightarrow a+\bbnum 0^{:B+C})\bef\text{pu}_{F}\\ F^{B+C} & (x^{:B+C}\rightarrow\bbnum 0^{:A}+x)^{\uparrow F} -\end{array} -\end{align*} -\begin{align*} - & =\,\begin{array}{|c||c|} +\end{array}\\ + & =\,\,\begin{array}{|c||c|} & F^{A+B+C}\\ \hline A & (a^{:A}\rightarrow a+\bbnum 0^{:B+C})\bef\text{pu}_{F}\\ B+F^{C} & p^{B,C}\bef(x^{:B+C}\rightarrow\bbnum 0^{:A}+x)^{\uparrow F} \end{array}\\ - & =\,\,\begin{array}{|c||c|} + & =\,\begin{array}{|c||c|} & F^{A+B+C}\\ \hline A & (a^{:A}\rightarrow a+\bbnum 0^{:B+C})\bef\text{pu}_{F}\\ B & (b^{:B}\rightarrow b+\bbnum 0^{:C})\bef\text{pu}_{F}\bef(x^{:B+C}\rightarrow\bbnum 0^{:A}+x)^{\uparrow F}\\ @@ -4545,8 +4528,8 @@ \subsubsection{Example \label{subsec:tc-Example-pointed-alternative}\ref{subsec: \end{align*} In the last line, we have expanded the type matrix to three rows corresponding to the disjunctive type $A+B+F^{C}$. We need to show that the last -matrix equals $p^{A+B,C}$; so let us rewrite $p^{A+B,C}$ as a similarly -expanded type matrix, using the type isomorphisms such as $\bbnum 0^{:A}+\bbnum 0^{:B}\cong\bbnum 0^{:A+B}$: +matrix equals $p^{A+B,C}$. 
So, let us rewrite $p^{A+B,C}$ as an +expanded type matrix with help of the type isomorphisms such as $\bbnum 0^{:A}+\bbnum 0^{:B}\cong\bbnum 0^{:A+B}$: \[ p^{A+B,C}=\,\begin{array}{|c||c|} & F^{A+B+C}\\ @@ -4618,7 +4601,7 @@ \subsubsection{Exercise \label{subsec:Exercise-1-monads-1}\ref{subsec:Exercise-1 \subsubsection{Exercise \label{subsec:tc-Exercise-5}\ref{subsec:tc-Exercise-5}} Using the \texttt{cats} library, implement a \lstinline!Functor! -instance for \lstinline!type F[T] = Try[Seq[T]]!. +instance for the type constructor \lstinline!F! defined by \lstinline!F[T] = Try[Seq[T]]!. \subsubsection{Exercise \label{subsec:tc-Exercise-6}\ref{subsec:tc-Exercise-6}} @@ -4640,9 +4623,9 @@ \subsubsection{Exercise \label{subsec:tc-Exercise-8}\ref{subsec:tc-Exercise-8}} \subsubsection{Exercise \label{subsec:tc-Exercise-8-1}\ref{subsec:tc-Exercise-8-1}} -Show explicitly that a value $\text{wu}_{C}:C^{\bbnum 1}$ is computationally -equivalent to a value $\text{pu}_{C}:\forall A.\,C^{A}$ that satisfies -the naturality law~(\ref{eq:naturality-law-for-pure-for-contrafunctors}). +Show explicitly that a value $\text{wu}_{C}:C^{\bbnum 1}$ is equivalent +to a value $\text{pu}_{C}:\forall A.\,C^{A}$ that satisfies the naturality +law~(\ref{eq:naturality-law-for-pure-for-contrafunctors}). \subsubsection{Exercise \label{subsec:tc-Exercise-9}\ref{subsec:tc-Exercise-9}} @@ -4679,7 +4662,8 @@ \subsubsection{Exercise \label{subsec:tc-Exercise-9-1-1}\ref{subsec:tc-Exercise- parameterized by a functor $F$ and arbitrary types $A$, $B$. Show that the inverse type signature, $(A\rightarrow F^{B})\rightarrow F^{A\rightarrow B}$, cannot be implemented for some functors $F$. (Functors admitting -a function with that type signature are called \textsf{``}rigid\textsf{''}; see Section~\ref{subsec:Rigid-functors}.) +a function with that type signature are called \textsf{``}rigid\index{rigid functors}\textsf{''} +in this book; see Section~\ref{subsec:Rigid-functors}.) \subsubsection{Exercise \label{subsec:tc-Exercise-9-1-1-1}\ref{subsec:tc-Exercise-9-1-1-1}} @@ -4731,24 +4715,24 @@ \subsection{The existence of values for recursive types\label{subsec:Recursive-t in a program? In all examples seen so far, the recursive type equations\index{recursive type equation} -had the form $T\triangleq S^{T}$ where the type constructor $S$ -is a \emph{functor}. Type equations with non-functor $S$ (e.g., the +had the form $T\triangleq S^{T}$ where the recursion scheme $S$ +is a \emph{functor}. Non-covariant recursion schemes (e.g., in the equation $T\triangleq T\rightarrow\text{Int}$) do not seem to be useful in practice, and we will not consider them in this book. In a rigorous approach, showing that $T$ is a \textsf{``}solution\textsf{''} (called a \textbf{fixpoint}\index{fixpoint type}) of the type equation $T\triangleq S^{T}$ means proving that the types $T$ and $S^{T}$ are equivalent (isomorphic). -We must implement this type isomorphism as two functions, named e.g., +We must implement this type isomorphism as two functions, often named \lstinline!fix! and \lstinline!unfix!, satisfying the conditions: \[ \text{fix}:S^{T}\rightarrow T\quad,\quad\quad\text{unfix}:T\rightarrow S^{T}\quad,\quad\quad\text{fix}\bef\text{unfix}=\text{id}\quad,\quad\quad\text{unfix}\bef\text{fix}=\text{id}\quad. 
\] -Given a type constructor $S$, we can define the recursive type $T$ -with this Scala code: +Given a recursion scheme $S$, we can define the recursive type $T$ +with this code: \begin{lstlisting} -final case class T(s: S[T]) // Type constructor S[_] must be already defined. +final case class T(s: S[T]) // Type constructor S must be already defined. def fix: S[T] => T = { s => T(s) } def unfix: T => S[T] = { t => t.s } \end{lstlisting} @@ -4787,7 +4771,8 @@ \subsection{The existence of values for recursive types\label{subsec:Recursive-t instead of \lstinline!val! does not help: the evaluation of the code for \lstinline!x! will not terminate. We conclude that this recursive definition is invalid: one cannot create values of the resulting type -\lstinline!T!. The type \lstinline!T! is void.\index{void type}\index{infinite loop in type recursion} +\lstinline!T!. The type \lstinline!T! (as defined by this code) +is void.\index{void type}\index{infinite loop in type recursion} Next, consider a product functor such as $S^{A}\triangleq A\times A\times\text{Int}$. Can we create values of type $T\triangleq S^{T}$? @@ -4801,8 +4786,8 @@ \subsection{The existence of values for recursive types\label{subsec:Recursive-t can create a value of type \lstinline!T!. That requirement is impossible to satisfy. -For some disjunctive type constructors $S^{\bullet}$, values of type -$\text{Fix}^{S}$ are created with no difficulty. One example is $S^{A}\triangleq\text{Int}+A\times A$: +For some disjunctive recursion schemes $S$, values of type $\text{Fix}^{S}$ +are created with no difficulty. One example is $S^{A}\triangleq\text{Int}+A\times A$: \begin{lstlisting} final case class T(s: Either[Int, (T, T)]) val x: T = T(Left(123)) // OK @@ -4812,8 +4797,8 @@ \subsection{The existence of values for recursive types\label{subsec:Recursive-t without need for any previous values of $T$. We can then use \lstinline!x! to create a value \lstinline!y! of type $T$. This resembles defining a value by induction: the base case is the type $\text{Int}+\bbnum 0^{:T\times T}$, -which is a disjunctive part of $S^{A}$ that does \emph{not} contain -any values of type $A$. The inductive step is the type $\bbnum 0^{:\text{Int}}+T\times T$, +which is a disjunctive part of the type $S^{A}$ that does \emph{not} +contain any values of type $A$. The inductive step is the type $\bbnum 0^{:\text{Int}}+T\times T$, which creates a new value of type $T$ from two previous values. Type recursion terminates when the base case exists. @@ -4821,10 +4806,10 @@ \subsection{The existence of values for recursive types\label{subsec:Recursive-t do not have a base case where a value of type $S^{A}$ could be computed without need for any previous values of type $A$. -Given a functor $S^{A}$, how can we determine whether the type recursion -$T\triangleq S^{T}$ terminates? If $S^{A}$ is a \emph{polynomial} -functor, we can view $S^{A}$ as a polynomial function of $A$ and -reduce it to the form: +Given a \index{recursion scheme}recursion scheme $S$, how can we +determine whether the type recursion $T\triangleq S^{T}$ terminates? +If $S$ is a \emph{polynomial} functor, we can view $S^{A}$ as a +polynomial function of $A$ and reduce it to the form: \begin{equation} S^{A}\cong C_{0}+C_{1}\times A+C_{2}\times A\times A+...\label{eq:functor-polynomial-normal-form} \end{equation} @@ -4876,8 +4861,8 @@ \subsection{The existence of values for recursive types\label{subsec:Recursive-t applied, returns a new value of the same type $T$. 
The new value of type $T$ does not need to be computed in advance: its evaluation is \emph{delayed} until some code decides to call \lstinline!next!. -For this reason, an infinite loop is avoided even though the structure -functor $S$ has no \textsf{``}base case\textsf{''}. Values of type $T$ can be viewed +For this reason, an infinite loop is avoided even though the recursion +scheme $S$ has no \textsf{``}base case\textsf{''}. Values of type $T$ can be viewed as an infinite stream of \lstinline!String! values computed on demand. An \lstinline!Int! value is required in order to produce the next element of the stream. A symbolic (and non-rigorous) representation @@ -4888,12 +4873,12 @@ \subsection{The existence of values for recursive types\label{subsec:Recursive-t As another example, consider $S^{A}\triangleq\bbnum 1\rightarrow\text{String}+\text{Int}\times A$. Using the type equivalence $P\cong(\bbnum 1\rightarrow P)$, we could -transform $S^{A}$ into an equivalent functor $\tilde{S}^{A}$ in -the polynomial form~(\ref{eq:functor-polynomial-normal-form}): +transform the type expression $S^{A}$ into an equivalent type $\tilde{S}^{A}$ +in the polynomial form~(\ref{eq:functor-polynomial-normal-form}): \[ \tilde{S}^{A}\triangleq\text{String}+\text{Int}\times A\quad. \] -Although $S^{A}$ and $\tilde{S}^{A}$ are equivalent, the recursive +Although $S^{A}$ and $\tilde{S}^{A}$ are equivalent types, the recursive types $\text{Fix}^{S}$ and $\text{Fix}^{\tilde{S}}$ are different. We have: \begin{align*} @@ -4907,13 +4892,13 @@ \subsection{The existence of values for recursive types\label{subsec:Recursive-t \begin{lstlisting}[mathescape=true] final case class T(e: () => Either[String, (Int, T)]) -val t1: T = T(() => Right((1, t1))) // Stream [1, 1, 1, ...]. -def t2(n: Int): T = T(() => Right((n, t2(n+1)))) // Stream [n, n+1, n+2, ...]. +val t1: T = T(() => Right((1, t1))) // [1, 1, 1, ...] +def t2(n: Int): T = T(() => Right((n, t2(n+1)))) // [n, n+1, n+2, ...] \end{lstlisting} -The type $T\triangleq\text{Fix}^{S}$ also admits bounded streams -defined without recursion, for example: +The type $T\triangleq\text{Fix}^{S}$ also admits bounded streams, +for example: \begin{lstlisting} -val t0 = T(() => Right((10, T(() => Left("stop"))))) // Stream [10, "stop"]. +val t0 = T(() => Right((10, T(() => Left("stop"))))) // [10, "stop"] \end{lstlisting} We can recognize that $\text{Fix}^{S}$ has some non-recursive values by checking that the type $S^{\bbnum 0}$ is not void: @@ -4929,14 +4914,13 @@ \subsection{The existence of values for recursive types\label{subsec:Recursive-t implement a value of type $C^{T}\rightarrow P^{T}$? We can use recursion to implement a value of $T$, as long as we can then somehow produce a value of type $P^{T}$ out of a value of type $T$. This is precisely -the condition for $P^{\bullet}$ to be a pointed functor. The contrafunctor -$C^{A}$ is an argument of the function of type $C^{A}\rightarrow P^{A}$, +the condition for $P$ to be a pointed functor. The type $C^{A}$ +is an argument of the function of type $C^{A}\rightarrow P^{A}$, so we are not required to produce values of type $C^{A}$\textemdash{} we \emph{consume} those values. It follows that we can implement a value of type $C^{T}\rightarrow P^{T}$ (with $T=\text{Fix}^{S}$) -as long as $P^{\bullet}$ is a pointed functor. As we saw in Section~\ref{subsec:Pointed-functors-motivation-equivalence}, -a functor $P^{\bullet}$ is pointed if we can compute a value of type -$P^{\bbnum 1}$. +as long as $P$ is a pointed functor. 
As we saw in Section~\ref{subsec:Pointed-functors-motivation-equivalence}, +a functor $P$ is pointed if we can compute a value of type $P^{\bbnum 1}$. In the example $S^{A}\triangleq\text{String}\times(\text{Int}\rightarrow A)$, the functor $S^{A}$ is pointed since: @@ -4945,14 +4929,14 @@ \subsection{The existence of values for recursive types\label{subsec:Recursive-t \] This consideration applies to any sub-expression of the form $C^{A}\rightarrow P^{A}$ -within the type constructor $S^{A}$. The condition for values of -$\text{Fix}^{S}$ to exist is that every functor $P^{A}$ involved -in such sub-expressions should be pointed. To check that, we can set -the type parameter $A=\bbnum 1$ in all return types of \emph{functions} -within $S^{A}$. If the resulting type is not void, we will be able -to implement a recursively defined value of type $\text{Fix}^{S}$. - -If the functor $S^{A}$ has the property $S^{\bbnum 0}\not\cong\bbnum 0$ +within the type $S^{A}$. The condition for values of $\text{Fix}^{S}$ +to exist is that every functor $P$ involved in such sub-expressions +should be pointed. To check that, we can set the type parameter $A=\bbnum 1$ +in all return types of \emph{functions} within the type $S^{A}$. +If the resulting type is not void, we will be able to implement a +recursively defined value of type $\text{Fix}^{S}$. + +If the functor $S$ has the property $S^{\bbnum 0}\not\cong\bbnum 0$ (i.e., we have a base case for the inductive definition of $\text{Fix}^{S}$), we will also be able to implement non-recursive values of type $\text{Fix}^{S}$. @@ -4966,8 +4950,8 @@ \subsection{The existence of values for recursive types\label{subsec:Recursive-t \subsection{Proofs of associativity of \texttt{concat} for lists and arrays\label{subsec:Proofs-for-associativity-law-lists-and-arrays-concat}} -The \lstinline!concat! function is defined for both lists and arrays, -and works similarly for these data types: +The \lstinline!concat! function is defined for both lists and arrays +and works similarly for those data types: \begin{lstlisting} scala> Array.concat(Array(1, 2), Array(3, 4), Array(5, 6)) res0: Array[Int] = Array(1, 2, 3, 4, 5, 6) @@ -4975,8 +4959,8 @@ \subsection{Proofs of associativity of \texttt{concat} for lists and arrays\labe scala> List.concat(List(1, 2), List(3, 4), List(5, 6)) res1: List[Int] = List(1, 2, 3, 4, 5, 6) \end{lstlisting} -In this section, we will show rigorously that concatenation is an -associative operation. +In this section, we will prove that concatenation is an associative +operation. In Scala, \lstinline!Array[A]! is a sequence whose elements can be accessed by index. The array access function (the \lstinline!apply! @@ -4989,7 +4973,7 @@ \subsection{Proofs of associativity of \texttt{concat} for lists and arrays\labe scala> x(2) // The syntax `x(2)` is the same as `x.apply(2)`. res2: String = c -scala> x(3) // Applying the partial function `x.apply` to the value 3 will fail: +scala> x(3) // Applying the partial function `x.apply` to the value 3 fails: java.lang.ArrayIndexOutOfBoundsException: 3 \end{lstlisting} We can denote the type of this function by $\text{Int}_{[0,n-1]}\rightarrow A$ @@ -5009,14 +4993,14 @@ \subsection{Proofs of associativity of \texttt{concat} for lists and arrays\labe \subsubsection{Statement \label{subsec:Statement-concat-array-associativity}\ref{subsec:Statement-concat-array-associativity}} For arrays, $\text{Array}_{n}^{A}\triangleq\text{Int}_{[0,n-1]}\rightarrow A$, -the \lstinline!concat! 
function (denoted $\pplus$) defined by: +the \lstinline!concat! function (denoted $\pplus$) is defined by: \[ a_{1}^{:\text{Array}_{n_{1}}^{A}}\pplus a_{2}^{:\text{Array}_{n_{2}}^{A}}\triangleq i^{:\text{Int}_{[0,n_{1}+n_{2}-1]}}\rightarrow\begin{cases} 0\leq i Array[A] = -{ case List() => Array() - case x :: s => Array(x) ++ f2.apply(s) +def f2[A: ClassTag]: List[A] => Array[A] = { + case List() => Array() + case x :: s => Array(x) ++ f2.apply(s) } \end{lstlisting} \begin{align*} @@ -5135,10 +5129,10 @@ \subsubsection{Statement \label{subsec:Statement-array-list-equivalence}\ref{sub & =\bbnum 0+x\times\overline{f_{1}}(\gunderline{i\rightarrow\overline{f_{2}}(s)(i+1-1)})=\bbnum 0+x\times\gunderline{\overline{f_{1}}(\overline{f_{2}}}(s))=\bbnum 0+x\times s\quad. \end{align*} This concludes the proof of the isomorphism between \lstinline!Array! -and \lstinline!List!. +and \lstinline!List!. $\square$ -Since arrays and lists are isomorphic as types, the concatenation -for lists is associative as long as we show that the concatenation +Since arrays and lists are isomorphic as types, it will follow that +the concatenation for lists is associative if we show that the concatenation operation for lists is isomorphic to that we defined for arrays. \subsubsection{Statement \label{subsec:Statement-concat-array-as-list}\ref{subsec:Statement-concat-array-as-list}} @@ -5146,8 +5140,7 @@ \subsubsection{Statement \label{subsec:Statement-concat-array-as-list}\ref{subse The \lstinline!concat! function for lists is defined recursively as: \begin{lstlisting} -def concat[A](p: List[A], q: List[A]) - : List[A] = p match { +def concat[A](p: List[A], q: List[A]): List[A] = p match { case List() => q case a :: t => a :: concat(t, q) } @@ -5341,7 +5334,7 @@ \subsection{Typeclasses with several type parameters (type relations)\label{subs scala> convertNumber(123) res0: Double = 123.0 -scala> convertNumber(123:Short) +scala> convertNumber(123: Short) res1: Float = 123.0 scala> convertNumber("abc") @@ -5382,8 +5375,8 @@ \subsection{Typeclasses with several type parameters (type relations)\label{subs values of type \lstinline!Quantity[U]! are defined without using \lstinline!U!. The parameter \lstinline!U! would not be phantom if \lstinline!Quantity[U]! were not a constant functor\index{constant functor!example of use} -but instead contained values, e.g., of type \lstinline!U! or of type -\lstinline!U => Double! or of some other type that uses \lstinline!U!. +but instead contained values of type \lstinline!U! or of type \lstinline!U => Double! +or of some other type that uses \lstinline!U!. The function \lstinline!add[U1, U2](x, y)! must impose a type relation constraint on \lstinline!U1! and \lstinline!U2!, so that \lstinline!x! @@ -5613,7 +5606,7 @@ \subsection{$P$-typeclasses, their laws and structure\label{subsec:P-typeclasses \begin{centering} \begin{tabular}{|c|c|c|} \hline -\textbf{\small{}Typeclass} & \textbf{\small{}Type of instance values} & \textbf{\small{}Inductive form}\tabularnewline +\textbf{\small{}Typeclass} & \textbf{\small{}Type of instance values} & \textbf{\small{}Symbolic form}\tabularnewline \hline \hline {\small{}pointed type} & {\small{}$\bbnum 1\rightarrow A$} & {\small{}$P^{A}\rightarrow A$}\tabularnewline @@ -5671,18 +5664,17 @@ \subsection{$P$-typeclasses, their laws and structure\label{subsec:P-typeclasses Typeclasses for type constructors (\lstinline!Functor!, \lstinline!Pointed!, etc.) may be also described in a similar way. 
Instead of a structure functor $P^{A}$, we need to use a higher-order type function denoted -by $P^{A,B,F^{\bullet}}$ in Table~\ref{tab:Types-of-typeclass-instance-values}. -The type $P^{A,B,F^{\bullet}}$ is parameterized by a type constructor -$F^{\bullet}$ as well as by extra type parameters $A$ and $B$, -so $P$ has kind $*\times*\times(*\rightarrow*)\rightarrow*$. The -evidence values are expressed as functions with several type parameters, -such as $\forall(A,B).\,P^{A,B,F}\rightarrow F^{B}$. This type signature -is of the form $P^{F}\rightarrow F$ except for the additional type -parameters. The laws of type constructor typeclasses are also more -complicated because they often involve multiple type parameters and -arbitrary functions. To simplify the presentation in this section, -we will only consider typeclasses for simple types, such as \lstinline!Semigroup! -or \lstinline!Monoid!. +by $P^{A,B,F}$ in Table~\ref{tab:Types-of-typeclass-instance-values}. +The type $P^{A,B,F}$ is parameterized by a type constructor $F$ +as well as by extra type parameters $A$ and $B$, so $P$ has kind +$*\times*\times(*\rightarrow*)\rightarrow*$. The evidence values +are expressed as functions with several type parameters, such as $\forall(A,B).\,P^{A,B,F}\rightarrow F^{B}$. +This type signature is of the form $P^{F}\rightarrow F$ except for +the additional type parameters. The laws of type constructor typeclasses +are also more complicated because they often involve multiple type +parameters and arbitrary functions. To simplify the presentation in +this section, we will only consider typeclasses for simple types, +such as \lstinline!Semigroup! or \lstinline!Monoid!. Implementing the \lstinline!Monoid! typeclass via a structure functor $P$ and a function $P^{A}\rightarrow A$ is inconvenient for practical @@ -5710,7 +5702,7 @@ \subsection{$P$-typeclasses, their laws and structure\label{subsec:P-typeclasses This argument does \emph{not} prove that the typeclass laws will also hold for $A\times B$. We will prove that in Chapter~\ref{chap:Free-type-constructions} -that develops some advanced techniques for reasoning about $P$-typeclass +after developing some advanced techniques for reasoning about $P$-typeclass laws. For now, we note that the co-product construction \emph{cannot} be @@ -5739,9 +5731,9 @@ \subsection{$P$-typeclasses, their laws and structure\label{subsec:P-typeclasses \paragraph{Recursive types} Consider a recursive type $T$ defined by a type equation $T\triangleq S^{T}$, -where the functor $S^{\bullet}$ \textsf{``}preserves\textsf{''} $P$-typeclass instances: -if $A$ has an instance then $S^{A}$ also does, as we saw in all -our examples in this chapter. In other words, we have a function $\text{tcS}:(P^{A}\rightarrow A)\rightarrow P^{S^{A}}\rightarrow S^{A}$ +where the functor $S$ \textsf{``}preserves\textsf{''} $P$-typeclass instances: if +$A$ has an instance then $S^{A}$ also does, as we saw in all our +examples in this chapter. In other words, we have a function $\text{tcS}:(P^{A}\rightarrow A)\rightarrow P^{S^{A}}\rightarrow S^{A}$ that creates evidence values of type $P^{S^{A}}\rightarrow S^{A}$ out of evidence values of type $P^{A}\rightarrow A$. Then we define an evidence value \lstinline!tcT! for $T$ as: @@ -5764,7 +5756,7 @@ \subsection{$P$-typeclasses, their laws and structure\label{subsec:P-typeclasses \begin{lstlisting}[mathescape=true] type S[A] = ... // Define a functor S as required. 
final case class T(s: S[T]) // Define the recursive type T as ${ \color{dkgreen}\scriptstyle{T\triangleq \,S^{T}} }$. -def tcS: TC[A] => TC[S[A]] = ... // Compute instances for S[A] from instances of A. +def tcS: TC[A] => TC[S[A]] = ... // Compute instances for S[A] given instances for A. def tcT: P[T] => T = p => T(tcS(tcT)(p.map(_.s))) // The recursive instance. \end{lstlisting} In this way, the recursive-type construction works for any $P$-typeclass. diff --git a/sofp-src/tex/sofp.tex b/sofp-src/tex/sofp.tex index 4e4988e23..31aa31553 100644 --- a/sofp-src/tex/sofp.tex +++ b/sofp-src/tex/sofp.tex @@ -320,9 +320,9 @@ {\footnotesize{}ISBN (e-book): 978-0-359-76877-6}\\ {\footnotesize{}ISBN: 978-0-359-76877-6}\\ \\ -{\scriptsize{}Source hash (sha256): cbd327cf5baeac2e9d58471a6d010dc2c542ea70d4d5c5a4c4c8ce793c5a5b43}\\ -{\scriptsize{}Git commit: 35ba0d9120711fb25927dc04d722f92824b93408}\\ -{\scriptsize{}PDF file built on Sat, 04 May 2024 21:18:10 +0200 by pdfTeX 3.141592653-2.6-1.40.25 (TeX Live 2023) on Darwin}\\ +{\scriptsize{}Source hash (sha256): ddc887c85af605b9dfc78384a9b367cca3e7ff5ac1dae8ac02fa0be8639c9f9a}\\ +{\scriptsize{}Git commit: 6567bd3d1d697221e6d321f0827dac34f79bc150}\\ +{\scriptsize{}PDF file built on Mon, 15 Jul 2024 14:50:11 +0200 by pdfTeX 3.141592653-2.6-1.40.25 (TeX Live 2023) on Darwin}\\ ~\\ {\scriptsize{}Permission is granted to copy, distribute and/or modify this document under the terms of the GNU Free Documentation License, @@ -354,13 +354,13 @@ laws, structural analysis, and code for functors, monads, and other typeclasses based on exponential-polynomial data types; techniques of symbolic derivation and proof; free typeclass constructions; and -parametricity theorems.}\\ +practical applications of parametricity.}\\ {\scriptsize{}}\\ {\scriptsize{}Long and difficult, yet boring explanations are logically -developed in excruciating detail through 1893 Scala +developed in excruciating detail through 1892 Scala code snippets, 191 statements with step-by-step derivations, 103 diagrams, 223 examples with tested Scala -code, and 300 exercises. Discussions build upon each +code, and 308 exercises. Discussions build upon each chapter's material further.}\\ {\scriptsize{}}\\ {\scriptsize{}Beginners in FP will find tutorials about the map/reduce diff --git a/talk_slides/fp_system_f_omega_dhall.lyx b/talk_slides/fp_system_f_omega_dhall.lyx new file mode 100644 index 000000000..2f7d2a47f --- /dev/null +++ b/talk_slides/fp_system_f_omega_dhall.lyx @@ -0,0 +1,1261 @@ +#LyX 2.3 created this file. For more info see http://www.lyx.org/ +\lyxformat 544 +\begin_document +\begin_header +\save_transient_properties true +\origin unavailable +\textclass beamer +\begin_preamble +\usetheme[secheader]{Boadilla} +\usecolortheme{seahorse} +\title[FP in System F$\omega$ using Dhall]{Functional programming in System F$\omega$ using Dhall} +\author{Sergei Winitzki} +\date{2024-05-05} +\institute[ABTB]{Academy by the Bay 2024} +\setbeamertemplate{headline}{} % disable headline at top +\setbeamertemplate{navigation symbols}{} % disable navigation bar at bottom +\usepackage[all]{xy} % xypic +%\makeatletter +% Macros to assist LyX with XYpic when using scaling. +\newcommand{\xyScaleX}[1]{% +\makeatletter +\xydef@\xymatrixcolsep@{#1} +\makeatother +} % end of \xyScaleX +\makeatletter +\newcommand{\xyScaleY}[1]{% +\makeatletter +\xydef@\xymatrixrowsep@{#1} +\makeatother +} % end of \xyScaleY + +% Double-stroked fonts to replace the non-working \mathbb{1}. 
+\usepackage{bbold} +\DeclareMathAlphabet{\bbnumcustom}{U}{BOONDOX-ds}{m}{n} % Use BOONDOX-ds or bbold. +\newcommand{\custombb}[1]{\bbnumcustom{#1}} +% The LyX document will define a macro \bbnum{#1} that calls \custombb{#1}. + +\usepackage{relsize} % make math symbols larger or smaller +\usepackage{stmaryrd} % some extra symbols such as \fatsemi +% Note: using \forwardcompose inside a \text{} will cause a LaTeX error! +\newcommand{\forwardcompose}{\hspace{1.5pt}\ensuremath\mathsmaller{\fatsemi}\hspace{1.5pt}} + + +% Make underline green. +\definecolor{greenunder}{rgb}{0.1,0.6,0.2} +%\newcommand{\munderline}[1]{{\color{greenunder}\underline{{\color{black}#1}}\color{black}}} +\def\mathunderline#1#2{\color{#1}\underline{{\color{black}#2}}\color{black}} +% The LyX document will define a macro \gunderline{#1} that will use \mathunderline with the color `greenunder`. +%\def\gunderline#1{\mathunderline{greenunder}{#1}} % This is now defined by LyX itself with GUI support. + +% Scala syntax highlighting. See https://tex.stackexchange.com/questions/202479/unable-to-define-scala-language-with-listings +%\usepackage[T1]{fontenc} +%\usepackage[utf8]{inputenc} +%\usepackage{beramono} +%\usepackage{listings} +% The listing settings are now supported by LyX in a separate section "Listings". +\usepackage{xcolor} + +\definecolor{scalakeyword}{rgb}{0.16,0.07,0.5} +\definecolor{dkgreen}{rgb}{0,0.6,0} +\definecolor{gray}{rgb}{0.5,0.5,0.5} +\definecolor{mauve}{rgb}{0.58,0,0.82} +\definecolor{aqua}{rgb}{0.9,0.96,0.999} +\definecolor{scalatype}{rgb}{0.2,0.3,0.2} +\usepackage[nocenter]{qtree} +\usepackage{relsize} +\renewcommand\arraystretch{1.4} +\end_preamble +\use_default_options true +\maintain_unincluded_children false +\language english +\language_package default +\inputencoding auto +\fontencoding global +\font_roman "default" "default" +\font_sans "default" "default" +\font_typewriter "default" "default" +\font_math "auto" "auto" +\font_default_family default +\use_non_tex_fonts false +\font_sc false +\font_osf false +\font_sf_scale 100 100 +\font_tt_scale 100 100 +\use_microtype false +\use_dash_ligatures true +\graphics default +\default_output_format default +\output_sync 0 +\bibtex_command default +\index_command default +\paperfontsize default +\spacing single +\use_hyperref true +\pdf_bookmarks true +\pdf_bookmarksnumbered false +\pdf_bookmarksopen false +\pdf_bookmarksopenlevel 1 +\pdf_breaklinks false +\pdf_pdfborder false +\pdf_colorlinks true +\pdf_backref false +\pdf_pdfusetitle true +\papersize default +\use_geometry true +\use_package amsmath 1 +\use_package amssymb 1 +\use_package cancel 1 +\use_package esint 1 +\use_package mathdots 1 +\use_package mathtools 1 +\use_package mhchem 1 +\use_package stackrel 1 +\use_package stmaryrd 1 +\use_package undertilde 1 +\cite_engine basic +\cite_engine_type default +\biblio_style plain +\use_bibtopic false +\use_indices false +\paperorientation portrait +\suppress_date false +\justification true +\use_refstyle 1 +\use_minted 0 +\index Index +\shortcut idx +\color #008000 +\end_index +\secnumdepth 3 +\tocdepth 3 +\paragraph_separation indent +\paragraph_indentation default +\is_math_indent 0 +\math_numbering_side default +\quotes_style english +\dynamic_quotes 0 +\papercolumns 1 +\papersides 1 +\paperpagestyle default +\listings_params 
"language=Scala,morekeywords={{scala}},otherkeywords={=,=>,<-,<\%,<:,>:,\#,@,:,[,],.,???},keywordstyle={\color{scalakeyword}},morekeywords={[2]{String,Short,Int,Long,Char,Boolean,Double,Float,BigDecimal,Seq,Map,Set,List,Option,Either,Future,Vector,Range,IndexedSeq,Try,true,false,None,Some,Left,Right,Nothing,Any,Array,Unit,Iterator,Stream}},keywordstyle={[2]{\color{scalatype}}},frame=tb,aboveskip={1.5mm},belowskip={0.5mm},showstringspaces=false,columns=fullflexible,keepspaces=true,basicstyle={\smaller\ttfamily},extendedchars=true,numbers=none,numberstyle={\tiny\color{gray}},commentstyle={\color{dkgreen}},stringstyle={\color{mauve}},frame=single,framerule={0.0mm},breaklines=true,breakatwhitespace=true,tabsize=3,framexleftmargin={0.5mm},framexrightmargin={0.5mm},xleftmargin={1.5mm},xrightmargin={1.5mm},framextopmargin={0.5mm},framexbottommargin={0.5mm},fillcolor={\color{aqua}},rulecolor={\color{aqua}},rulesepcolor={\color{aqua}},backgroundcolor={\color{aqua}},mathescape=false,extendedchars=true" +\tracking_changes false +\output_changes false +\html_math_output 0 +\html_css_as_file 0 +\html_be_strict false +\end_header + +\begin_body + +\begin_layout Standard +\begin_inset FormulaMacro +\newcommand{\gunderline}[1]{\mathunderline{greenunder}{#1}} +{\underline{#1}} +\end_inset + + +\begin_inset FormulaMacro +\newcommand{\bef}{\forwardcompose} +{\fatsemi} +\end_inset + + +\begin_inset FormulaMacro +\newcommand{\bbnum}[1]{\custombb{#1}} +{\underline{#1}} +\end_inset + + +\begin_inset ERT +status open + +\begin_layout Plain Layout + + +\backslash +frame{ +\backslash +titlepage} +\end_layout + +\end_inset + + +\begin_inset Note Note +status collapsed + +\begin_layout Plain Layout +Beginning of slides. +\end_layout + +\end_inset + + +\end_layout + +\begin_layout Frame +\begin_inset Argument 4 +status open + +\begin_layout Plain Layout +Outline +\end_layout + +\end_inset + + +\end_layout + +\begin_deeper +\begin_layout Standard +The core of modern functional programming is System F +\begin_inset Formula $\omega$ +\end_inset + + +\end_layout + +\begin_layout Itemize +Algebraic data types +\end_layout + +\begin_layout Itemize +Type parameters and type constructors +\end_layout + +\begin_layout Itemize +Higher-kinded types +\end_layout + +\begin_deeper +\begin_layout Itemize +Haskell and Scala share the core of System F +\begin_inset Formula $\omega$ +\end_inset + + +\end_layout + +\begin_layout Itemize +OCaml, F#, Swift, Rust only have System F (no HKT) +\end_layout + +\begin_deeper +\begin_layout Itemize +Cannot have typeclasses for type constructors +\end_layout + +\end_deeper +\end_deeper +\begin_layout Standard +The +\begin_inset CommandInset href +LatexCommand href +name "Dhall language" +target "https://dhall-lang.org" +literal "false" + +\end_inset + + can be used as System F +\begin_inset Formula $\omega$ +\end_inset + + +\end_layout + +\begin_layout Itemize +A language for programmable configuration file templates +\end_layout + +\begin_layout Itemize +Can be also used for exploring advanced FP idioms +\end_layout + +\begin_layout Itemize +What I learned from my Dhall experience: +\end_layout + +\begin_deeper +\begin_layout Itemize +Difference between λ(a : A) → (b : B) and +\begin_inset Formula $\forall$ +\end_inset + +(a : A) → B +\end_layout + +\begin_layout Itemize +We can make recursion safe +\end_layout + +\begin_layout Itemize +Encodings of dependent types and GADTs +\end_layout + +\end_deeper +\end_deeper +\begin_layout Standard +\begin_inset Separator parbreak +\end_inset + + 
+\end_layout + +\begin_layout Frame +\begin_inset Argument 4 +status open + +\begin_layout Plain Layout +Functions with type parameters +\end_layout + +\end_inset + + +\begin_inset Separator latexpar +\end_inset + + +\end_layout + +\begin_deeper +\begin_layout Standard +OCaml: +\end_layout + +\begin_layout LyX-Code +let first : 'a * 'b -> 'a = fun (x, y) -> x ;; +\end_layout + +\begin_layout Standard +Haskell: +\end_layout + +\begin_layout LyX-Code +first :: (a, b) -> a +\end_layout + +\begin_layout LyX-Code +first (x, y) = x +\end_layout + +\begin_layout Standard +Scala: +\end_layout + +\begin_layout LyX-Code +def first[A, B] : ((A, B)) => A +\end_layout + +\begin_layout LyX-Code + = { case (x, y) => x } +\end_layout + +\begin_layout Standard +Java: +\end_layout + +\begin_layout LyX-Code +public static X first(X x, Y y) { return x; } +\end_layout + +\begin_layout Standard +Go: +\end_layout + +\begin_layout LyX-Code +func identity[A, B any](a A, b B) A { return a } +\end_layout + +\begin_layout Standard +Swift: +\end_layout + +\begin_layout LyX-Code +func first(of a: A, and b: B) -> A { +\end_layout + +\begin_layout LyX-Code + return a +\end_layout + +\begin_layout LyX-Code +} +\end_layout + +\end_deeper +\begin_layout Standard +\begin_inset Separator parbreak +\end_inset + + +\end_layout + +\begin_layout Frame +\begin_inset Argument 4 +status open + +\begin_layout Plain Layout +Programming with functions and nothing else +\end_layout + +\end_inset + + +\begin_inset Separator latexpar +\end_inset + + +\end_layout + +\begin_deeper +\begin_layout Standard +The core language of System F: +\end_layout + +\begin_layout Itemize +Value parameters and type parameters +\end_layout + +\begin_layout LyX-Code +a : A +\end_layout + +\begin_layout LyX-Code +A : Type +\end_layout + +\begin_layout Itemize +Create a function +\end_layout + +\begin_layout LyX-Code +( λ(a : A) → (body : B) ) : ( +\begin_inset Formula $\forall$ +\end_inset + +(a : A) → B ) +\end_layout + +\begin_layout LyX-Code +( λ(A : Type) → (body : B) ) : ( +\begin_inset Formula $\forall$ +\end_inset + +(A : Type) → B ) +\end_layout + +\begin_layout Itemize +Use a function +\end_layout + +\begin_layout LyX-Code +f x +\emph on +or equivalently +\emph default + ( (f : A → B) (x : A) ) : B +\end_layout + +\begin_layout LyX-Code +f A +\emph on +or equivalently +\emph default + ( (f : Type → B) (A : Type) ) : B +\end_layout + +\begin_layout Itemize +System F +\begin_inset Formula $\omega$ +\end_inset + + adds +\emph on +type constructor parameters +\end_layout + +\begin_layout LyX-Code +λ(F : Type → Type) → (body : B) : +\begin_inset Formula $\forall$ +\end_inset + +(F : Type → Type) → B +\end_layout + +\begin_layout LyX-Code +λ(F : (Type → Type) → Type → Type) → ... 
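For comparison, the three kinds of parameters just listed (value parameters, type parameters, and type-constructor parameters) can be rendered in Scala; this is a sketch with made-up method names:

// A value parameter x and a type parameter A, as in λ(A : Type) → λ(x : A) → x :
def identity[A](x: A): A = x
// Explicit type application identity[Int](10) corresponds to applying f A x in System F;
// Scala can also infer the type argument: identity(10).

// A type-constructor parameter F[_], which is what System Fω adds:
def nest[F[_], A](fa: F[A]): List[F[A]] = List(fa)
// nest[Option, Int](Some(1)) evaluates to List(Some(1)).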
+\end_layout + +\end_deeper +\begin_layout Standard +\begin_inset Separator parbreak +\end_inset + + +\end_layout + +\begin_layout Frame +\begin_inset Argument 4 +status open + +\begin_layout Plain Layout +Why use System F / System F +\begin_inset Formula $\omega$ +\end_inset + + +\end_layout + +\end_inset + + +\begin_inset Separator latexpar +\end_inset + + +\end_layout + +\begin_deeper +\begin_layout Standard +Pro +\end_layout + +\begin_layout Itemize +PL theory is simpler +\end_layout + +\begin_deeper +\begin_layout Itemize +A small language that is easy to implement as a sublanguage +\end_layout + +\begin_layout Itemize +LISP with type safety +\end_layout + +\end_deeper +\begin_layout Itemize +Programs always terminate ( +\begin_inset Quotes eld +\end_inset + +strong normalization +\begin_inset Quotes erd +\end_inset + +) +\end_layout + +\begin_layout Standard +Contra +\end_layout + +\begin_layout Itemize +No built-in data structures +\end_layout + +\begin_layout Itemize +No direct support for recursion +\end_layout + +\begin_layout Itemize +Very verbose +\end_layout + +\begin_layout Standard +To improve usability: +\end_layout + +\begin_layout Itemize +Add built-in types ( +\begin_inset listings +inline true +status open + +\begin_layout Plain Layout + +Bool +\end_layout + +\end_inset + +, +\begin_inset listings +inline true +status open + +\begin_layout Plain Layout + +Natural +\end_layout + +\end_inset + +, +\begin_inset listings +inline true +status open + +\begin_layout Plain Layout + +Text +\end_layout + +\end_inset + +, etc.) +\end_layout + +\begin_layout Itemize +Add syntax for named values, products, co-products, pattern matching +\end_layout + +\begin_layout Itemize +Add a module system +\end_layout + +\begin_layout Itemize +Add type inference +\end_layout + +\end_deeper +\begin_layout Standard +\begin_inset Separator parbreak +\end_inset + + +\end_layout + +\begin_layout Frame +\begin_inset Argument 4 +status open + +\begin_layout Plain Layout +System F +\begin_inset Formula $\omega$ +\end_inset + + can encode standard data types +\end_layout + +\end_inset + + +\begin_inset Separator latexpar +\end_inset + + +\end_layout + +\begin_deeper +\begin_layout Standard +Void type +\end_layout + +\begin_layout LyX-Code +\begin_inset Formula $\forall$ +\end_inset + +(A : Type) → A +\end_layout + +\begin_layout Standard +Unit type +\begin_inset VSpace -100baselineskip% +\end_inset + + +\end_layout + +\begin_layout LyX-Code +( λ(A : Type) → λ(x : A) → x ) : ( +\begin_inset Formula $\forall$ +\end_inset + +(A : Type) → A → A ) +\end_layout + +\begin_layout Standard +Product of types +\begin_inset listings +inline true +status open + +\begin_layout Plain Layout + +A +\end_layout + +\end_inset + + and +\begin_inset listings +inline true +status open + +\begin_layout Plain Layout + +B +\end_layout + +\end_inset + + (denoted by +\begin_inset listings +inline true +status open + +\begin_layout Plain Layout + +A +\end_layout + +\end_inset + + +\begin_inset Formula $\times$ +\end_inset + + +\begin_inset listings +inline true +status open + +\begin_layout Plain Layout + +B +\end_layout + +\end_inset + +) +\end_layout + +\begin_layout LyX-Code +λ(A : Type) → λ(B : Type) → +\end_layout + +\begin_layout LyX-Code + +\begin_inset Formula $\forall$ +\end_inset + +(R : Type) → (A → B → R) → R +\end_layout + +\begin_layout Standard +Co-product of types +\begin_inset listings +inline true +status open + +\begin_layout Plain Layout + +A +\end_layout + +\end_inset + + and +\begin_inset listings +inline 
true +status open + +\begin_layout Plain Layout + +B +\end_layout + +\end_inset + + (denoted by +\begin_inset listings +inline true +status open + +\begin_layout Plain Layout + +A +\end_layout + +\end_inset + + +\begin_inset Formula $+$ +\end_inset + + +\begin_inset listings +inline true +status open + +\begin_layout Plain Layout + +B +\end_layout + +\end_inset + +) +\end_layout + +\begin_layout LyX-Code +λ(A : Type) → λ(B : Type) → +\end_layout + +\begin_layout LyX-Code + +\begin_inset Formula $\forall$ +\end_inset + +(R : Type) → (A → R) → (B → R) → R +\end_layout + +\begin_layout Standard +Least fixpoint of +\begin_inset listings +inline true +status open + +\begin_layout Plain Layout + +A = F A +\end_layout + +\end_inset + + (denoted by +\begin_inset Formula $\mu$ +\end_inset + + +\begin_inset listings +inline true +status open + +\begin_layout Plain Layout + +A. + F A +\end_layout + +\end_inset + +) +\end_layout + +\begin_layout LyX-Code +λ(F : Type → Type) → +\begin_inset Formula $\forall$ +\end_inset + +(R : Type) → (F R → R) → R +\end_layout + +\begin_layout Standard +Existentially quantified types (denoted by +\begin_inset Formula $\exists$ +\end_inset + + +\begin_inset listings +inline true +status open + +\begin_layout Plain Layout + +A. + F A +\end_layout + +\end_inset + +) +\begin_inset VSpace -100baselineskip% +\end_inset + + +\end_layout + +\begin_layout LyX-Code +λ(A : Type) → +\begin_inset Formula $\forall$ +\end_inset + +(R : Type) → ( +\begin_inset Formula $\forall$ +\end_inset + +(S : Type) → F S → R) → R +\end_layout + +\begin_layout Standard +Greatest fixpoint of +\begin_inset listings +inline true +status open + +\begin_layout Plain Layout + +A = F A +\end_layout + +\end_inset + + is encoded as +\begin_inset Formula $\exists$ +\end_inset + + +\begin_inset listings +inline true +status open + +\begin_layout Plain Layout + +A. 
+ A +\end_layout + +\end_inset + + +\begin_inset Formula $\times$ +\end_inset + + +\begin_inset listings +inline true +status open + +\begin_layout Plain Layout + +(A +\end_layout + +\end_inset + + +\begin_inset Formula $\to$ +\end_inset + + +\begin_inset listings +inline true +status open + +\begin_layout Plain Layout + +F A) +\end_layout + +\end_inset + + +\end_layout + +\end_deeper +\begin_layout Standard +\begin_inset Separator parbreak +\end_inset + + +\end_layout + +\begin_layout Frame +\begin_inset Argument 4 +status open + +\begin_layout Plain Layout +System F +\begin_inset Formula $\omega$ +\end_inset + + can encode nested types and GADTs +\end_layout + +\end_inset + + +\begin_inset Separator latexpar +\end_inset + + +\end_layout + +\begin_deeper +\begin_layout Standard +Example nested type: a perfect-shaped tree +\end_layout + +\begin_layout LyX-Code +data PT x where +\end_layout + +\begin_layout LyX-Code + Leaf :: a → PT a +\end_layout + +\begin_layout LyX-Code + Branch :: PT (a, a) +\end_layout + +\begin_layout Itemize +Encoding of +\begin_inset listings +inline true +status open + +\begin_layout Plain Layout + +PT +\end_layout + +\end_inset + + in System F +\begin_inset Formula $\omega$ +\end_inset + +: +\end_layout + +\begin_layout LyX-Code +λ(A : Type) → +\begin_inset Formula $\forall$ +\end_inset + +(K : Type → Type) → +\end_layout + +\begin_layout LyX-Code + (A → K A) → K (Pair A A) → K A +\end_layout + +\begin_layout Standard +Example GADT: +\end_layout + +\begin_layout LyX-Code +data MyAPI x where +\end_layout + +\begin_layout LyX-Code + GetUser :: Text → MyAPI Int +\end_layout + +\begin_layout LyX-Code + Validate :: Int → MyAPI Bool +\end_layout + +\begin_layout Itemize +Encoding of +\begin_inset listings +inline true +status open + +\begin_layout Plain Layout + +MyAPI +\end_layout + +\end_inset + + in System F +\begin_inset Formula $\omega$ +\end_inset + +: +\end_layout + +\begin_layout LyX-Code +λ(A : Type) → +\begin_inset Formula $\forall$ +\end_inset + +(K : Type → Type) → +\end_layout + +\begin_layout LyX-Code + (Text → K Int) → (Int → K Bool) → K A +\end_layout + +\end_deeper +\begin_layout Standard +\begin_inset Separator parbreak +\end_inset + + +\end_layout + +\begin_layout Frame +\begin_inset Argument 4 +status open + +\begin_layout Plain Layout +Dhall as a System F +\begin_inset Formula $\omega$ +\end_inset + + interpreter + syntax sugar +\end_layout + +\end_inset + + +\begin_inset Separator latexpar +\end_inset + + +\end_layout + +\begin_deeper +\begin_layout Standard +Named constants +\end_layout + +\begin_layout LyX-Code +let identity = λ(A : Type) → λ(x : A) → x +\end_layout + +\begin_layout LyX-Code +in ... 
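The encoding of the MyAPI GADT shown above can also be transcribed into Scala as a sketch; the trait MyAPI, the alias Id, and the function interpret are illustrative names, and the particular interpreter chosen here is arbitrary.

// MyAPI A  ≅  ∀(K : Type → Type) → (Text → K Int) → (Int → K Bool) → K A
trait MyAPI[A] {
  def run[K[_]](getUser: String => K[Int], validate: Int => K[Boolean]): K[A]
}

// The two GADT constructors become functions that build MyAPI values:
def getUser(name: String): MyAPI[Int] = new MyAPI[Int] {
  def run[K[_]](g: String => K[Int], v: Int => K[Boolean]): K[Int] = g(name)
}
def validate(id: Int): MyAPI[Boolean] = new MyAPI[Boolean] {
  def run[K[_]](g: String => K[Int], v: Int => K[Boolean]): K[Boolean] = v(id)
}

// One possible interpreter, choosing K[X] = X:
type Id[X] = X
def interpret[A](api: MyAPI[A]): A = api.run[Id](_.length, _ > 0)
// interpret(getUser("alice")) == 5 ;  interpret(validate(0)) == false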
+\end_layout + +\begin_layout Standard +Built-in types and type constructors +\end_layout + +\begin_layout LyX-Code +Bool, Natural, Integer, Text, Optional, List +\end_layout + +\begin_layout Standard +Record types +\end_layout + +\begin_layout LyX-Code +\begin_inset listings +inline true +status open + +\begin_layout Plain Layout + +let x = { name : Text, valid : Bool } +\end_layout + +\end_inset + + +\end_layout + +\begin_layout LyX-Code +\begin_inset listings +inline true +status open + +\begin_layout Plain Layout + +in x.name +\end_layout + +\end_inset + + +\end_layout + +\begin_layout Standard +Labeled union types +\end_layout + +\begin_layout LyX-Code +\begin_inset listings +inline true +status open + +\begin_layout Plain Layout + +< Left : Text | Right : Natural > +\end_layout + +\end_inset + + +\end_layout + +\begin_layout Standard +Usage +\end_layout + +\begin_layout LyX-Code +\begin_inset listings +inline true +status open + +\begin_layout Plain Layout + +let x = < Left : Text | Right : Natural >.Right 123 in ... +\end_layout + +\end_inset + + +\end_layout + +\begin_layout Standard +Pattern matching +\end_layout + +\begin_layout LyX-Code +merge { Left = λ(x : Text) → 0, +\end_layout + +\begin_layout LyX-Code +Right = λ(y : Natural) → y + 1 } x +\end_layout + +\end_deeper +\begin_layout Standard +\begin_inset Separator parbreak +\end_inset + + +\end_layout + +\begin_layout Frame +\begin_inset Argument 4 +status open + +\begin_layout Plain Layout +Conclusions +\end_layout + +\end_inset + + +\end_layout + +\begin_deeper +\begin_layout Standard +*** +\end_layout + +\end_deeper +\begin_layout Standard +\begin_inset Note Note +status open + +\begin_layout Plain Layout +End of slides. +\end_layout + +\end_inset + + +\end_layout + +\end_body +\end_document
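The Dhall labeled union type and the merge expression shown above correspond to a sealed trait with pattern matching in Scala; below is a minimal sketch with illustrative names (Natural is approximated by Int here):

// Dhall:  < Left : Text | Right : Natural >   and   merge { Left = ..., Right = ... } x
sealed trait TextOrNatural
final case class MkLeft(text: String) extends TextOrNatural
final case class MkRight(n: Int) extends TextOrNatural

val x: TextOrNatural = MkRight(123)
val result: Int = x match {
  case MkLeft(_)  => 0      // corresponds to the Left handler of merge
  case MkRight(y) => y + 1  // corresponds to the Right handler of merge
}
// result == 124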