-- Hoogle documentation, generated by Haddock
-- See Hoogle, http://www.haskell.org/hoogle/


-- | A library for parallel programming based on a monad
--   
--   This library offers an alternative parallel programming API to that
--   provided by the <tt>parallel</tt> package. The <a>Par</a> monad allows
--   the simple description of parallel computations, and can be used to
--   add parallelism to pure Haskell code. The basic API is
--   straightforward: the monad supports forking and simple communication
--   in terms of <a>IVar</a>s. The library comes with an efficient
--   work-stealing implementation, but the internals are also exposed so
--   that you can build your own scheduler if necessary. Examples of use
--   can be found in the examples/ directory of the source package.
@package monad-par
@version 0.1.0.3


-- | This module exposes the internals of the <tt>Par</tt> monad so that
--   you can build your own scheduler or other extensions. Do not use this
--   module for purposes other than extending the <tt>Par</tt> monad with
--   new functionality.
module Control.Monad.Par.Internal
data Trace
Get :: (IVar a) -> (a -> Trace) -> Trace
Put :: (IVar a) -> a -> Trace -> Trace
New :: (IVarContents a) -> (IVar a -> Trace) -> Trace
Fork :: Trace -> Trace -> Trace
Done :: Trace
Yield :: Trace -> Trace
data Sched
Sched :: {-# UNPACK #-} !Int -> IORef [Trace] -> IORef [MVar Bool] -> [Sched] -> Sched
no :: Sched -> {-# UNPACK #-} !Int
workpool :: Sched -> IORef [Trace]
idle :: Sched -> IORef [MVar Bool]
scheds :: Sched -> [Sched]
newtype Par a
Par :: ((a -> Trace) -> Trace) -> Par a
runCont :: Par a -> (a -> Trace) -> Trace
newtype IVar a
IVar :: (IORef (IVarContents a)) -> IVar a
data IVarContents a
Full :: a -> IVarContents a
Empty :: IVarContents a
Blocked :: [a -> Trace] -> IVarContents a

-- | The main scheduler loop.
sched :: Bool -> Sched -> Trace -> IO ()
runPar :: Par a -> a

-- | An asynchronous version in which the main thread of control in a Par
--   computation can return while forked computations still run in the
--   background.
runParAsync :: Par a -> a

-- | An alternative version in which the consumer of the result has the
--   option to <a>help</a> run the Par computation if results it is
--   interested in are not ready yet.
runParAsyncHelper :: Par a -> (a, IO ())

-- | creates a new <tt>IVar</tt>
new :: Par (IVar a)

-- | creates a new <tt>IVar</tt> that contains a value
newFull :: NFData a => a -> Par (IVar a)

-- | creates a new <tt>IVar</tt> that contains a value (head-strict only)
newFull_ :: a -> Par (IVar a)

-- | read the value in a <tt>IVar</tt>. The <a>get</a> can only return when
--   the value has been written by a prior or parallel <tt>put</tt> to the
--   same <tt>IVar</tt>.
get :: IVar a -> Par a

-- | like <a>put</a>, but only head-strict rather than fully-strict.
put_ :: IVar a -> a -> Par ()

-- | put a value into a <tt>IVar</tt>. Multiple <a>put</a>s to the same
--   <tt>IVar</tt> are not allowed, and result in a runtime error.
--   
--   <a>put</a> fully evaluates its argument, which therefore must be an
--   instance of <a>NFData</a>. The idea is that this forces the work to
--   happen when we expect it, rather than being passed to the consumer of
--   the <tt>IVar</tt> and performed later, which often results in less
--   parallelism than expected.
--   
--   Sometimes partial strictness is more appropriate: see <a>put_</a>.
put :: NFData a => IVar a -> a -> Par ()
pollIVar :: IVar a -> IO (Maybe a)

-- | Allows other parallel computations to progress. (should not be
--   necessary in most cases).
yield :: Par ()
instance NFData (IVar a)
instance Applicative Par
instance Monad Par
instance Functor Par


-- | This module provides a monad <tt>Par</tt>, for speeding up pure
--   computations using parallel processors. It cannot be used for speeding
--   up computations that use IO (for that, see
--   <tt>Control.Concurrent</tt>). The result of a given <tt>Par</tt>
--   computation is always the same - i.e. it is deterministic, but the
--   computation may be performed more quickly if there are processors
--   available to share the work.
--   
--   For example, the following program fragment computes the values of
--   <tt>(f x)</tt> and <tt>(g x)</tt> in parallel, and returns a pair of
--   their results:
--   
--   <pre>
--   runPar $ do
--       fx &lt;- pval (f x)  -- start evaluating (f x)
--       gx &lt;- pval (g x)  -- start evaluating (g x)
--       a &lt;- get fx       -- wait for fx
--       b &lt;- get gx       -- wait for gx
--       return (a,b)      -- return results
--   </pre>
--   
--   <tt>Par</tt> can be used for specifying pure parallel computations in
--   which the order of the computation is not known beforehand. The
--   programmer specifies how information flows from one part of the
--   computation to another, but not the order in which computations will
--   be evaluated at runtime. Information flow is described using
--   <a>variables</a> called <tt>IVar</tt>s, which support <a>put</a> and
--   <a>get</a> operations. For example, suppose you have a problem that
--   can be expressed as a network with four nodes, where <tt>b</tt> and
--   <tt>c</tt> require the value of <tt>a</tt>, and <tt>d</tt> requires
--   the value of <tt>b</tt> and <tt>c</tt>:
--   
--   <pre>
--     a
--    / \  
--   b   c
--    \ /
--     d
--   </pre>
--   
--   Then you could express this in the <tt>Par</tt> monad like this:
--   
--   <pre>
--   runPar $ do
--       [a,b,c,d] &lt;- sequence [new,new,new,new]
--       fork $ do x &lt;- get a; put b (x+1)
--       fork $ do x &lt;- get a; put c (x+2)
--       fork $ do x &lt;- get b; y &lt;- get c; put d (x+y)
--       fork $ do put a (3 :: Int)
--       get d
--   </pre>
--   
--   The result of the above computation is always 9. The <a>get</a>
--   operation waits until its input is available; multiple <a>put</a>s to
--   the same <tt>IVar</tt> are not allowed, and result in a runtime error.
--   Values stored in <tt>IVar</tt>s are usually fully evaluated (although
--   there are ways provided to pass lazy values if necessary).
--   
--   In the above example, <tt>b</tt> and <tt>c</tt> will be evaluated in
--   parallel. In practice the work involved at each node is too small here
--   to see the benefits of parallelism though: typically each node should
--   involve much more work. The granularity is completely under your
--   control - too small and the overhead of the <tt>Par</tt> monad will
--   outweigh any parallelism benefits, whereas if the nodes are too large
--   then there might not be enough parallelism to use all the available
--   processors.
--   
--   Unlike <tt>Control.Parallel</tt>, in <tt>Control.Monad.Par</tt>
--   parallelism is not combined with laziness, so sharing and granularity
--   are completely under the control of the programmer. New units of
--   parallel work are only created by <tt>fork</tt>, <tt>par</tt>, and a
--   few other combinators.
--   
--   The implementation is based on a work-stealing scheduler that divides
--   the work as evenly as possible between the available processors at
--   runtime.
module Control.Monad.Par
data Par a
runPar :: Par a -> a

-- | forks a computation to happen in parallel. The forked computation may
--   exchange values with other computations using <tt>IVar</tt>s.
fork :: Par () -> Par ()
data IVar a

-- | creates a new <tt>IVar</tt>
new :: Par (IVar a)

-- | creates a new <tt>IVar</tt> that contains a value
newFull :: NFData a => a -> Par (IVar a)

-- | creates a new <tt>IVar</tt> that contains a value (head-strict only)
newFull_ :: a -> Par (IVar a)

-- | read the value in a <tt>IVar</tt>. The <a>get</a> can only return when
--   the value has been written by a prior or parallel <tt>put</tt> to the
--   same <tt>IVar</tt>.
get :: IVar a -> Par a

-- | put a value into a <tt>IVar</tt>. Multiple <a>put</a>s to the same
--   <tt>IVar</tt> are not allowed, and result in a runtime error.
--   
--   <a>put</a> fully evaluates its argument, which therefore must be an
--   instance of <a>NFData</a>. The idea is that this forces the work to
--   happen when we expect it, rather than being passed to the consumer of
--   the <tt>IVar</tt> and performed later, which often results in less
--   parallelism than expected.
--   
--   Sometimes partial strictness is more appropriate: see <a>put_</a>.
put :: NFData a => IVar a -> a -> Par ()

-- | like <a>put</a>, but only head-strict rather than fully-strict.
put_ :: IVar a -> a -> Par ()

-- | equivalent to <tt>spawn . return</tt>
pval :: NFData a => a -> Par (IVar a)

-- | Like <a>fork</a>, but returns an <tt>IVar</tt> that can be used to
--   query the result of the forked computation.
--   
--   <pre>
--   spawn p = do
--     r &lt;- new
--     fork (p &gt;&gt;= put r)
--     return r
--   </pre>
spawn :: NFData a => Par a -> Par (IVar a)

-- | Like <a>spawn</a>, but the result is only head-strict, not
--   fully-strict.
spawn_ :: Par a -> Par (IVar a)

-- | Applies the given function to each element of a data structure in
--   parallel (fully evaluating the results), and returns a new data
--   structure containing the results.
--   
--   <pre>
--   parMap f xs = mapM (pval . f) xs &gt;&gt;= mapM get
--   </pre>
--   
--   <tt>parMap</tt> is commonly used for lists, where it has this
--   specialised type:
--   
--   <pre>
--   parMap :: NFData b =&gt; (a -&gt; b) -&gt; [a] -&gt; Par [b]
--   </pre>
parMap :: (Traversable t, NFData b) => (a -> b) -> t a -> Par (t b)

-- | Like <a>parMap</a>, but the function is a <tt>Par</tt> monad
--   operation.
--   
--   <pre>
--   parMapM f xs = mapM (spawn . f) xs &gt;&gt;= mapM get
--   </pre>
parMapM :: (Traversable t, NFData b) => (a -> Par b) -> t a -> Par (t b)

-- | Computes a binary map/reduce over a finite range. The range is
--   recursively split into two, the result for each half is computed in
--   parallel, and then the two results are combined. When the range
--   reaches the threshold size, the remaining elements of the range are
--   computed sequentially.
--   
--   For example, the following is a parallel implementation of
--   
--   <pre>
--   foldl (+) 0 (map (^2) [1..10^6])
--   </pre>
--   
--   <pre>
--   parMapReduceRangeThresh 100 (InclusiveRange 1 (10^6))
--          (\x -&gt; return (x^2))
--          (\x y -&gt; return (x+y))
--          0
--   </pre>
parMapReduceRangeThresh :: NFData a => Int -> InclusiveRange -> (Int -> Par a) -> (a -> a -> Par a) -> a -> Par a

-- | "Auto-partitioning" version of <a>parMapReduceRangeThresh</a> that
--   chooses the threshold based on the size of the range and the number of
--   processors.
parMapReduceRange :: NFData a => InclusiveRange -> (Int -> Par a) -> (a -> a -> Par a) -> a -> Par a
data InclusiveRange
InclusiveRange :: Int -> Int -> InclusiveRange

-- | Parallel for-loop over an inclusive range. Semantically equivalent to
--   
--   <pre>
--   parFor (InclusiveRange n m) f = forM_ [n..m] f
--   </pre>
--   
--   except that the implementation will split the work into an unspecified
--   number of subtasks in an attempt to gain parallelism. The exact number
--   of subtasks is chosen at runtime, and is probably a small multiple of
--   the available number of processors.
--   
--   Strictly speaking the semantics of <a>parFor</a> depends on the number
--   of processors, and its behaviour is therefore not deterministic.
--   However, a good rule of thumb is to not have any interdependencies
--   between the elements; if this rule is followed then <tt>parFor</tt>
--   has deterministic semantics. One easy way to follow this rule is to
--   only use <a>put</a> or <a>put_</a> in <tt>f</tt>, never <a>get</a>.
parFor :: InclusiveRange -> (Int -> Par ()) -> Par ()

module Control.Monad.Par.IList

-- | An <a>IList</a> is the equivalent of a lazy list in the <a>Par</a>
--   monad. The tail of the list is an <a>IVar</a>, which allows the list
--   to be produced and consumed in parallel.
data IList a
Null :: IList a
Cons :: a -> IVar (IList a) -> IList a
hd :: IList a -> a
tl :: IList a -> IVar (IList a)
instance NFData a => NFData (IList a)


-- | Some experimental support for <a>OpenList</a>s, which are streams in
--   the <a>Par</a> monad that support constant-time append.
module Control.Monad.Par.OpenList
data OpenList a

-- | An empty open list. Supports further extension.
empty :: OpenList a

-- | A single element open list.
singleton :: a -> Par (OpenList a)

-- | Add an element to the front of an OpenList. Works irrespective of
--   whether the input is closed.
cons :: NFData a => a -> OpenList a -> Par (OpenList a)

-- | Head of an OpenList.
head :: OpenList a -> a

-- | Tail of an OpenList. Beware, if the list contains only one element
--   (i.e. the result of tail will be null), it must be CLOSED for tail to
--   work.
tail :: OpenList a -> Par (OpenList a)
length :: Num t => OpenList a -> Par t

-- | Terminate a non-empty open list so that it cannot be extended further.
close :: NFData a => OpenList a -> Par (OpenList a)

-- | Destructive append operation.
join :: NFData a => OpenList a -> OpenList a -> Par (OpenList a)

-- | Convert a CLOSED OpenList to a list.
toList :: NFData a => (OpenList a) -> Par [a]

-- | Convert a list to an OpenList, open to extension at the tail.
fromList :: NFData a => [a] -> Par (OpenList a)

-- | Asynchronously convert an OpenList to a lazy list. Returns
--   immediately.
toLazyList :: OpenList a -> Par [a]
parMapM :: NFData a => (a1 -> Par a) -> OpenList a1 -> Par (OpenList a)

-- | Build an OpenList with a divide-and-conquer parallel strategy.
parBuild :: NFData a => InclusiveRange -> (Int -> a) -> Par (OpenList a)

-- | Build an OpenList with a divide-and-conquer parallel strategy,
--   allowing nested parallelism in the per-element computation.
parBuildM :: NFData a => InclusiveRange -> (Int -> Par a) -> Par (OpenList a)
openlist_tests :: Test
chaintest :: Int -> Par (IList Int)
async_test :: IO ()
lazy_chaintest :: Int -> Par [Int]

-- | An <a>IList</a> is the equivalent of a lazy list in the <a>Par</a>
--   monad. The tail of the list is an <a>IVar</a>, which allows the list
--   to be produced and consumed in parallel.
data IList a
Null :: IList a
Cons :: a -> IVar (IList a) -> IList a
hd :: IList a -> a
tl :: IList a -> IVar (IList a)
newCell :: a -> Par (IList a)
instance Show a => Show (OpenList a)
instance NFData a => NFData (OpenList a)


-- | This module defines the <a>AList</a> type, a list that supports
--   constant-time append, and is therefore ideal for building the result
--   of tree-shaped parallel computations.
module Control.Monad.Par.AList

-- | Lists that support constant-time append (sometimes called join-lists).
data AList a
ANil :: AList a
ASing :: a -> AList a
Append :: (AList a) -> (AList a) -> AList a
AList :: [a] -> AList a

-- | <i>O(1)</i> an empty <a>AList</a>
empty :: AList a

-- | <i>O(1)</i> a singleton <a>AList</a>
singleton :: a -> AList a

-- | <i>O(1)</i> prepend an element
cons :: a -> AList a -> AList a

-- | <i>O(n)</i> take the head element of an <a>AList</a>
--   
--   NB. linear-time, because the list might look like this:
--   
--   <pre>
--   (((... `append` a) `append` b) `append` c)
--   </pre>
head :: AList a -> a

-- | <i>O(n)</i> take the tail element of an <a>AList</a>
tail :: AList a -> AList a

-- | <i>O(n)</i> find the length of an <a>AList</a>
length :: AList a -> Int

-- | <i>O(n)</i> returns <a>True</a> if the <a>AList</a> is empty
null :: AList a -> Bool

-- | <i>O(1)</i> Append two <a>AList</a>s
append :: AList a -> AList a -> AList a

-- | <i>O(n)</i> converts an <a>AList</a> to an ordinary list
toList :: AList a -> [a]

-- | <i>O(1)</i> convert an ordinary list to an <a>AList</a>
fromList :: [a] -> AList a

-- | Build a balanced <a>AList</a> in parallel, constructing each element
--   as a function of its index. The threshold argument provides control
--   over the degree of parallelism. It indicates under what number of
--   elements the build process should switch from parallel to serial.
parBuildThresh :: NFData a => Int -> InclusiveRange -> (Int -> a) -> Par (AList a)

-- | Variant of <a>parBuildThresh</a> in which the element-construction
--   function is itself a <a>Par</a> computation.
parBuildThreshM :: NFData a => Int -> InclusiveRange -> (Int -> Par a) -> Par (AList a)

-- | "Auto-partitioning" version of <a>parBuildThresh</a> that chooses the
--   threshold based on the size of the range and the number of
--   processors.
parBuild :: NFData a => InclusiveRange -> (Int -> a) -> Par (AList a)

-- | like <a>parBuild</a>, but the construction function is monadic
parBuildM :: NFData a => InclusiveRange -> (Int -> Par a) -> Par (AList a)
instance Show a => Show (AList a)
instance NFData a => NFData (AList a)
