ImplantFile.hs
NEWS
data/CompileMimeTypes.hs
+ data/Makefile
data/mime.types
examples/HelloWorld.hs
examples/Implanted.hs
Library
Build-Depends:
HsOpenSSL == 0.10.*,
+ ascii == 0.0.*,
+ attoparsec == 0.9.*,
base == 4.3.*,
base-unicode-symbols == 0.2.*,
base64-bytestring == 0.1.*,
Network.HTTP.Lucu.MIMEType
Network.HTTP.Lucu.MIMEType.DefaultExtensionMap
Network.HTTP.Lucu.MIMEType.Guess
- Network.HTTP.Lucu.Parser
Network.HTTP.Lucu.Parser.Http
Network.HTTP.Lucu.Request
Network.HTTP.Lucu.Resource
, authCredentialP -- private
)
where
+import Data.Ascii (Ascii)
+import qualified Data.Ascii as A
import qualified Data.ByteString.Base64 as B64
import qualified Data.ByteString.Char8 as C8
-import Network.HTTP.Lucu.Parser
import Network.HTTP.Lucu.Parser.Http
import Network.HTTP.Lucu.Utils
import Prelude.Unicode
deriving (Eq)
-- |'Realm' is just a string which must not contain any non-ASCII letters.
-type Realm = String
+type Realm = Ascii
-- |Authorization credential to be sent by client with
-- \"Authorization\" header. See
-- |'UserID' is just a string which must not contain colon and any
-- non-ASCII letters.
-type UserID = String
+type UserID = Ascii
-- |'Password' is just a string which must not contain any non-ASCII
-- letters.
-type Password = String
+type Password = Ascii
+-- FIXME: Don't use String for network output.
instance Show AuthChallenge where
show (BasicAuthChallenge realm)
= "Basic realm=" ⧺ quoteStr realm
+{-# LANGUAGE
+ UnicodeSyntax
+ #-}
module Network.HTTP.Lucu.Chunk
    ( chunkHeaderP -- (Integral a, Bits a) ⇒ Parser a
, chunkFooterP -- Parser ()
, chunkTrailerP -- Parser Headers
)
where
-
-import Network.HTTP.Lucu.Headers
-import Network.HTTP.Lucu.Parser
-import Network.HTTP.Lucu.Parser.Http
-import Numeric
-
-
-chunkHeaderP :: Num a => Parser a
-chunkHeaderP = do hexLen <- many1 hexDigit
- _ <- extension
- _ <- crlf
-
- let [(len, _)] = readHex hexLen
+import Control.Applicative
+import Data.Attoparsec.Char8
+import Data.Bits
+import Network.HTTP.Lucu.Headers
+import Network.HTTP.Lucu.Parser.Http
+
+chunkHeaderP ∷ (Integral a, Bits a) ⇒ Parser a
+{-# INLINEABLE chunkHeaderP #-}
+chunkHeaderP = do len ← hexadecimal
+ extension
+ crlf
return len
where
- extension :: Parser ()
- extension = many ( char ';' >>
- token >>
- char '=' >>
- ( token <|> quotedStr )
- )
- >>
- return ()
-{-# SPECIALIZE chunkHeaderP :: Parser Int #-}
-
-
-chunkFooterP :: Parser ()
-chunkFooterP = crlf >> return ()
-
-
-chunkTrailerP :: Parser Headers
+ extension ∷ Parser ()
+ extension = skipMany $
+ do _ ← char ';'
+ _ ← token
+ _ ← char '='
+ _ ← token <|> quotedStr
+ return ()
+
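-- Editor's sketch (not part of this patch): assuming attoparsec's parseOnly,
-- the new chunk-header parser accepts e.g.
--
--   parseOnly (chunkHeaderP ∷ Parser Int) "1a;foo=bar\r\n"  ==>  Right 26
--
-- where "1a" is the hexadecimal chunk length and ";foo=bar" a chunk extension.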
+chunkFooterP ∷ Parser ()
+chunkFooterP = crlf
+
+chunkTrailerP ∷ Parser Headers
chunkTrailerP = headersP
+{-# LANGUAGE
+ OverloadedStrings
+ , UnicodeSyntax
+ #-}
module Network.HTTP.Lucu.ContentCoding
- ( acceptEncodingListP
+ ( AcceptEncoding(..)
+
+ , acceptEncodingListP
, normalizeCoding
, unnormalizeCoding
- , orderAcceptEncodings
)
where
-
-import Data.Char
-import Data.Ord
-import Data.Maybe
-import Network.HTTP.Lucu.Parser
-import Network.HTTP.Lucu.Parser.Http
-
-
-acceptEncodingListP :: Parser [(String, Maybe Double)]
-acceptEncodingListP = allowEOF $! listOf accEncP
-
-
-accEncP :: Parser (String, Maybe Double)
-accEncP = do coding <- token
- qVal <- option Nothing
- $ do _ <- string ";q="
- q <- qvalue
- return $ Just q
+import Control.Applicative
+import Data.Ascii (CIAscii, toCIAscii)
+import Data.Attoparsec.Char8
+import Data.Ord
+import Data.Maybe
+import Network.HTTP.Lucu.Parser.Http
+import Prelude.Unicode
+
+data AcceptEncoding
+ = AcceptEncoding !CIAscii !(Maybe Double)
+ deriving (Eq, Show)
+
+instance Ord AcceptEncoding where
+ (AcceptEncoding c1 q1) `compare` (AcceptEncoding c2 q2)
+        | q1' > q2' = GT
+ | q1' < q2' = LT
+ | otherwise = compare c1 c2
+ where
+ q1' = fromMaybe 0 q1
+ q2' = fromMaybe 0 q2
+
+acceptEncodingListP ∷ Parser [(CIAscii, Maybe Double)]
+acceptEncodingListP = listOf accEncP
+
+accEncP ∷ Parser (CIAscii, Maybe Double)
+accEncP = do coding ← toCIAscii <$> token
+ qVal ← option Nothing
+ $ do _ ← string ";q="
+ q ← qvalue
+ return $ Just q
return (normalizeCoding coding, qVal)
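-- Editor's sketch (not part of this patch): assuming attoparsec's parseOnly,
-- an Accept-Encoding header now parses e.g. as
--
--   parseOnly acceptEncodingListP "x-gzip;q=0.5,compress"
--     ==>  Right [("gzip", Just 0.5), ("compress", Nothing)]
--
-- normalizeCoding has already mapped "x-gzip" to "gzip" here.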
-
-normalizeCoding :: String -> String
+normalizeCoding ∷ CIAscii → CIAscii
normalizeCoding coding
- = case map toLower coding of
- "x-gzip" -> "gzip"
- "x-compress" -> "compress"
- other -> other
-
-
-unnormalizeCoding :: String -> String
+    | coding ≡ "x-gzip"     = "gzip"
+    | coding ≡ "x-compress" = "compress"
+    | otherwise             = coding
+
+unnormalizeCoding ∷ CIAscii → CIAscii
unnormalizeCoding coding
- = case map toLower coding of
- "gzip" -> "x-gzip"
- "compress" -> "x-compress"
- other -> other
-
-
-orderAcceptEncodings :: (String, Maybe Double) -> (String, Maybe Double) -> Ordering
-orderAcceptEncodings (_, q1) (_, q2)
- = comparing (fromMaybe 0) q1 q2
-
+    | coding ≡ "gzip"     = "x-gzip"
+    | coding ≡ "compress" = "x-compress"
+    | otherwise           = coding
, eTagListP
)
where
-
-import Control.Monad
-import Network.HTTP.Lucu.Parser
-import Network.HTTP.Lucu.Parser.Http hiding (token)
-import Network.HTTP.Lucu.Utils
+import Control.Monad
+import Network.HTTP.Lucu.Parser.Http hiding (token)
+import Network.HTTP.Lucu.Utils
-- |An entity tag is made of a weakness flag and an opaque string.
data ETag = ETag {
+{-# LANGUAGE
+ BangPatterns
+ , GeneralizedNewtypeDeriving
+ , OverloadedStrings
+ , UnicodeSyntax
+ #-}
module Network.HTTP.Lucu.Headers
( Headers
, HasHeaders(..)
- , noCaseCmp
- , noCaseEq
-
- , emptyHeaders
, toHeaders
, fromHeaders
, hPutHeaders
)
where
-
-import qualified Data.ByteString as Strict (ByteString)
-import Data.ByteString.Internal (toForeignPtr, w2c, inlinePerformIO)
-import qualified Data.ByteString.Char8 as C8 hiding (ByteString)
-import Data.Char
-import Data.List
-import Data.Map (Map)
+import Control.Applicative
+import Data.Ascii (Ascii, CIAscii)
+import qualified Data.Ascii as A
+import Data.Attoparsec.Char8 as P
+import qualified Data.ByteString as BS
+import Data.Map (Map)
import qualified Data.Map as M
-import Data.Ord
-import Data.Word
-import Foreign.ForeignPtr
-import Foreign.Ptr
-import Foreign.Storable
-import Network.HTTP.Lucu.HandleLike
-import Network.HTTP.Lucu.Parser
-import Network.HTTP.Lucu.Parser.Http
-import Network.HTTP.Lucu.Utils
-
-type Headers = Map NCBS Strict.ByteString
-newtype NCBS = NCBS Strict.ByteString
-
-toNCBS :: Strict.ByteString -> NCBS
-toNCBS = NCBS
-{-# INLINE toNCBS #-}
-
-fromNCBS :: NCBS -> Strict.ByteString
-fromNCBS (NCBS x) = x
-{-# INLINE fromNCBS #-}
-
-instance Eq NCBS where
- (NCBS a) == (NCBS b) = a == b
-
-instance Ord NCBS where
- (NCBS a) `compare` (NCBS b) = a `noCaseCmp` b
-
-instance Show NCBS where
- show (NCBS x) = show x
-
-noCaseCmp :: Strict.ByteString -> Strict.ByteString -> Ordering
-noCaseCmp a b = a `seq` b `seq`
- toForeignPtr a `cmp` toForeignPtr b
- where
- cmp :: (ForeignPtr Word8, Int, Int) -> (ForeignPtr Word8, Int, Int) -> Ordering
- cmp (x1, s1, l1) (x2, s2, l2)
- | x1 `seq` s1 `seq` l1 `seq` x2 `seq` s2 `seq` l2 `seq` False = undefined
- | l1 == 0 && l2 == 0 = EQ
- | x1 == x2 && s1 == s2 && l1 == l2 = EQ
- | otherwise
- = inlinePerformIO $
- withForeignPtr x1 $ \ p1 ->
- withForeignPtr x2 $ \ p2 ->
- noCaseCmp' (p1 `plusPtr` s1) l1 (p2 `plusPtr` s2) l2
-
-
--- もし先頭の文字列が等しければ、短い方が小さい。
-noCaseCmp' :: Ptr Word8 -> Int -> Ptr Word8 -> Int -> IO Ordering
-noCaseCmp' p1 l1 p2 l2
- | p1 `seq` l1 `seq` p2 `seq` l2 `seq` False = undefined
- | l1 == 0 && l2 == 0 = return EQ
- | l1 == 0 = return LT
- | l2 == 0 = return GT
- | otherwise
- = do c1 <- peek p1
- c2 <- peek p2
- case comparing (toLower . w2c) c1 c2 of
- EQ -> noCaseCmp' (p1 `plusPtr` 1) (l1 - 1) (p2 `plusPtr` 1) (l2 - 1)
- x -> return x
-
-
-noCaseEq :: Strict.ByteString -> Strict.ByteString -> Bool
-noCaseEq a b = a `seq` b `seq`
- toForeignPtr a `cmp` toForeignPtr b
- where
- cmp :: (ForeignPtr Word8, Int, Int) -> (ForeignPtr Word8, Int, Int) -> Bool
- cmp (x1, s1, l1) (x2, s2, l2)
- | x1 `seq` s1 `seq` l1 `seq` x2 `seq` s2 `seq` l2 `seq` False = undefined
- | l1 /= l2 = False
- | l1 == 0 && l2 == 0 = True
- | x1 == x2 && s1 == s2 && l1 == l2 = True
- | otherwise
- = inlinePerformIO $
- withForeignPtr x1 $ \ p1 ->
- withForeignPtr x2 $ \ p2 ->
- noCaseEq' (p1 `plusPtr` s1) (p2 `plusPtr` s2) l1
-
-
-noCaseEq' :: Ptr Word8 -> Ptr Word8 -> Int -> IO Bool
-noCaseEq' p1 p2 l
- | p1 `seq` p2 `seq` l `seq` False = undefined
- | l == 0 = return True
- | otherwise
- = do c1 <- peek p1
- c2 <- peek p2
- if toLower (w2c c1) == toLower (w2c c2) then
- noCaseEq' (p1 `plusPtr` 1) (p2 `plusPtr` 1) (l - 1)
- else
- return False
+import Data.Monoid
+import Data.Monoid.Unicode
+import Network.HTTP.Lucu.HandleLike
+import Network.HTTP.Lucu.Parser.Http
+import Network.HTTP.Lucu.Utils
+import Prelude.Unicode
+newtype Headers
+ = Headers (Map CIAscii Ascii)
+ deriving (Eq, Show, Monoid)
class HasHeaders a where
- getHeaders :: a -> Headers
- setHeaders :: a -> Headers -> a
-
- getHeader :: Strict.ByteString -> a -> Maybe Strict.ByteString
- getHeader key a
- = key `seq` a `seq`
- M.lookup (toNCBS key) (getHeaders a)
-
- deleteHeader :: Strict.ByteString -> a -> a
- deleteHeader key a
- = key `seq` a `seq`
- setHeaders a $ M.delete (toNCBS key) (getHeaders a)
-
- setHeader :: Strict.ByteString -> Strict.ByteString -> a -> a
- setHeader key val a
- = key `seq` val `seq` a `seq`
- setHeaders a $ M.insert (toNCBS key) val (getHeaders a)
-
-
-emptyHeaders :: Headers
-emptyHeaders = M.empty
-
-
-toHeaders :: [(Strict.ByteString, Strict.ByteString)] -> Headers
-toHeaders xs = mkHeaders xs M.empty
-
-
-mkHeaders :: [(Strict.ByteString, Strict.ByteString)] -> Headers -> Headers
-mkHeaders [] m = m
-mkHeaders ((key, val):xs) m = mkHeaders xs $
- case M.lookup (toNCBS key) m of
- Nothing -> M.insert (toNCBS key) val m
- Just old -> M.insert (toNCBS key) (merge old val) m
+ getHeaders ∷ a → Headers
+ setHeaders ∷ a → Headers → a
+
+ getHeader ∷ CIAscii → a → Maybe Ascii
+ {-# INLINE getHeader #-}
+ getHeader !key !a
+ = case getHeaders a of
+ Headers m → M.lookup key m
+
+ deleteHeader ∷ CIAscii → a → a
+ {-# INLINE deleteHeader #-}
+ deleteHeader !key !a
+ = case getHeaders a of
+ Headers m
+ → setHeaders a $ Headers $ M.delete key m
+
+ setHeader ∷ CIAscii → Ascii → a → a
+ {-# INLINE setHeader #-}
+ setHeader !key !val !a
+ = case getHeaders a of
+ Headers m
+ → setHeaders a $ Headers $ M.insert key val m
+
+toHeaders ∷ [(CIAscii, Ascii)] → Headers
+{-# INLINE toHeaders #-}
+toHeaders = flip mkHeaders (∅)
+
+mkHeaders ∷ [(CIAscii, Ascii)] → Headers → Headers
+mkHeaders [] (Headers m) = Headers m
+mkHeaders ((key, val):xs) (Headers m)
+ = mkHeaders xs $ Headers $
+ case M.lookup key m of
+ Nothing → M.insert key val m
+ Just old → M.insert key (merge old val) m
where
- merge :: Strict.ByteString -> Strict.ByteString -> Strict.ByteString
- -- カンマ區切りである事を假定する。RFC ではカンマ區切りに出來ない
- -- ヘッダは複數個あってはならない事になってゐる。
+ merge ∷ Ascii → Ascii → Ascii
+ {-# INLINE merge #-}
merge a b
- | C8.null a && C8.null b = C8.empty
- | C8.null a = b
- | C8.null b = a
- | otherwise = C8.concat [a, C8.pack ", ", b]
-
+ | nullA a ∧ nullA b = (∅)
+ | nullA a = b
+ | nullA b = a
+ | otherwise = a ⊕ ", " ⊕ b
-fromHeaders :: Headers -> [(Strict.ByteString, Strict.ByteString)]
-fromHeaders hs = [(fromNCBS a, b) | (a, b) <- M.toList hs]
+ nullA ∷ Ascii → Bool
+ {-# INLINE nullA #-}
+ nullA = BS.null ∘ A.toByteString
+fromHeaders ∷ Headers → [(CIAscii, Ascii)]
+fromHeaders (Headers m) = M.toList m
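-- Editor's sketch (not part of this patch): duplicate field names are still
-- merged with ", ", e.g.
--
--   fromHeaders (toHeaders [("Accept", "text/html"), ("Accept", "text/plain")])
--     ==>  [("Accept", "text/html, text/plain")]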
{-
message-header = field-name ":" [ field-value ]
    All LWS at the beginning and the end of the field-value is removed,
    and any other LWS is collapsed into a single SP.
-}
-headersP :: Parser Headers
-headersP = do xs <- many header
- _ <- crlf
- return $! toHeaders xs
+headersP ∷ Parser Headers
+{-# INLINEABLE headersP #-}
+headersP = do xs ← P.many header
+ crlf
+ return $ toHeaders xs
where
- header :: Parser (Strict.ByteString, Strict.ByteString)
- header = do name <- token
- _ <- char ':'
- -- FIXME: これは多少インチキだが、RFC 2616 のこの部分
- -- の記述はひどく曖昧であり、この動作が本當に間違って
- -- ゐるのかどうかも良く分からない。例へば
- -- quoted-string の内部にある空白は纏めていいのか惡い
- -- のか?直勸的には駄目さうに思へるが、そんな記述は見
- -- 付からない。
- contents <- many (lws <|> many1 text)
- _ <- crlf
- let value = foldr (++) "" contents
- norm = normalize value
- return (C8.pack name, C8.pack norm)
-
- normalize :: String -> String
- normalize = trimBody . trim isWhiteSpace
-
- trimBody = concat
- . map (\ s -> if head s == ' ' then
- " "
- else
- s)
- . group
- . map (\ c -> if isWhiteSpace c
- then ' '
- else c)
-
-
-hPutHeaders :: HandleLike h => h -> Headers -> IO ()
-hPutHeaders h hds
- = h `seq` hds `seq`
- mapM_ putH (M.toList hds) >> hPutBS h (C8.pack "\r\n")
+ header ∷ Parser (CIAscii, Ascii)
+ header = try $
+ do name ← A.toCIAscii <$> token
+ _ ← char ':'
+ skipMany lws
+ values ← sepBy content lws
+ skipMany lws
+ crlf
+ return (name, joinValues values)
+
+ content ∷ Parser Ascii
+ {-# INLINE content #-}
+ content = A.unsafeFromByteString
+ <$>
+ takeWhile1 (\c → ((¬) (isSPHT c)) ∧ isText c)
+
+ joinValues ∷ [Ascii] → Ascii
+ {-# INLINE joinValues #-}
+ joinValues = A.fromAsciiBuilder ∘ joinWith "\x20"
+
+hPutHeaders ∷ HandleLike h ⇒ h → Headers → IO ()
+hPutHeaders !h !(Headers m)
+    = mapM_ putH (M.toList m) ≫ hPutBS h "\r\n"
where
- putH :: (NCBS, Strict.ByteString) -> IO ()
- putH (name, value)
- = name `seq` value `seq`
- do hPutBS h (fromNCBS name)
- hPutBS h (C8.pack ": ")
- hPutBS h value
- hPutBS h (C8.pack "\r\n")
+ putH ∷ (CIAscii, Ascii) → IO ()
+ putH (!name, !value)
+ = do hPutBS h (A.ciToByteString name)
+ hPutBS h ": "
+ hPutBS h (A.toByteString value)
+ hPutBS h "\r\n"
{-# LANGUAGE
BangPatterns
+ , OverloadedStrings
, UnicodeSyntax
#-}
{-# OPTIONS_HADDOCK prune #-}
, hPutHttpVersion
)
where
-
-import qualified Data.ByteString.Char8 as C8
-import Network.HTTP.Lucu.HandleLike
-import Network.HTTP.Lucu.Parser
-import Prelude hiding (min)
+import Control.Monad.Unicode
+import Data.Attoparsec.Char8
+import Network.HTTP.Lucu.HandleLike
+import Prelude hiding (min)
-- |@'HttpVersion' major minor@ represents \"HTTP\/major.minor\".
-data HttpVersion = HttpVersion !Int !Int
- deriving (Eq)
-
-instance Show HttpVersion where
- show (HttpVersion maj min) = "HTTP/" ++ show maj ++ "." ++ show min
+data HttpVersion
+ = HttpVersion !Int !Int
+ deriving (Eq, Show)
instance Ord HttpVersion where
(HttpVersion majA minA) `compare` (HttpVersion majB minB)
| minA < minB = LT
| otherwise = EQ
-
-httpVersionP :: Parser HttpVersion
+httpVersionP ∷ Parser HttpVersion
httpVersionP = string "HTTP/"
- >>
- -- 頻出するので高速化
- choice [ string "1.0" >> return (HttpVersion 1 0)
- , string "1.1" >> return (HttpVersion 1 1)
- -- 一般の場合
- , do major <- many1 digit
- _ <- char '.'
- minor <- many1 digit
- return $ HttpVersion (read major) (read minor)
+ ≫
+ choice [ string "1.1" ≫ return (HttpVersion 1 1)
+ , string "1.0" ≫ return (HttpVersion 1 0)
+ , do major ← decimal
+ _ ← char '.'
+ minor ← decimal
+ return $ HttpVersion major minor
]
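-- Editor's sketch (not part of this patch): assuming attoparsec's parseOnly,
--
--   parseOnly httpVersionP "HTTP/1.1"   ==>  Right (HttpVersion 1 1)
--   parseOnly httpVersionP "HTTP/2.16"  ==>  Right (HttpVersion 2 16)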
-
-hPutHttpVersion :: HandleLike h => h -> HttpVersion -> IO ()
+hPutHttpVersion ∷ HandleLike h ⇒ h → HttpVersion → IO ()
hPutHttpVersion !h !v
= case v of
      -- fast path for the common versions
- HttpVersion 1 0 -> hPutBS h (C8.pack "HTTP/1.0")
- HttpVersion 1 1 -> hPutBS h (C8.pack "HTTP/1.1")
+ HttpVersion 1 0 → hPutBS h "HTTP/1.0"
+ HttpVersion 1 1 → hPutBS h "HTTP/1.1"
      -- the general case
HttpVersion !maj !min
- -> do hPutBS h (C8.pack "HTTP/")
- hPutStr h (show maj)
- hPutChar h '.'
- hPutStr h (show min)
+ → do hPutBS h "HTTP/"
+ hPutStr h (show maj)
+ hPutChar h '.'
+ hPutStr h (show min)
module Network.HTTP.Lucu.MIMEType
( MIMEType(..)
, parseMIMEType
+ , printMIMEType
+
, mimeTypeP
, mimeTypeListP
)
where
-
+import Data.Ascii (Ascii, CIAscii)
+import qualified Data.Ascii as A
+import qualified Data.ByteString.Char8 as BS
import qualified Data.ByteString.Lazy as B
-import Network.HTTP.Lucu.Parser
-import Network.HTTP.Lucu.Parser.Http
-import Network.HTTP.Lucu.Utils
-import Prelude hiding (min)
+import Network.HTTP.Lucu.Parser.Http
+import Network.HTTP.Lucu.Utils
+import Prelude hiding (min)
-- |@'MIMEType' \"major\" \"minor\" [(\"name\", \"value\")]@
-- represents \"major\/minor; name=value\".
data MIMEType = MIMEType {
- mtMajor :: !String
- , mtMinor :: !String
- , mtParams :: ![ (String, String) ]
- } deriving (Eq)
-
+ mtMajor :: !CIAscii
+ , mtMinor :: !CIAscii
+ , mtParams :: ![ (CIAscii, Ascii) ]
+ } deriving (Eq, Show)
-instance Show MIMEType where
- show (MIMEType maj min params)
- = maj ++ "/" ++ min ++
- if null params then
- ""
- else
- "; " ++ joinWith "; " (map showPair params)
- where
- showPair :: (String, String) -> String
- showPair (name, value)
- = name ++ "=" ++ if any (not . isToken) value then
- quoteStr value
- else
- value
-
-
-instance Read MIMEType where
- readsPrec _ s = [(parseMIMEType s, "")]
+-- |Convert a 'MIMEType' to 'Ascii'.
+printMIMEType ∷ MIMEType → Ascii
+printMIMEType (MIMEType maj min params)
+ = A.fromAsciiBuilder $
+      ( A.toAsciiBuilder (A.fromCIAscii maj) ⊕
+        A.toAsciiBuilder "/" ⊕
+        A.toAsciiBuilder (A.fromCIAscii min) ⊕
+        if null params then
+            (∅)
+        else
+            A.toAsciiBuilder "; " ⊕
+            joinWith "; " (map (A.fromAsciiBuilder ∘ printPair) params)
+      )
+    where
+      printPair ∷ (CIAscii, Ascii) → A.AsciiBuilder
+      printPair (name, value)
+          = A.toAsciiBuilder (A.fromCIAscii name) ⊕
+            A.toAsciiBuilder "=" ⊕
+            if BS.any ((¬) ∘ isToken) (A.toByteString value) then
+                quoteStr value
+            else
+                A.toAsciiBuilder value
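-- Editor's sketch (not part of this patch):
--
--   printMIMEType (MIMEType "text" "html" [("charset", "UTF-8")])
--     ==>  "text/html; charset=UTF-8"
--
-- parameter values that are not plain tokens go through quoteStr instead.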
-- |Parse 'MIMEType' from a 'Prelude.String'. This function throws an
-- exception on parse errors.
, serializeExtMap
)
where
-
import qualified Data.ByteString.Lazy.Char8 as B
import qualified Data.Map as M
-import Data.Map (Map)
-import Data.Maybe
-import Language.Haskell.Pretty
-import Language.Haskell.Syntax
-import Network.HTTP.Lucu.MIMEType
-import Network.HTTP.Lucu.Parser
-import Network.HTTP.Lucu.Parser.Http
-import Network.HTTP.Lucu.Utils
+import Data.Map (Map)
+import Data.Maybe
+import Language.Haskell.Pretty
+import Language.Haskell.Syntax
+import Network.HTTP.Lucu.MIMEType
+import Network.HTTP.Lucu.Parser.Http
+import Network.HTTP.Lucu.Utils
-- |'Data.Map.Map' from extension to MIME Type.
type ExtMap = Map String MIMEType
, multipartFormP
)
where
-
import qualified Data.ByteString.Char8 as C8
import qualified Data.ByteString.Lazy.Char8 as L8
import Data.Char
import Data.List
import Network.HTTP.Lucu.Abortion
import Network.HTTP.Lucu.Headers
-import Network.HTTP.Lucu.Parser
import Network.HTTP.Lucu.Parser.Http
import Network.HTTP.Lucu.Response
import Network.HTTP.Lucu.Utils
-
data Part = Part Headers L8.ByteString
-- |This data type represents a form value and possibly an uploaded
+++ /dev/null
-{-# LANGUAGE
- BangPatterns
- , ScopedTypeVariables
- , UnboxedTuples
- , UnicodeSyntax
- #-}
--- |Yet another parser combinator. This is mostly a subset of
--- "Text.ParserCombinators.Parsec" but there are some differences:
---
--- * This parser works on 'Data.ByteString.Base.LazyByteString'
--- instead of 'Prelude.String'.
---
--- * Backtracking is the only possible behavior so there is no \"try\"
--- action.
---
--- * On success, the remaining string is returned as well as the
--- parser result.
---
--- * You can choose whether to treat reaching EOF (trying to eat one
--- more letter at the end of string) a fatal error or to treat it a
--- normal failure. If a fatal error occurs, the entire parsing
--- process immediately fails without trying any backtracks. The
--- default behavior is to treat EOF fatal.
---
--- In general, you don't have to use this module directly.
-module Network.HTTP.Lucu.Parser
- ( Parser
- , ParserResult(..)
-
- , failP
-
- , parse
- , parseStr
-
- , anyChar
- , eof
- , allowEOF
- , satisfy
- , char
- , string
- , (<|>)
- , choice
- , oneOf
- , digit
- , hexDigit
- , notFollowedBy
- , many
- , manyChar
- , many1
- , count
- , option
- , sepBy
- , sepBy1
-
- , sp
- , ht
- , crlf
- )
- where
-
-import Control.Monad.State.Strict hiding (state)
-import qualified Data.ByteString.Lazy as Lazy (ByteString)
-import qualified Data.ByteString.Lazy.Char8 as B hiding (ByteString)
-import qualified Data.Foldable as Fold
-import Data.Int
-import qualified Data.Sequence as Seq
-import Data.Sequence (Seq, (|>))
-
--- |@'Parser' a@ is obviously a parser which parses and returns @a@.
-newtype Parser a = Parser {
- runParser :: State ParserState (ParserResult a)
- }
-
-
-data ParserState
- = PST {
- pstInput :: Lazy.ByteString
- , pstIsEOFFatal :: !Bool
- }
- deriving (Eq, Show)
-
-
-data ParserResult a = Success !a
- | IllegalInput -- 受理出來ない入力があった
- | ReachedEOF -- 限界を越えて讀まうとした
- deriving (Eq, Show)
-
-
--- (>>=) :: Parser a -> (a -> Parser b) -> Parser b
-instance Monad Parser where
- p >>= f = Parser $! do saved <- get -- 失敗した時の爲に状態を保存
- result <- runParser p
- case result of
- Success a -> runParser (f a)
- IllegalInput -> do put saved -- 状態を復歸
- return IllegalInput
- ReachedEOF -> do put saved -- 状態を復歸
- return ReachedEOF
- return !x = Parser $! return $! Success x
- fail _ = Parser $! return $! IllegalInput
-
-instance Functor Parser where
- fmap f p = p >>= return . f
-
--- |@'failP'@ is just a synonym for @'Prelude.fail'
--- 'Prelude.undefined'@.
-failP :: Parser a
-failP = fail undefined
-
--- |@'parse' p bstr@ parses @bstr@ with @p@ and returns @(# result,
--- remaining #)@.
-parse :: Parser a -> Lazy.ByteString -> (# ParserResult a, Lazy.ByteString #)
-parse !p input -- input は lazy である必要有り。
- = let (!result, state') = runState (runParser p) (PST input True)
- in
- (# result, pstInput state' #) -- pstInput state' も lazy である必要有り。
-
--- |@'parseStr' p str@ packs @str@ and parses it.
-parseStr :: Parser a -> String -> (# ParserResult a, Lazy.ByteString #)
-parseStr !p input -- input は lazy である必要有り。
- = parse p (B.pack input)
-
-
-anyChar :: Parser Char
-anyChar = Parser $!
- do state@(PST input _) <- get
- if B.null input then
- return ReachedEOF
- else
- do put $! state { pstInput = B.tail input }
- return (Success $! B.head input)
-
-
-eof :: Parser ()
-eof = Parser $!
- do PST input _ <- get
- if B.null input then
- return $! Success ()
- else
- return IllegalInput
-
--- |@'allowEOF' p@ makes @p@ treat reaching EOF a normal failure.
-allowEOF :: Parser a -> Parser a
-allowEOF !f
- = Parser $! do saved@(PST _ isEOFFatal) <- get
- put $! saved { pstIsEOFFatal = False }
-
- result <- runParser f
-
- state <- get
- put $! state { pstIsEOFFatal = isEOFFatal }
-
- return result
-
-
-satisfy :: (Char -> Bool) -> Parser Char
-satisfy !f
- = do c <- anyChar
- if f c then
- return c
- else
- failP
-
-
-char :: Char -> Parser Char
-char !c = satisfy (== c)
-
-
-string :: String -> Parser String
-string !str
- = let bs = B.pack str
- len = B.length bs
- in
- Parser $!
- do st <- get
- let (bs', rest) = B.splitAt len $ pstInput st
- st' = st { pstInput = rest }
- if B.length bs' < len then
- return ReachedEOF
- else
- if bs == bs' then
- do put st'
- return $ Success str
- else
- return IllegalInput
-
-
-infixr 0 <|>
-
--- |This is the backtracking alternation. There is no non-backtracking
--- equivalent.
-(<|>) :: Parser a -> Parser a -> Parser a
-(!f) <|> (!g)
- = Parser $! do saved <- get -- 状態を保存
- result <- runParser f
- case result of
- Success a -> return $! Success a
- IllegalInput -> do put saved -- 状態を復歸
- runParser g
- ReachedEOF -> if pstIsEOFFatal saved then
- do put saved
- return ReachedEOF
- else
- do put saved
- runParser g
-
-
-choice :: [Parser a] -> Parser a
-choice = foldl (<|>) failP
-
-
-oneOf :: [Char] -> Parser Char
-oneOf = foldl (<|>) failP . map char
-
-
-notFollowedBy :: Parser a -> Parser ()
-notFollowedBy !p
- = Parser $! do saved <- get -- 状態を保存
- result <- runParser p
- case result of
- Success _ -> do put saved -- 状態を復歸
- return IllegalInput
- IllegalInput -> do put saved -- 状態を復歸
- return $! Success ()
- ReachedEOF -> do put saved -- 状態を復歸
- return $! Success ()
-
-
-digit :: Parser Char
-digit = do c <- anyChar
- if c >= '0' && c <= '9' then
- return c
- else
- failP
-
-
-hexDigit :: Parser Char
-hexDigit = do c <- anyChar
- if (c >= '0' && c <= '9') ||
- (c >= 'a' && c <= 'f') ||
- (c >= 'A' && c <= 'F') then
- return c
- else
- failP
-
-
-many :: forall a. Parser a -> Parser [a]
-many !p = Parser $!
- do state <- get
- let (# result, state' #) = many' state Seq.empty
- put state'
- return result
- where
- many' :: ParserState -> Seq a -> (# ParserResult [a], ParserState #)
- many' !st !soFar
- = case runState (runParser p) st of
- (Success a, st') -> many' st' (soFar |> a)
- (IllegalInput, _) -> (# Success (Fold.toList soFar), st #)
- (ReachedEOF , _) -> if pstIsEOFFatal st then
- (# ReachedEOF, st #)
- else
- (# Success (Fold.toList soFar), st #)
-
-manyChar :: Parser Char -> Parser Lazy.ByteString
-manyChar !p = Parser $!
- do state <- get
- case scan' state 0 of
- Success len
- -> do let (bs, rest) = B.splitAt len (pstInput state)
- state' = state { pstInput = rest }
- put state'
- return $ Success bs
- ReachedEOF
- -> if pstIsEOFFatal state then
- return ReachedEOF
- else
- error "internal error"
- _ -> error "internal error"
- where
- scan' :: ParserState -> Int64 -> ParserResult Int64
- scan' !st !soFar
- = case runState (runParser p) st of
- (Success _ , st') -> scan' st' (soFar + 1)
- (IllegalInput, _ ) -> Success soFar
- (ReachedEOF , _ ) -> if pstIsEOFFatal st then
- ReachedEOF
- else
- Success soFar
-
-
-many1 :: Parser a -> Parser [a]
-many1 !p = do x <- p
- xs <- many p
- return (x:xs)
-
-
-count :: Int -> Parser a -> Parser [a]
-count !n !p = Parser $! count' n p Seq.empty
-
--- This implementation is rather ugly but we need to make it
--- tail-recursive to avoid stack overflow.
-count' :: Int -> Parser a -> Seq a -> State ParserState (ParserResult [a])
-count' 0 _ !soFar = return $! Success $! Fold.toList soFar
-count' !n !p !soFar = do saved <- get
- result <- runParser p
- case result of
- Success a -> count' (n-1) p (soFar |> a)
- IllegalInput -> do put saved
- return IllegalInput
- ReachedEOF -> do put saved
- return ReachedEOF
-
-
--- def may be a _|_
-option :: a -> Parser a -> Parser a
-option def !p = p <|> return def
-
-
-sepBy :: Parser a -> Parser sep -> Parser [a]
-sepBy !p !sep = sepBy1 p sep <|> return []
-
-
-sepBy1 :: Parser a -> Parser sep -> Parser [a]
-sepBy1 !p !sep
- = do x <- p
- xs <- many $! sep >> p
- return (x:xs)
-
-
-sp :: Parser Char
-sp = char ' '
-
-
-ht :: Parser Char
-ht = char '\t'
-
-
-crlf :: Parser String
-crlf = string "\x0d\x0a"
{-# LANGUAGE
BangPatterns
+ , OverloadedStrings
, UnicodeSyntax
#-}
-- |This is an auxiliary parser utilities for parsing things related
-- In general you don't have to use this module directly.
module Network.HTTP.Lucu.Parser.Http
( isCtl
+ , isText
, isSeparator
, isChar
, isToken
+ , isSPHT
+
, listOf
- , token
+
+ , crlf
+ , sp
, lws
- , text
- , separator
+
+ , token
+ , separators
, quotedStr
, qvalue
+
+ , atMost
)
where
+import Control.Applicative
+import Control.Applicative.Unicode
+import Control.Monad.Unicode
+import Data.Ascii (Ascii)
+import qualified Data.Ascii as A
+import Data.Attoparsec.Char8 as P
+import qualified Data.Attoparsec.FastSet as FS
+import qualified Data.ByteString.Char8 as BS
+import Prelude.Unicode
-import Network.HTTP.Lucu.Parser
-
--- |@'isCtl' c@ is 'Prelude.False' iff @0x20 <= @c@ < 0x7F@.
-isCtl :: Char -> Bool
+-- |@'isCtl' c@ is 'Prelude.False' iff @0x20 <= c < 0x7F@.
+isCtl ∷ Char → Bool
+{-# INLINE isCtl #-}
isCtl c
- | c < '\x1f' = True
- | c >= '\x7f' = True
- | otherwise = False
+ | c ≤ '\x1f' = True
+    | c ≥ '\x7f' = True
+ | otherwise = False
+
+-- |@'isText'@ is equivalent to @'not' '.' 'isCtl'@
+isText ∷ Char → Bool
+{-# INLINE isText #-}
+isText = (¬) ∘ isCtl
-- |@'isSeparator' c@ is 'Prelude.True' iff c is one of HTTP
-- separators.
-isSeparator :: Char -> Bool
-isSeparator '(' = True
-isSeparator ')' = True
-isSeparator '<' = True
-isSeparator '>' = True
-isSeparator '@' = True
-isSeparator ',' = True
-isSeparator ';' = True
-isSeparator ':' = True
-isSeparator '\\' = True
-isSeparator '"' = True
-isSeparator '/' = True
-isSeparator '[' = True
-isSeparator ']' = True
-isSeparator '?' = True
-isSeparator '=' = True
-isSeparator '{' = True
-isSeparator '}' = True
-isSeparator ' ' = True
-isSeparator '\t' = True
-isSeparator _ = False
+isSeparator ∷ Char → Bool
+{-# INLINE isSeparator #-}
+isSeparator = flip FS.memberChar set
+ where
+ {-# NOINLINE set #-}
+ set = FS.charClass "()<>@,;:\\\"/[]?={}\x20\x09"
-- |@'isChar' c@ is 'Prelude.True' iff @c <= 0x7f@.
-isChar :: Char -> Bool
-isChar c
- | c <= '\x7f' = True
- | otherwise = False
+isChar ∷ Char → Bool
+{-# INLINE isChar #-}
+isChar = (≤ '\x7F')
-- |@'isToken' c@ is equivalent to @not ('isCtl' c || 'isSeparator'
-- c)@
-isToken :: Char -> Bool
-isToken c = c `seq`
- not (isCtl c || isSeparator c)
-
--- |@'listOf' p@ is similar to @'Network.HTTP.Lucu.Parser.sepBy' p
--- ('Network.HTTP.Lucu.Parser.char' \',\')@ but it allows any
--- occurrences of LWS before and after each tokens.
-listOf :: Parser a -> Parser [a]
-listOf !p = do _ <- many lws
- sepBy p $! do _ <- many lws
- _ <- char ','
- many lws
-
--- |'token' is equivalent to @'Network.HTTP.Lucu.Parser.many1' $
--- 'Network.HTTP.Lucu.Parser.satisfy' 'isToken'@
-token :: Parser String
-token = many1 $! satisfy isToken
-
--- |'lws' is an HTTP LWS: @'Network.HTTP.Lucu.Parser.crlf'?
--- ('Network.HTTP.Lucu.Parser.sp' | 'Network.HTTP.Lucu.Parser.ht')+@
-lws :: Parser String
-lws = do s <- option "" crlf
- xs <- many1 (sp <|> ht)
- return (s ++ xs)
-
--- |'text' accepts one character which doesn't satisfy 'isCtl'.
-text :: Parser Char
-text = satisfy (not . isCtl)
-
--- |'separator' accepts one character which satisfies 'isSeparator'.
-separator :: Parser Char
-separator = satisfy isSeparator
+isToken ∷ Char → Bool
+{-# INLINE isToken #-}
+isToken !c
+ = (¬) (isCtl c ∨ isSeparator c)
+
+-- |@'listOf' p@ is similar to @'sepBy' p ('char' \',\')@ but it
+-- allows any occurrences of 'lws' before and after each tokens.
+listOf ∷ Parser a → Parser [a]
+{-# INLINEABLE listOf #-}
+listOf p
+ = try $
+ do skipMany lws
+ sepBy p $ do skipMany lws
+                 _ ← char ','
+ skipMany lws
+
+-- |'token' is similar to @'takeWhile1' 'isToken'@
+token ∷ Parser Ascii
+{-# INLINE token #-}
+token = A.unsafeFromByteString <$> takeWhile1 isToken
+
+-- |The CRLF: 0x0D 0x0A.
+crlf ∷ Parser ()
+{-# INLINE crlf #-}
+crlf = string "\x0D\x0A" ≫ return ()
+
+-- |The SP: 0x20.
+sp ∷ Parser ()
+{-# INLINE sp #-}
+sp = char '\x20' ≫ return ()
+
+-- |HTTP LWS: crlf? (sp | ht)+
+lws ∷ Parser ()
+{-# INLINEABLE lws #-}
+lws = try $
+ do option () crlf
+ _ ← satisfy isSPHT
+ skipWhile isSPHT
+
+-- |Returns 'True' for SP and HT.
+isSPHT ∷ Char → Bool
+{-# INLINE isSPHT #-}
+isSPHT '\x20' = True
+isSPHT '\x09' = True
+isSPHT _ = False
+
+-- |@'separators'@ is similar to @'takeWhile1' 'isSeparator'@.
+separators ∷ Parser Ascii
+{-# INLINE separators #-}
+separators = A.unsafeFromByteString <$> takeWhile1 isSeparator
-- |'quotedStr' accepts a string surrounded by double quotation
-- marks. Quotes can be escaped by backslashes.
-quotedStr :: Parser String
-quotedStr = do _ <- char '"'
- xs <- many (qdtext <|> quotedPair)
- _ <- char '"'
- return $ foldr (++) "" xs
+quotedStr ∷ Parser Ascii
+{-# INLINEABLE quotedStr #-}
+quotedStr = try $
+ do _ ← char '"'
+ xs ← P.many (qdtext <|> quotedPair)
+ _ ← char '"'
+ return $ A.unsafeFromByteString $ BS.pack xs
where
- qdtext = do c <- satisfy (/= '"')
- return [c]
+ qdtext ∷ Parser Char
+ {-# INLINE qdtext #-}
+ qdtext = satisfy (\c → c ≢ '"' ∧ (¬) (isCtl c))
- quotedPair = do _ <- char '\\'
- c <- satisfy isChar
- return [c]
+ quotedPair ∷ Parser Char
+ {-# INLINE quotedPair #-}
+ quotedPair = char '\\' ≫ satisfy isChar
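-- Editor's sketch (not part of this patch): assuming attoparsec's parseOnly,
--
--   parseOnly quotedStr "\"hello world\""  ==>  Right "hello world"
--
-- backslash-escaped characters inside the quotes are accepted via quotedPair.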
-- |'qvalue' accepts a so-called qvalue.
-qvalue :: Parser Double
-qvalue = do x <- char '0'
- xs <- option ""
- $ do y <- char '.'
- ys <- many digit -- 本當は三文字までに制限
- return (y:ys)
+qvalue ∷ Parser Double
+{-# INLINEABLE qvalue #-}
+qvalue = do x ← char '0'
+ xs ← option "" $
+ do y ← char '.'
+ ys ← atMost 3 digit
+ return (y:ys)
return $ read (x:xs)
<|>
- do x <- char '1'
- xs <- option ""
- $ do y <- char '.'
- ys <- many (char '0') -- 本當は三文字までに制限
- return (y:ys)
+ do x ← char '1'
+ xs ← option "" $
+ do y ← char '.'
+ ys ← atMost 3 (char '0')
+ return (y:ys)
return $ read (x:xs)
+
+-- |@'atMost' n v@ is like @'P.many' v@ but applies the given action
+-- at most @n@ times.
+atMost ∷ Alternative f ⇒ Int → f a → f [a]
+{-# INLINE atMost #-}
+atMost 0 _ = pure []
+atMost n v = ( (:) <$> v ⊛ atMost (n-1) v )
+ <|>
+ pure []
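-- Editor's sketch (not part of this patch):
--
--   parseOnly (atMost 2 (char 'a')) "aaa"  ==>  Right "aa"
--
-- the third 'a' is simply left unconsumed.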
+{-# LANGUAGE
+ OverloadedStrings
+ , UnicodeSyntax
+ #-}
{-# OPTIONS_HADDOCK prune #-}
-- |Definition of things related on HTTP request.
, requestP
)
where
-
-import Network.HTTP.Lucu.Headers
-import Network.HTTP.Lucu.HttpVersion
-import Network.HTTP.Lucu.Parser
-import Network.HTTP.Lucu.Parser.Http
-import Network.URI
+import Control.Applicative
+import Control.Monad.Unicode
+import Data.Ascii (Ascii)
+import Data.Attoparsec.Char8
+import qualified Data.ByteString.Char8 as C8
+import Network.HTTP.Lucu.Headers
+import Network.HTTP.Lucu.HttpVersion
+import Network.HTTP.Lucu.Parser.Http
+import Network.URI
+import Prelude.Unicode
-- |This is the definition of HTTP request methods, which shouldn't
-- require any description.
| DELETE
| TRACE
| CONNECT
- | ExtensionMethod !String
+ | ExtensionMethod !Ascii
deriving (Eq, Show)
-- |This is the definition of HTTP reqest.
data Request
= Request {
- reqMethod :: !Method
- , reqURI :: !URI
- , reqVersion :: !HttpVersion
- , reqHeaders :: !Headers
+ reqMethod ∷ !Method
+ , reqURI ∷ !URI
+ , reqVersion ∷ !HttpVersion
+ , reqHeaders ∷ !Headers
}
- deriving (Show, Eq)
+ deriving (Eq, Show)
instance HasHeaders Request where
getHeaders = reqHeaders
setHeaders req hdr = req { reqHeaders = hdr }
-
-requestP :: Parser Request
-requestP = do _ <- many crlf
- (method, uri, version) <- requestLineP
- headers <- headersP
+requestP ∷ Parser Request
+requestP = do skipMany crlf
+ (method, uri, version) ← requestLineP
+ headers ← headersP
return Request {
reqMethod = method
, reqURI = uri
, reqHeaders = headers
}
-
-requestLineP :: Parser (Method, URI, HttpVersion)
-requestLineP = do method <- methodP
- _ <- sp
- uri <- uriP
- _ <- sp
- ver <- httpVersionP
- _ <- crlf
+requestLineP ∷ Parser (Method, URI, HttpVersion)
+requestLineP = do method ← methodP
+ sp
+ uri ← uriP
+ sp
+ ver ← httpVersionP
+ crlf
return (method, uri, ver)
+methodP ∷ Parser Method
+methodP = choice
+ [ string "OPTIONS" ≫ return OPTIONS
+ , string "GET" ≫ return GET
+ , string "HEAD" ≫ return HEAD
+ , string "POST" ≫ return POST
+ , string "PUT" ≫ return PUT
+ , string "DELETE" ≫ return DELETE
+ , string "TRACE" ≫ return TRACE
+ , string "CONNECT" ≫ return CONNECT
+ , ExtensionMethod <$> token
+ ]
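-- Editor's sketch (not part of this patch): assuming attoparsec's parseOnly,
--
--   parseOnly methodP "GET"       ==>  Right GET
--   parseOnly methodP "PROPFIND"  ==>  Right (ExtensionMethod "PROPFIND")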
-methodP :: Parser Method
-methodP = ( let methods = [ ("OPTIONS", OPTIONS)
- , ("GET" , GET )
- , ("HEAD" , HEAD )
- , ("POST" , POST )
- , ("PUT" , PUT )
- , ("DELETE" , DELETE )
- , ("TRACE" , TRACE )
- , ("CONNECT", CONNECT)
- ]
- in choice $ map (\ (str, mth)
- -> string str >> return mth) methods )
- <|>
- fmap ExtensionMethod token
-
-
-uriP :: Parser URI
-uriP = do str <- many1 $ satisfy (\ c -> not (isCtl c || c == ' '))
+uriP ∷ Parser URI
+uriP = try $
+ do bs ← takeWhile1 (\c → (¬) (isCtl c ∨ c ≡ '\x20'))
+ let str = C8.unpack bs
case parseURIReference str of
- Nothing -> failP
- Just uri -> return uri
\ No newline at end of file
+         Nothing  → fail ("Unparsable URI: " ⧺ str)
+         Just uri → return uri
( requestReader
)
where
-
import Control.Concurrent.STM
import Control.Exception
import Control.Monad
import Network.HTTP.Lucu.DefaultPage
import Network.HTTP.Lucu.HandleLike
import Network.HTTP.Lucu.Interaction
-import Network.HTTP.Lucu.Parser
import Network.HTTP.Lucu.Postprocess
import Network.HTTP.Lucu.Preprocess
import Network.HTTP.Lucu.Request
, driftTo
)
where
-
import Control.Concurrent.STM
import Control.Monad.Reader
import qualified Data.ByteString as Strict (ByteString)
import Network.HTTP.Lucu.HttpVersion
import Network.HTTP.Lucu.Interaction
import Network.HTTP.Lucu.MultipartForm
-import Network.HTTP.Lucu.Parser
import Network.HTTP.Lucu.Postprocess
import Network.HTTP.Lucu.Request
import Network.HTTP.Lucu.Response
{-# LANGUAGE
BangPatterns
+ , OverloadedStrings
, UnicodeSyntax
#-}
-- |Utility functions used internally in the Lucu httpd. These
module Network.HTTP.Lucu.Utils
( splitBy
, joinWith
- , trim
- , isWhiteSpace
, quoteStr
, parseWWWFormURLEncoded
)
where
-
import Control.Monad
-import Data.List hiding (last)
+import Data.Ascii (Ascii, AsciiBuilder)
+import qualified Data.Ascii as A
+import qualified Data.ByteString.Char8 as BS
+import Data.List hiding (last)
+import Data.Monoid.Unicode
import Network.URI
-import Prelude hiding (last)
+import Prelude hiding (last)
+import Prelude.Unicode
-- |> splitBy (== ':') "ab:c:def"
-- > ==> ["ab", "c", "def"]
-splitBy :: (a -> Bool) -> [a] -> [[a]]
+splitBy ∷ (a → Bool) → [a] → [[a]]
splitBy isSep src
= case break isSep src
- of (last , [] ) -> [last]
- (first, _sep:rest) -> first : splitBy isSep rest
+ of (last , [] ) → [last]
+ (first, _sep:rest) → first : splitBy isSep rest
-- |> joinWith ":" ["ab", "c", "def"]
-- > ==> "ab:c:def"
-joinWith :: [a] -> [[a]] -> [a]
-joinWith = (join .) . intersperse
-
--- |> trim (== '_') "__ab_c__def___"
--- > ==> "ab_c__def"
-trim :: (a -> Bool) -> [a] -> [a]
-trim !p = trimTail . trimHead
+joinWith ∷ Ascii → [Ascii] → AsciiBuilder
+{-# INLINEABLE joinWith #-}
+joinWith sep = flip go (∅)
where
- trimHead = dropWhile p
- trimTail = reverse . trimHead . reverse
-
--- |@'isWhiteSpace' c@ is 'Prelude.True' iff c is one of SP, HT, CR
--- and LF.
-isWhiteSpace :: Char -> Bool
-isWhiteSpace ' ' = True
-isWhiteSpace '\t' = True
-isWhiteSpace '\r' = True
-isWhiteSpace '\n' = True
-isWhiteSpace _ = False
-{-# INLINE isWhiteSpace #-}
+ go ∷ [Ascii] → A.AsciiBuilder → A.AsciiBuilder
+ {-# INLINE go #-}
+ go [] ab = ab
+ go (x:[]) ab = ab ⊕ A.toAsciiBuilder x
+      go (x:xs) ab = go xs ( ab ⊕
+                             A.toAsciiBuilder x ⊕
+                             A.toAsciiBuilder sep )
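-- Editor's note (not part of this patch): joinWith now returns an
-- AsciiBuilder, so the Haddock example above corresponds to
--
--   A.fromAsciiBuilder (joinWith ":" ["ab", "c", "def"])  ==>  "ab:c:def"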
-- |> quoteStr "abc"
-- > ==> "\"abc\""
--
-- > quoteStr "ab\"c"
-- > ==> "\"ab\\\"c\""
-quoteStr :: String -> String
-quoteStr !str = concat (["\""] ++ map quote str ++ ["\""])
+quoteStr ∷ Ascii → AsciiBuilder
+quoteStr str = A.toAsciiBuilder "\"" ⊕
+ go (A.toByteString str) (∅) ⊕
+ A.toAsciiBuilder "\""
where
- quote :: Char -> String
- quote '"' = "\\\""
- quote c = [c]
+ go ∷ BS.ByteString → AsciiBuilder → AsciiBuilder
+ go bs ab
+ = case BS.break (≡ '"') bs of
+ (x, y)
+ | BS.null y → ab ⊕ b2ab x
+ | otherwise → go (BS.tail y) (ab ⊕ b2ab x
+ ⊕ A.toAsciiBuilder "\\\"")
+ b2ab ∷ BS.ByteString → AsciiBuilder
+ b2ab = A.toAsciiBuilder ∘ A.unsafeFromByteString
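-- Editor's note (not part of this patch): quoteStr likewise returns an
-- AsciiBuilder now, so the examples above correspond to e.g.
--
--   A.fromAsciiBuilder (quoteStr "ab\"c")  ==>  "\"ab\\\"c\""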
-- |> parseWWWFormURLEncoded "aaa=bbb&ccc=ddd"
-- > ==> [("aaa", "bbb"), ("ccc", "ddd")]
-parseWWWFormURLEncoded :: String -> [(String, String)]
+parseWWWFormURLEncoded ∷ String → [(String, String)]
parseWWWFormURLEncoded src
| src == "" = []
- | otherwise = do pairStr <- splitBy (\ c -> c == ';' || c == '&') src
+ | otherwise = do pairStr <- splitBy (\ c → c == ';' || c == '&') src
let (key, value) = break (== '=') pairStr
return ( unEscapeString key
, unEscapeString $ case value of
- ('=':val) -> val
- val -> val
+ ('=':val) → val
+ val → val
)