From 1d4b9fafa0d23c01fa57de42231ae1fbbb7c5985 Mon Sep 17 00:00:00 2001
From: Alexander Ljungberg
Date: Fri, 20 Jan 2023 16:51:08 +0000
Subject: [PATCH] feat: data representations allow custom parsing and
 formatting of API fields.

See PR #2523.

Most notable code changes:

- Load data representation casts into schema cache.
- Data representations for reads, filters, inserts, updates, views, over joins.
- `CoercibleField` represents name references in queries where coercion may be needed.
- `ResolverContext` helps facilitate field resolution during planning.
- Planner 'resolves' names in the API query and pairs them with any implicit conversions to be used in the query builder stage.
- Tests for all of the above.
---
 README.md                                    |   1 +
 postgrest.cabal                              |   1 +
 src/PostgREST/Plan.hs                        | 253 ++++++++++++++-----
 src/PostgREST/Plan/MutatePlan.hs             |  15 +-
 src/PostgREST/Plan/ReadPlan.hs               |  12 +-
 src/PostgREST/Plan/Types.hs                  |  57 +++--
 src/PostgREST/Query/QueryBuilder.hs          |   2 +-
 src/PostgREST/Query/SqlFragment.hs           |  75 ++++--
 src/PostgREST/SchemaCache.hs                 |  88 +++++--
 src/PostgREST/SchemaCache/Representations.hs |  29 +++
 test/spec/Feature/Query/ComputedRelsSpec.hs  |  36 +++
 test/spec/Feature/Query/InsertSpec.hs        | 116 ++++++++-
 test/spec/Feature/Query/QuerySpec.hs         | 122 +++++++++
 test/spec/Feature/Query/UpdateSpec.hs        | 191 +++++++++++++-
 test/spec/Main.hs                            |   2 +-
 test/spec/fixtures/data.sql                  |  10 +
 test/spec/fixtures/schema.sql                |  79 +++++-
 17 files changed, 941 insertions(+), 148 deletions(-)
 create mode 100644 src/PostgREST/SchemaCache/Representations.hs

diff --git a/README.md b/README.md
index ae9df78100b..e6697446488 100644
--- a/README.md
+++ b/README.md
@@ -1,3 +1,4 @@
+
 ![Logo](static/bigger-logo.png "Logo")

 [![Donate](https://img.shields.io/badge/Donate-Patreon-orange.svg?colorB=F96854)](https://www.patreon.com/postgrest)
diff --git a/postgrest.cabal b/postgrest.cabal
index 32d3a4cac71..ce77b3ce69b 100644
--- a/postgrest.cabal
+++ b/postgrest.cabal
@@ -48,6 +48,7 @@ library
       PostgREST.SchemaCache.Identifiers
       PostgREST.SchemaCache.Proc
       PostgREST.SchemaCache.Relationship
+      PostgREST.SchemaCache.Representations
       PostgREST.SchemaCache.Table
       PostgREST.Error
       PostgREST.Logger
diff --git a/src/PostgREST/Plan.hs b/src/PostgREST/Plan.hs
index 55c2afd8407..f595e4113e4 100644
--- a/src/PostgREST/Plan.hs
+++ b/src/PostgREST/Plan.hs
@@ -24,6 +24,7 @@ module PostgREST.Plan
   ) where

 import qualified Data.HashMap.Strict        as HM
+import qualified Data.HashMap.Strict.InsOrd as HMI
 import qualified Data.Set                   as S

 import qualified PostgREST.SchemaCache.Proc as Proc

@@ -31,30 +32,34 @@ import Data.Either.Combinators (mapLeft, mapRight)
 import Data.List               (delete)
 import Data.Tree               (Tree (..))

-import PostgREST.ApiRequest               (Action (..),
-                                           ApiRequest (..),
-                                           Mutation (..),
-                                           Payload (..))
-import PostgREST.Config                   (AppConfig (..))
-import PostgREST.Error                    (Error (..))
-import PostgREST.Query.SqlFragment        (sourceCTEName)
-import PostgREST.RangeQuery               (NonnegRange, allRange,
-                                           convertToLimitZeroRange,
-                                           restrictRange)
-import PostgREST.SchemaCache              (SchemaCache (..))
-import PostgREST.SchemaCache.Identifiers  (FieldName,
-                                           QualifiedIdentifier (..),
-                                           Schema)
-import PostgREST.SchemaCache.Proc         (ProcDescription (..),
-                                           ProcParam (..),
-                                           procReturnsScalar)
-import PostgREST.SchemaCache.Relationship (Cardinality (..),
-                                           Junction (..),
-                                           Relationship (..),
-                                           RelationshipsMap,
-                                           relIsToOne)
-import PostgREST.SchemaCache.Table        (Table (tableName),
-                                           tablePKCols)
+import PostgREST.ApiRequest                  (Action (..),
+                                              ApiRequest (..),
+                                              Mutation (..),
+                                              Payload (..))
+import PostgREST.Config                      (AppConfig (..))
+import PostgREST.Error                       (Error (..))
+import PostgREST.Query.SqlFragment           (sourceCTEName)
+import PostgREST.RangeQuery                  (NonnegRange, allRange,
+                                              convertToLimitZeroRange,
+                                              restrictRange)
+import PostgREST.SchemaCache                 (SchemaCache (..))
+import PostgREST.SchemaCache.Identifiers     (FieldName,
+                                              QualifiedIdentifier (..),
+                                              Schema)
+import PostgREST.SchemaCache.Proc            (ProcDescription (..),
+                                              ProcParam (..),
+                                              procReturnsScalar)
+import PostgREST.SchemaCache.Relationship    (Cardinality (..),
+                                              Junction (..),
+                                              Relationship (..),
+                                              RelationshipsMap,
+                                              relIsToOne)
+import PostgREST.SchemaCache.Representations (DataRepresentation (..),
+                                              RepresentationsMap)
+import PostgREST.SchemaCache.Table           (Column (..), Table (..),
+                                              TablesMap,
+                                              tableColumnsList,
+                                              tablePKCols)

 import PostgREST.ApiRequest.Preferences
 import PostgREST.ApiRequest.Types

@@ -90,26 +95,93 @@ callReadPlan proc conf sCache apiRequest = do
   let cPlan = callPlan proc apiRequest rPlan
   return $ CallReadPlan rPlan cPlan

+-- | During planning we need to resolve Field -> CoercibleField (finding the context-specific target type and map function).
+-- | ResolverContext facilitates this without the need to pass around a laundry list of parameters.
+data ResolverContext = ResolverContext
+  { tables          :: TablesMap
+  , representations :: RepresentationsMap
+  , qi              :: QualifiedIdentifier -- ^ The table we're currently attending to; changes as we recurse into joins etc.
+  , outputType      :: Text -- ^ The output type for the response payload; e.g. "csv", "json", "binary".
+  }
+
+resolveColumnField :: Column -> CoercibleField
+resolveColumnField col = CoercibleField (colName col) [] (colNominalType col) Nothing
+
+resolveTableFieldName :: Table -> FieldName -> CoercibleField
+resolveTableFieldName table fieldName =
+  fromMaybe (unknownField fieldName []) $ HMI.lookup fieldName (tableColumns table) >>=
+    Just . resolveColumnField
+
+resolveTableField :: Table -> Field -> CoercibleField
+resolveTableField table (fieldName, []) = resolveTableFieldName table fieldName
+-- If the field is known and a JSON path is given, always assume the JSON type. But don't assume a type for entirely unknown fields.
+resolveTableField table (fieldName, jp) =
+  case resolveTableFieldName table fieldName of
+    tf@CoercibleField{tfIRType=""} -> tf{tfJsonPath=jp}
+    tf                             -> tf{tfJsonPath=jp, tfIRType="json"}
+
+-- | Resolve a type within the context based on the given field name and JSON path. Although there are situations where failure to resolve a field is considered an error (see `resolveOrError`), there are also situations where we allow it (RPC calls). If it should be an error and `resolveOrError` doesn't fit, make sure to check that the `tfIRType` isn't empty.
+resolveTypeOrUnknown :: ResolverContext -> Field -> CoercibleField
+resolveTypeOrUnknown ResolverContext{..} field@(fn, jp) =
+  fromMaybe (unknownField fn jp) $ HM.lookup qi tables >>=
+    Just . flip resolveTableField field
+
+-- | Install any pre-defined data representation from source to target to coerce this reference.
+--
+-- Note that we change the IR type here. This might seem unintuitive. The short of it is that for a CoercibleField without a transformer, input type == output type. A transformer maps from a -> b, so by definition the input type will be a and the output type b after. And tfIRType is the *input* type.
+--
+-- It might feel odd that once a transformer is added we 'forget' the target type (because now a /= b).
+-- You might also note there's no obvious way to stack transforms (even if there were a stack, you'd have erased what type you're working with, so it'd be awkward). Alas, as satisfying as it would be to engineer a layered mapping system with full type information, we just don't need it.
+withTransformer :: ResolverContext -> Text -> Text -> CoercibleField -> CoercibleField
+withTransformer ResolverContext{representations} sourceType targetType field =
+  fromMaybe field $ HM.lookup (sourceType, targetType) representations >>=
+    (\fieldRepresentation -> Just field{tfIRType=sourceType, tfTransform=Just (drFunction fieldRepresentation)})
+
+-- | Map the intermediate representation type to the output type, if available.
+withOutputFormat :: ResolverContext -> CoercibleField -> CoercibleField
+withOutputFormat ctx@ResolverContext{outputType} field@CoercibleField{tfIRType} = withTransformer ctx tfIRType outputType field
+
+-- | Map text into the intermediate representation type, if available.
+withTextParse :: ResolverContext -> CoercibleField -> CoercibleField
+withTextParse ctx field@CoercibleField{tfIRType} = withTransformer ctx "text" tfIRType field
+
+-- | Map json into the intermediate representation type, if available.
+withJsonParse :: ResolverContext -> CoercibleField -> CoercibleField
+withJsonParse ctx field@CoercibleField{tfIRType} = withTransformer ctx "json" tfIRType field
+
+-- | Map the intermediate representation type to the output type defined by the resolver context (normally json), if available.
+resolveOutputField :: ResolverContext -> Field -> CoercibleField
+resolveOutputField ctx field = withOutputFormat ctx $ resolveTypeOrUnknown ctx field
+
+-- | Map the query string format of a value (text) into the intermediate representation type, if available.
+resolveQueryInputField :: ResolverContext -> Field -> CoercibleField
+resolveQueryInputField ctx field = withTextParse ctx $ resolveTypeOrUnknown ctx field
+
 -- | Builds the ReadPlan tree on a number of stages.
 -- | Adds filters, order, limits on its respective nodes.
 -- | Adds joins conditions obtained from resource embedding.
 readPlan :: QualifiedIdentifier -> AppConfig -> SchemaCache -> ApiRequest -> Either Error ReadPlanTree
-readPlan qi@QualifiedIdentifier{..} AppConfig{configDbMaxRows} SchemaCache{dbRelationships} apiRequest =
-  mapLeft ApiRequestError $
-    treeRestrictRange configDbMaxRows (iAction apiRequest) =<<
-    addNullEmbedFilters =<<
-    validateSpreadEmbeds =<<
-    addRelatedOrders =<<
-    addRels qiSchema (iAction apiRequest) dbRelationships Nothing =<<
-    addLogicTrees apiRequest =<<
-    addRanges apiRequest =<<
-    addOrders apiRequest =<<
-    addFilters apiRequest (initReadRequest qi $ QueryParams.qsSelect $ iQueryParams apiRequest)
+readPlan qi@QualifiedIdentifier{..} AppConfig{configDbMaxRows} SchemaCache{dbTables, dbRelationships, dbRepresentations} apiRequest =
+  let
+    -- JSON output format hardcoded for now. In the future we might want to support other output mappings such as CSV.
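+    -- A single ResolverContext is built here and threaded through the planning stages below; its `qi` is swapped out (e.g. `ctx{qi=fromTable}` in addFilters/addLogicTrees) as we recurse into embedded resources.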
+ ctx = ResolverContext dbTables dbRepresentations qi "json" + in + mapLeft ApiRequestError $ + treeRestrictRange configDbMaxRows (iAction apiRequest) =<< + addNullEmbedFilters =<< + validateSpreadEmbeds =<< + addRelatedOrders =<< + addDataRepresentationAliases =<< + expandStarsForDataRepresentations ctx =<< + addRels qiSchema (iAction apiRequest) dbRelationships Nothing =<< + addLogicTrees ctx apiRequest =<< + addRanges apiRequest =<< + addOrders apiRequest =<< + addFilters ctx apiRequest (initReadRequest ctx $ QueryParams.qsSelect $ iQueryParams apiRequest) -- Build the initial read plan tree -initReadRequest :: QualifiedIdentifier -> [Tree SelectItem] -> ReadPlanTree -initReadRequest qi@QualifiedIdentifier{..} = - foldr (treeEntry rootDepth) $ Node defReadPlan{from=qi, relName=qiName, depth=rootDepth} [] +initReadRequest :: ResolverContext -> [Tree SelectItem] -> ReadPlanTree +initReadRequest ctx@ResolverContext{qi=QualifiedIdentifier{..}} = + foldr (treeEntry rootDepth) $ Node defReadPlan{from=qi ctx, relName=qiName, depth=rootDepth} [] where rootDepth = 0 defReadPlan = ReadPlan [] (QualifiedIdentifier mempty mempty) Nothing [] [] allRange mempty Nothing [] Nothing mempty Nothing Nothing False rootDepth @@ -128,7 +200,49 @@ initReadRequest qi@QualifiedIdentifier{..} = (Node defReadPlan{from=QualifiedIdentifier qiSchema selRelation, relName=selRelation, relHint=selHint, relJoinType=selJoinType, depth=nxtDepth, relIsSpread=True} []) fldForest:rForest SelectField{..} -> - Node q{select=(selField, selCast, selAlias):select q} rForest + Node q{select=(resolveOutputField ctx{qi=from q} selField, selCast, selAlias):select q} rForest + +-- | Preserve the original field name if data representation is used to coerce the value. +addDataRepresentationAliases :: ReadPlanTree -> Either ApiRequestError ReadPlanTree +addDataRepresentationAliases rPlanTree = Right $ fmap (\rPlan@ReadPlan{select=sel} -> rPlan{select=map aliasSelectItem sel}) rPlanTree + where + aliasSelectItem :: (CoercibleField, Maybe Cast, Maybe Alias) -> (CoercibleField, Maybe Cast, Maybe Alias) + -- If there already is an alias, don't overwrite it. + aliasSelectItem (fld@(CoercibleField{tfName=fieldName, tfTransform=(Just _)}), Nothing, Nothing) = (fld, Nothing, Just fieldName) + aliasSelectItem fld = fld + +knownColumnsInContext :: ResolverContext -> [Column] +knownColumnsInContext ResolverContext{..} = + fromMaybe [] $ HM.lookup qi tables >>= + Just . tableColumnsList + +-- | Expand "select *" into explicit field names of the table, if necessary to apply data representations. +expandStarsForDataRepresentations :: ResolverContext -> ReadPlanTree -> Either ApiRequestError ReadPlanTree +expandStarsForDataRepresentations ctx@ResolverContext{qi} rPlanTree = Right $ fmap expandStars rPlanTree + where + expandStars :: ReadPlan -> ReadPlan + -- When the schema is "" and the table is the source CTE, we assume the true source table is given in the from + -- alias and belongs to the request schema. See the bit in `addRels` with `newFrom = ...`. 
+ expandStars rPlan@ReadPlan{from=(QualifiedIdentifier "" "pgrst_source"), fromAlias=(Just tblAlias)} = + expandStarsForTable ctx{qi=qi{qiName=tblAlias}} rPlan + expandStars rPlan@ReadPlan{from=fromTable} = + expandStarsForTable ctx{qi=fromTable} rPlan + +expandStarsForTable :: ResolverContext -> ReadPlan -> ReadPlan +expandStarsForTable ctx@ResolverContext{representations, outputType} rplan@ReadPlan{select=selectItems} = + -- If we have a '*' select AND the target table has at least one data representation, expand. + if ("*" `elem` map (\(field, _, _) -> tfName field) selectItems) && any hasOutputRep knownColumns + then rplan{select=concatMap (expandStarSelectItem knownColumns) selectItems} + else rplan + where + knownColumns = knownColumnsInContext ctx + + hasOutputRep :: Column -> Bool + hasOutputRep col = HM.member (colNominalType col, outputType) representations + + expandStarSelectItem :: [Column] -> (CoercibleField, Maybe Cast, Maybe Alias) -> [(CoercibleField, Maybe Cast, Maybe Alias)] + expandStarSelectItem columns (CoercibleField{tfName="*", tfJsonPath=[]}, b, c) = map (\col -> (withOutputFormat ctx $ resolveColumnField col, b, c)) columns + expandStarSelectItem _ selectItem = [selectItem] -- | Enforces the `max-rows` config on the result treeRestrictRange :: Maybe Integer -> Action -> ReadPlanTree -> Either ApiRequestError ReadPlanTree @@ -283,8 +397,8 @@ findRel schema allRels origin target hint = ) ) $ fromMaybe mempty $ HM.lookup (QualifiedIdentifier schema origin, schema) allRels -addFilters :: ApiRequest -> ReadPlanTree -> Either ApiRequestError ReadPlanTree -addFilters ApiRequest{..} rReq = +addFilters :: ResolverContext -> ApiRequest -> ReadPlanTree -> Either ApiRequestError ReadPlanTree +addFilters ctx ApiRequest{..} rReq = foldr addFilterToNode (Right rReq) flts where QueryParams.QueryParams{..} = iQueryParams @@ -296,7 +410,7 @@ addFilters ApiRequest{..} rReq = addFilterToNode :: (EmbedPath, Filter) -> Either ApiRequestError ReadPlanTree -> Either ApiRequestError ReadPlanTree addFilterToNode = - updateNode (\flt (Node q@ReadPlan{where_=lf} f) -> Node q{ReadPlan.where_=addFilterToLogicForest flt lf} f) + updateNode (\flt (Node q@ReadPlan{from=fromTable, where_=lf} f) -> Node q{ReadPlan.where_=addFilterToLogicForest (resolveFilter ctx{qi=fromTable} flt) lf} f) addOrders :: ApiRequest -> ReadPlanTree -> Either ApiRequestError ReadPlanTree addOrders ApiRequest{..} rReq = @@ -340,15 +454,15 @@ addNullEmbedFilters (Node rp@ReadPlan{where_=oldLogic} forest) = do newLogic <- getFilters readPlans `traverse` oldLogic Node rp{ReadPlan.where_= newLogic} <$> (addNullEmbedFilters `traverse` forest) where - getFilters :: [ReadPlan] -> LogicTree -> Either ApiRequestError LogicTree - getFilters rPlans (Expr b lOp trees) = Expr b lOp <$> (getFilters rPlans `traverse` trees) - getFilters rPlans flt@(Stmnt (Filter (fld, []) opExpr)) = + getFilters :: [ReadPlan] -> TypedLogicTree -> Either ApiRequestError TypedLogicTree + getFilters rPlans (TypedExpr b lOp trees) = TypedExpr b lOp <$> (getFilters rPlans `traverse` trees) + getFilters rPlans flt@(TypedStmnt (TypedFilter (CoercibleField fld [] _ _) opExpr)) = let foundRP = find (\ReadPlan{relName, relAlias} -> fld == fromMaybe relName relAlias) rPlans in case (foundRP, opExpr) of - (Just ReadPlan{relAggAlias}, OpExpr b (Is TriNull)) -> Right $ Stmnt $ FilterNullEmbed b relAggAlias + (Just ReadPlan{relAggAlias}, OpExpr b (Is TriNull)) -> Right $ TypedStmnt $ TypedFilterNullEmbed b relAggAlias (Just ReadPlan{relName}, _) -> Left $ 
UnacceptableFilter relName _ -> Right flt - getFilters _ flt@(Stmnt _) = Right flt + getFilters _ flt@(TypedStmnt _) = Right flt addRanges :: ApiRequest -> ReadPlanTree -> Either ApiRequestError ReadPlanTree addRanges ApiRequest{..} rReq = @@ -362,14 +476,22 @@ addRanges ApiRequest{..} rReq = addRangeToNode :: (EmbedPath, NonnegRange) -> Either ApiRequestError ReadPlanTree -> Either ApiRequestError ReadPlanTree addRangeToNode = updateNode (\r (Node q f) -> Node q{range_=r} f) -addLogicTrees :: ApiRequest -> ReadPlanTree -> Either ApiRequestError ReadPlanTree -addLogicTrees ApiRequest{..} rReq = +addLogicTrees :: ResolverContext -> ApiRequest -> ReadPlanTree -> Either ApiRequestError ReadPlanTree +addLogicTrees ctx ApiRequest{..} rReq = foldr addLogicTreeToNode (Right rReq) qsLogic where QueryParams.QueryParams{..} = iQueryParams addLogicTreeToNode :: (EmbedPath, LogicTree) -> Either ApiRequestError ReadPlanTree -> Either ApiRequestError ReadPlanTree - addLogicTreeToNode = updateNode (\t (Node q@ReadPlan{where_=lf} f) -> Node q{ReadPlan.where_=t:lf} f) + addLogicTreeToNode = updateNode (\t (Node q@ReadPlan{from=fromTable, where_=lf} f) -> Node q{ReadPlan.where_=resolveLogicTree ctx{qi=fromTable} t:lf} f) + +resolveLogicTree :: ResolverContext -> LogicTree -> TypedLogicTree +resolveLogicTree ctx (Stmnt flt) = TypedStmnt $ resolveFilter ctx flt +resolveLogicTree ctx (Expr b op lts) = TypedExpr b op (map (resolveLogicTree ctx) lts) + +resolveFilter :: ResolverContext -> Filter -> TypedFilter +resolveFilter ctx (Filter fld opExpr) = TypedFilter{typedField=resolveQueryInputField ctx fld, typedOpExpr=opExpr} +resolveFilter _ (FilterNullEmbed isNot fieldName) = TypedFilterNullEmbed isNot fieldName -- Validates that spread embeds are only done on to-one relationships validateSpreadEmbeds :: ReadPlanTree -> Either ApiRequestError ReadPlanTree @@ -395,7 +517,7 @@ updateNode f (targetNodeName:remainingPath, a) (Right (Node rootNode forest)) = findNode = find (\(Node ReadPlan{relName, relAlias} _) -> relName == targetNodeName || relAlias == Just targetNodeName) forest mutatePlan :: Mutation -> QualifiedIdentifier -> ApiRequest -> SchemaCache -> ReadPlanTree -> Either Error MutatePlan -mutatePlan mutation qi ApiRequest{..} sCache readReq = mapLeft ApiRequestError $ +mutatePlan mutation qi ApiRequest{..} SchemaCache{dbTables, dbRepresentations} readReq = mapLeft ApiRequestError $ case mutation of MutationCreate -> mapRight (\typedColumns -> Insert qi typedColumns body ((,) <$> iPreferResolution <*> Just confCols) [] returnings pkCols) typedColumnsOrError @@ -413,26 +535,27 @@ mutatePlan mutation qi ApiRequest{..} sCache readReq = mapLeft ApiRequestError $ Left InvalidFilters MutationDelete -> Right $ Delete qi combinedLogic iTopLevelRange rootOrder returnings where + ctx = ResolverContext dbTables dbRepresentations qi "json" confCols = fromMaybe pkCols qsOnConflict QueryParams.QueryParams{..} = iQueryParams returnings = if iPreferRepresentation == None then [] else inferColsEmbedNeeds readReq pkCols - pkCols = maybe mempty tablePKCols $ HM.lookup qi $ dbTables sCache - logic = map snd qsLogic + tbl = HM.lookup qi dbTables + pkCols = maybe mempty tablePKCols tbl + logic = map (resolveLogicTree ctx . snd) qsLogic rootOrder = maybe [] snd $ find (\(x, _) -> null x) qsOrder - combinedLogic = foldr addFilterToLogicForest logic qsFiltersRoot + combinedLogic = foldr (addFilterToLogicForest . 
resolveFilter ctx) logic qsFiltersRoot body = payRaw <$> iPayload -- the body is assumed to be json at this stage(ApiRequest validates) - tbl = HM.lookup qi $ dbTables sCache - typedColumnsOrError = resolveOrError tbl `traverse` S.toList iColumns + typedColumnsOrError = resolveOrError ctx tbl `traverse` S.toList iColumns -resolveOrError :: Maybe Table -> FieldName -> Either ApiRequestError TypedField -resolveOrError Nothing _ = Left NotFound -resolveOrError (Just table) field = - case resolveTableField table field of - Nothing -> Left $ ColumnNotFound (tableName table) field - Just typedField -> Right typedField +resolveOrError :: ResolverContext -> Maybe Table -> FieldName -> Either ApiRequestError CoercibleField +resolveOrError _ Nothing _ = Left NotFound +resolveOrError ctx (Just table) field = + case resolveTableFieldName table field of + CoercibleField{tfIRType=""} -> Left $ ColumnNotFound (tableName table) field + cf -> Right $ withJsonParse ctx cf callPlan :: ProcDescription -> ApiRequest -> ReadPlanTree -> CallPlan callPlan proc apiReq readReq = FunctionCall { @@ -460,7 +583,7 @@ inferColsEmbedNeeds (Node ReadPlan{select} forest) pkCols | "*" `elem` fldNames = ["*"] | otherwise = returnings where - fldNames = (\((fld, _), _, _) -> fld) <$> select + fldNames = tfName . (\(f, _, _) -> f) <$> select -- Without fkCols, when a mutatePlan to -- /projects?select=name,clients(name) occurs, the RETURNING SQL part would -- be `RETURNING name`(see QueryBuilder). This would make the embedding @@ -499,5 +622,5 @@ inferColsEmbedNeeds (Node ReadPlan{select} forest) pkCols -- Traditional filters(e.g. id=eq.1) are added as root nodes of the LogicTree -- they are later concatenated with AND in the QueryBuilder -addFilterToLogicForest :: Filter -> [LogicTree] -> [LogicTree] -addFilterToLogicForest flt lf = Stmnt flt : lf +addFilterToLogicForest :: TypedFilter -> [TypedLogicTree] -> [TypedLogicTree] +addFilterToLogicForest flt lf = TypedStmnt flt : lf diff --git a/src/PostgREST/Plan/MutatePlan.hs b/src/PostgREST/Plan/MutatePlan.hs index 0c9eaa0eb17..9faf259bbee 100644 --- a/src/PostgREST/Plan/MutatePlan.hs +++ b/src/PostgREST/Plan/MutatePlan.hs @@ -6,8 +6,9 @@ where import qualified Data.ByteString.Lazy as LBS import PostgREST.ApiRequest.Preferences (PreferResolution) -import PostgREST.ApiRequest.Types (LogicTree, OrderTerm) -import PostgREST.Plan.Types (TypedField) +import PostgREST.ApiRequest.Types (OrderTerm) +import PostgREST.Plan.Types (CoercibleField, + TypedLogicTree) import PostgREST.RangeQuery (NonnegRange) import PostgREST.SchemaCache.Identifiers (FieldName, QualifiedIdentifier) @@ -18,25 +19,25 @@ import Protolude data MutatePlan = Insert { in_ :: QualifiedIdentifier - , insCols :: [TypedField] + , insCols :: [CoercibleField] , insBody :: Maybe LBS.ByteString , onConflict :: Maybe (PreferResolution, [FieldName]) - , where_ :: [LogicTree] + , where_ :: [TypedLogicTree] , returning :: [FieldName] , insPkCols :: [FieldName] } | Update { in_ :: QualifiedIdentifier - , updCols :: [TypedField] + , updCols :: [CoercibleField] , updBody :: Maybe LBS.ByteString - , where_ :: [LogicTree] + , where_ :: [TypedLogicTree] , mutRange :: NonnegRange , mutOrder :: [OrderTerm] , returning :: [FieldName] } | Delete { in_ :: QualifiedIdentifier - , where_ :: [LogicTree] + , where_ :: [TypedLogicTree] , mutRange :: NonnegRange , mutOrder :: [OrderTerm] , returning :: [FieldName] diff --git a/src/PostgREST/Plan/ReadPlan.hs b/src/PostgREST/Plan/ReadPlan.hs index 94e181ca2b9..474a3578edd 100644 --- 
a/src/PostgREST/Plan/ReadPlan.hs
+++ b/src/PostgREST/Plan/ReadPlan.hs
@@ -6,9 +6,11 @@ module PostgREST.Plan.ReadPlan

 import Data.Tree (Tree (..))

-import PostgREST.ApiRequest.Types        (Alias, Cast, Depth, Field,
-                                          Hint, JoinType, LogicTree,
-                                          NodeName, OrderTerm)
+import PostgREST.ApiRequest.Types        (Alias, Cast, Depth, Hint,
+                                          JoinType, NodeName,
+                                          OrderTerm)
+import PostgREST.Plan.Types              (CoercibleField (..),
+                                          TypedLogicTree)
 import PostgREST.RangeQuery              (NonnegRange)
 import PostgREST.SchemaCache.Identifiers (FieldName,
                                           QualifiedIdentifier)
@@ -26,10 +28,10 @@ data JoinCondition =
   deriving (Eq)

 data ReadPlan = ReadPlan
-  { select       :: [(Field, Maybe Cast, Maybe Alias)]
+  { select       :: [(CoercibleField, Maybe Cast, Maybe Alias)]
   , from         :: QualifiedIdentifier
   , fromAlias    :: Maybe Alias
-  , where_       :: [LogicTree]
+  , where_       :: [TypedLogicTree]
   , order        :: [OrderTerm]
   , range_       :: NonnegRange
   , relName      :: NodeName
diff --git a/src/PostgREST/Plan/Types.hs b/src/PostgREST/Plan/Types.hs
index 8e4a41f1567..3b472bf9561 100644
--- a/src/PostgREST/Plan/Types.hs
+++ b/src/PostgREST/Plan/Types.hs
@@ -1,24 +1,49 @@
 module PostgREST.Plan.Types
-  ( TypedField(..)
-  , resolveTableField
-
+  ( CoercibleField(..)
+  , unknownField
+  , TypedLogicTree(..)
+  , TypedFilter(..)
+  , TransformerProc
   ) where

-import qualified Data.HashMap.Strict.InsOrd as HMI
+import PostgREST.ApiRequest.Types        (JsonPath, LogicOperator, OpExpr)

 import PostgREST.SchemaCache.Identifiers (FieldName)
-import PostgREST.SchemaCache.Table       (Column (..), Table (..))

 import Protolude

--- | A TypedField is a field with sufficient information to be read from JSON with `json_to_recordset`.
-data TypedField = TypedField
-  { tfName   :: FieldName
-  , tfIRType :: Text -- ^ The initial type of the field, before any casting.
-  } deriving (Eq)
-
-resolveTableField :: Table -> FieldName -> Maybe TypedField
-resolveTableField table fieldName =
-  case HMI.lookup fieldName (tableColumns table) of
-    Just column -> Just $ TypedField (colName column) (colNominalType column)
-    Nothing     -> Nothing
+type TransformerProc = Text
+
+-- | A CoercibleField pairs the name of a query element with any type coercion information we need for some specific use case.
+-- |
+-- | As suggested by the name, it's often a reference to a field in a table but really it can be any nameable element (function parameter, calculation with an alias, etc.) with a knowable type.
+-- |
+-- | In the simplest case, it allows us to parse JSON payloads with `json_to_recordset`, for which we need to know both the name and the type of each thing we'd like to extract. At a higher level, CoercibleField generalises to reflect that any value we work with in a query may need type-specific handling.
+-- |
+-- | CoercibleField is the foundation for the Data Representations feature. This feature allows user-definable mappings between database types so that the same data can be presented or interpreted in various ways as needed. Sometimes the way Postgres coerces data implicitly isn't right for the job. Different mappings might be appropriate for different situations: parsing a filter from a query string requires one function (text -> field type) while parsing a payload from JSON takes another (json -> field type). And the reverse, outputting a field as JSON, requires yet a third (field type -> json). CoercibleField is that "job specific" reference to an element paired with the type we desire for that particular purpose and the function we'll use to get there, if any.
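+-- |
+-- | A worked example (all names hypothetical): suppose `label_color` is a column of domain type `color` and the schema defines an implicit cast from `text` to `color` backed by a function `color_from_text`. Resolving a query string filter on that column then yields roughly `CoercibleField "label_color" [] "text" (Just "color_from_text")`: the IR type records the *input* type, and the transform is the parser that takes us to the column's native type.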
+-- | +-- | In the planning phase, we "resolve" generic named elements into these specialised CoercibleFields. Again this is context specific: two different CoercibleFields both representing the exact same table column in the database, even in the same query, might have two different target types and mapping functions. For example, one might represent a column in a filter, and another the very same column in an output role to be sent in the response body. +-- | +-- | The type value is allowed to be the empty string. The analog here is soft type checking in programming languages: sometimes we don't need a variable to have a specified type and things will work anyhow. So the empty type variant is valid when we don't know and *don't need to know* about the specific type in some context. Note that this variation should not be used if it guarantees failure: in that case you should instead raise an error at the planning stage and bail out. For example, we can't parse JSON with `json_to_recordset` without knowing the types of each recipient field, and so error out. Using the empty string for the type would be incorrect and futile. On the other hand we use the empty type for RPC calls since type resolution isn't implemented for RPC, but it's fine because the query still works with Postgres' implicit coercion. In the future, hopefully we will support data representations across the board and then the empty type may be permanently retired. +data CoercibleField = CoercibleField + { tfName :: FieldName + , tfJsonPath :: JsonPath + , tfIRType :: Text -- ^ The native Postgres type of the field, the type before mapping. + , tfTransform :: Maybe TransformerProc -- ^ The optional mapping from irType -> targetType. + } deriving (Eq) + +unknownField :: FieldName -> JsonPath -> CoercibleField +unknownField name path = CoercibleField name path "" Nothing + +-- | Like a regular LogicTree but with field type information. 
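+-- | Produced from the untyped `LogicTree` by `resolveLogicTree` during planning, which resolves each filter's field into a CoercibleField.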
+data TypedLogicTree + = TypedExpr Bool LogicOperator [TypedLogicTree] + | TypedStmnt TypedFilter + deriving (Eq) + +data TypedFilter = TypedFilter + { typedField :: CoercibleField + , typedOpExpr :: OpExpr + } + | TypedFilterNullEmbed Bool FieldName + deriving (Eq) diff --git a/src/PostgREST/Query/QueryBuilder.hs b/src/PostgREST/Query/QueryBuilder.hs index c23f2061d9a..29d93d6cd4f 100644 --- a/src/PostgREST/Query/QueryBuilder.hs +++ b/src/PostgREST/Query/QueryBuilder.hs @@ -53,7 +53,7 @@ readPlanToQuery (Node ReadPlan{select,from=mainQi,fromAlias,where_=logicForest,o where fromFrag = fromF relToParent mainQi fromAlias qi = getQualifiedIdentifier relToParent mainQi fromAlias - defSelect = [(("*", []), Nothing, Nothing)] -- gets all the columns in case of an empty select, ignoring/obtaining these columns is done at the aggregation stage + defSelect = [(unknownField "*" [], Nothing, Nothing)] -- gets all the columns in case of an empty select, ignoring/obtaining these columns is done at the aggregation stage (selects, joins) = foldr getSelectsJoins ([],[]) forest getSelectsJoins :: ReadPlanTree -> ([SQL.Snippet], [SQL.Snippet]) -> ([SQL.Snippet], [SQL.Snippet]) diff --git a/src/PostgREST/Query/SqlFragment.hs b/src/PostgREST/Query/SqlFragment.hs index 4548911a5ad..b772e628de9 100644 --- a/src/PostgREST/Query/SqlFragment.hs +++ b/src/PostgREST/Query/SqlFragment.hs @@ -58,15 +58,13 @@ import Control.Arrow ((***)) import Data.Foldable (foldr1) import Text.InterpolatedString.Perl6 (qc) -import PostgREST.ApiRequest.Types (Alias, Cast, Field, - Filter (..), +import PostgREST.ApiRequest.Types (Alias, Cast, FtsOperator (..), JsonOperand (..), JsonOperation (..), JsonPath, LogicOperator (..), - LogicTree (..), OpExpr (..), - Operation (..), + OpExpr (..), Operation (..), OrderDirection (..), OrderNulls (..), OrderTerm (..), @@ -75,7 +73,10 @@ import PostgREST.ApiRequest.Types (Alias, Cast, Field, import PostgREST.MediaType (MTPlanFormat (..), MTPlanOption (..)) import PostgREST.Plan.ReadPlan (JoinCondition (..)) -import PostgREST.Plan.Types (TypedField (..)) +import PostgREST.Plan.Types (CoercibleField (..), + TypedFilter (..), + TypedLogicTree (..), + unknownField) import PostgREST.RangeQuery (NonnegRange, allRange, rangeLimit, rangeOffset) import PostgREST.SchemaCache.Identifiers (FieldName, @@ -227,24 +228,37 @@ fromQi t = (if T.null s then mempty else pgFmtIdent s <> ".") <> pgFmtIdent n n = qiName t s = qiSchema t +pgFmtCallUnary :: Text -> SQL.Snippet -> SQL.Snippet +pgFmtCallUnary f x = SQL.sql (encodeUtf8 f) <> "(" <> x <> ")" + pgFmtColumn :: QualifiedIdentifier -> Text -> SqlFragment pgFmtColumn table "*" = fromQi table <> ".*" pgFmtColumn table c = fromQi table <> "." <> pgFmtIdent c -pgFmtField :: QualifiedIdentifier -> Field -> SQL.Snippet -pgFmtField table (c, []) = SQL.sql (pgFmtColumn table c) +pgFmtField :: QualifiedIdentifier -> CoercibleField -> SQL.Snippet +pgFmtField table CoercibleField{tfName=fn, tfJsonPath=[]} = SQL.sql (pgFmtColumn table fn) -- Using to_jsonb instead of to_json to avoid missing operator errors when filtering: -- "operator does not exist: json = unknown" -pgFmtField table (c, jp) = SQL.sql ("to_jsonb(" <> pgFmtColumn table c <> ")") <> pgFmtJsonPath jp +pgFmtField table CoercibleField{tfName=fn, tfJsonPath=jp} = SQL.sql ("to_jsonb(" <> pgFmtColumn table fn <> ")") <> pgFmtJsonPath jp + +-- Select the value of a named element from a table, applying its optional coercion mapping if any. 
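+-- For example, a field whose planner-installed transform is a (hypothetical) `color_to_json` function renders as `color_to_json("tbl"."label_color")` rather than the bare column reference.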
+pgFmtTableCoerce :: QualifiedIdentifier -> CoercibleField -> SQL.Snippet +pgFmtTableCoerce table fld@(CoercibleField{tfTransform=(Just formatterProc)}) = pgFmtCallUnary formatterProc (pgFmtField table fld) +pgFmtTableCoerce table f = pgFmtField table f -pgFmtSelectItem :: QualifiedIdentifier -> (Field, Maybe Cast, Maybe Alias) -> SQL.Snippet -pgFmtSelectItem table (f@(fName, jp), Nothing, alias) = pgFmtField table f <> SQL.sql (pgFmtAs fName jp alias) +-- | Like the previous but now we just have a name so no namespace or JSON paths. +pgFmtCoerceNamed :: CoercibleField -> SQL.Snippet +pgFmtCoerceNamed CoercibleField{tfName=fn, tfTransform=(Just formatterProc)} = pgFmtCallUnary formatterProc (SQL.sql (pgFmtIdent fn)) <> " AS " <> SQL.sql (pgFmtIdent fn) +pgFmtCoerceNamed CoercibleField{tfName=fn} = SQL.sql (pgFmtIdent fn) + +pgFmtSelectItem :: QualifiedIdentifier -> (CoercibleField, Maybe Cast, Maybe Alias) -> SQL.Snippet +pgFmtSelectItem table (fld, Nothing, alias) = pgFmtTableCoerce table fld <> SQL.sql (pgFmtAs (tfName fld) (tfJsonPath fld) alias) -- Ideally we'd quote the cast with "pgFmtIdent cast". However, that would invalidate common casts such as "int", "bigint", etc. -- Try doing: `select 1::"bigint"` - it'll err, using "int8" will work though. There's some parser magic that pg does that's invalidated when quoting. -- Not quoting should be fine, we validate the input on Parsers. -pgFmtSelectItem table (f@(fName, jp), Just cast, alias) = "CAST (" <> pgFmtField table f <> " AS " <> SQL.sql (encodeUtf8 cast) <> " )" <> SQL.sql (pgFmtAs fName jp alias) +pgFmtSelectItem table (fld, Just cast, alias) = "CAST (" <> pgFmtTableCoerce table fld <> " AS " <> SQL.sql (encodeUtf8 cast) <> " )" <> SQL.sql (pgFmtAs (tfName fld) (tfJsonPath fld) alias) -pgFmtSelectFromJson :: [TypedField] -> SQL.Snippet +pgFmtSelectFromJson :: [CoercibleField] -> SQL.Snippet pgFmtSelectFromJson fields = SQL.sql "SELECT " <> parsedCols <> " " <> (if null fields @@ -255,7 +269,7 @@ pgFmtSelectFromJson fields = else SQL.sql ("FROM json_to_recordset (" <> selectBody <> ") AS _ " <> "(" <> typedCols <> ") ") ) where - parsedCols = SQL.sql $ BS.intercalate ", " $ pgFmtIdent . tfName <$> fields + parsedCols = intercalateSnippet ", " $ pgFmtCoerceNamed <$> fields typedCols = BS.intercalate ", " $ pgFmtIdent . tfName <> const " " <> encodeUtf8 . tfIRType <$> fields pgFmtOrderTerm :: QualifiedIdentifier -> OrderTerm -> SQL.Snippet @@ -266,8 +280,8 @@ pgFmtOrderTerm qi ot = maybe mempty nullOrder $ otNullOrder ot]) where fmtOTerm = \case - OrderTerm{otTerm} -> pgFmtField qi otTerm - OrderRelationTerm{otRelation, otRelTerm} -> pgFmtField (QualifiedIdentifier mempty otRelation) otRelTerm + OrderTerm{otTerm=(fn, jp)} -> pgFmtField qi (unknownField fn jp) + OrderRelationTerm{otRelation, otRelTerm=(fn, jp)} -> pgFmtField (QualifiedIdentifier mempty otRelation) (unknownField fn jp) direction OrderAsc = "ASC" direction OrderDesc = "DESC" @@ -275,15 +289,26 @@ pgFmtOrderTerm qi ot = nullOrder OrderNullsFirst = "NULLS FIRST" nullOrder OrderNullsLast = "NULLS LAST" - -pgFmtFilter :: QualifiedIdentifier -> Filter -> SQL.Snippet -pgFmtFilter _ (FilterNullEmbed hasNot fld) = SQL.sql (pgFmtIdent fld) <> " IS " <> (if hasNot then "NOT" else mempty) <> " NULL" -pgFmtFilter _ (Filter _ (NoOpExpr _)) = mempty -- TODO unreachable because NoOpExpr is filtered on QueryParams -pgFmtFilter table (Filter fld (OpExpr hasNot oper)) = notOp <> " " <> case oper of +-- | Interpret a literal in the way the planner indicated through the CoercibleField. 
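+-- | For example, `?label_color=eq.000100` whose planner-resolved field carries a (hypothetical) `color_from_text` parser transform renders as `color_from_text('000100')` instead of the bare unknown literal.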
+pgFmtUnknownLiteralForField :: SQL.Snippet -> CoercibleField -> SQL.Snippet +pgFmtUnknownLiteralForField value CoercibleField{tfTransform=(Just parserProc)} = pgFmtCallUnary parserProc value +-- But when no transform is requested, we just use the literal as-is. +pgFmtUnknownLiteralForField value _ = value + +-- | Array version of the above, used by ANY(). +pgFmtArrayLiteralForField :: [Text] -> CoercibleField -> SQL.Snippet +pgFmtArrayLiteralForField values CoercibleField{tfTransform=(Just parserProc)} = SQL.sql "ARRAY[" <> intercalateSnippet ", " (pgFmtCallUnary parserProc . unknownLiteral <$> values) <> "]" +-- When no transformation is requested, use an array literal which should be simpler, maybe faster. +pgFmtArrayLiteralForField values _ = unknownLiteral (pgBuildArrayLiteral values) + +pgFmtFilter :: QualifiedIdentifier -> TypedFilter -> SQL.Snippet +pgFmtFilter _ (TypedFilterNullEmbed hasNot fld) = SQL.sql (pgFmtIdent fld) <> " IS " <> (if hasNot then "NOT" else mempty) <> " NULL" +pgFmtFilter _ (TypedFilter _ (NoOpExpr _)) = mempty -- TODO unreachable because NoOpExpr is filtered on QueryParams +pgFmtFilter table (TypedFilter fld (OpExpr hasNot oper)) = notOp <> " " <> case oper of Op op val -> pgFmtFieldOp op <> " " <> case op of OpLike -> unknownLiteral (T.map star val) OpILike -> unknownLiteral (T.map star val) - _ -> unknownLiteral val + _ -> pgFmtUnknownLiteralForField (unknownLiteral val) fld -- IS cannot be prepared. `PREPARE boolplan AS SELECT * FROM projects where id IS $1` will give a syntax error. -- The above can be fixed by using `PREPARE boolplan AS SELECT * FROM projects where id IS NOT DISTINCT FROM $1;` @@ -300,7 +325,7 @@ pgFmtFilter table (Filter fld (OpExpr hasNot oper)) = notOp <> " " <> case oper -- + Can invalidate prepared statements: multiple parameters on an IN($1, $2, $3) will lead to using different prepared statements and not take advantage of caching. 
In vals -> pgFmtField table fld <> " " <> case vals of [""] -> "= ANY('{}') " - _ -> "= ANY (" <> unknownLiteral (pgBuildArrayLiteral vals) <> ") " + _ -> "= ANY (" <> pgFmtArrayLiteralForField vals fld <> ") " Fts op lang val -> pgFmtFieldFts op <> "(" <> ftsLang lang <> unknownLiteral val <> ") " @@ -315,14 +340,14 @@ pgFmtJoinCondition :: JoinCondition -> SQL.Snippet pgFmtJoinCondition (JoinCondition (qi1, col1) (qi2, col2)) = SQL.sql $ pgFmtColumn qi1 col1 <> " = " <> pgFmtColumn qi2 col2 -pgFmtLogicTree :: QualifiedIdentifier -> LogicTree -> SQL.Snippet -pgFmtLogicTree qi (Expr hasNot op forest) = SQL.sql notOp <> " (" <> intercalateSnippet (opSql op) (pgFmtLogicTree qi <$> forest) <> ")" +pgFmtLogicTree :: QualifiedIdentifier -> TypedLogicTree -> SQL.Snippet +pgFmtLogicTree qi (TypedExpr hasNot op forest) = SQL.sql notOp <> " (" <> intercalateSnippet (opSql op) (pgFmtLogicTree qi <$> forest) <> ")" where notOp = if hasNot then "NOT" else mempty opSql And = " AND " opSql Or = " OR " -pgFmtLogicTree qi (Stmnt flt) = pgFmtFilter qi flt +pgFmtLogicTree qi (TypedStmnt flt) = pgFmtFilter qi flt pgFmtJsonPath :: JsonPath -> SQL.Snippet pgFmtJsonPath = \case diff --git a/src/PostgREST/SchemaCache.hs b/src/PostgREST/SchemaCache.hs index 14cd9428edb..41db41970cb 100644 --- a/src/PostgREST/SchemaCache.hs +++ b/src/PostgREST/SchemaCache.hs @@ -38,31 +38,34 @@ import qualified Hasql.Transaction as SQL import Contravariant.Extras (contrazip2) import Text.InterpolatedString.Perl6 (q) -import PostgREST.Config.Database (pgVersionStatement) -import PostgREST.Config.PgVersion (PgVersion, pgVersion100, - pgVersion110) -import PostgREST.SchemaCache.Identifiers (AccessSet, FieldName, - QualifiedIdentifier (..), - Schema) -import PostgREST.SchemaCache.Proc (PgType (..), - ProcDescription (..), - ProcParam (..), - ProcVolatility (..), - ProcsMap, RetType (..)) -import PostgREST.SchemaCache.Relationship (Cardinality (..), - Junction (..), - Relationship (..), - RelationshipsMap) -import PostgREST.SchemaCache.Table (Column (..), ColumnMap, - Table (..), TablesMap) +import PostgREST.Config.Database (pgVersionStatement) +import PostgREST.Config.PgVersion (PgVersion, pgVersion100, + pgVersion110) +import PostgREST.SchemaCache.Identifiers (AccessSet, FieldName, + QualifiedIdentifier (..), + Schema) +import PostgREST.SchemaCache.Proc (PgType (..), + ProcDescription (..), + ProcParam (..), + ProcVolatility (..), + ProcsMap, RetType (..)) +import PostgREST.SchemaCache.Relationship (Cardinality (..), + Junction (..), + Relationship (..), + RelationshipsMap) +import PostgREST.SchemaCache.Representations (DataRepresentation (..), + RepresentationsMap) +import PostgREST.SchemaCache.Table (Column (..), ColumnMap, + Table (..), TablesMap) import Protolude data SchemaCache = SchemaCache - { dbTables :: TablesMap - , dbRelationships :: RelationshipsMap - , dbProcs :: ProcsMap + { dbTables :: TablesMap + , dbRelationships :: RelationshipsMap + , dbProcs :: ProcsMap + , dbRepresentations :: RepresentationsMap } deriving (Generic, JSON.ToJSON) @@ -113,6 +116,7 @@ querySchemaCache schemas extraSearchPath prepared = do m2oRels <- SQL.statement mempty $ allM2OandO2ORels pgVer prepared procs <- SQL.statement schemas $ allProcs pgVer prepared cRels <- SQL.statement mempty $ allComputedRels prepared + reps <- SQL.statement schemas $ dataRepresentations prepared let tabsWViewsPks = addViewPrimaryKeys tabs keyDeps rels = addInverseRels $ addM2MRels tabsWViewsPks $ addViewM2OAndO2ORels keyDeps m2oRels @@ -121,6 +125,7 @@ 
querySchemaCache schemas extraSearchPath prepared = do dbTables = tabsWViewsPks , dbRelationships = getOverrideRelationshipsMap rels cRels , dbProcs = procs + , dbRepresentations = reps } -- | overrides detected relationships with the computed relationships and gets the RelationshipsMap @@ -147,10 +152,11 @@ getOverrideRelationshipsMap rels cRels = removeInternal :: [Schema] -> SchemaCache -> SchemaCache removeInternal schemas dbStruct = SchemaCache { - dbTables = HM.filterWithKey (\(QualifiedIdentifier sch _) _ -> sch `elem` schemas) $ dbTables dbStruct - , dbRelationships = filter (\r -> qiSchema (relForeignTable r) `elem` schemas && not (hasInternalJunction r)) <$> + dbTables = HM.filterWithKey (\(QualifiedIdentifier sch _) _ -> sch `elem` schemas) $ dbTables dbStruct + , dbRelationships = filter (\r -> qiSchema (relForeignTable r) `elem` schemas && not (hasInternalJunction r)) <$> HM.filterWithKey (\(QualifiedIdentifier sch _, _) _ -> sch `elem` schemas ) (dbRelationships dbStruct) - , dbProcs = dbProcs dbStruct -- procs are only obtained from the exposed schemas, no need to filter them. + , dbProcs = dbProcs dbStruct -- procs are only obtained from the exposed schemas, no need to filter them. + , dbRepresentations = dbRepresentations dbStruct -- no need to filter, not directly exposed through the API } where hasInternalJunction ComputedRelationship{} = False @@ -271,6 +277,42 @@ decodeProcs = | v == 's' = Stable | otherwise = Volatile -- only 'v' can happen here +decodeRepresentations :: HD.Result RepresentationsMap +decodeRepresentations = + HM.fromList . map (\rep@DataRepresentation{drSourceType, drTargetType} -> ((drSourceType, drTargetType), rep)) <$> HD.rowList row + where + row = DataRepresentation + <$> column HD.text + <*> column HD.text + <*> column HD.text + +-- Selects all potential data representation transformations. To qualify the cast must be +-- 1. to or from a domain +-- 2. implicit +-- For the time being it must also be to/from JSON or text, although one can imagine a future where we support special +-- cases like CSV specific representations. +dataRepresentations :: Bool -> SQL.Statement [Schema] RepresentationsMap +dataRepresentations = SQL.Statement sql (arrayParam HE.text) decodeRepresentations + where + sql = [q| + SELECT + c.castsource::regtype::text, + c.casttarget::regtype::text, + c.castfunc::regproc::text + FROM + pg_catalog.pg_cast c + JOIN pg_catalog.pg_type src_t + ON c.castsource::oid = src_t.oid + JOIN pg_catalog.pg_type dst_t + ON c.casttarget::oid = dst_t.oid + WHERE + c.castcontext = 'i' + AND c.castmethod = 'f' + AND has_function_privilege(c.castfunc, 'execute') + AND ((src_t.typtype = 'd' AND c.casttarget IN ('json'::regtype::oid , 'text'::regtype::oid)) + OR (dst_t.typtype = 'd' AND c.castsource IN ('json'::regtype::oid , 'text'::regtype::oid))) + |] + allProcs :: PgVersion -> Bool -> SQL.Statement [Schema] ProcsMap allProcs pgVer = SQL.Statement sql (arrayParam HE.text) decodeProcs where diff --git a/src/PostgREST/SchemaCache/Representations.hs b/src/PostgREST/SchemaCache/Representations.hs new file mode 100644 index 00000000000..027365f6df9 --- /dev/null +++ b/src/PostgREST/SchemaCache/Representations.hs @@ -0,0 +1,29 @@ +{-# LANGUAGE DeriveAnyClass #-} +{-# LANGUAGE DeriveGeneric #-} + +module PostgREST.SchemaCache.Representations + ( DataRepresentation(..) 
+ , RepresentationsMap + ) where + +import qualified Data.Aeson as JSON +import qualified Data.HashMap.Strict as HM + + +import Protolude + +-- | Data representations allow user customisation of how to present and receive data through APIs, per field. +-- This structure is used for the library of available transforms. It answers questions like: +-- - What function, if any, should be used to present a certain field that's been selected for API output? +-- - How do we parse incoming data for a certain field type when inserting or updating? +-- - And similarly, how do we parse textual data in a query string to be used as a filter? +-- +-- Support for outputting special formats like CSV and binary data would fit into the same system. +data DataRepresentation = DataRepresentation + { drSourceType :: Text + , drTargetType :: Text + , drFunction :: Text + } deriving (Eq, Show, Generic, JSON.ToJSON, JSON.FromJSON) + +-- The representation map maps from (source type, target type) to a DR. +type RepresentationsMap = HM.HashMap (Text, Text) DataRepresentation diff --git a/test/spec/Feature/Query/ComputedRelsSpec.hs b/test/spec/Feature/Query/ComputedRelsSpec.hs index ea66ad43740..cba71e68b6a 100644 --- a/test/spec/Feature/Query/ComputedRelsSpec.hs +++ b/test/spec/Feature/Query/ComputedRelsSpec.hs @@ -104,6 +104,42 @@ spec = describe "computed relationships" $ do [json|[ {"name":"Final Fantasy I","designer":{"name":"Hironobu Sakaguchi"}} ]|] { matchStatus = 200 } + it "applies data representations to response" $ do + -- A smoke test for data reps in the presence of computed relations. + + -- The data rep here title cases the designer name before presentation. So here the lowercase version will be saved, + -- but the title case version returned. Pulling in a computed relation should not confuse this. + request methodPatch "/designers?select=name,videogames:computed_videogames(name)&id=eq.1" + [("Prefer", "return=representation"), ("Prefer", "tx=commit")] + [json| {"name": "sidney k. meier"} |] + `shouldRespondWith` + [json|[{"name":"Sidney K. Meier","videogames":[{"name":"Civilization I"}, {"name":"Civilization II"}]}]|] + { matchStatus = 200 } + + -- Verify it was saved the way we requested (there's no text data rep for this column, so if we select with the wrong casing, it should fail.) + get "/designers?select=id&name=eq.Sidney%20K.%20Meier" + `shouldRespondWith` + [json|[]|] + { matchStatus = 200, matchHeaders = [matchContentTypeJson] } + -- But with the right casing it works. + get "/designers?select=id,name&name=eq.sidney%20k.%20meier" + `shouldRespondWith` + [json|[{"id": 1, "name":"Sidney K. Meier"}]|] + { matchStatus = 200, matchHeaders = [matchContentTypeJson] } + + -- Most importantly, if you read it back even via a computed relation, the data rep should be applied. + get "/videogames?select=name,designer:computed_designers(*)&id=eq.1" + `shouldRespondWith` + [json|[ + {"name":"Civilization I","designer":{"id": 1, "name":"Sidney K. 
Meier"}}
+        ]|] { matchHeaders = [matchContentTypeJson] }
+
+      -- reset the test fixture
+      request methodPatch "/designers?id=eq.1"
+          [("Prefer", "tx=commit")]
+          [json| {"name": "Sid Meier"} |]
+        `shouldRespondWith` 204
+
   it "works with self joins" $
     get "/web_content?select=name,child_web_content(name),parent_web_content(name)&id=in.(0,1)"
       `shouldRespondWith`
diff --git a/test/spec/Feature/Query/InsertSpec.hs b/test/spec/Feature/Query/InsertSpec.hs
index 50053169dc0..1ea0ad82668 100644
--- a/test/spec/Feature/Query/InsertSpec.hs
+++ b/test/spec/Feature/Query/InsertSpec.hs
@@ -11,8 +11,9 @@ import Test.Hspec.Wai
 import Test.Hspec.Wai.JSON
 import Text.Heredoc

-import PostgREST.Config.PgVersion (PgVersion, pgVersion110,
-                                   pgVersion112, pgVersion130)
+import PostgREST.Config.PgVersion (PgVersion, pgVersion100,
+                                   pgVersion110, pgVersion112,
+                                   pgVersion130)

 import Protolude hiding (get)
 import SpecHelper
@@ -657,3 +658,114 @@ spec actualPgVersion = do
               , "Location" <:> "/test_null_pk_competitors_sponsors?id=eq.1&sponsor_id=is.null"
               , "Content-Range" <:> "*/*" ]
           }
+
+  -- Data representations for payload parsing require Postgres 10 or above.
+  when (actualPgVersion >= pgVersion100) $ do
+    describe "Data representations" $ do
+      context "on regular table" $ do
+        it "parses values in POST body" $
+          -- we don't check that the parsing is correct here, just that it's happening. If it doesn't happen we'll get
+          -- an "invalid input syntax for type integer:" error.
+          request methodPost "/datarep_todos" [("Prefer", "return=headers-only")]
+            [json| {"id":5, "name": "party", "label_color": "#001100", "due_at": "2018-01-03T11:00:00+00"} |]
+            `shouldRespondWith`
+            ""
+            { matchStatus = 201
+            , matchHeaders = [ matchHeaderAbsent hContentType
+                             , "Location" <:> "/datarep_todos?id=eq.5"
+                             , "Content-Range" <:> "*/*" ]
+            }
+
+        it "parses values in POST body and formats individually selected values in return=representation" $
+          request methodPost "/datarep_todos?select=id,label_color" [("Prefer", "return=representation")]
+            [json| {"id":5, "name": "party", "label_color": "#001100", "due_at": "2018-01-03T11:00:00+00"} |]
+            `shouldRespondWith`
+            [json| [{"id":5, "label_color": "#001100"}] |]
+            { matchStatus = 201
+            , matchHeaders = ["Content-Type" <:> "application/json; charset=utf-8",
+                "Content-Range" <:> "*/*"]
+            }
+
+        it "parses values in POST body and formats values in return=representation" $
+          request methodPost "/datarep_todos" [("Prefer", "return=representation")]
+            [json| {"id":5, "name": "party", "label_color": "#001100", "due_at": "2018-01-03T11:00:00+00"} |]
+            `shouldRespondWith`
+            [json| [{"id":5,"name": "party", "label_color": "#001100", "due_at":"2018-01-03T11:00:00+00"}] |]
+            { matchStatus = 201
+            , matchHeaders = ["Content-Type" <:> "application/json; charset=utf-8",
+                "Content-Range" <:> "*/*"]
+            }
+
+        context "with ?columns parameter" $ do
+          it "ignores json keys not included in ?columns; parses only the ones specified" $
+            request methodPost "/datarep_todos?columns=id,label_color&select=id,name,label_color,due_at" [("Prefer", "return=representation")]
+              [json| {"id":5, "name": "party", "label_color": "#001100", "due_at": "invalid but should be ignored"} |]
+              `shouldRespondWith`
+              [json| [{"id":5, "name":null, "label_color": "#001100", "due_at": "2018-01-01T00:00:00+00"}] |]
+              { matchStatus = 201
+              , matchHeaders = ["Content-Type" <:> "application/json; charset=utf-8",
+                  "Content-Range" <:> "*/*"]
+              }
+
+          it "fails without parsing anything if at least one specified column doesn't exist" $
            request methodPost "/datarep_todos?columns=id,label_color,helicopters&select=id,name,label_color,due_at" [("Prefer", "return=representation")]
+              [json| {"due_at": "2019-01-03T11:00:00+00", "smth": "here", "label_color": "invalid", "fake_id": 13} |]
+              `shouldRespondWith`
+              [json| {"code":"PGRST118","message":"Column 'helicopters' of relation 'datarep_todos' does not exist","details":null,"hint":null} |]
+              { matchStatus = 400
+              , matchHeaders = ["Content-Type" <:> "application/json; charset=utf-8"]
+              }
+
+      context "on updatable view" $ do
+        it "parses values in POST body" $
+          -- we don't check that the parsing is correct here, just that it's happening. If it doesn't happen we'll get
+          -- an "invalid input syntax for type integer:" error.
+          request methodPost "/datarep_todos_computed" [("Prefer", "return=headers-only")]
+            [json| {"id":5, "name": "party", "label_color": "#001100", "due_at": "2018-01-03T11:00:00+00"} |]
+            `shouldRespondWith`
+            ""
+            { matchStatus = 201
+            , matchHeaders = [ matchHeaderAbsent hContentType
+                             , "Location" <:> "/datarep_todos_computed?id=eq.5"
+                             , "Content-Range" <:> "*/*" ]
+            }
+
+        it "parses values in POST body and formats individually selected values in return=representation" $
+          request methodPost "/datarep_todos_computed?select=id,label_color" [("Prefer", "return=representation")]
+            [json| {"id":5, "name": "party", "label_color": "#001100", "due_at": "2018-01-03T11:00:00+00"} |]
+            `shouldRespondWith`
+            [json| [{"id":5, "label_color": "#001100"}] |]
+            { matchStatus = 201
+            , matchHeaders = ["Content-Type" <:> "application/json; charset=utf-8",
+                "Content-Range" <:> "*/*"]
+            }
+
+        it "parses values in POST body and formats values in return=representation" $
+          request methodPost "/datarep_todos_computed" [("Prefer", "return=representation")]
+            [json| {"id":5, "name": "party", "label_color": "#001100", "due_at": "2018-01-03T11:00:00+00"} |]
+            `shouldRespondWith`
+            [json| [{"id":5,"name": "party", "label_color": "#001100", "due_at":"2018-01-03T11:00:00+00", "dark_color":"#000880"}] |]
+            { matchStatus = 201
+            , matchHeaders = ["Content-Type" <:> "application/json; charset=utf-8",
+                "Content-Range" <:> "*/*"]
+            }
+
+        context "on updatable views with ?columns parameter" $ do
+          it "ignores json keys not included in ?columns; parses only the ones specified" $
+            request methodPost "/datarep_todos_computed?columns=id,label_color&select=id,name,label_color,due_at" [("Prefer", "return=representation")]
+              [json| {"id":5, "name": "party", "label_color": "#001100", "due_at": "invalid but should be ignored"} |]
+              `shouldRespondWith`
+              [json| [{"id":5, "name":null, "label_color": "#001100", "due_at": "2018-01-01T00:00:00+00"}] |]
+              { matchStatus = 201
+              , matchHeaders = ["Content-Type" <:> "application/json; charset=utf-8",
+                  "Content-Range" <:> "*/*"]
+              }
+
+          it "fails without parsing anything if at least one specified column doesn't exist" $
+            request methodPost "/datarep_todos_computed?columns=id,label_color,helicopters&select=id,name,label_color,due_at" [("Prefer", "return=representation")]
+              [json| {"due_at": "2019-01-03T11:00:00+00", "smth": "here", "label_color": "invalid", "fake_id": 13} |]
+              `shouldRespondWith`
+              [json| {"code":"PGRST118","message":"Column 'helicopters' of relation 'datarep_todos_computed' does not exist","details":null,"hint":null} |]
+              { matchStatus = 400
+              , matchHeaders = ["Content-Type" <:> "application/json; charset=utf-8"]
+              }
diff --git a/test/spec/Feature/Query/QuerySpec.hs b/test/spec/Feature/Query/QuerySpec.hs
index b995dfe7ce1..690550c9374
100644 --- a/test/spec/Feature/Query/QuerySpec.hs +++ b/test/spec/Feature/Query/QuerySpec.hs @@ -1276,3 +1276,125 @@ spec actualPgVersion = do {"id":4,"name":"OSX","client_id":2}, {"id":5,"name":"Orphan","client_id":null}]|] { matchHeaders = [matchContentTypeJson] } + + describe "Data representations for customisable value formatting and parsing" $ do + it "formats a single column" $ + get "/datarep_todos?select=id,label_color&id=lt.4" `shouldRespondWith` + [json| [{"id":1,"label_color":"#000000"},{"id":2,"label_color":"#000100"},{"id":3,"label_color":"#01E240"}] |] + { matchHeaders = [matchContentTypeJson] } + it "formats two columns with different formatters" $ + get "/datarep_todos?select=id,label_color,due_at&id=lt.4" `shouldRespondWith` + [json| [{"id":1,"label_color":"#000000","due_at":"2018-01-02T00:00:00+00"},{"id":2,"label_color":"#000100","due_at":"2018-01-03T00:00:00+00"},{"id":3,"label_color":"#01E240","due_at":"2018-01-01T14:12:34.123456+00"}] |] + { matchHeaders = [matchContentTypeJson] } + it "fails in some reasonable way when selecting fields that don't exist" $ + get "/datarep_todos?select=id,label_color,banana" `shouldRespondWith` + [json| {"code":"42703","details":null,"hint":null,"message":"column datarep_todos.banana does not exist"} |] + { matchStatus = 400 + , matchHeaders = [matchContentTypeJson] + } + it "formats columns in views including computed columns" $ + get "/datarep_todos_computed?select=id,label_color,dark_color" `shouldRespondWith` + [json| [ + {"id":1, "label_color":"#000000", "dark_color":"#000000"}, + {"id":2, "label_color":"#000100", "dark_color":"#000080"}, + {"id":3, "label_color":"#01E240", "dark_color":"#00F120"}, + {"id":4, "label_color":"", "dark_color":""} + ] |] + { matchHeaders = [matchContentTypeJson] } + it "formats and allows rename" $ + get "/datarep_todos?select=id,clr:label_color&id=lt.4" `shouldRespondWith` + [json| [{"id":1,"clr":"#000000"},{"id":2,"clr":"#000100"},{"id":3,"clr":"#01E240"}] |] + { matchHeaders = [matchContentTypeJson] } + it "formats, renames and allows manual casting on top" $ + get "/datarep_todos?select=id,clr:label_color::text&id=lt.4" `shouldRespondWith` + [json| [{"id":1,"clr":"\"#000000\""},{"id":2,"clr":"\"#000100\""},{"id":3,"clr":"\"#01E240\""}] |] + { matchHeaders = [matchContentTypeJson] } + it "formats nulls" $ + -- due_at is formatted as NULL but label_color NULLs become empty strings-- it's up to the formatting function. 
+ get "/datarep_todos?select=id,label_color,due_at&id=gt.2&id=lt.5" `shouldRespondWith` + [json| [{"id":3,"label_color":"#01E240","due_at":"2018-01-01T14:12:34.123456+00"},{"id":4,"label_color":"","due_at":null}] |] + { matchHeaders = [matchContentTypeJson] } + it "formats star select" $ + get "/datarep_todos?select=*&id=lt.4" `shouldRespondWith` + [json| [ + {"id":1,"name":"Report","label_color":"#000000","due_at":"2018-01-02T00:00:00+00"}, + {"id":2,"name":"Essay","label_color":"#000100","due_at":"2018-01-03T00:00:00+00"}, + {"id":3,"name":"Algebra","label_color":"#01E240","due_at":"2018-01-01T14:12:34.123456+00"} + ] |] + { matchHeaders = [matchContentTypeJson] } + it "formats implicit star select" $ + get "/datarep_todos?id=lt.4" `shouldRespondWith` + [json| [ + {"id":1,"name":"Report","label_color":"#000000","due_at":"2018-01-02T00:00:00+00"}, + {"id":2,"name":"Essay","label_color":"#000100","due_at":"2018-01-03T00:00:00+00"}, + {"id":3,"name":"Algebra","label_color":"#01E240","due_at":"2018-01-01T14:12:34.123456+00"} + ] |] + { matchHeaders = [matchContentTypeJson] } + it "formats star and explicit mix" $ + get "/datarep_todos?select=due_at,*&id=lt.4" `shouldRespondWith` + [json| [ + {"id":1,"name":"Report","label_color":"#000000","due_at":"2018-01-02T00:00:00+00"}, + {"id":2,"name":"Essay","label_color":"#000100","due_at":"2018-01-03T00:00:00+00"}, + {"id":3,"name":"Algebra","label_color":"#01E240","due_at":"2018-01-01T14:12:34.123456+00"} + ] |] + { matchHeaders = [matchContentTypeJson] } + it "formats through join" $ + get "/datarep_next_two_todos?select=id,name,first_item:datarep_todos!datarep_next_two_todos_first_item_id_fkey(label_color,due_at)" `shouldRespondWith` + [json| [{"id":1,"name":"school related","first_item":{"label_color":"#000100","due_at":"2018-01-03T00:00:00+00"}},{"id":2,"name":"do these first","first_item":{"label_color":"#000000","due_at":"2018-01-02T00:00:00+00"}}] |] + { matchHeaders = [matchContentTypeJson] } + it "formats through join with star select" $ + get "/datarep_next_two_todos?select=id,name,second_item:datarep_todos!datarep_next_two_todos_second_item_id_fkey(*)" `shouldRespondWith` + [json| [ + {"id":1,"name":"school related","second_item": + {"id": 3, "name": "Algebra", "label_color":"#01E240","due_at":"2018-01-01T14:12:34.123456+00"}}, + {"id":2,"name":"do these first","second_item": + {"id": 3, "name": "Algebra", "label_color":"#01E240","due_at":"2018-01-01T14:12:34.123456+00"}} + ] |] + { matchHeaders = [matchContentTypeJson] } + it "uses text parser on value for filter given through query parameters" $ + get "/datarep_todos?select=id,due_at&label_color=eq.000100" `shouldRespondWith` + [json| [{"id":2,"due_at":"2018-01-03T00:00:00+00"}] |] + { matchHeaders = [matchContentTypeJson] } + it "in the absense of text parser, does not try to use the JSON parser for query parameters" $ + get "/datarep_todos?select=id,due_at&due_at=eq.T" `shouldRespondWith` + -- okay this test is a bit of a hack but we prove the parser is not used because it'd replace the T and fail a + -- different way. + [json| {"code":"22007","details":null,"hint":null,"message":"invalid input syntax for type timestamp with time zone: \"T\""} |] + { matchStatus = 400 + , matchHeaders = [matchContentTypeJson] + } + -- Before PG 11, this will fail because we need arrays of domain type values. The docs should explain data reps are + -- not supported in this case. 
+ when (actualPgVersion >= pgVersion110) $ do + it "uses text parser for filter with 'IN' predicates" $ + get "/datarep_todos?select=id,due_at&label_color=in.(000100,01E240)" `shouldRespondWith` + [json| [ + {"id":2, "due_at": "2018-01-03T00:00:00+00"}, + {"id":3, "due_at": "2018-01-01T14:12:34.123456+00"} + ] |] + { matchHeaders = [matchContentTypeJson] } + it "uses text parser for filter with 'NOT IN' predicates" $ + get "/datarep_todos?select=id,due_at&label_color=not.in.(000000,01E240)" `shouldRespondWith` + [json| [ + {"id":2, "due_at": "2018-01-03T00:00:00+00"} + ] |] + { matchHeaders = [matchContentTypeJson] } + it "uses text parser on value for filter across relations" $ + get "/datarep_next_two_todos?select=id,name,datarep_todos!datarep_next_two_todos_first_item_id_fkey(label_color,due_at)&datarep_todos.label_color=neq.000100" `shouldRespondWith` + [json| [{"id":1,"name":"school related","datarep_todos":null},{"id":2,"name":"do these first","datarep_todos":{"label_color":"#000000","due_at":"2018-01-02T00:00:00+00"}}] |] + { matchHeaders = [matchContentTypeJson] } + -- This is not supported by data reps (would be hard to make it work with high performance). So the test just + -- verifies we don't panic or add inappropriate SQL to the filters. + it "fails safely on user trying to use ilike operator on data reps column" $ + get "/datarep_todos?select=id,name&label_color=ilike.#*100" `shouldRespondWith` ( + if actualPgVersion >= pgVersion110 then + [json| + {"code":"42883","details":null,"hint":"No operator matches the given name and argument types. You might need to add explicit type casts.","message":"operator does not exist: public.color ~~* unknown"} + |] + else + [json| + {"code":"42883","details":null,"hint":"No operator matches the given name and argument type(s). You might need to add explicit type casts.","message":"operator does not exist: public.color ~~* unknown"} + |]) + { matchStatus = 404 + , matchHeaders = [matchContentTypeJson] + } diff --git a/test/spec/Feature/Query/UpdateSpec.hs b/test/spec/Feature/Query/UpdateSpec.hs index ff0be3dcf5d..16840b1d048 100644 --- a/test/spec/Feature/Query/UpdateSpec.hs +++ b/test/spec/Feature/Query/UpdateSpec.hs @@ -9,6 +9,9 @@ import Network.HTTP.Types import Test.Hspec.Wai import Test.Hspec.Wai.JSON +import PostgREST.Config.PgVersion (PgVersion, pgVersion100) + + import Protolude hiding (get) import SpecHelper @@ -18,8 +21,8 @@ tblDataBefore = [aesonQQ|[ , { "id": 3, "name": "item-3" } ]|] -spec :: SpecWith ((), Application) -spec = do +spec :: PgVersion -> SpecWith ((), Application) +spec actualPgVersion = do describe "Patching record" $ do context "to unknown uri" $ it "indicates no table found by returning 404" $ @@ -543,3 +546,187 @@ spec = do , { "id": 2, "name": "item-2" } , { "id": 3, "name": "item-3" } ]|] + + -- Data representations for payload parsing requires Postgrest 10 or above. 
+ when (actualPgVersion >= pgVersion100) $ do + describe "Data representations" $ do + context "for a single row" $ do + it "parses values in payload" $ + request methodPatch "/datarep_todos?id=eq.2" [("Prefer", "return=headers-only")] + [json| {"label_color": "#221100", "due_at": "2019-01-03T11:00:00+00"} |] + `shouldRespondWith` + "" + { matchStatus = 204 + , matchHeaders = [ matchHeaderAbsent hContentType + , "Content-Range" <:> "0-0/*" ] + } + + it "parses values in payload and formats individually selected values in return=representation" $ + request methodPatch "/datarep_todos?id=eq.2&select=id,label_color" [("Prefer", "return=representation")] + [json| {"label_color": "#221100", "due_at": "2019-01-03T11:00:00+00"} |] + `shouldRespondWith` + [json| [{"id":2, "label_color": "#221100"}] |] + { matchStatus = 200 + , matchHeaders = ["Content-Type" <:> "application/json; charset=utf-8", + "Content-Range" <:> "0-0/*"] + } + + it "parses values in payload and formats values in return=representation" $ + request methodPatch "/datarep_todos?id=eq.2" [("Prefer", "return=representation")] + [json| {"label_color": "#221100", "due_at": "2019-01-03T11:00:20+00"} |] + `shouldRespondWith` + [json| [{"id":2, "name": "Essay", "label_color": "#221100", "due_at":"2019-01-03T11:00:20+00"}] |] + { matchStatus = 200 + , matchHeaders = ["Content-Type" <:> "application/json; charset=utf-8", + "Content-Range" <:> "0-0/*"] + } + + it "parses values in payload and formats star mixed selected values in return=representation" $ + request methodPatch "/datarep_todos?id=eq.2&select=due_at,*" [("Prefer", "return=representation")] + [json| {"label_color": "#221100", "due_at": "2019-01-03T11:00:00+00"} |] + `shouldRespondWith` + -- end up with due_at twice here but that's unrelated to data reps + [json| [{"due_at":"2019-01-03T11:00:00+00", "id":2, "name":"Essay", "label_color":"#221100", "due_at":"2019-01-03T11:00:00+00"}] |] + { matchStatus = 200 + , matchHeaders = ["Content-Type" <:> "application/json; charset=utf-8", + "Content-Range" <:> "0-0/*"] + } + context "for multiple rows" $ do + it "parses values in payload and formats individually selected values in return=representation" $ + request methodPatch "/datarep_todos?id=lt.4&select=id,name,label_color" [("Prefer", "return=representation")] + [json| {"label_color": "#221100", "due_at": "2019-01-03T11:00:00+00"} |] + `shouldRespondWith` + [json| [ + {"id":1, "name": "Report", "label_color": "#221100"}, + {"id":2, "name": "Essay", "label_color": "#221100"}, + {"id":3, "name": "Algebra", "label_color": "#221100"} + ] |] + { matchStatus = 200 + , matchHeaders = ["Content-Type" <:> "application/json; charset=utf-8", + "Content-Range" <:> "0-2/*"] + } + + it "parses values in payload and formats values in return=representation" $ + request methodPatch "/datarep_todos?id=lt.4" [("Prefer", "return=representation")] + [json| {"label_color": "#221100", "due_at": "2019-01-03T11:00:00+00"} |] + `shouldRespondWith` + [json| [ + {"id":1, "name": "Report", "label_color": "#221100", "due_at":"2019-01-03T11:00:00+00"}, + {"id":2, "name": "Essay", "label_color": "#221100", "due_at":"2019-01-03T11:00:00+00"}, + {"id":3, "name": "Algebra", "label_color": "#221100", "due_at":"2019-01-03T11:00:00+00"} + ] |] + { matchStatus = 200 + , matchHeaders = ["Content-Type" <:> "application/json; charset=utf-8", + "Content-Range" <:> "0-2/*"] + } + context "with ?columns parameter" $ do + it "ignores json keys not included in ?columns; parses only the ones specified" $ + request methodPatch 
"/datarep_todos?id=eq.2&columns=due_at" [("Prefer", "return=representation")] + [json| {"due_at": "2019-01-03T11:00:00+00", "smth": "here", "label_color": "invalid", "fake_id": 13} |] + `shouldRespondWith` + [json| [ + {"id":2, "name": "Essay", "label_color": "#000100", "due_at":"2019-01-03T11:00:00+00"} + ] |] + { matchStatus = 200 + , matchHeaders = ["Content-Type" <:> "application/json; charset=utf-8", + "Content-Range" <:> "0-0/*"] + } + + it "fails if at least one specified column doesn't exist" $ + request methodPatch "/datarep_todos?id=eq.2&columns=label_color,helicopters" [("Prefer", "return=representation")] + [json| {"due_at": "2019-01-03T11:00:00+00", "smth": "here", "label_color": "invalid", "fake_id": 13} |] + `shouldRespondWith` + [json| {"code":"PGRST118","message":"Column 'helicopters' of relation 'datarep_todos' does not exist","details":null,"hint":null} |] + { matchStatus = 400 + , matchHeaders = ["Content-Type" <:> "application/json; charset=utf-8"] + } + + it "ignores json keys and gives 200 if no record updated" $ + request methodPatch "/datarep_todos?id=eq.2001&columns=label_color" [("Prefer", "return=representation")] + [json| {"due_at": "2019-01-03T11:00:00+00", "smth": "here", "label_color": "invalid", "fake_id": 13} |] + `shouldRespondWith` 200 + context "on a view" $ do + context "for a single row" $ do + it "parses values in payload" $ + request methodPatch "/datarep_todos_computed?id=eq.2" [("Prefer", "return=headers-only")] + [json| {"label_color": "#221100", "due_at": "2019-01-03T11:00:00+00"} |] + `shouldRespondWith` + "" + { matchStatus = 204 + , matchHeaders = [ matchHeaderAbsent hContentType + , "Content-Range" <:> "0-0/*" ] + } + + it "parses values in payload and formats individually selected values in return=representation" $ + request methodPatch "/datarep_todos_computed?id=eq.2&select=id,label_color" [("Prefer", "return=representation")] + [json| {"label_color": "#221100", "due_at": "2019-01-03T11:00:00+00"} |] + `shouldRespondWith` + [json| [{"id":2, "label_color": "#221100"}] |] + { matchStatus = 200 + , matchHeaders = ["Content-Type" <:> "application/json; charset=utf-8", + "Content-Range" <:> "0-0/*"] + } + + it "parses values in payload and formats values in return=representation" $ + request methodPatch "/datarep_todos_computed?id=eq.2" [("Prefer", "return=representation")] + [json| {"label_color": "#221100", "due_at": "2019-01-03T11:00:20+00"} |] + `shouldRespondWith` + [json| [{"id":2, "name": "Essay", "label_color": "#221100", "dark_color":"#110880", "due_at":"2019-01-03T11:00:20+00"}] |] + { matchStatus = 200 + , matchHeaders = ["Content-Type" <:> "application/json; charset=utf-8", + "Content-Range" <:> "0-0/*"] + } + context "for multiple rows" $ do + it "parses values in payload and formats individually selected values in return=representation" $ + request methodPatch "/datarep_todos_computed?id=lt.4&select=id,name,label_color,dark_color" [("Prefer", "return=representation")] + [json| {"label_color": "#221100", "due_at": "2019-01-03T11:00:00+00"} |] + `shouldRespondWith` + [json| [ + {"id":1, "name": "Report", "label_color": "#221100", "dark_color":"#110880"}, + {"id":2, "name": "Essay", "label_color": "#221100", "dark_color":"#110880"}, + {"id":3, "name": "Algebra", "label_color": "#221100", "dark_color":"#110880"} + ] |] + { matchStatus = 200 + , matchHeaders = ["Content-Type" <:> "application/json; charset=utf-8", + "Content-Range" <:> "0-2/*"] + } + + it "parses values in payload and formats values in return=representation" $ + 
request methodPatch "/datarep_todos_computed?id=lt.4" [("Prefer", "return=representation")] + [json| {"label_color": "#221100", "due_at": "2019-01-03T11:00:00+00"} |] + `shouldRespondWith` + [json| [ + {"id":1, "name": "Report", "label_color": "#221100", "dark_color":"#110880", "due_at":"2019-01-03T11:00:00+00"}, + {"id":2, "name": "Essay", "label_color": "#221100", "dark_color":"#110880", "due_at":"2019-01-03T11:00:00+00"}, + {"id":3, "name": "Algebra", "label_color": "#221100", "dark_color":"#110880", "due_at":"2019-01-03T11:00:00+00"} + ] |] + { matchStatus = 200 + , matchHeaders = ["Content-Type" <:> "application/json; charset=utf-8", + "Content-Range" <:> "0-2/*"] + } + context "with ?columns parameter" $ do + it "ignores json keys not included in ?columns; parses only the ones specified" $ + request methodPatch "/datarep_todos_computed?id=eq.2&columns=due_at" [("Prefer", "return=representation")] + [json| {"due_at": "2019-01-03T11:00:00+00", "smth": "here", "label_color": "invalid", "fake_id": 13} |] + `shouldRespondWith` + [json| [ + {"id":2, "name": "Essay", "label_color": "#000100", "dark_color": "#000080", "due_at":"2019-01-03T11:00:00+00"} + ] |] + { matchStatus = 200 + , matchHeaders = ["Content-Type" <:> "application/json; charset=utf-8", + "Content-Range" <:> "0-0/*"] + } + + it "fails if at least one specified column doesn't exist" $ + request methodPatch "/datarep_todos_computed?id=eq.2&columns=label_color,helicopters" [("Prefer", "return=representation")] + [json| {"due_at": "2019-01-03T11:00:00+00", "smth": "here", "label_color": "invalid", "fake_id": 13} |] + `shouldRespondWith` + [json| {"code":"PGRST118","message":"Column 'helicopters' of relation 'datarep_todos_computed' does not exist","details":null,"hint":null} |] + { matchStatus = 400 + , matchHeaders = ["Content-Type" <:> "application/json; charset=utf-8"] + } + + it "ignores json keys and gives 200 if no record updated" $ + request methodPatch "/datarep_todos_computed?id=eq.2001&columns=label_color" [("Prefer", "return=representation")] + [json| {"due_at": "2019-01-03T11:00:00+00", "smth": "here", "label_color": "invalid", "fake_id": 13} |] + `shouldRespondWith` 200 diff --git a/test/spec/Main.hs b/test/spec/Main.hs index aa443b821a5..cd53d25b8c9 100644 --- a/test/spec/Main.hs +++ b/test/spec/Main.hs @@ -148,7 +148,7 @@ main = do , ("Feature.Query.RawOutputTypesSpec" , Feature.Query.RawOutputTypesSpec.spec) , ("Feature.Query.RpcSpec" , Feature.Query.RpcSpec.spec actualPgVersion) , ("Feature.Query.SingularSpec" , Feature.Query.SingularSpec.spec) - , ("Feature.Query.UpdateSpec" , Feature.Query.UpdateSpec.spec) + , ("Feature.Query.UpdateSpec" , Feature.Query.UpdateSpec.spec actualPgVersion) , ("Feature.Query.UpsertSpec" , Feature.Query.UpsertSpec.spec actualPgVersion) , ("Feature.Query.ComputedRelsSpec" , Feature.Query.ComputedRelsSpec.spec) , ("Feature.Query.RelatedQueriesSpec" , Feature.Query.RelatedQueriesSpec.spec) diff --git a/test/spec/fixtures/data.sql b/test/spec/fixtures/data.sql index dd32ae9dc4e..501beac467d 100644 --- a/test/spec/fixtures/data.sql +++ b/test/spec/fixtures/data.sql @@ -838,3 +838,13 @@ INSERT INTO posters(id,name) VALUES (1,'Mark'), (2,'Elon'), (3,'Bill'), (4,'Jeff TRUNCATE TABLE subscriptions CASCADE; INSERT INTO subscriptions(subscriber,subscribed) VALUES (3,1), (4,1), (1,2); + +TRUNCATE TABLE datarep_todos CASCADE; +INSERT INTO datarep_todos VALUES (1, 'Report', 0, '2018-01-02'); +INSERT INTO datarep_todos VALUES (2, 'Essay', 256, '2018-01-03'); +INSERT INTO datarep_todos VALUES (3, 
'Algebra', 123456, '2018-01-01 14:12:34.123456'); +INSERT INTO datarep_todos VALUES (4, 'Opus Magnum', NULL, NULL); + +TRUNCATE TABLE datarep_next_two_todos CASCADE; +INSERT INTO datarep_next_two_todos VALUES (1, 2, 3, 'school related'); +INSERT INTO datarep_next_two_todos VALUES (2, 1, 3, 'do these first'); diff --git a/test/spec/fixtures/schema.sql b/test/spec/fixtures/schema.sql index 7935ef7c667..4bc6e3eb201 100644 --- a/test/spec/fixtures/schema.sql +++ b/test/spec/fixtures/schema.sql @@ -2786,9 +2786,20 @@ BEGIN LOAD 'safeupdate'; END; $$ LANGUAGE plpgsql SECURITY DEFINER; +-- This tests data representations over computed joins: even a lower-case title should come back title-cased. +DROP DOMAIN IF EXISTS public.titlecasetext CASCADE; +CREATE DOMAIN public.titlecasetext AS text; + +CREATE OR REPLACE FUNCTION json(public.titlecasetext) RETURNS json AS $$ + SELECT to_json(INITCAP($1::text)); +$$ LANGUAGE SQL IMMUTABLE; + +CREATE CAST (public.titlecasetext AS json) WITH FUNCTION json(public.titlecasetext) AS IMPLICIT; +-- End of the data-representations-specific fixtures, except for the table below where the domain is used. + CREATE TABLE designers ( id int primary key -, name text +, name public.titlecasetext ); CREATE TABLE videogames ( @@ -3103,6 +3114,72 @@ create table test.subscriptions( primary key(subscriber, subscribed) ); +-- For formatting output and parsing input of types with custom API representations. +DROP DOMAIN IF EXISTS public.color CASCADE; +CREATE DOMAIN public.color AS INTEGER CHECK (VALUE >= 0 AND VALUE <= 16777215); + +CREATE OR REPLACE FUNCTION color(json) RETURNS public.color AS $$ + SELECT color($1 #>> '{}'); +$$ LANGUAGE SQL IMMUTABLE; + +CREATE OR REPLACE FUNCTION color(text) RETURNS public.color AS $$ + SELECT (('x' || lpad((CASE WHEN SUBSTRING($1::text, 1, 1) = '#' THEN SUBSTRING($1::text, 2) ELSE $1::text END), 8, '0'))::bit(32)::int)::public.color; +$$ LANGUAGE SQL IMMUTABLE; + +CREATE OR REPLACE FUNCTION json(public.color) RETURNS json AS $$ + SELECT + CASE WHEN $1 IS NULL THEN to_json(''::text) + ELSE to_json('#' || lpad(upper(to_hex($1)), 6, '0')) + END; +$$ LANGUAGE SQL IMMUTABLE; + +CREATE CAST (public.color AS json) WITH FUNCTION json(public.color) AS IMPLICIT; +CREATE CAST (json AS public.color) WITH FUNCTION color(json) AS IMPLICIT; +CREATE CAST (text AS public.color) WITH FUNCTION color(text) AS IMPLICIT; + +DROP DOMAIN IF EXISTS public.isodate CASCADE; +CREATE DOMAIN public.isodate AS timestamp with time zone; + +CREATE OR REPLACE FUNCTION isodate(json) RETURNS public.isodate AS $$ + SELECT isodate($1 #>> '{}'); +$$ LANGUAGE SQL IMMUTABLE; + +CREATE OR REPLACE FUNCTION isodate(text) RETURNS public.isodate AS $$ + SELECT (replace($1, 'T', ' ')::timestamp with time zone)::public.isodate; +$$ LANGUAGE SQL IMMUTABLE; + +CREATE OR REPLACE FUNCTION json(public.isodate) RETURNS json AS $$ + SELECT to_json(replace($1::text, ' ', 'T')); +$$ LANGUAGE SQL IMMUTABLE; + +CREATE CAST (public.isodate AS json) WITH FUNCTION json(public.isodate) AS IMPLICIT; +CREATE CAST (json AS public.isodate) WITH FUNCTION isodate(json) AS IMPLICIT; +-- We intentionally omit this cast to test that query string parsing doesn't fall back on JSON parsing.
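+-- (without it, a query string value such as due_at=eq.T reaches the underlying timestamptz input routine unchanged and fails with SQLSTATE 22007, as QuerySpec verifies):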
+-- CREATE CAST (text AS public.isodate) WITH FUNCTION isodate(text) AS IMPLICIT; + +CREATE TABLE datarep_todos ( + id bigint primary key, + name text, + label_color public.color default 0, + due_at public.isodate default '2018-01-01'::date +); + +CREATE TABLE datarep_next_two_todos ( + id bigint primary key, + first_item_id bigint references datarep_todos(id), + second_item_id bigint references datarep_todos(id), + name text +); + +CREATE VIEW datarep_todos_computed as ( + SELECT id, + name, + label_color, + due_at, + (label_color / 2)::public.color as dark_color + FROM datarep_todos +); + -- view's name is alphabetically before projects create view test.alpha_projects as select c.id, p.name as pro_name, c.name as cli_name