From fb29d90f5d49bf5ce2e678f3922206c595bf6696 Mon Sep 17 00:00:00 2001
From: Alexander Ljungberg <aljungberg@wireload.net>
Date: Fri, 20 Jan 2023 16:51:08 +0000
Subject: [PATCH 01/11] feat: data representations allow custom parsing and
 formatting of API fields. See PR #2523.

Most notable code changes:

- Load data representation casts into the schema cache.
- Data representations for reads, filters, inserts, updates, views, and over joins.
- `CoercibleField` represents name references in queries where coercion may be needed.
- `ResolverContext` helps facilitate field resolution during planning.
- The planner 'resolves' names in the API query and pairs them with any implicit conversions to be used in the query builder stage.
- Tests for all of the above.
---
 README.md                                    |   1 +
 postgrest.cabal                              |   1 +
 src/PostgREST/Plan.hs                        | 253 ++++++++++++++-----
 src/PostgREST/Plan/MutatePlan.hs             |  15 +-
 src/PostgREST/Plan/ReadPlan.hs               |  12 +-
 src/PostgREST/Plan/Types.hs                  |  57 +++--
 src/PostgREST/Query/QueryBuilder.hs          |   2 +-
 src/PostgREST/Query/SqlFragment.hs           |  75 ++++--
 src/PostgREST/SchemaCache.hs                 |  88 +++++--
 src/PostgREST/SchemaCache/Representations.hs |  29 +++
 test/spec/Feature/Query/ComputedRelsSpec.hs  |  36 +++
 test/spec/Feature/Query/InsertSpec.hs        | 116 ++++++++-
 test/spec/Feature/Query/QuerySpec.hs         | 122 +++++++++
 test/spec/Feature/Query/UpdateSpec.hs        | 191 +++++++++++++-
 test/spec/Main.hs                            |   2 +-
 test/spec/fixtures/data.sql                  |  10 +
 test/spec/fixtures/schema.sql                |  79 +++++-
 17 files changed, 941 insertions(+), 148 deletions(-)
 create mode 100644 src/PostgREST/SchemaCache/Representations.hs

diff --git a/README.md b/README.md
index ae9df78100..e669744648 100644
--- a/README.md
+++ b/README.md
@@ -1,3 +1,4 @@
+
 ![Logo](static/bigger-logo.png "Logo")
 
 [![Donate](https://img.shields.io/badge/Donate-Patreon-orange.svg?colorB=F96854)](https://www.patreon.com/postgrest)
diff --git a/postgrest.cabal b/postgrest.cabal
index ad142cee6d..39a1afad8e 100644
--- a/postgrest.cabal
+++ b/postgrest.cabal
@@ -48,6 +48,7 @@ library
       PostgREST.SchemaCache.Identifiers
       PostgREST.SchemaCache.Proc
       PostgREST.SchemaCache.Relationship
+      PostgREST.SchemaCache.Representations
       PostgREST.SchemaCache.Table
       PostgREST.Error
       PostgREST.Logger
diff --git a/src/PostgREST/Plan.hs b/src/PostgREST/Plan.hs
index 8f800df06c..ea4a590fc0 100644
--- a/src/PostgREST/Plan.hs
+++ b/src/PostgREST/Plan.hs
@@ -26,6 +26,7 @@ module PostgREST.Plan
   ) where
 
 import qualified Data.HashMap.Strict        as HM
+import qualified Data.HashMap.Strict.InsOrd as HMI
 import qualified Data.Set                   as S
 
 import qualified PostgREST.SchemaCache.Proc as Proc
 
 import Data.Either.Combinators (mapLeft, mapRight)
 import Data.List               (delete)
 import Data.Tree               (Tree (..))
 
-import PostgREST.ApiRequest (Action (..),
-                             ApiRequest (..),
+import PostgREST.ApiRequest (Action (..),
+                             ApiRequest (..),
                              InvokeMethod (..),
-                             Mutation (..),
-                             Payload (..))
-import PostgREST.Config (AppConfig (..))
-import PostgREST.Error (Error (..))
-import PostgREST.Query.SqlFragment (sourceCTEName)
-import PostgREST.RangeQuery (NonnegRange, allRange,
-                             convertToLimitZeroRange,
-                             restrictRange)
-import PostgREST.SchemaCache (SchemaCache (..))
-import PostgREST.SchemaCache.Identifiers (FieldName,
-                                          QualifiedIdentifier (..),
-                                          Schema)
-import PostgREST.SchemaCache.Proc (ProcDescription (..),
-                                   ProcParam (..),
-                                   procReturnsScalar)
-import PostgREST.SchemaCache.Relationship (Cardinality (..),
-                                           Junction (..),
-                                           Relationship (..),
-                                           RelationshipsMap,
-                                           relIsToOne)
-import PostgREST.SchemaCache.Table (Table (tableName),
tablePKCols) + Mutation (..), + Payload (..)) +import PostgREST.Config (AppConfig (..)) +import PostgREST.Error (Error (..)) +import PostgREST.Query.SqlFragment (sourceCTEName) +import PostgREST.RangeQuery (NonnegRange, allRange, + convertToLimitZeroRange, + restrictRange) +import PostgREST.SchemaCache (SchemaCache (..)) +import PostgREST.SchemaCache.Identifiers (FieldName, + QualifiedIdentifier (..), + Schema) +import PostgREST.SchemaCache.Proc (ProcDescription (..), + ProcParam (..), + procReturnsScalar) +import PostgREST.SchemaCache.Relationship (Cardinality (..), + Junction (..), + Relationship (..), + RelationshipsMap, + relIsToOne) +import PostgREST.SchemaCache.Representations (DataRepresentation (..), + RepresentationsMap) +import PostgREST.SchemaCache.Table (Column (..), Table (..), + TablesMap, + tableColumnsList, + tablePKCols) import PostgREST.ApiRequest.Preferences import PostgREST.ApiRequest.Types @@ -108,26 +113,93 @@ readPlanTxMode = SQL.Read inspectPlanTxMode :: SQL.Mode inspectPlanTxMode = SQL.Read +-- | During planning we need to resolve Field -> CoercibleField (finding the context specific target type and map function). +-- | ResolverContext facilitates this without the need to pass around a laundry list of parameters. +data ResolverContext = ResolverContext + { tables :: TablesMap + , representations :: RepresentationsMap + , qi :: QualifiedIdentifier -- ^ The table we're currently attending; changes as we recurse into joins etc. + , outputType :: Text -- ^ The output type for the response payload; e.g. "csv", "json", "binary". + } + +resolveColumnField :: Column -> CoercibleField +resolveColumnField col = CoercibleField (colName col) [] (colNominalType col) Nothing + +resolveTableFieldName :: Table -> FieldName -> CoercibleField +resolveTableFieldName table fieldName = + fromMaybe (unknownField fieldName []) $ HMI.lookup fieldName (tableColumns table) >>= + Just . resolveColumnField + +resolveTableField :: Table -> Field -> CoercibleField +resolveTableField table (fieldName, []) = resolveTableFieldName table fieldName +-- If the field is known and a JSON path is given, always assume the JSON type. But don't assume a type for entirely unknown fields. +resolveTableField table (fieldName, jp) = + case resolveTableFieldName table fieldName of + tf@CoercibleField{tfIRType=""} -> tf{tfJsonPath=jp} + tf -> tf{tfJsonPath=jp, tfIRType="json"} + +-- | Resolve a type within the context based on the given field name and JSON path. Although there are situations where failure to resolve a field is considered an error (see `resolveOrError`), there are also situations where we allow it (RPC calls). If it should be an error and `resolveOrError` doesn't fit, ensure to check the `tfIRType` isn't empty. +resolveTypeOrUnknown :: ResolverContext -> Field -> CoercibleField +resolveTypeOrUnknown ResolverContext{..} field@(fn, jp) = + fromMaybe (unknownField fn jp) $ HM.lookup qi tables >>= + Just . flip resolveTableField field + +-- | Install any pre-defined data representation from source to target to coerce this reference. +-- +-- Note that we change the IR type here. This might seem unintuitive. The short of it is that for a CoercibleField without a transformer, input type == output type. A transformer maps from a -> b, so by definition the input type will be a and the output type b after. And tfIRType is the *input* type. +-- +-- It might feel odd that once a transformer is added we 'forget' the target type (because now a /= b). 
You might also note there's no obvious way to stack transforms (even if there was a stack, you erased what type you're working with so it's awkward). Alas as satisfying as it would be to engineer a layered mapping system with full type information, we just don't need it. +withTransformer :: ResolverContext -> Text -> Text -> CoercibleField -> CoercibleField +withTransformer ResolverContext{representations} sourceType targetType field = + fromMaybe field $ HM.lookup (sourceType, targetType) representations >>= + (\fieldRepresentation -> Just field{tfIRType=sourceType, tfTransform=Just (drFunction fieldRepresentation)}) + +-- | Map the intermediate representation type to the output type, if available. +withOutputFormat :: ResolverContext -> CoercibleField -> CoercibleField +withOutputFormat ctx@ResolverContext{outputType} field@CoercibleField{tfIRType} = withTransformer ctx tfIRType outputType field + +-- | Map text into the intermediate representation type, if available. +withTextParse :: ResolverContext -> CoercibleField -> CoercibleField +withTextParse ctx field@CoercibleField{tfIRType} = withTransformer ctx "text" tfIRType field + +-- | Map json into the intermediate representation type, if available. +withJsonParse :: ResolverContext -> CoercibleField -> CoercibleField +withJsonParse ctx field@CoercibleField{tfIRType} = withTransformer ctx "json" tfIRType field + +-- | Map the intermediate representation type to the output type defined by the resolver context (normally json), if available. +resolveOutputField :: ResolverContext -> Field -> CoercibleField +resolveOutputField ctx field = withOutputFormat ctx $ resolveTypeOrUnknown ctx field + +-- | Map the query string format of a value (text) into the intermediate representation type, if available. +resolveQueryInputField :: ResolverContext -> Field -> CoercibleField +resolveQueryInputField ctx field = withTextParse ctx $ resolveTypeOrUnknown ctx field + -- | Builds the ReadPlan tree on a number of stages. -- | Adds filters, order, limits on its respective nodes. -- | Adds joins conditions obtained from resource embedding. readPlan :: QualifiedIdentifier -> AppConfig -> SchemaCache -> ApiRequest -> Either Error ReadPlanTree -readPlan qi@QualifiedIdentifier{..} AppConfig{configDbMaxRows} SchemaCache{dbRelationships} apiRequest = - mapLeft ApiRequestError $ - treeRestrictRange configDbMaxRows (iAction apiRequest) =<< - addNullEmbedFilters =<< - validateSpreadEmbeds =<< - addRelatedOrders =<< - addRels qiSchema (iAction apiRequest) dbRelationships Nothing =<< - addLogicTrees apiRequest =<< - addRanges apiRequest =<< - addOrders apiRequest =<< - addFilters apiRequest (initReadRequest qi $ QueryParams.qsSelect $ iQueryParams apiRequest) +readPlan qi@QualifiedIdentifier{..} AppConfig{configDbMaxRows} SchemaCache{dbTables, dbRelationships, dbRepresentations} apiRequest = + let + -- JSON output format hardcoded for now. In the future we might want to support other output mappings such as CSV. 
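+    -- For instance (hypothetical names, mirroring the test fixtures): a column of domain type "color" with an
+    -- implicit cast to json backed by a color_to_json function resolves via resolveOutputField to
+    -- CoercibleField{tfIRType="color", tfTransform=Just "color_to_json"}, while resolveQueryInputField would pair
+    -- the same column with the text -> color parser instead, if one is registered.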
+ ctx = ResolverContext dbTables dbRepresentations qi "json" + in + mapLeft ApiRequestError $ + treeRestrictRange configDbMaxRows (iAction apiRequest) =<< + addNullEmbedFilters =<< + validateSpreadEmbeds =<< + addRelatedOrders =<< + addDataRepresentationAliases =<< + expandStarsForDataRepresentations ctx =<< + addRels qiSchema (iAction apiRequest) dbRelationships Nothing =<< + addLogicTrees ctx apiRequest =<< + addRanges apiRequest =<< + addOrders apiRequest =<< + addFilters ctx apiRequest (initReadRequest ctx $ QueryParams.qsSelect $ iQueryParams apiRequest) -- Build the initial read plan tree -initReadRequest :: QualifiedIdentifier -> [Tree SelectItem] -> ReadPlanTree -initReadRequest qi@QualifiedIdentifier{..} = - foldr (treeEntry rootDepth) $ Node defReadPlan{from=qi, relName=qiName, depth=rootDepth} [] +initReadRequest :: ResolverContext -> [Tree SelectItem] -> ReadPlanTree +initReadRequest ctx@ResolverContext{qi=QualifiedIdentifier{..}} = + foldr (treeEntry rootDepth) $ Node defReadPlan{from=qi ctx, relName=qiName, depth=rootDepth} [] where rootDepth = 0 defReadPlan = ReadPlan [] (QualifiedIdentifier mempty mempty) Nothing [] [] allRange mempty Nothing [] Nothing mempty Nothing Nothing False rootDepth @@ -146,7 +218,49 @@ initReadRequest qi@QualifiedIdentifier{..} = (Node defReadPlan{from=QualifiedIdentifier qiSchema selRelation, relName=selRelation, relHint=selHint, relJoinType=selJoinType, depth=nxtDepth, relIsSpread=True} []) fldForest:rForest SelectField{..} -> - Node q{select=(selField, selCast, selAlias):select q} rForest + Node q{select=(resolveOutputField ctx{qi=from q} selField, selCast, selAlias):select q} rForest + +-- | Preserve the original field name if data representation is used to coerce the value. +addDataRepresentationAliases :: ReadPlanTree -> Either ApiRequestError ReadPlanTree +addDataRepresentationAliases rPlanTree = Right $ fmap (\rPlan@ReadPlan{select=sel} -> rPlan{select=map aliasSelectItem sel}) rPlanTree + where + aliasSelectItem :: (CoercibleField, Maybe Cast, Maybe Alias) -> (CoercibleField, Maybe Cast, Maybe Alias) + -- If there already is an alias, don't overwrite it. + aliasSelectItem (fld@(CoercibleField{tfName=fieldName, tfTransform=(Just _)}), Nothing, Nothing) = (fld, Nothing, Just fieldName) + aliasSelectItem fld = fld + +knownColumnsInContext :: ResolverContext -> [Column] +knownColumnsInContext ResolverContext{..} = + fromMaybe [] $ HM.lookup qi tables >>= + Just . tableColumnsList + +-- | Expand "select *" into explicit field names of the table, if necessary to apply data representations. +expandStarsForDataRepresentations :: ResolverContext -> ReadPlanTree -> Either ApiRequestError ReadPlanTree +expandStarsForDataRepresentations ctx@ResolverContext{qi} rPlanTree = Right $ fmap expandStars rPlanTree + where + expandStars :: ReadPlan -> ReadPlan + -- When the schema is "" and the table is the source CTE, we assume the true source table is given in the from + -- alias and belongs to the request schema. See the bit in `addRels` with `newFrom = ...`. 
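+    -- (For example, an update with return=representation reads back through the `pgrst_source` CTE, but `*` must
+    -- still be expanded against the real table's columns, which we recover from that alias.)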
+ expandStars rPlan@ReadPlan{from=(QualifiedIdentifier "" "pgrst_source"), fromAlias=(Just tblAlias)} = + expandStarsForTable ctx{qi=qi{qiName=tblAlias}} rPlan + expandStars rPlan@ReadPlan{from=fromTable} = + expandStarsForTable ctx{qi=fromTable} rPlan + +expandStarsForTable :: ResolverContext -> ReadPlan -> ReadPlan +expandStarsForTable ctx@ResolverContext{representations, outputType} rplan@ReadPlan{select=selectItems} = + -- If we have a '*' select AND the target table has at least one data representation, expand. + if ("*" `elem` map (\(field, _, _) -> tfName field) selectItems) && any hasOutputRep knownColumns + then rplan{select=concatMap (expandStarSelectItem knownColumns) selectItems} + else rplan + where + knownColumns = knownColumnsInContext ctx + + hasOutputRep :: Column -> Bool + hasOutputRep col = HM.member (colNominalType col, outputType) representations + + expandStarSelectItem :: [Column] -> (CoercibleField, Maybe Cast, Maybe Alias) -> [(CoercibleField, Maybe Cast, Maybe Alias)] + expandStarSelectItem columns (CoercibleField{tfName="*", tfJsonPath=[]}, b, c) = map (\col -> (withOutputFormat ctx $ resolveColumnField col, b, c)) columns + expandStarSelectItem _ selectItem = [selectItem] -- | Enforces the `max-rows` config on the result treeRestrictRange :: Maybe Integer -> Action -> ReadPlanTree -> Either ApiRequestError ReadPlanTree @@ -301,8 +415,8 @@ findRel schema allRels origin target hint = ) ) $ fromMaybe mempty $ HM.lookup (QualifiedIdentifier schema origin, schema) allRels -addFilters :: ApiRequest -> ReadPlanTree -> Either ApiRequestError ReadPlanTree -addFilters ApiRequest{..} rReq = +addFilters :: ResolverContext -> ApiRequest -> ReadPlanTree -> Either ApiRequestError ReadPlanTree +addFilters ctx ApiRequest{..} rReq = foldr addFilterToNode (Right rReq) flts where QueryParams.QueryParams{..} = iQueryParams @@ -314,7 +428,7 @@ addFilters ApiRequest{..} rReq = addFilterToNode :: (EmbedPath, Filter) -> Either ApiRequestError ReadPlanTree -> Either ApiRequestError ReadPlanTree addFilterToNode = - updateNode (\flt (Node q@ReadPlan{where_=lf} f) -> Node q{ReadPlan.where_=addFilterToLogicForest flt lf} f) + updateNode (\flt (Node q@ReadPlan{from=fromTable, where_=lf} f) -> Node q{ReadPlan.where_=addFilterToLogicForest (resolveFilter ctx{qi=fromTable} flt) lf} f) addOrders :: ApiRequest -> ReadPlanTree -> Either ApiRequestError ReadPlanTree addOrders ApiRequest{..} rReq = @@ -358,15 +472,15 @@ addNullEmbedFilters (Node rp@ReadPlan{where_=oldLogic} forest) = do newLogic <- getFilters readPlans `traverse` oldLogic Node rp{ReadPlan.where_= newLogic} <$> (addNullEmbedFilters `traverse` forest) where - getFilters :: [ReadPlan] -> LogicTree -> Either ApiRequestError LogicTree - getFilters rPlans (Expr b lOp trees) = Expr b lOp <$> (getFilters rPlans `traverse` trees) - getFilters rPlans flt@(Stmnt (Filter (fld, []) opExpr)) = + getFilters :: [ReadPlan] -> TypedLogicTree -> Either ApiRequestError TypedLogicTree + getFilters rPlans (TypedExpr b lOp trees) = TypedExpr b lOp <$> (getFilters rPlans `traverse` trees) + getFilters rPlans flt@(TypedStmnt (TypedFilter (CoercibleField fld [] _ _) opExpr)) = let foundRP = find (\ReadPlan{relName, relAlias} -> fld == fromMaybe relName relAlias) rPlans in case (foundRP, opExpr) of - (Just ReadPlan{relAggAlias}, OpExpr b (Is TriNull)) -> Right $ Stmnt $ FilterNullEmbed b relAggAlias + (Just ReadPlan{relAggAlias}, OpExpr b (Is TriNull)) -> Right $ TypedStmnt $ TypedFilterNullEmbed b relAggAlias (Just ReadPlan{relName}, _) -> Left $ 
UnacceptableFilter relName _ -> Right flt - getFilters _ flt@(Stmnt _) = Right flt + getFilters _ flt@(TypedStmnt _) = Right flt addRanges :: ApiRequest -> ReadPlanTree -> Either ApiRequestError ReadPlanTree addRanges ApiRequest{..} rReq = @@ -380,14 +494,22 @@ addRanges ApiRequest{..} rReq = addRangeToNode :: (EmbedPath, NonnegRange) -> Either ApiRequestError ReadPlanTree -> Either ApiRequestError ReadPlanTree addRangeToNode = updateNode (\r (Node q f) -> Node q{range_=r} f) -addLogicTrees :: ApiRequest -> ReadPlanTree -> Either ApiRequestError ReadPlanTree -addLogicTrees ApiRequest{..} rReq = +addLogicTrees :: ResolverContext -> ApiRequest -> ReadPlanTree -> Either ApiRequestError ReadPlanTree +addLogicTrees ctx ApiRequest{..} rReq = foldr addLogicTreeToNode (Right rReq) qsLogic where QueryParams.QueryParams{..} = iQueryParams addLogicTreeToNode :: (EmbedPath, LogicTree) -> Either ApiRequestError ReadPlanTree -> Either ApiRequestError ReadPlanTree - addLogicTreeToNode = updateNode (\t (Node q@ReadPlan{where_=lf} f) -> Node q{ReadPlan.where_=t:lf} f) + addLogicTreeToNode = updateNode (\t (Node q@ReadPlan{from=fromTable, where_=lf} f) -> Node q{ReadPlan.where_=resolveLogicTree ctx{qi=fromTable} t:lf} f) + +resolveLogicTree :: ResolverContext -> LogicTree -> TypedLogicTree +resolveLogicTree ctx (Stmnt flt) = TypedStmnt $ resolveFilter ctx flt +resolveLogicTree ctx (Expr b op lts) = TypedExpr b op (map (resolveLogicTree ctx) lts) + +resolveFilter :: ResolverContext -> Filter -> TypedFilter +resolveFilter ctx (Filter fld opExpr) = TypedFilter{typedField=resolveQueryInputField ctx fld, typedOpExpr=opExpr} +resolveFilter _ (FilterNullEmbed isNot fieldName) = TypedFilterNullEmbed isNot fieldName -- Validates that spread embeds are only done on to-one relationships validateSpreadEmbeds :: ReadPlanTree -> Either ApiRequestError ReadPlanTree @@ -413,7 +535,7 @@ updateNode f (targetNodeName:remainingPath, a) (Right (Node rootNode forest)) = findNode = find (\(Node ReadPlan{relName, relAlias} _) -> relName == targetNodeName || relAlias == Just targetNodeName) forest mutatePlan :: Mutation -> QualifiedIdentifier -> ApiRequest -> SchemaCache -> ReadPlanTree -> Either Error MutatePlan -mutatePlan mutation qi ApiRequest{..} sCache readReq = mapLeft ApiRequestError $ +mutatePlan mutation qi ApiRequest{..} SchemaCache{dbTables, dbRepresentations} readReq = mapLeft ApiRequestError $ case mutation of MutationCreate -> mapRight (\typedColumns -> Insert qi typedColumns body ((,) <$> iPreferResolution <*> Just confCols) [] returnings pkCols) typedColumnsOrError @@ -431,26 +553,27 @@ mutatePlan mutation qi ApiRequest{..} sCache readReq = mapLeft ApiRequestError $ Left InvalidFilters MutationDelete -> Right $ Delete qi combinedLogic iTopLevelRange rootOrder returnings where + ctx = ResolverContext dbTables dbRepresentations qi "json" confCols = fromMaybe pkCols qsOnConflict QueryParams.QueryParams{..} = iQueryParams returnings = if iPreferRepresentation == None then [] else inferColsEmbedNeeds readReq pkCols - pkCols = maybe mempty tablePKCols $ HM.lookup qi $ dbTables sCache - logic = map snd qsLogic + tbl = HM.lookup qi dbTables + pkCols = maybe mempty tablePKCols tbl + logic = map (resolveLogicTree ctx . snd) qsLogic rootOrder = maybe [] snd $ find (\(x, _) -> null x) qsOrder - combinedLogic = foldr addFilterToLogicForest logic qsFiltersRoot + combinedLogic = foldr (addFilterToLogicForest . 
resolveFilter ctx) logic qsFiltersRoot body = payRaw <$> iPayload -- the body is assumed to be json at this stage(ApiRequest validates) - tbl = HM.lookup qi $ dbTables sCache - typedColumnsOrError = resolveOrError tbl `traverse` S.toList iColumns + typedColumnsOrError = resolveOrError ctx tbl `traverse` S.toList iColumns -resolveOrError :: Maybe Table -> FieldName -> Either ApiRequestError TypedField -resolveOrError Nothing _ = Left NotFound -resolveOrError (Just table) field = - case resolveTableField table field of - Nothing -> Left $ ColumnNotFound (tableName table) field - Just typedField -> Right typedField +resolveOrError :: ResolverContext -> Maybe Table -> FieldName -> Either ApiRequestError CoercibleField +resolveOrError _ Nothing _ = Left NotFound +resolveOrError ctx (Just table) field = + case resolveTableFieldName table field of + CoercibleField{tfIRType=""} -> Left $ ColumnNotFound (tableName table) field + cf -> Right $ withJsonParse ctx cf callPlan :: ProcDescription -> ApiRequest -> ReadPlanTree -> CallPlan callPlan proc apiReq readReq = FunctionCall { @@ -478,7 +601,7 @@ inferColsEmbedNeeds (Node ReadPlan{select} forest) pkCols | "*" `elem` fldNames = ["*"] | otherwise = returnings where - fldNames = (\((fld, _), _, _) -> fld) <$> select + fldNames = tfName . (\(f, _, _) -> f) <$> select -- Without fkCols, when a mutatePlan to -- /projects?select=name,clients(name) occurs, the RETURNING SQL part would -- be `RETURNING name`(see QueryBuilder). This would make the embedding @@ -517,5 +640,5 @@ inferColsEmbedNeeds (Node ReadPlan{select} forest) pkCols -- Traditional filters(e.g. id=eq.1) are added as root nodes of the LogicTree -- they are later concatenated with AND in the QueryBuilder -addFilterToLogicForest :: Filter -> [LogicTree] -> [LogicTree] -addFilterToLogicForest flt lf = Stmnt flt : lf +addFilterToLogicForest :: TypedFilter -> [TypedLogicTree] -> [TypedLogicTree] +addFilterToLogicForest flt lf = TypedStmnt flt : lf diff --git a/src/PostgREST/Plan/MutatePlan.hs b/src/PostgREST/Plan/MutatePlan.hs index 0c9eaa0eb1..9faf259bbe 100644 --- a/src/PostgREST/Plan/MutatePlan.hs +++ b/src/PostgREST/Plan/MutatePlan.hs @@ -6,8 +6,9 @@ where import qualified Data.ByteString.Lazy as LBS import PostgREST.ApiRequest.Preferences (PreferResolution) -import PostgREST.ApiRequest.Types (LogicTree, OrderTerm) -import PostgREST.Plan.Types (TypedField) +import PostgREST.ApiRequest.Types (OrderTerm) +import PostgREST.Plan.Types (CoercibleField, + TypedLogicTree) import PostgREST.RangeQuery (NonnegRange) import PostgREST.SchemaCache.Identifiers (FieldName, QualifiedIdentifier) @@ -18,25 +19,25 @@ import Protolude data MutatePlan = Insert { in_ :: QualifiedIdentifier - , insCols :: [TypedField] + , insCols :: [CoercibleField] , insBody :: Maybe LBS.ByteString , onConflict :: Maybe (PreferResolution, [FieldName]) - , where_ :: [LogicTree] + , where_ :: [TypedLogicTree] , returning :: [FieldName] , insPkCols :: [FieldName] } | Update { in_ :: QualifiedIdentifier - , updCols :: [TypedField] + , updCols :: [CoercibleField] , updBody :: Maybe LBS.ByteString - , where_ :: [LogicTree] + , where_ :: [TypedLogicTree] , mutRange :: NonnegRange , mutOrder :: [OrderTerm] , returning :: [FieldName] } | Delete { in_ :: QualifiedIdentifier - , where_ :: [LogicTree] + , where_ :: [TypedLogicTree] , mutRange :: NonnegRange , mutOrder :: [OrderTerm] , returning :: [FieldName] diff --git a/src/PostgREST/Plan/ReadPlan.hs b/src/PostgREST/Plan/ReadPlan.hs index 94e181ca2b..474a3578ed 100644 --- 
a/src/PostgREST/Plan/ReadPlan.hs
+++ b/src/PostgREST/Plan/ReadPlan.hs
@@ -6,9 +6,11 @@ module PostgREST.Plan.ReadPlan
 
 import Data.Tree (Tree (..))
 
-import PostgREST.ApiRequest.Types (Alias, Cast, Depth, Field,
-                                   Hint, JoinType, LogicTree,
-                                   NodeName, OrderTerm)
+import PostgREST.ApiRequest.Types (Alias, Cast, Depth, Hint,
+                                   JoinType, NodeName,
+                                   OrderTerm)
+import PostgREST.Plan.Types (CoercibleField (..),
+                             TypedLogicTree)
 import PostgREST.RangeQuery (NonnegRange)
 import PostgREST.SchemaCache.Identifiers (FieldName,
                                           QualifiedIdentifier)
@@ -26,10 +28,10 @@ data JoinCondition =
   deriving (Eq)
 
 data ReadPlan = ReadPlan
-  { select :: [(Field, Maybe Cast, Maybe Alias)]
+  { select :: [(CoercibleField, Maybe Cast, Maybe Alias)]
   , from :: QualifiedIdentifier
   , fromAlias :: Maybe Alias
-  , where_ :: [LogicTree]
+  , where_ :: [TypedLogicTree]
   , order :: [OrderTerm]
   , range_ :: NonnegRange
   , relName :: NodeName
diff --git a/src/PostgREST/Plan/Types.hs b/src/PostgREST/Plan/Types.hs
index 8e4a41f156..3b472bf956 100644
--- a/src/PostgREST/Plan/Types.hs
+++ b/src/PostgREST/Plan/Types.hs
@@ -1,24 +1,49 @@
 module PostgREST.Plan.Types
-  ( TypedField(..)
-  , resolveTableField
-
+  ( CoercibleField(..)
+  , unknownField
+  , TypedLogicTree(..)
+  , TypedFilter(..)
+  , TransformerProc
   ) where
 
-import qualified Data.HashMap.Strict.InsOrd as HMI
+import PostgREST.ApiRequest.Types (JsonPath, LogicOperator, OpExpr)
 
 import PostgREST.SchemaCache.Identifiers (FieldName)
-import PostgREST.SchemaCache.Table (Column (..), Table (..))
 
 import Protolude
 
--- | A TypedField is a field with sufficient information to be read from JSON with `json_to_recordset`.
-data TypedField = TypedField
-  { tfName :: FieldName
-  , tfIRType :: Text -- ^ The initial type of the field, before any casting.
-  } deriving (Eq)
-
-resolveTableField :: Table -> FieldName -> Maybe TypedField
-resolveTableField table fieldName =
-  case HMI.lookup fieldName (tableColumns table) of
-    Just column -> Just $ TypedField (colName column) (colNominalType column)
-    Nothing -> Nothing
+type TransformerProc = Text
+
+-- | A CoercibleField pairs the name of a query element with any type coercion information we need for some specific use case.
+-- |
+-- | As suggested by the name, it's often a reference to a field in a table, but really it can be any nameable element (function parameter, calculation with an alias, etc.) with a knowable type.
+-- |
+-- | In the simplest case, it allows us to parse JSON payloads with `json_to_recordset`, for which we need to know both the name and the type of each thing we'd like to extract. At a higher level, CoercibleField generalises to reflect that any value we work with in a query may need type-specific handling.
+-- |
+-- | CoercibleField is the foundation for the Data Representations feature. This feature allows user-definable mappings between database types so that the same data can be presented or interpreted in various ways as needed. Sometimes the way Postgres coerces data implicitly isn't right for the job. Different mappings might be appropriate for different situations: parsing a filter from a query string requires one function (text -> field type) while parsing a payload from JSON takes another (json -> field type). And the reverse, outputting a field as JSON, requires yet a third (field type -> json). CoercibleField is that "job specific" reference to an element paired with the type we desire for that particular purpose and the function we'll use to get there, if any.
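+-- |
+-- | To make that concrete, a sketch (the domain and function names here are hypothetical, not part of this patch):
+-- |
+-- | >  CREATE DOMAIN color AS text CHECK (VALUE ~ '^#[0-9A-F]{6}$');
+-- | >  CREATE CAST (text AS color) WITH FUNCTION parse_color(text) AS IMPLICIT;
+-- | >  CREATE CAST (color AS json) WITH FUNCTION color_to_json(color) AS IMPLICIT;
+-- |
+-- | A `label_color color` column referenced in a filter then resolves to a CoercibleField with `tfTransform = Just "parse_color"`, while the same column selected for output resolves to one with `tfTransform = Just "color_to_json"`.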
+-- | +-- | In the planning phase, we "resolve" generic named elements into these specialised CoercibleFields. Again this is context specific: two different CoercibleFields both representing the exact same table column in the database, even in the same query, might have two different target types and mapping functions. For example, one might represent a column in a filter, and another the very same column in an output role to be sent in the response body. +-- | +-- | The type value is allowed to be the empty string. The analog here is soft type checking in programming languages: sometimes we don't need a variable to have a specified type and things will work anyhow. So the empty type variant is valid when we don't know and *don't need to know* about the specific type in some context. Note that this variation should not be used if it guarantees failure: in that case you should instead raise an error at the planning stage and bail out. For example, we can't parse JSON with `json_to_recordset` without knowing the types of each recipient field, and so error out. Using the empty string for the type would be incorrect and futile. On the other hand we use the empty type for RPC calls since type resolution isn't implemented for RPC, but it's fine because the query still works with Postgres' implicit coercion. In the future, hopefully we will support data representations across the board and then the empty type may be permanently retired. +data CoercibleField = CoercibleField + { tfName :: FieldName + , tfJsonPath :: JsonPath + , tfIRType :: Text -- ^ The native Postgres type of the field, the type before mapping. + , tfTransform :: Maybe TransformerProc -- ^ The optional mapping from irType -> targetType. + } deriving (Eq) + +unknownField :: FieldName -> JsonPath -> CoercibleField +unknownField name path = CoercibleField name path "" Nothing + +-- | Like a regular LogicTree but with field type information. 
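+-- |
+-- | For example, the untyped `Stmnt (Filter ("label_color", []) opExpr)` parsed from a query string resolves to
+-- | `TypedStmnt (TypedFilter (CoercibleField "label_color" [] "text" (Just "parse_color")) opExpr)` when a text
+-- | parser is registered for the column's type (names illustrative; see `resolveFilter` in the planner).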
+data TypedLogicTree + = TypedExpr Bool LogicOperator [TypedLogicTree] + | TypedStmnt TypedFilter + deriving (Eq) + +data TypedFilter = TypedFilter + { typedField :: CoercibleField + , typedOpExpr :: OpExpr + } + | TypedFilterNullEmbed Bool FieldName + deriving (Eq) diff --git a/src/PostgREST/Query/QueryBuilder.hs b/src/PostgREST/Query/QueryBuilder.hs index c23f2061d9..29d93d6cd4 100644 --- a/src/PostgREST/Query/QueryBuilder.hs +++ b/src/PostgREST/Query/QueryBuilder.hs @@ -53,7 +53,7 @@ readPlanToQuery (Node ReadPlan{select,from=mainQi,fromAlias,where_=logicForest,o where fromFrag = fromF relToParent mainQi fromAlias qi = getQualifiedIdentifier relToParent mainQi fromAlias - defSelect = [(("*", []), Nothing, Nothing)] -- gets all the columns in case of an empty select, ignoring/obtaining these columns is done at the aggregation stage + defSelect = [(unknownField "*" [], Nothing, Nothing)] -- gets all the columns in case of an empty select, ignoring/obtaining these columns is done at the aggregation stage (selects, joins) = foldr getSelectsJoins ([],[]) forest getSelectsJoins :: ReadPlanTree -> ([SQL.Snippet], [SQL.Snippet]) -> ([SQL.Snippet], [SQL.Snippet]) diff --git a/src/PostgREST/Query/SqlFragment.hs b/src/PostgREST/Query/SqlFragment.hs index 4548911a5a..b772e628de 100644 --- a/src/PostgREST/Query/SqlFragment.hs +++ b/src/PostgREST/Query/SqlFragment.hs @@ -58,15 +58,13 @@ import Control.Arrow ((***)) import Data.Foldable (foldr1) import Text.InterpolatedString.Perl6 (qc) -import PostgREST.ApiRequest.Types (Alias, Cast, Field, - Filter (..), +import PostgREST.ApiRequest.Types (Alias, Cast, FtsOperator (..), JsonOperand (..), JsonOperation (..), JsonPath, LogicOperator (..), - LogicTree (..), OpExpr (..), - Operation (..), + OpExpr (..), Operation (..), OrderDirection (..), OrderNulls (..), OrderTerm (..), @@ -75,7 +73,10 @@ import PostgREST.ApiRequest.Types (Alias, Cast, Field, import PostgREST.MediaType (MTPlanFormat (..), MTPlanOption (..)) import PostgREST.Plan.ReadPlan (JoinCondition (..)) -import PostgREST.Plan.Types (TypedField (..)) +import PostgREST.Plan.Types (CoercibleField (..), + TypedFilter (..), + TypedLogicTree (..), + unknownField) import PostgREST.RangeQuery (NonnegRange, allRange, rangeLimit, rangeOffset) import PostgREST.SchemaCache.Identifiers (FieldName, @@ -227,24 +228,37 @@ fromQi t = (if T.null s then mempty else pgFmtIdent s <> ".") <> pgFmtIdent n n = qiName t s = qiSchema t +pgFmtCallUnary :: Text -> SQL.Snippet -> SQL.Snippet +pgFmtCallUnary f x = SQL.sql (encodeUtf8 f) <> "(" <> x <> ")" + pgFmtColumn :: QualifiedIdentifier -> Text -> SqlFragment pgFmtColumn table "*" = fromQi table <> ".*" pgFmtColumn table c = fromQi table <> "." <> pgFmtIdent c -pgFmtField :: QualifiedIdentifier -> Field -> SQL.Snippet -pgFmtField table (c, []) = SQL.sql (pgFmtColumn table c) +pgFmtField :: QualifiedIdentifier -> CoercibleField -> SQL.Snippet +pgFmtField table CoercibleField{tfName=fn, tfJsonPath=[]} = SQL.sql (pgFmtColumn table fn) -- Using to_jsonb instead of to_json to avoid missing operator errors when filtering: -- "operator does not exist: json = unknown" -pgFmtField table (c, jp) = SQL.sql ("to_jsonb(" <> pgFmtColumn table c <> ")") <> pgFmtJsonPath jp +pgFmtField table CoercibleField{tfName=fn, tfJsonPath=jp} = SQL.sql ("to_jsonb(" <> pgFmtColumn table fn <> ")") <> pgFmtJsonPath jp + +-- Select the value of a named element from a table, applying its optional coercion mapping if any. 
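+-- Without a transformer this is just pgFmtField, e.g. "table"."label_color"; with one, the column reference is
+-- wrapped in the mapping function, e.g. color_to_json("table"."label_color") (function name illustrative).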
+pgFmtTableCoerce :: QualifiedIdentifier -> CoercibleField -> SQL.Snippet +pgFmtTableCoerce table fld@(CoercibleField{tfTransform=(Just formatterProc)}) = pgFmtCallUnary formatterProc (pgFmtField table fld) +pgFmtTableCoerce table f = pgFmtField table f -pgFmtSelectItem :: QualifiedIdentifier -> (Field, Maybe Cast, Maybe Alias) -> SQL.Snippet -pgFmtSelectItem table (f@(fName, jp), Nothing, alias) = pgFmtField table f <> SQL.sql (pgFmtAs fName jp alias) +-- | Like the previous but now we just have a name so no namespace or JSON paths. +pgFmtCoerceNamed :: CoercibleField -> SQL.Snippet +pgFmtCoerceNamed CoercibleField{tfName=fn, tfTransform=(Just formatterProc)} = pgFmtCallUnary formatterProc (SQL.sql (pgFmtIdent fn)) <> " AS " <> SQL.sql (pgFmtIdent fn) +pgFmtCoerceNamed CoercibleField{tfName=fn} = SQL.sql (pgFmtIdent fn) + +pgFmtSelectItem :: QualifiedIdentifier -> (CoercibleField, Maybe Cast, Maybe Alias) -> SQL.Snippet +pgFmtSelectItem table (fld, Nothing, alias) = pgFmtTableCoerce table fld <> SQL.sql (pgFmtAs (tfName fld) (tfJsonPath fld) alias) -- Ideally we'd quote the cast with "pgFmtIdent cast". However, that would invalidate common casts such as "int", "bigint", etc. -- Try doing: `select 1::"bigint"` - it'll err, using "int8" will work though. There's some parser magic that pg does that's invalidated when quoting. -- Not quoting should be fine, we validate the input on Parsers. -pgFmtSelectItem table (f@(fName, jp), Just cast, alias) = "CAST (" <> pgFmtField table f <> " AS " <> SQL.sql (encodeUtf8 cast) <> " )" <> SQL.sql (pgFmtAs fName jp alias) +pgFmtSelectItem table (fld, Just cast, alias) = "CAST (" <> pgFmtTableCoerce table fld <> " AS " <> SQL.sql (encodeUtf8 cast) <> " )" <> SQL.sql (pgFmtAs (tfName fld) (tfJsonPath fld) alias) -pgFmtSelectFromJson :: [TypedField] -> SQL.Snippet +pgFmtSelectFromJson :: [CoercibleField] -> SQL.Snippet pgFmtSelectFromJson fields = SQL.sql "SELECT " <> parsedCols <> " " <> (if null fields @@ -255,7 +269,7 @@ pgFmtSelectFromJson fields = else SQL.sql ("FROM json_to_recordset (" <> selectBody <> ") AS _ " <> "(" <> typedCols <> ") ") ) where - parsedCols = SQL.sql $ BS.intercalate ", " $ pgFmtIdent . tfName <$> fields + parsedCols = intercalateSnippet ", " $ pgFmtCoerceNamed <$> fields typedCols = BS.intercalate ", " $ pgFmtIdent . tfName <> const " " <> encodeUtf8 . tfIRType <$> fields pgFmtOrderTerm :: QualifiedIdentifier -> OrderTerm -> SQL.Snippet @@ -266,8 +280,8 @@ pgFmtOrderTerm qi ot = maybe mempty nullOrder $ otNullOrder ot]) where fmtOTerm = \case - OrderTerm{otTerm} -> pgFmtField qi otTerm - OrderRelationTerm{otRelation, otRelTerm} -> pgFmtField (QualifiedIdentifier mempty otRelation) otRelTerm + OrderTerm{otTerm=(fn, jp)} -> pgFmtField qi (unknownField fn jp) + OrderRelationTerm{otRelation, otRelTerm=(fn, jp)} -> pgFmtField (QualifiedIdentifier mempty otRelation) (unknownField fn jp) direction OrderAsc = "ASC" direction OrderDesc = "DESC" @@ -275,15 +289,26 @@ pgFmtOrderTerm qi ot = nullOrder OrderNullsFirst = "NULLS FIRST" nullOrder OrderNullsLast = "NULLS LAST" - -pgFmtFilter :: QualifiedIdentifier -> Filter -> SQL.Snippet -pgFmtFilter _ (FilterNullEmbed hasNot fld) = SQL.sql (pgFmtIdent fld) <> " IS " <> (if hasNot then "NOT" else mempty) <> " NULL" -pgFmtFilter _ (Filter _ (NoOpExpr _)) = mempty -- TODO unreachable because NoOpExpr is filtered on QueryParams -pgFmtFilter table (Filter fld (OpExpr hasNot oper)) = notOp <> " " <> case oper of +-- | Interpret a literal in the way the planner indicated through the CoercibleField. 
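+-- | E.g. with a registered text parser, a filter like `label_color=eq.000100` renders its right-hand side as
+-- | `parse_color('000100')` rather than as a plain unknown literal (parser name illustrative).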
+pgFmtUnknownLiteralForField :: SQL.Snippet -> CoercibleField -> SQL.Snippet +pgFmtUnknownLiteralForField value CoercibleField{tfTransform=(Just parserProc)} = pgFmtCallUnary parserProc value +-- But when no transform is requested, we just use the literal as-is. +pgFmtUnknownLiteralForField value _ = value + +-- | Array version of the above, used by ANY(). +pgFmtArrayLiteralForField :: [Text] -> CoercibleField -> SQL.Snippet +pgFmtArrayLiteralForField values CoercibleField{tfTransform=(Just parserProc)} = SQL.sql "ARRAY[" <> intercalateSnippet ", " (pgFmtCallUnary parserProc . unknownLiteral <$> values) <> "]" +-- When no transformation is requested, use an array literal which should be simpler, maybe faster. +pgFmtArrayLiteralForField values _ = unknownLiteral (pgBuildArrayLiteral values) + +pgFmtFilter :: QualifiedIdentifier -> TypedFilter -> SQL.Snippet +pgFmtFilter _ (TypedFilterNullEmbed hasNot fld) = SQL.sql (pgFmtIdent fld) <> " IS " <> (if hasNot then "NOT" else mempty) <> " NULL" +pgFmtFilter _ (TypedFilter _ (NoOpExpr _)) = mempty -- TODO unreachable because NoOpExpr is filtered on QueryParams +pgFmtFilter table (TypedFilter fld (OpExpr hasNot oper)) = notOp <> " " <> case oper of Op op val -> pgFmtFieldOp op <> " " <> case op of OpLike -> unknownLiteral (T.map star val) OpILike -> unknownLiteral (T.map star val) - _ -> unknownLiteral val + _ -> pgFmtUnknownLiteralForField (unknownLiteral val) fld -- IS cannot be prepared. `PREPARE boolplan AS SELECT * FROM projects where id IS $1` will give a syntax error. -- The above can be fixed by using `PREPARE boolplan AS SELECT * FROM projects where id IS NOT DISTINCT FROM $1;` @@ -300,7 +325,7 @@ pgFmtFilter table (Filter fld (OpExpr hasNot oper)) = notOp <> " " <> case oper -- + Can invalidate prepared statements: multiple parameters on an IN($1, $2, $3) will lead to using different prepared statements and not take advantage of caching. 
In vals -> pgFmtField table fld <> " " <> case vals of [""] -> "= ANY('{}') " - _ -> "= ANY (" <> unknownLiteral (pgBuildArrayLiteral vals) <> ") " + _ -> "= ANY (" <> pgFmtArrayLiteralForField vals fld <> ") " Fts op lang val -> pgFmtFieldFts op <> "(" <> ftsLang lang <> unknownLiteral val <> ") " @@ -315,14 +340,14 @@ pgFmtJoinCondition :: JoinCondition -> SQL.Snippet pgFmtJoinCondition (JoinCondition (qi1, col1) (qi2, col2)) = SQL.sql $ pgFmtColumn qi1 col1 <> " = " <> pgFmtColumn qi2 col2 -pgFmtLogicTree :: QualifiedIdentifier -> LogicTree -> SQL.Snippet -pgFmtLogicTree qi (Expr hasNot op forest) = SQL.sql notOp <> " (" <> intercalateSnippet (opSql op) (pgFmtLogicTree qi <$> forest) <> ")" +pgFmtLogicTree :: QualifiedIdentifier -> TypedLogicTree -> SQL.Snippet +pgFmtLogicTree qi (TypedExpr hasNot op forest) = SQL.sql notOp <> " (" <> intercalateSnippet (opSql op) (pgFmtLogicTree qi <$> forest) <> ")" where notOp = if hasNot then "NOT" else mempty opSql And = " AND " opSql Or = " OR " -pgFmtLogicTree qi (Stmnt flt) = pgFmtFilter qi flt +pgFmtLogicTree qi (TypedStmnt flt) = pgFmtFilter qi flt pgFmtJsonPath :: JsonPath -> SQL.Snippet pgFmtJsonPath = \case diff --git a/src/PostgREST/SchemaCache.hs b/src/PostgREST/SchemaCache.hs index 14cd9428ed..41db41970c 100644 --- a/src/PostgREST/SchemaCache.hs +++ b/src/PostgREST/SchemaCache.hs @@ -38,31 +38,34 @@ import qualified Hasql.Transaction as SQL import Contravariant.Extras (contrazip2) import Text.InterpolatedString.Perl6 (q) -import PostgREST.Config.Database (pgVersionStatement) -import PostgREST.Config.PgVersion (PgVersion, pgVersion100, - pgVersion110) -import PostgREST.SchemaCache.Identifiers (AccessSet, FieldName, - QualifiedIdentifier (..), - Schema) -import PostgREST.SchemaCache.Proc (PgType (..), - ProcDescription (..), - ProcParam (..), - ProcVolatility (..), - ProcsMap, RetType (..)) -import PostgREST.SchemaCache.Relationship (Cardinality (..), - Junction (..), - Relationship (..), - RelationshipsMap) -import PostgREST.SchemaCache.Table (Column (..), ColumnMap, - Table (..), TablesMap) +import PostgREST.Config.Database (pgVersionStatement) +import PostgREST.Config.PgVersion (PgVersion, pgVersion100, + pgVersion110) +import PostgREST.SchemaCache.Identifiers (AccessSet, FieldName, + QualifiedIdentifier (..), + Schema) +import PostgREST.SchemaCache.Proc (PgType (..), + ProcDescription (..), + ProcParam (..), + ProcVolatility (..), + ProcsMap, RetType (..)) +import PostgREST.SchemaCache.Relationship (Cardinality (..), + Junction (..), + Relationship (..), + RelationshipsMap) +import PostgREST.SchemaCache.Representations (DataRepresentation (..), + RepresentationsMap) +import PostgREST.SchemaCache.Table (Column (..), ColumnMap, + Table (..), TablesMap) import Protolude data SchemaCache = SchemaCache - { dbTables :: TablesMap - , dbRelationships :: RelationshipsMap - , dbProcs :: ProcsMap + { dbTables :: TablesMap + , dbRelationships :: RelationshipsMap + , dbProcs :: ProcsMap + , dbRepresentations :: RepresentationsMap } deriving (Generic, JSON.ToJSON) @@ -113,6 +116,7 @@ querySchemaCache schemas extraSearchPath prepared = do m2oRels <- SQL.statement mempty $ allM2OandO2ORels pgVer prepared procs <- SQL.statement schemas $ allProcs pgVer prepared cRels <- SQL.statement mempty $ allComputedRels prepared + reps <- SQL.statement schemas $ dataRepresentations prepared let tabsWViewsPks = addViewPrimaryKeys tabs keyDeps rels = addInverseRels $ addM2MRels tabsWViewsPks $ addViewM2OAndO2ORels keyDeps m2oRels @@ -121,6 +125,7 @@ querySchemaCache 
schemas extraSearchPath prepared = do dbTables = tabsWViewsPks , dbRelationships = getOverrideRelationshipsMap rels cRels , dbProcs = procs + , dbRepresentations = reps } -- | overrides detected relationships with the computed relationships and gets the RelationshipsMap @@ -147,10 +152,11 @@ getOverrideRelationshipsMap rels cRels = removeInternal :: [Schema] -> SchemaCache -> SchemaCache removeInternal schemas dbStruct = SchemaCache { - dbTables = HM.filterWithKey (\(QualifiedIdentifier sch _) _ -> sch `elem` schemas) $ dbTables dbStruct - , dbRelationships = filter (\r -> qiSchema (relForeignTable r) `elem` schemas && not (hasInternalJunction r)) <$> + dbTables = HM.filterWithKey (\(QualifiedIdentifier sch _) _ -> sch `elem` schemas) $ dbTables dbStruct + , dbRelationships = filter (\r -> qiSchema (relForeignTable r) `elem` schemas && not (hasInternalJunction r)) <$> HM.filterWithKey (\(QualifiedIdentifier sch _, _) _ -> sch `elem` schemas ) (dbRelationships dbStruct) - , dbProcs = dbProcs dbStruct -- procs are only obtained from the exposed schemas, no need to filter them. + , dbProcs = dbProcs dbStruct -- procs are only obtained from the exposed schemas, no need to filter them. + , dbRepresentations = dbRepresentations dbStruct -- no need to filter, not directly exposed through the API } where hasInternalJunction ComputedRelationship{} = False @@ -271,6 +277,42 @@ decodeProcs = | v == 's' = Stable | otherwise = Volatile -- only 'v' can happen here +decodeRepresentations :: HD.Result RepresentationsMap +decodeRepresentations = + HM.fromList . map (\rep@DataRepresentation{drSourceType, drTargetType} -> ((drSourceType, drTargetType), rep)) <$> HD.rowList row + where + row = DataRepresentation + <$> column HD.text + <*> column HD.text + <*> column HD.text + +-- Selects all potential data representation transformations. To qualify the cast must be +-- 1. to or from a domain +-- 2. implicit +-- For the time being it must also be to/from JSON or text, although one can imagine a future where we support special +-- cases like CSV specific representations. +dataRepresentations :: Bool -> SQL.Statement [Schema] RepresentationsMap +dataRepresentations = SQL.Statement sql (arrayParam HE.text) decodeRepresentations + where + sql = [q| + SELECT + c.castsource::regtype::text, + c.casttarget::regtype::text, + c.castfunc::regproc::text + FROM + pg_catalog.pg_cast c + JOIN pg_catalog.pg_type src_t + ON c.castsource::oid = src_t.oid + JOIN pg_catalog.pg_type dst_t + ON c.casttarget::oid = dst_t.oid + WHERE + c.castcontext = 'i' + AND c.castmethod = 'f' + AND has_function_privilege(c.castfunc, 'execute') + AND ((src_t.typtype = 'd' AND c.casttarget IN ('json'::regtype::oid , 'text'::regtype::oid)) + OR (dst_t.typtype = 'd' AND c.castsource IN ('json'::regtype::oid , 'text'::regtype::oid))) + |] + allProcs :: PgVersion -> Bool -> SQL.Statement [Schema] ProcsMap allProcs pgVer = SQL.Statement sql (arrayParam HE.text) decodeProcs where diff --git a/src/PostgREST/SchemaCache/Representations.hs b/src/PostgREST/SchemaCache/Representations.hs new file mode 100644 index 0000000000..027365f6df --- /dev/null +++ b/src/PostgREST/SchemaCache/Representations.hs @@ -0,0 +1,29 @@ +{-# LANGUAGE DeriveAnyClass #-} +{-# LANGUAGE DeriveGeneric #-} + +module PostgREST.SchemaCache.Representations + ( DataRepresentation(..) 
+ , RepresentationsMap + ) where + +import qualified Data.Aeson as JSON +import qualified Data.HashMap.Strict as HM + + +import Protolude + +-- | Data representations allow user customisation of how to present and receive data through APIs, per field. +-- This structure is used for the library of available transforms. It answers questions like: +-- - What function, if any, should be used to present a certain field that's been selected for API output? +-- - How do we parse incoming data for a certain field type when inserting or updating? +-- - And similarly, how do we parse textual data in a query string to be used as a filter? +-- +-- Support for outputting special formats like CSV and binary data would fit into the same system. +data DataRepresentation = DataRepresentation + { drSourceType :: Text + , drTargetType :: Text + , drFunction :: Text + } deriving (Eq, Show, Generic, JSON.ToJSON, JSON.FromJSON) + +-- The representation map maps from (source type, target type) to a DR. +type RepresentationsMap = HM.HashMap (Text, Text) DataRepresentation diff --git a/test/spec/Feature/Query/ComputedRelsSpec.hs b/test/spec/Feature/Query/ComputedRelsSpec.hs index ea66ad4374..cba71e68b6 100644 --- a/test/spec/Feature/Query/ComputedRelsSpec.hs +++ b/test/spec/Feature/Query/ComputedRelsSpec.hs @@ -104,6 +104,42 @@ spec = describe "computed relationships" $ do [json|[ {"name":"Final Fantasy I","designer":{"name":"Hironobu Sakaguchi"}} ]|] { matchStatus = 200 } + it "applies data representations to response" $ do + -- A smoke test for data reps in the presence of computed relations. + + -- The data rep here title cases the designer name before presentation. So here the lowercase version will be saved, + -- but the title case version returned. Pulling in a computed relation should not confuse this. + request methodPatch "/designers?select=name,videogames:computed_videogames(name)&id=eq.1" + [("Prefer", "return=representation"), ("Prefer", "tx=commit")] + [json| {"name": "sidney k. meier"} |] + `shouldRespondWith` + [json|[{"name":"Sidney K. Meier","videogames":[{"name":"Civilization I"}, {"name":"Civilization II"}]}]|] + { matchStatus = 200 } + + -- Verify it was saved the way we requested (there's no text data rep for this column, so if we select with the wrong casing, it should fail.) + get "/designers?select=id&name=eq.Sidney%20K.%20Meier" + `shouldRespondWith` + [json|[]|] + { matchStatus = 200, matchHeaders = [matchContentTypeJson] } + -- But with the right casing it works. + get "/designers?select=id,name&name=eq.sidney%20k.%20meier" + `shouldRespondWith` + [json|[{"id": 1, "name":"Sidney K. Meier"}]|] + { matchStatus = 200, matchHeaders = [matchContentTypeJson] } + + -- Most importantly, if you read it back even via a computed relation, the data rep should be applied. + get "/videogames?select=name,designer:computed_designers(*)&id=eq.1" + `shouldRespondWith` + [json|[ + {"name":"Civilization I","designer":{"id": 1, "name":"Sidney K. 
Meier"}} + ]|] { matchHeaders = [matchContentTypeJson] } + + -- reset the test fixture + request methodPatch "/designers?id=eq.1" + [("Prefer", "tx=commit")] + [json| {"name": "Sid Meier"} |] + `shouldRespondWith` 204 + it "works with self joins" $ get "/web_content?select=name,child_web_content(name),parent_web_content(name)&id=in.(0,1)" `shouldRespondWith` diff --git a/test/spec/Feature/Query/InsertSpec.hs b/test/spec/Feature/Query/InsertSpec.hs index 50053169dc..1ea0ad8266 100644 --- a/test/spec/Feature/Query/InsertSpec.hs +++ b/test/spec/Feature/Query/InsertSpec.hs @@ -11,8 +11,9 @@ import Test.Hspec.Wai import Test.Hspec.Wai.JSON import Text.Heredoc -import PostgREST.Config.PgVersion (PgVersion, pgVersion110, - pgVersion112, pgVersion130) +import PostgREST.Config.PgVersion (PgVersion, pgVersion100, + pgVersion110, pgVersion112, + pgVersion130) import Protolude hiding (get) import SpecHelper @@ -657,3 +658,114 @@ spec actualPgVersion = do , "Location" <:> "/test_null_pk_competitors_sponsors?id=eq.1&sponsor_id=is.null" , "Content-Range" <:> "*/*" ] } + + -- Data representations for payload parsing requires Postgrest 10 or above. + when (actualPgVersion >= pgVersion100) $ do + describe "Data representations" $ do + context "on regular table" $ do + it "parses values in POST body" $ + -- we don't check that the parsing is correct here, just that it's happening. If it doesn't happen we'll get a + -- an "invalid input syntax for type integer:" error. + request methodPost "/datarep_todos" [("Prefer", "return=headers-only")] + [json| {"id":5, "name": "party", "label_color": "#001100", "due_at": "2018-01-03T11:00:00+00"} |] + `shouldRespondWith` + "" + { matchStatus = 201 + , matchHeaders = [ matchHeaderAbsent hContentType + , "Location" <:> "/datarep_todos?id=eq.5" + , "Content-Range" <:> "*/*" ] + } + + it "parses values in POST body and formats individually selected values in return=representation" $ + request methodPost "/datarep_todos?select=id,label_color" [("Prefer", "return=representation")] + [json| {"id":5, "name": "party", "label_color": "#001100", "due_at": "2018-01-03T11:00:00+00"} |] + `shouldRespondWith` + [json| [{"id":5, "label_color": "#001100"}] |] + { matchStatus = 201 + , matchHeaders = ["Content-Type" <:> "application/json; charset=utf-8", + "Content-Range" <:> "*/*"] + } + + it "parses values in POST body and formats values in return=representation" $ + request methodPost "/datarep_todos" [("Prefer", "return=representation")] + [json| {"id":5, "name": "party", "label_color": "#001100", "due_at": "2018-01-03T11:00:00+00"} |] + `shouldRespondWith` + [json| [{"id":5,"name": "party", "label_color": "#001100", "due_at":"2018-01-03T11:00:00+00"}] |] + { matchStatus = 201 + , matchHeaders = ["Content-Type" <:> "application/json; charset=utf-8", + "Content-Range" <:> "*/*"] + } + + context "with ?columns parameter" $ do + it "ignores json keys not included in ?columns; parses only the ones specified" $ + request methodPost "/datarep_todos?columns=id,label_color&select=id,name,label_color,due_at" [("Prefer", "return=representation")] + [json| {"id":5, "name": "party", "label_color": "#001100", "due_at": "invalid but should be ignored"} |] + `shouldRespondWith` + [json| [{"id":5, "name":null, "label_color": "#001100", "due_at": "2018-01-01T00:00:00+00"}] |] + { matchStatus = 201 + , matchHeaders = ["Content-Type" <:> "application/json; charset=utf-8", + "Content-Range" <:> "*/*"] + } + + it "fails without parsing anything if at least one specified column doesn't exist" $ + request 
methodPost "/datarep_todos?columns=id,label_color,helicopters&select=id,name,label_color,due_at" [("Prefer", "return=representation")] + [json| {"due_at": "2019-01-03T11:00:00+00", "smth": "here", "label_color": "invalid", "fake_id": 13} |] + `shouldRespondWith` + [json| {"code":"PGRST118","message":"Column 'helicopters' of relation 'datarep_todos' does not exist","details":null,"hint":null} |] + { matchStatus = 400 + , matchHeaders = ["Content-Type" <:> "application/json; charset=utf-8"] + } + + context "on updatable view" $ do + it "parses values in POST body" $ + -- we don't check that the parsing is correct here, just that it's happening. If it doesn't happen we'll get a + -- an "invalid input syntax for type integer:" error. + request methodPost "/datarep_todos_computed" [("Prefer", "return=headers-only")] + [json| {"id":5, "name": "party", "label_color": "#001100", "due_at": "2018-01-03T11:00:00+00"} |] + `shouldRespondWith` + "" + { matchStatus = 201 + , matchHeaders = [ matchHeaderAbsent hContentType + , "Location" <:> "/datarep_todos_computed?id=eq.5" + , "Content-Range" <:> "*/*" ] + } + + it "parses values in POST body and formats individually selected values in return=representation" $ + request methodPost "/datarep_todos_computed?select=id,label_color" [("Prefer", "return=representation")] + [json| {"id":5, "name": "party", "label_color": "#001100", "due_at": "2018-01-03T11:00:00+00"} |] + `shouldRespondWith` + [json| [{"id":5, "label_color": "#001100"}] |] + { matchStatus = 201 + , matchHeaders = ["Content-Type" <:> "application/json; charset=utf-8", + "Content-Range" <:> "*/*"] + } + + it "parses values in POST body and formats values in return=representation" $ + request methodPost "/datarep_todos_computed" [("Prefer", "return=representation")] + [json| {"id":5, "name": "party", "label_color": "#001100", "due_at": "2018-01-03T11:00:00+00"} |] + `shouldRespondWith` + [json| [{"id":5,"name": "party", "label_color": "#001100", "due_at":"2018-01-03T11:00:00+00", "dark_color":"#000880"}] |] + { matchStatus = 201 + , matchHeaders = ["Content-Type" <:> "application/json; charset=utf-8", + "Content-Range" <:> "*/*"] + } + + context "on updatable views with ?columns parameter" $ do + it "ignores json keys not included in ?columns; parses only the ones specified" $ + request methodPost "/datarep_todos_computed?columns=id,label_color&select=id,name,label_color,due_at" [("Prefer", "return=representation")] + [json| {"id":5, "name": "party", "label_color": "#001100", "due_at": "invalid but should be ignored"} |] + `shouldRespondWith` + [json| [{"id":5, "name":null, "label_color": "#001100", "due_at": "2018-01-01T00:00:00+00"}] |] + { matchStatus = 201 + , matchHeaders = ["Content-Type" <:> "application/json; charset=utf-8", + "Content-Range" <:> "*/*"] + } + + it "fails without parsing anything if at least one specified column doesn't exist" $ + request methodPost "/datarep_todos_computed?columns=id,label_color,helicopters&select=id,name,label_color,due_at" [("Prefer", "return=representation")] + [json| {"due_at": "2019-01-03T11:00:00+00", "smth": "here", "label_color": "invalid", "fake_id": 13} |] + `shouldRespondWith` + [json| {"code":"PGRST118","message":"Column 'helicopters' of relation 'datarep_todos_computed' does not exist","details":null,"hint":null} |] + { matchStatus = 400 + , matchHeaders = ["Content-Type" <:> "application/json; charset=utf-8"] + } diff --git a/test/spec/Feature/Query/QuerySpec.hs b/test/spec/Feature/Query/QuerySpec.hs index b995dfe7ce..690550c937 100644 --- 
a/test/spec/Feature/Query/QuerySpec.hs +++ b/test/spec/Feature/Query/QuerySpec.hs @@ -1276,3 +1276,125 @@ spec actualPgVersion = do {"id":4,"name":"OSX","client_id":2}, {"id":5,"name":"Orphan","client_id":null}]|] { matchHeaders = [matchContentTypeJson] } + + describe "Data representations for customisable value formatting and parsing" $ do + it "formats a single column" $ + get "/datarep_todos?select=id,label_color&id=lt.4" `shouldRespondWith` + [json| [{"id":1,"label_color":"#000000"},{"id":2,"label_color":"#000100"},{"id":3,"label_color":"#01E240"}] |] + { matchHeaders = [matchContentTypeJson] } + it "formats two columns with different formatters" $ + get "/datarep_todos?select=id,label_color,due_at&id=lt.4" `shouldRespondWith` + [json| [{"id":1,"label_color":"#000000","due_at":"2018-01-02T00:00:00+00"},{"id":2,"label_color":"#000100","due_at":"2018-01-03T00:00:00+00"},{"id":3,"label_color":"#01E240","due_at":"2018-01-01T14:12:34.123456+00"}] |] + { matchHeaders = [matchContentTypeJson] } + it "fails in some reasonable way when selecting fields that don't exist" $ + get "/datarep_todos?select=id,label_color,banana" `shouldRespondWith` + [json| {"code":"42703","details":null,"hint":null,"message":"column datarep_todos.banana does not exist"} |] + { matchStatus = 400 + , matchHeaders = [matchContentTypeJson] + } + it "formats columns in views including computed columns" $ + get "/datarep_todos_computed?select=id,label_color,dark_color" `shouldRespondWith` + [json| [ + {"id":1, "label_color":"#000000", "dark_color":"#000000"}, + {"id":2, "label_color":"#000100", "dark_color":"#000080"}, + {"id":3, "label_color":"#01E240", "dark_color":"#00F120"}, + {"id":4, "label_color":"", "dark_color":""} + ] |] + { matchHeaders = [matchContentTypeJson] } + it "formats and allows rename" $ + get "/datarep_todos?select=id,clr:label_color&id=lt.4" `shouldRespondWith` + [json| [{"id":1,"clr":"#000000"},{"id":2,"clr":"#000100"},{"id":3,"clr":"#01E240"}] |] + { matchHeaders = [matchContentTypeJson] } + it "formats, renames and allows manual casting on top" $ + get "/datarep_todos?select=id,clr:label_color::text&id=lt.4" `shouldRespondWith` + [json| [{"id":1,"clr":"\"#000000\""},{"id":2,"clr":"\"#000100\""},{"id":3,"clr":"\"#01E240\""}] |] + { matchHeaders = [matchContentTypeJson] } + it "formats nulls" $ + -- due_at is formatted as NULL but label_color NULLs become empty strings-- it's up to the formatting function. 
+ get "/datarep_todos?select=id,label_color,due_at&id=gt.2&id=lt.5" `shouldRespondWith` + [json| [{"id":3,"label_color":"#01E240","due_at":"2018-01-01T14:12:34.123456+00"},{"id":4,"label_color":"","due_at":null}] |] + { matchHeaders = [matchContentTypeJson] } + it "formats star select" $ + get "/datarep_todos?select=*&id=lt.4" `shouldRespondWith` + [json| [ + {"id":1,"name":"Report","label_color":"#000000","due_at":"2018-01-02T00:00:00+00"}, + {"id":2,"name":"Essay","label_color":"#000100","due_at":"2018-01-03T00:00:00+00"}, + {"id":3,"name":"Algebra","label_color":"#01E240","due_at":"2018-01-01T14:12:34.123456+00"} + ] |] + { matchHeaders = [matchContentTypeJson] } + it "formats implicit star select" $ + get "/datarep_todos?id=lt.4" `shouldRespondWith` + [json| [ + {"id":1,"name":"Report","label_color":"#000000","due_at":"2018-01-02T00:00:00+00"}, + {"id":2,"name":"Essay","label_color":"#000100","due_at":"2018-01-03T00:00:00+00"}, + {"id":3,"name":"Algebra","label_color":"#01E240","due_at":"2018-01-01T14:12:34.123456+00"} + ] |] + { matchHeaders = [matchContentTypeJson] } + it "formats star and explicit mix" $ + get "/datarep_todos?select=due_at,*&id=lt.4" `shouldRespondWith` + [json| [ + {"id":1,"name":"Report","label_color":"#000000","due_at":"2018-01-02T00:00:00+00"}, + {"id":2,"name":"Essay","label_color":"#000100","due_at":"2018-01-03T00:00:00+00"}, + {"id":3,"name":"Algebra","label_color":"#01E240","due_at":"2018-01-01T14:12:34.123456+00"} + ] |] + { matchHeaders = [matchContentTypeJson] } + it "formats through join" $ + get "/datarep_next_two_todos?select=id,name,first_item:datarep_todos!datarep_next_two_todos_first_item_id_fkey(label_color,due_at)" `shouldRespondWith` + [json| [{"id":1,"name":"school related","first_item":{"label_color":"#000100","due_at":"2018-01-03T00:00:00+00"}},{"id":2,"name":"do these first","first_item":{"label_color":"#000000","due_at":"2018-01-02T00:00:00+00"}}] |] + { matchHeaders = [matchContentTypeJson] } + it "formats through join with star select" $ + get "/datarep_next_two_todos?select=id,name,second_item:datarep_todos!datarep_next_two_todos_second_item_id_fkey(*)" `shouldRespondWith` + [json| [ + {"id":1,"name":"school related","second_item": + {"id": 3, "name": "Algebra", "label_color":"#01E240","due_at":"2018-01-01T14:12:34.123456+00"}}, + {"id":2,"name":"do these first","second_item": + {"id": 3, "name": "Algebra", "label_color":"#01E240","due_at":"2018-01-01T14:12:34.123456+00"}} + ] |] + { matchHeaders = [matchContentTypeJson] } + it "uses text parser on value for filter given through query parameters" $ + get "/datarep_todos?select=id,due_at&label_color=eq.000100" `shouldRespondWith` + [json| [{"id":2,"due_at":"2018-01-03T00:00:00+00"}] |] + { matchHeaders = [matchContentTypeJson] } + it "in the absense of text parser, does not try to use the JSON parser for query parameters" $ + get "/datarep_todos?select=id,due_at&due_at=eq.T" `shouldRespondWith` + -- okay this test is a bit of a hack but we prove the parser is not used because it'd replace the T and fail a + -- different way. + [json| {"code":"22007","details":null,"hint":null,"message":"invalid input syntax for type timestamp with time zone: \"T\""} |] + { matchStatus = 400 + , matchHeaders = [matchContentTypeJson] + } + -- Before PG 11, this will fail because we need arrays of domain type values. The docs should explain data reps are + -- not supported in this case. 
+ when (actualPgVersion >= pgVersion110) $ do + it "uses text parser for filter with 'IN' predicates" $ + get "/datarep_todos?select=id,due_at&label_color=in.(000100,01E240)" `shouldRespondWith` + [json| [ + {"id":2, "due_at": "2018-01-03T00:00:00+00"}, + {"id":3, "due_at": "2018-01-01T14:12:34.123456+00"} + ] |] + { matchHeaders = [matchContentTypeJson] } + it "uses text parser for filter with 'NOT IN' predicates" $ + get "/datarep_todos?select=id,due_at&label_color=not.in.(000000,01E240)" `shouldRespondWith` + [json| [ + {"id":2, "due_at": "2018-01-03T00:00:00+00"} + ] |] + { matchHeaders = [matchContentTypeJson] } + it "uses text parser on value for filter across relations" $ + get "/datarep_next_two_todos?select=id,name,datarep_todos!datarep_next_two_todos_first_item_id_fkey(label_color,due_at)&datarep_todos.label_color=neq.000100" `shouldRespondWith` + [json| [{"id":1,"name":"school related","datarep_todos":null},{"id":2,"name":"do these first","datarep_todos":{"label_color":"#000000","due_at":"2018-01-02T00:00:00+00"}}] |] + { matchHeaders = [matchContentTypeJson] } + -- This is not supported by data reps (would be hard to make it work with high performance). So the test just + -- verifies we don't panic or add inappropriate SQL to the filters. + it "fails safely on user trying to use ilike operator on data reps column" $ + get "/datarep_todos?select=id,name&label_color=ilike.#*100" `shouldRespondWith` ( + if actualPgVersion >= pgVersion110 then + [json| + {"code":"42883","details":null,"hint":"No operator matches the given name and argument types. You might need to add explicit type casts.","message":"operator does not exist: public.color ~~* unknown"} + |] + else + [json| + {"code":"42883","details":null,"hint":"No operator matches the given name and argument type(s). You might need to add explicit type casts.","message":"operator does not exist: public.color ~~* unknown"} + |]) + { matchStatus = 404 + , matchHeaders = [matchContentTypeJson] + } diff --git a/test/spec/Feature/Query/UpdateSpec.hs b/test/spec/Feature/Query/UpdateSpec.hs index ff0be3dcf5..16840b1d04 100644 --- a/test/spec/Feature/Query/UpdateSpec.hs +++ b/test/spec/Feature/Query/UpdateSpec.hs @@ -9,6 +9,9 @@ import Network.HTTP.Types import Test.Hspec.Wai import Test.Hspec.Wai.JSON +import PostgREST.Config.PgVersion (PgVersion, pgVersion100) + + import Protolude hiding (get) import SpecHelper @@ -18,8 +21,8 @@ tblDataBefore = [aesonQQ|[ , { "id": 3, "name": "item-3" } ]|] -spec :: SpecWith ((), Application) -spec = do +spec :: PgVersion -> SpecWith ((), Application) +spec actualPgVersion = do describe "Patching record" $ do context "to unknown uri" $ it "indicates no table found by returning 404" $ @@ -543,3 +546,187 @@ spec = do , { "id": 2, "name": "item-2" } , { "id": 3, "name": "item-3" } ]|] + + -- Data representations for payload parsing requires Postgrest 10 or above. 
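+  -- As a sketch of what payload parsing means here (using the fixture's casts; the SQL PostgREST
+  -- actually generates differs): each incoming json value is cast to the column's domain type, e.g.
+  --   UPDATE datarep_todos SET label_color = ('"#221100"'::json)::public.color WHERE id = 2;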
+ when (actualPgVersion >= pgVersion100) $ do + describe "Data representations" $ do + context "for a single row" $ do + it "parses values in payload" $ + request methodPatch "/datarep_todos?id=eq.2" [("Prefer", "return=headers-only")] + [json| {"label_color": "#221100", "due_at": "2019-01-03T11:00:00+00"} |] + `shouldRespondWith` + "" + { matchStatus = 204 + , matchHeaders = [ matchHeaderAbsent hContentType + , "Content-Range" <:> "0-0/*" ] + } + + it "parses values in payload and formats individually selected values in return=representation" $ + request methodPatch "/datarep_todos?id=eq.2&select=id,label_color" [("Prefer", "return=representation")] + [json| {"label_color": "#221100", "due_at": "2019-01-03T11:00:00+00"} |] + `shouldRespondWith` + [json| [{"id":2, "label_color": "#221100"}] |] + { matchStatus = 200 + , matchHeaders = ["Content-Type" <:> "application/json; charset=utf-8", + "Content-Range" <:> "0-0/*"] + } + + it "parses values in payload and formats values in return=representation" $ + request methodPatch "/datarep_todos?id=eq.2" [("Prefer", "return=representation")] + [json| {"label_color": "#221100", "due_at": "2019-01-03T11:00:20+00"} |] + `shouldRespondWith` + [json| [{"id":2, "name": "Essay", "label_color": "#221100", "due_at":"2019-01-03T11:00:20+00"}] |] + { matchStatus = 200 + , matchHeaders = ["Content-Type" <:> "application/json; charset=utf-8", + "Content-Range" <:> "0-0/*"] + } + + it "parses values in payload and formats star mixed selected values in return=representation" $ + request methodPatch "/datarep_todos?id=eq.2&select=due_at,*" [("Prefer", "return=representation")] + [json| {"label_color": "#221100", "due_at": "2019-01-03T11:00:00+00"} |] + `shouldRespondWith` + -- end up with due_at twice here but that's unrelated to data reps + [json| [{"due_at":"2019-01-03T11:00:00+00", "id":2, "name":"Essay", "label_color":"#221100", "due_at":"2019-01-03T11:00:00+00"}] |] + { matchStatus = 200 + , matchHeaders = ["Content-Type" <:> "application/json; charset=utf-8", + "Content-Range" <:> "0-0/*"] + } + context "for multiple rows" $ do + it "parses values in payload and formats individually selected values in return=representation" $ + request methodPatch "/datarep_todos?id=lt.4&select=id,name,label_color" [("Prefer", "return=representation")] + [json| {"label_color": "#221100", "due_at": "2019-01-03T11:00:00+00"} |] + `shouldRespondWith` + [json| [ + {"id":1, "name": "Report", "label_color": "#221100"}, + {"id":2, "name": "Essay", "label_color": "#221100"}, + {"id":3, "name": "Algebra", "label_color": "#221100"} + ] |] + { matchStatus = 200 + , matchHeaders = ["Content-Type" <:> "application/json; charset=utf-8", + "Content-Range" <:> "0-2/*"] + } + + it "parses values in payload and formats values in return=representation" $ + request methodPatch "/datarep_todos?id=lt.4" [("Prefer", "return=representation")] + [json| {"label_color": "#221100", "due_at": "2019-01-03T11:00:00+00"} |] + `shouldRespondWith` + [json| [ + {"id":1, "name": "Report", "label_color": "#221100", "due_at":"2019-01-03T11:00:00+00"}, + {"id":2, "name": "Essay", "label_color": "#221100", "due_at":"2019-01-03T11:00:00+00"}, + {"id":3, "name": "Algebra", "label_color": "#221100", "due_at":"2019-01-03T11:00:00+00"} + ] |] + { matchStatus = 200 + , matchHeaders = ["Content-Type" <:> "application/json; charset=utf-8", + "Content-Range" <:> "0-2/*"] + } + context "with ?columns parameter" $ do + it "ignores json keys not included in ?columns; parses only the ones specified" $ + request methodPatch 
"/datarep_todos?id=eq.2&columns=due_at" [("Prefer", "return=representation")] + [json| {"due_at": "2019-01-03T11:00:00+00", "smth": "here", "label_color": "invalid", "fake_id": 13} |] + `shouldRespondWith` + [json| [ + {"id":2, "name": "Essay", "label_color": "#000100", "due_at":"2019-01-03T11:00:00+00"} + ] |] + { matchStatus = 200 + , matchHeaders = ["Content-Type" <:> "application/json; charset=utf-8", + "Content-Range" <:> "0-0/*"] + } + + it "fails if at least one specified column doesn't exist" $ + request methodPatch "/datarep_todos?id=eq.2&columns=label_color,helicopters" [("Prefer", "return=representation")] + [json| {"due_at": "2019-01-03T11:00:00+00", "smth": "here", "label_color": "invalid", "fake_id": 13} |] + `shouldRespondWith` + [json| {"code":"PGRST118","message":"Column 'helicopters' of relation 'datarep_todos' does not exist","details":null,"hint":null} |] + { matchStatus = 400 + , matchHeaders = ["Content-Type" <:> "application/json; charset=utf-8"] + } + + it "ignores json keys and gives 200 if no record updated" $ + request methodPatch "/datarep_todos?id=eq.2001&columns=label_color" [("Prefer", "return=representation")] + [json| {"due_at": "2019-01-03T11:00:00+00", "smth": "here", "label_color": "invalid", "fake_id": 13} |] + `shouldRespondWith` 200 + context "on a view" $ do + context "for a single row" $ do + it "parses values in payload" $ + request methodPatch "/datarep_todos_computed?id=eq.2" [("Prefer", "return=headers-only")] + [json| {"label_color": "#221100", "due_at": "2019-01-03T11:00:00+00"} |] + `shouldRespondWith` + "" + { matchStatus = 204 + , matchHeaders = [ matchHeaderAbsent hContentType + , "Content-Range" <:> "0-0/*" ] + } + + it "parses values in payload and formats individually selected values in return=representation" $ + request methodPatch "/datarep_todos_computed?id=eq.2&select=id,label_color" [("Prefer", "return=representation")] + [json| {"label_color": "#221100", "due_at": "2019-01-03T11:00:00+00"} |] + `shouldRespondWith` + [json| [{"id":2, "label_color": "#221100"}] |] + { matchStatus = 200 + , matchHeaders = ["Content-Type" <:> "application/json; charset=utf-8", + "Content-Range" <:> "0-0/*"] + } + + it "parses values in payload and formats values in return=representation" $ + request methodPatch "/datarep_todos_computed?id=eq.2" [("Prefer", "return=representation")] + [json| {"label_color": "#221100", "due_at": "2019-01-03T11:00:20+00"} |] + `shouldRespondWith` + [json| [{"id":2, "name": "Essay", "label_color": "#221100", "dark_color":"#110880", "due_at":"2019-01-03T11:00:20+00"}] |] + { matchStatus = 200 + , matchHeaders = ["Content-Type" <:> "application/json; charset=utf-8", + "Content-Range" <:> "0-0/*"] + } + context "for multiple rows" $ do + it "parses values in payload and formats individually selected values in return=representation" $ + request methodPatch "/datarep_todos_computed?id=lt.4&select=id,name,label_color,dark_color" [("Prefer", "return=representation")] + [json| {"label_color": "#221100", "due_at": "2019-01-03T11:00:00+00"} |] + `shouldRespondWith` + [json| [ + {"id":1, "name": "Report", "label_color": "#221100", "dark_color":"#110880"}, + {"id":2, "name": "Essay", "label_color": "#221100", "dark_color":"#110880"}, + {"id":3, "name": "Algebra", "label_color": "#221100", "dark_color":"#110880"} + ] |] + { matchStatus = 200 + , matchHeaders = ["Content-Type" <:> "application/json; charset=utf-8", + "Content-Range" <:> "0-2/*"] + } + + it "parses values in payload and formats values in return=representation" $ + 
request methodPatch "/datarep_todos_computed?id=lt.4" [("Prefer", "return=representation")] + [json| {"label_color": "#221100", "due_at": "2019-01-03T11:00:00+00"} |] + `shouldRespondWith` + [json| [ + {"id":1, "name": "Report", "label_color": "#221100", "dark_color":"#110880", "due_at":"2019-01-03T11:00:00+00"}, + {"id":2, "name": "Essay", "label_color": "#221100", "dark_color":"#110880", "due_at":"2019-01-03T11:00:00+00"}, + {"id":3, "name": "Algebra", "label_color": "#221100", "dark_color":"#110880", "due_at":"2019-01-03T11:00:00+00"} + ] |] + { matchStatus = 200 + , matchHeaders = ["Content-Type" <:> "application/json; charset=utf-8", + "Content-Range" <:> "0-2/*"] + } + context "with ?columns parameter" $ do + it "ignores json keys not included in ?columns; parses only the ones specified" $ + request methodPatch "/datarep_todos_computed?id=eq.2&columns=due_at" [("Prefer", "return=representation")] + [json| {"due_at": "2019-01-03T11:00:00+00", "smth": "here", "label_color": "invalid", "fake_id": 13} |] + `shouldRespondWith` + [json| [ + {"id":2, "name": "Essay", "label_color": "#000100", "dark_color": "#000080", "due_at":"2019-01-03T11:00:00+00"} + ] |] + { matchStatus = 200 + , matchHeaders = ["Content-Type" <:> "application/json; charset=utf-8", + "Content-Range" <:> "0-0/*"] + } + + it "fails if at least one specified column doesn't exist" $ + request methodPatch "/datarep_todos_computed?id=eq.2&columns=label_color,helicopters" [("Prefer", "return=representation")] + [json| {"due_at": "2019-01-03T11:00:00+00", "smth": "here", "label_color": "invalid", "fake_id": 13} |] + `shouldRespondWith` + [json| {"code":"PGRST118","message":"Column 'helicopters' of relation 'datarep_todos_computed' does not exist","details":null,"hint":null} |] + { matchStatus = 400 + , matchHeaders = ["Content-Type" <:> "application/json; charset=utf-8"] + } + + it "ignores json keys and gives 200 if no record updated" $ + request methodPatch "/datarep_todos_computed?id=eq.2001&columns=label_color" [("Prefer", "return=representation")] + [json| {"due_at": "2019-01-03T11:00:00+00", "smth": "here", "label_color": "invalid", "fake_id": 13} |] + `shouldRespondWith` 200 diff --git a/test/spec/Main.hs b/test/spec/Main.hs index aa443b821a..cd53d25b8c 100644 --- a/test/spec/Main.hs +++ b/test/spec/Main.hs @@ -148,7 +148,7 @@ main = do , ("Feature.Query.RawOutputTypesSpec" , Feature.Query.RawOutputTypesSpec.spec) , ("Feature.Query.RpcSpec" , Feature.Query.RpcSpec.spec actualPgVersion) , ("Feature.Query.SingularSpec" , Feature.Query.SingularSpec.spec) - , ("Feature.Query.UpdateSpec" , Feature.Query.UpdateSpec.spec) + , ("Feature.Query.UpdateSpec" , Feature.Query.UpdateSpec.spec actualPgVersion) , ("Feature.Query.UpsertSpec" , Feature.Query.UpsertSpec.spec actualPgVersion) , ("Feature.Query.ComputedRelsSpec" , Feature.Query.ComputedRelsSpec.spec) , ("Feature.Query.RelatedQueriesSpec" , Feature.Query.RelatedQueriesSpec.spec) diff --git a/test/spec/fixtures/data.sql b/test/spec/fixtures/data.sql index dd32ae9dc4..501beac467 100644 --- a/test/spec/fixtures/data.sql +++ b/test/spec/fixtures/data.sql @@ -838,3 +838,13 @@ INSERT INTO posters(id,name) VALUES (1,'Mark'), (2,'Elon'), (3,'Bill'), (4,'Jeff TRUNCATE TABLE subscriptions CASCADE; INSERT INTO subscriptions(subscriber,subscribed) VALUES (3,1), (4,1), (1,2); + +TRUNCATE TABLE datarep_todos CASCADE; +INSERT INTO datarep_todos VALUES (1, 'Report', 0, '2018-01-02'); +INSERT INTO datarep_todos VALUES (2, 'Essay', 256, '2018-01-03'); +INSERT INTO datarep_todos VALUES (3, 
'Algebra', 123456, '2018-01-01 14:12:34.123456'); +INSERT INTO datarep_todos VALUES (4, 'Opus Magnum', NULL, NULL); + +TRUNCATE TABLE datarep_next_two_todos CASCADE; +INSERT INTO datarep_next_two_todos VALUES (1, 2, 3, 'school related'); +INSERT INTO datarep_next_two_todos VALUES (2, 1, 3, 'do these first'); diff --git a/test/spec/fixtures/schema.sql b/test/spec/fixtures/schema.sql index c8f40e7ba1..6711ce732b 100644 --- a/test/spec/fixtures/schema.sql +++ b/test/spec/fixtures/schema.sql @@ -2786,9 +2786,20 @@ BEGIN LOAD 'safeupdate'; END; $$ LANGUAGE plpgsql SECURITY DEFINER; +-- This tests data representations over computed joins: even a lower case title should come back title cased. +DROP DOMAIN IF EXISTS public.titlecasetext CASCADE; +CREATE DOMAIN public.titlecasetext AS text; + +CREATE OR REPLACE FUNCTION json(public.titlecasetext) RETURNS json AS $$ + SELECT to_json(INITCAP($1::text)); +$$ LANGUAGE SQL IMMUTABLE; + +CREATE CAST (public.titlecasetext AS json) WITH FUNCTION json(public.titlecasetext) AS IMPLICIT; +-- End of data representations specific stuff except for where the domain is used in the table. + CREATE TABLE designers ( id int primary key -, name text +, name public.titlecasetext ); CREATE TABLE videogames ( @@ -3103,6 +3114,72 @@ create table test.subscriptions( primary key(subscriber, subscribed) ); +-- For formatting output and parsing input of types with custom API representations. +DROP DOMAIN IF EXISTS public.color CASCADE; +CREATE DOMAIN public.color AS INTEGER CHECK (VALUE >= 0 AND VALUE <= 16777215); + +CREATE OR REPLACE FUNCTION color(json) RETURNS public.color AS $$ + SELECT color($1 #>> '{}'); +$$ LANGUAGE SQL IMMUTABLE; + +CREATE OR REPLACE FUNCTION color(text) RETURNS public.color AS $$ + SELECT (('x' || lpad((CASE WHEN SUBSTRING($1::text, 1, 1) = '#' THEN SUBSTRING($1::text, 2) ELSE $1::text END), 8, '0'))::bit(32)::int)::public.color; +$$ LANGUAGE SQL IMMUTABLE; + +CREATE OR REPLACE FUNCTION json(public.color) RETURNS json AS $$ + SELECT + CASE WHEN $1 IS NULL THEN to_json(''::text) + ELSE to_json('#' || lpad(upper(to_hex($1)), 6, '0')) + END; +$$ LANGUAGE SQL IMMUTABLE; + +CREATE CAST (public.color AS json) WITH FUNCTION json(public.color) AS IMPLICIT; +CREATE CAST (json AS public.color) WITH FUNCTION color(json) AS IMPLICIT; +CREATE CAST (text AS public.color) WITH FUNCTION color(text) AS IMPLICIT; + +DROP DOMAIN IF EXISTS public.isodate CASCADE; +CREATE DOMAIN public.isodate AS timestamp with time zone; + +CREATE OR REPLACE FUNCTION isodate(json) RETURNS public.isodate AS $$ + SELECT isodate($1 #>> '{}'); +$$ LANGUAGE SQL IMMUTABLE; + +CREATE OR REPLACE FUNCTION isodate(text) RETURNS public.isodate AS $$ + SELECT (replace($1, 'T', ' ')::timestamp with time zone)::public.isodate; +$$ LANGUAGE SQL IMMUTABLE; + +CREATE OR REPLACE FUNCTION json(public.isodate) RETURNS json AS $$ + SELECT to_json(replace($1::text, ' ', 'T')); +$$ LANGUAGE SQL IMMUTABLE; + +CREATE CAST (public.isodate AS json) WITH FUNCTION json(public.isodate) AS IMPLICIT; +CREATE CAST (json AS public.isodate) WITH FUNCTION isodate(json) AS IMPLICIT; +-- We intentionally don't have this in order to test query string parsing doesn't try to fall back on JSON parsing. 
+-- CREATE CAST (text AS public.isodate) WITH FUNCTION isodate(text) AS IMPLICIT; + +CREATE TABLE datarep_todos ( + id bigint primary key, + name text, + label_color public.color default 0, + due_at public.isodate default '2018-01-01'::date +); + +CREATE TABLE datarep_next_two_todos ( + id bigint primary key, + first_item_id bigint references datarep_todos(id), + second_item_id bigint references datarep_todos(id), + name text +); + +CREATE VIEW datarep_todos_computed as ( + SELECT id, + name, + label_color, + due_at, + (label_color / 2)::public.color as dark_color + FROM datarep_todos +); + -- view's name is alphabetically before projects create view test.alpha_projects as select c.id, p.name as pro_name, c.name as cli_name From e6718ddd720b3c5f98e710e7fc36cd51177b0151 Mon Sep 17 00:00:00 2001 From: Alexander Ljungberg <aljungberg@wireload.net> Date: Mon, 23 Jan 2023 10:55:04 +0000 Subject: [PATCH 02/11] More consistent naming (TypedX -> CoercibleX). --- src/PostgREST/Plan.hs | 26 +++++++++++++------------- src/PostgREST/Plan/MutatePlan.hs | 8 ++++---- src/PostgREST/Plan/ReadPlan.hs | 4 ++-- src/PostgREST/Plan/Types.hs | 20 ++++++++++---------- src/PostgREST/Query/SqlFragment.hs | 18 +++++++++--------- 5 files changed, 38 insertions(+), 38 deletions(-) diff --git a/src/PostgREST/Plan.hs b/src/PostgREST/Plan.hs index ea4a590fc0..05b42917a9 100644 --- a/src/PostgREST/Plan.hs +++ b/src/PostgREST/Plan.hs @@ -472,15 +472,15 @@ addNullEmbedFilters (Node rp@ReadPlan{where_=oldLogic} forest) = do newLogic <- getFilters readPlans `traverse` oldLogic Node rp{ReadPlan.where_= newLogic} <$> (addNullEmbedFilters `traverse` forest) where - getFilters :: [ReadPlan] -> TypedLogicTree -> Either ApiRequestError TypedLogicTree - getFilters rPlans (TypedExpr b lOp trees) = TypedExpr b lOp <$> (getFilters rPlans `traverse` trees) - getFilters rPlans flt@(TypedStmnt (TypedFilter (CoercibleField fld [] _ _) opExpr)) = + getFilters :: [ReadPlan] -> CoercibleLogicTree -> Either ApiRequestError CoercibleLogicTree + getFilters rPlans (CoercibleExpr b lOp trees) = CoercibleExpr b lOp <$> (getFilters rPlans `traverse` trees) + getFilters rPlans flt@(CoercibleStmnt (CoercibleFilter (CoercibleField fld [] _ _) opExpr)) = let foundRP = find (\ReadPlan{relName, relAlias} -> fld == fromMaybe relName relAlias) rPlans in case (foundRP, opExpr) of - (Just ReadPlan{relAggAlias}, OpExpr b (Is TriNull)) -> Right $ TypedStmnt $ TypedFilterNullEmbed b relAggAlias + (Just ReadPlan{relAggAlias}, OpExpr b (Is TriNull)) -> Right $ CoercibleStmnt $ CoercibleFilterNullEmbed b relAggAlias (Just ReadPlan{relName}, _) -> Left $ UnacceptableFilter relName _ -> Right flt - getFilters _ flt@(TypedStmnt _) = Right flt + getFilters _ flt@(CoercibleStmnt _) = Right flt addRanges :: ApiRequest -> ReadPlanTree -> Either ApiRequestError ReadPlanTree addRanges ApiRequest{..} rReq = @@ -503,13 +503,13 @@ addLogicTrees ctx ApiRequest{..} rReq = addLogicTreeToNode :: (EmbedPath, LogicTree) -> Either ApiRequestError ReadPlanTree -> Either ApiRequestError ReadPlanTree addLogicTreeToNode = updateNode (\t (Node q@ReadPlan{from=fromTable, where_=lf} f) -> Node q{ReadPlan.where_=resolveLogicTree ctx{qi=fromTable} t:lf} f) -resolveLogicTree :: ResolverContext -> LogicTree -> TypedLogicTree -resolveLogicTree ctx (Stmnt flt) = TypedStmnt $ resolveFilter ctx flt -resolveLogicTree ctx (Expr b op lts) = TypedExpr b op (map (resolveLogicTree ctx) lts) +resolveLogicTree :: ResolverContext -> LogicTree -> CoercibleLogicTree +resolveLogicTree ctx (Stmnt flt) = 
CoercibleStmnt $ resolveFilter ctx flt +resolveLogicTree ctx (Expr b op lts) = CoercibleExpr b op (map (resolveLogicTree ctx) lts) -resolveFilter :: ResolverContext -> Filter -> TypedFilter -resolveFilter ctx (Filter fld opExpr) = TypedFilter{typedField=resolveQueryInputField ctx fld, typedOpExpr=opExpr} -resolveFilter _ (FilterNullEmbed isNot fieldName) = TypedFilterNullEmbed isNot fieldName +resolveFilter :: ResolverContext -> Filter -> CoercibleFilter +resolveFilter ctx (Filter fld opExpr) = CoercibleFilter{field=resolveQueryInputField ctx fld, opExpr=opExpr} +resolveFilter _ (FilterNullEmbed isNot fieldName) = CoercibleFilterNullEmbed isNot fieldName -- Validates that spread embeds are only done on to-one relationships validateSpreadEmbeds :: ReadPlanTree -> Either ApiRequestError ReadPlanTree @@ -640,5 +640,5 @@ inferColsEmbedNeeds (Node ReadPlan{select} forest) pkCols -- Traditional filters(e.g. id=eq.1) are added as root nodes of the LogicTree -- they are later concatenated with AND in the QueryBuilder -addFilterToLogicForest :: TypedFilter -> [TypedLogicTree] -> [TypedLogicTree] -addFilterToLogicForest flt lf = TypedStmnt flt : lf +addFilterToLogicForest :: CoercibleFilter -> [CoercibleLogicTree] -> [CoercibleLogicTree] +addFilterToLogicForest flt lf = CoercibleStmnt flt : lf diff --git a/src/PostgREST/Plan/MutatePlan.hs b/src/PostgREST/Plan/MutatePlan.hs index 9faf259bbe..c7c46439cc 100644 --- a/src/PostgREST/Plan/MutatePlan.hs +++ b/src/PostgREST/Plan/MutatePlan.hs @@ -8,7 +8,7 @@ import qualified Data.ByteString.Lazy as LBS import PostgREST.ApiRequest.Preferences (PreferResolution) import PostgREST.ApiRequest.Types (OrderTerm) import PostgREST.Plan.Types (CoercibleField, - TypedLogicTree) + CoercibleLogicTree) import PostgREST.RangeQuery (NonnegRange) import PostgREST.SchemaCache.Identifiers (FieldName, QualifiedIdentifier) @@ -22,7 +22,7 @@ data MutatePlan , insCols :: [CoercibleField] , insBody :: Maybe LBS.ByteString , onConflict :: Maybe (PreferResolution, [FieldName]) - , where_ :: [TypedLogicTree] + , where_ :: [CoercibleLogicTree] , returning :: [FieldName] , insPkCols :: [FieldName] } @@ -30,14 +30,14 @@ data MutatePlan { in_ :: QualifiedIdentifier , updCols :: [CoercibleField] , updBody :: Maybe LBS.ByteString - , where_ :: [TypedLogicTree] + , where_ :: [CoercibleLogicTree] , mutRange :: NonnegRange , mutOrder :: [OrderTerm] , returning :: [FieldName] } | Delete { in_ :: QualifiedIdentifier - , where_ :: [TypedLogicTree] + , where_ :: [CoercibleLogicTree] , mutRange :: NonnegRange , mutOrder :: [OrderTerm] , returning :: [FieldName] diff --git a/src/PostgREST/Plan/ReadPlan.hs b/src/PostgREST/Plan/ReadPlan.hs index 474a3578ed..4cf8a8ceb5 100644 --- a/src/PostgREST/Plan/ReadPlan.hs +++ b/src/PostgREST/Plan/ReadPlan.hs @@ -10,7 +10,7 @@ import PostgREST.ApiRequest.Types (Alias, Cast, Depth, Hint, JoinType, NodeName, OrderTerm) import PostgREST.Plan.Types (CoercibleField (..), - TypedLogicTree) + CoercibleLogicTree) import PostgREST.RangeQuery (NonnegRange) import PostgREST.SchemaCache.Identifiers (FieldName, QualifiedIdentifier) @@ -31,7 +31,7 @@ data ReadPlan = ReadPlan { select :: [(CoercibleField, Maybe Cast, Maybe Alias)] , from :: QualifiedIdentifier , fromAlias :: Maybe Alias - , where_ :: [TypedLogicTree] + , where_ :: [CoercibleLogicTree] , order :: [OrderTerm] , range_ :: NonnegRange , relName :: NodeName diff --git a/src/PostgREST/Plan/Types.hs b/src/PostgREST/Plan/Types.hs index 3b472bf956..956032b74e 100644 --- a/src/PostgREST/Plan/Types.hs +++ 
b/src/PostgREST/Plan/Types.hs @@ -1,8 +1,8 @@ module PostgREST.Plan.Types ( CoercibleField(..) , unknownField - , TypedLogicTree(..) - , TypedFilter(..) + , CoercibleLogicTree(..) + , CoercibleFilter(..) , TransformerProc ) where @@ -35,15 +35,15 @@ data CoercibleField = CoercibleField unknownField :: FieldName -> JsonPath -> CoercibleField unknownField name path = CoercibleField name path "" Nothing --- | Like a regular LogicTree but with field type information. -data TypedLogicTree - = TypedExpr Bool LogicOperator [TypedLogicTree] - | TypedStmnt TypedFilter +-- | Like an API request LogicTree, but with coercible field information. +data CoercibleLogicTree + = CoercibleExpr Bool LogicOperator [CoercibleLogicTree] + | CoercibleStmnt CoercibleFilter deriving (Eq) -data TypedFilter = TypedFilter - { typedField :: CoercibleField - , typedOpExpr :: OpExpr +data CoercibleFilter = CoercibleFilter + { field :: CoercibleField + , opExpr :: OpExpr } - | TypedFilterNullEmbed Bool FieldName + | CoercibleFilterNullEmbed Bool FieldName deriving (Eq) diff --git a/src/PostgREST/Query/SqlFragment.hs b/src/PostgREST/Query/SqlFragment.hs index b772e628de..830240d83e 100644 --- a/src/PostgREST/Query/SqlFragment.hs +++ b/src/PostgREST/Query/SqlFragment.hs @@ -74,8 +74,8 @@ import PostgREST.MediaType (MTPlanFormat (..), MTPlanOption (..)) import PostgREST.Plan.ReadPlan (JoinCondition (..)) import PostgREST.Plan.Types (CoercibleField (..), - TypedFilter (..), - TypedLogicTree (..), + CoercibleFilter (..), + CoercibleLogicTree (..), unknownField) import PostgREST.RangeQuery (NonnegRange, allRange, rangeLimit, rangeOffset) @@ -301,10 +301,10 @@ pgFmtArrayLiteralForField values CoercibleField{tfTransform=(Just parserProc)} = -- When no transformation is requested, use an array literal which should be simpler, maybe faster. 
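 -- As an illustrative sketch (not the literal SQL this module emits): with a parser the values are
 -- routed through it, producing something like
 --   (SELECT array_agg(v::public.color) FROM unnest(ARRAY['000100','01E240']) AS v)
 -- whereas the no-parser case can emit a plain array literal such as '{000100,01E240}'.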
pgFmtArrayLiteralForField values _ = unknownLiteral (pgBuildArrayLiteral values) -pgFmtFilter :: QualifiedIdentifier -> TypedFilter -> SQL.Snippet -pgFmtFilter _ (TypedFilterNullEmbed hasNot fld) = SQL.sql (pgFmtIdent fld) <> " IS " <> (if hasNot then "NOT" else mempty) <> " NULL" -pgFmtFilter _ (TypedFilter _ (NoOpExpr _)) = mempty -- TODO unreachable because NoOpExpr is filtered on QueryParams -pgFmtFilter table (TypedFilter fld (OpExpr hasNot oper)) = notOp <> " " <> case oper of +pgFmtFilter :: QualifiedIdentifier -> CoercibleFilter -> SQL.Snippet +pgFmtFilter _ (CoercibleFilterNullEmbed hasNot fld) = SQL.sql (pgFmtIdent fld) <> " IS " <> (if hasNot then "NOT" else mempty) <> " NULL" +pgFmtFilter _ (CoercibleFilter _ (NoOpExpr _)) = mempty -- TODO unreachable because NoOpExpr is filtered on QueryParams +pgFmtFilter table (CoercibleFilter fld (OpExpr hasNot oper)) = notOp <> " " <> case oper of Op op val -> pgFmtFieldOp op <> " " <> case op of OpLike -> unknownLiteral (T.map star val) OpILike -> unknownLiteral (T.map star val) @@ -340,14 +340,14 @@ pgFmtJoinCondition :: JoinCondition -> SQL.Snippet pgFmtJoinCondition (JoinCondition (qi1, col1) (qi2, col2)) = SQL.sql $ pgFmtColumn qi1 col1 <> " = " <> pgFmtColumn qi2 col2 -pgFmtLogicTree :: QualifiedIdentifier -> TypedLogicTree -> SQL.Snippet -pgFmtLogicTree qi (TypedExpr hasNot op forest) = SQL.sql notOp <> " (" <> intercalateSnippet (opSql op) (pgFmtLogicTree qi <$> forest) <> ")" +pgFmtLogicTree :: QualifiedIdentifier -> CoercibleLogicTree -> SQL.Snippet +pgFmtLogicTree qi (CoercibleExpr hasNot op forest) = SQL.sql notOp <> " (" <> intercalateSnippet (opSql op) (pgFmtLogicTree qi <$> forest) <> ")" where notOp = if hasNot then "NOT" else mempty opSql And = " AND " opSql Or = " OR " -pgFmtLogicTree qi (TypedStmnt flt) = pgFmtFilter qi flt +pgFmtLogicTree qi (CoercibleStmnt flt) = pgFmtFilter qi flt pgFmtJsonPath :: JsonPath -> SQL.Snippet pgFmtJsonPath = \case From b32418b6010b7f18e454094a2745ae0f0980a6f6 Mon Sep 17 00:00:00 2001 From: Alexander Ljungberg <aljungberg@wireload.net> Date: Mon, 23 Jan 2023 13:31:05 +0000 Subject: [PATCH 03/11] New: unit tests for more data representation use cases; helpful as examples as well. --- test/spec/Feature/Query/InsertSpec.hs | 6 +-- test/spec/Feature/Query/QuerySpec.hs | 24 +++++----- test/spec/Feature/Query/UpdateSpec.hs | 20 ++++---- test/spec/fixtures/data.sql | 4 +- test/spec/fixtures/schema.sql | 68 ++++++++++++++++++++++++++- 5 files changed, 92 insertions(+), 30 deletions(-) diff --git a/test/spec/Feature/Query/InsertSpec.hs b/test/spec/Feature/Query/InsertSpec.hs index 1ea0ad8266..6c7ad07db6 100644 --- a/test/spec/Feature/Query/InsertSpec.hs +++ b/test/spec/Feature/Query/InsertSpec.hs @@ -659,7 +659,7 @@ spec actualPgVersion = do , "Content-Range" <:> "*/*" ] } - -- Data representations for payload parsing requires Postgrest 10 or above. + -- Data representations for payload parsing requires Postgres 10 or above. 
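+  -- A sketch of what parsing a POST body amounts to, under the fixture's json->domain casts
+  -- (illustrative only; PostgREST's generated SQL differs):
+  --   INSERT INTO datarep_todos (id, label_color)
+  --   SELECT (r->>'id')::bigint, (r->'label_color')::public.color
+  --   FROM json_array_elements('[{"id":5,"label_color":"#001100"}]'::json) AS r;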
when (actualPgVersion >= pgVersion100) $ do describe "Data representations" $ do context "on regular table" $ do @@ -688,9 +688,9 @@ spec actualPgVersion = do it "parses values in POST body and formats values in return=representation" $ request methodPost "/datarep_todos" [("Prefer", "return=representation")] - [json| {"id":5, "name": "party", "label_color": "#001100", "due_at": "2018-01-03T11:00:00+00"} |] + [json| {"id":5, "name": "party", "label_color": "#001100", "due_at": "2018-01-03T11:00:00+00", "icon_image": "3q2+7w", "created_at":-15, "budget": "-100000000000000.13"} |] `shouldRespondWith` - [json| [{"id":5,"name": "party", "label_color": "#001100", "due_at":"2018-01-03T11:00:00+00"}] |] + [json| [{"id":5,"name": "party", "label_color": "#001100", "due_at":"2018-01-03T11:00:00+00", "icon_image": "3q2+7w==", "created_at":-15, "budget": "-100000000000000.13"}] |] { matchStatus = 201 , matchHeaders = ["Content-Type" <:> "application/json; charset=utf-8", "Content-Range" <:> "*/*"] diff --git a/test/spec/Feature/Query/QuerySpec.hs b/test/spec/Feature/Query/QuerySpec.hs index 690550c937..1f99963962 100644 --- a/test/spec/Feature/Query/QuerySpec.hs +++ b/test/spec/Feature/Query/QuerySpec.hs @@ -1317,25 +1317,25 @@ spec actualPgVersion = do it "formats star select" $ get "/datarep_todos?select=*&id=lt.4" `shouldRespondWith` [json| [ - {"id":1,"name":"Report","label_color":"#000000","due_at":"2018-01-02T00:00:00+00"}, - {"id":2,"name":"Essay","label_color":"#000100","due_at":"2018-01-03T00:00:00+00"}, - {"id":3,"name":"Algebra","label_color":"#01E240","due_at":"2018-01-01T14:12:34.123456+00"} + {"id":1,"name":"Report","label_color":"#000000","due_at":"2018-01-02T00:00:00+00","icon_image":"iVBORw0KGgoAAAANSUhEUgAAAAEAAAABAQAAAAA3bvkkAAAAABBJREFUeJxiYAEAAAAA//8DAAAABgAFBXv6vUAAAAAASUVORK5CYII=","created_at":1513213350,"budget":"12.50"}, + {"id":2,"name":"Essay","label_color":"#000100","due_at":"2018-01-03T00:00:00+00","icon_image":null,"created_at":1513213350,"budget":"100000000000000.13"}, + {"id":3,"name":"Algebra","label_color":"#01E240","due_at":"2018-01-01T14:12:34.123456+00","icon_image":null,"created_at":1513213350,"budget":"0.00"} ] |] { matchHeaders = [matchContentTypeJson] } it "formats implicit star select" $ get "/datarep_todos?id=lt.4" `shouldRespondWith` [json| [ - {"id":1,"name":"Report","label_color":"#000000","due_at":"2018-01-02T00:00:00+00"}, - {"id":2,"name":"Essay","label_color":"#000100","due_at":"2018-01-03T00:00:00+00"}, - {"id":3,"name":"Algebra","label_color":"#01E240","due_at":"2018-01-01T14:12:34.123456+00"} + {"id":1,"name":"Report","label_color":"#000000","due_at":"2018-01-02T00:00:00+00","icon_image":"iVBORw0KGgoAAAANSUhEUgAAAAEAAAABAQAAAAA3bvkkAAAAABBJREFUeJxiYAEAAAAA//8DAAAABgAFBXv6vUAAAAAASUVORK5CYII=","created_at":1513213350,"budget":"12.50"}, + {"id":2,"name":"Essay","label_color":"#000100","due_at":"2018-01-03T00:00:00+00","icon_image":null,"created_at":1513213350,"budget":"100000000000000.13"}, + {"id":3,"name":"Algebra","label_color":"#01E240","due_at":"2018-01-01T14:12:34.123456+00","icon_image":null,"created_at":1513213350,"budget":"0.00"} ] |] { matchHeaders = [matchContentTypeJson] } it "formats star and explicit mix" $ get "/datarep_todos?select=due_at,*&id=lt.4" `shouldRespondWith` [json| [ - {"id":1,"name":"Report","label_color":"#000000","due_at":"2018-01-02T00:00:00+00"}, - {"id":2,"name":"Essay","label_color":"#000100","due_at":"2018-01-03T00:00:00+00"}, - 
{"id":3,"name":"Algebra","label_color":"#01E240","due_at":"2018-01-01T14:12:34.123456+00"} + {"due_at":"2018-01-02T00:00:00+00","id":1,"name":"Report","label_color":"#000000","due_at":"2018-01-02T00:00:00+00","icon_image":"iVBORw0KGgoAAAANSUhEUgAAAAEAAAABAQAAAAA3bvkkAAAAABBJREFUeJxiYAEAAAAA//8DAAAABgAFBXv6vUAAAAAASUVORK5CYII=","created_at":1513213350,"budget":"12.50"}, + {"due_at":"2018-01-03T00:00:00+00","id":2,"name":"Essay","label_color":"#000100","due_at":"2018-01-03T00:00:00+00","icon_image":null,"created_at":1513213350,"budget":"100000000000000.13"}, + {"due_at":"2018-01-01T14:12:34.123456+00","id":3,"name":"Algebra","label_color":"#01E240","due_at":"2018-01-01T14:12:34.123456+00","icon_image":null,"created_at":1513213350,"budget":"0.00"} ] |] { matchHeaders = [matchContentTypeJson] } it "formats through join" $ @@ -1345,10 +1345,8 @@ spec actualPgVersion = do it "formats through join with star select" $ get "/datarep_next_two_todos?select=id,name,second_item:datarep_todos!datarep_next_two_todos_second_item_id_fkey(*)" `shouldRespondWith` [json| [ - {"id":1,"name":"school related","second_item": - {"id": 3, "name": "Algebra", "label_color":"#01E240","due_at":"2018-01-01T14:12:34.123456+00"}}, - {"id":2,"name":"do these first","second_item": - {"id": 3, "name": "Algebra", "label_color":"#01E240","due_at":"2018-01-01T14:12:34.123456+00"}} + {"id":1,"name":"school related","second_item":{"id":3,"name":"Algebra","label_color":"#01E240","due_at":"2018-01-01T14:12:34.123456+00","icon_image":null,"created_at":1513213350,"budget":"0.00"}}, + {"id":2,"name":"do these first","second_item":{"id":3,"name":"Algebra","label_color":"#01E240","due_at":"2018-01-01T14:12:34.123456+00","icon_image":null,"created_at":1513213350,"budget":"0.00"}} ] |] { matchHeaders = [matchContentTypeJson] } it "uses text parser on value for filter given through query parameters" $ diff --git a/test/spec/Feature/Query/UpdateSpec.hs b/test/spec/Feature/Query/UpdateSpec.hs index 16840b1d04..429175aab6 100644 --- a/test/spec/Feature/Query/UpdateSpec.hs +++ b/test/spec/Feature/Query/UpdateSpec.hs @@ -547,7 +547,7 @@ spec actualPgVersion = do , { "id": 3, "name": "item-3" } ]|] - -- Data representations for payload parsing requires Postgrest 10 or above. + -- Data representations for payload parsing requires Postgres 10 or above. 
when (actualPgVersion >= pgVersion100) $ do describe "Data representations" $ do context "for a single row" $ do @@ -573,9 +573,9 @@ spec actualPgVersion = do it "parses values in payload and formats values in return=representation" $ request methodPatch "/datarep_todos?id=eq.2" [("Prefer", "return=representation")] - [json| {"label_color": "#221100", "due_at": "2019-01-03T11:00:20+00"} |] + [json| {"label_color": "#221100", "due_at": "2019-01-03T11:00:20+00", "icon_image": "3q2+7w"} |] `shouldRespondWith` - [json| [{"id":2, "name": "Essay", "label_color": "#221100", "due_at":"2019-01-03T11:00:20+00"}] |] + [json| [{"id":2,"name":"Essay","label_color":"#221100","due_at":"2019-01-03T11:00:20+00","icon_image":"3q2+7w==","created_at":1513213350,"budget":"100000000000000.13"}] |] { matchStatus = 200 , matchHeaders = ["Content-Type" <:> "application/json; charset=utf-8", "Content-Range" <:> "0-0/*"] @@ -583,10 +583,10 @@ spec actualPgVersion = do it "parses values in payload and formats star mixed selected values in return=representation" $ request methodPatch "/datarep_todos?id=eq.2&select=due_at,*" [("Prefer", "return=representation")] - [json| {"label_color": "#221100", "due_at": "2019-01-03T11:00:00+00"} |] + [json| {"label_color": "#221100", "due_at": "2019-01-03T11:00:00+00", "created_at": 0} |] `shouldRespondWith` -- end up with due_at twice here but that's unrelated to data reps - [json| [{"due_at":"2019-01-03T11:00:00+00", "id":2, "name":"Essay", "label_color":"#221100", "due_at":"2019-01-03T11:00:00+00"}] |] + [json| [{"due_at":"2019-01-03T11:00:00+00","id":2,"name":"Essay","label_color":"#221100","due_at":"2019-01-03T11:00:00+00","icon_image":null,"created_at":0,"budget":"100000000000000.13"}] |] { matchStatus = 200 , matchHeaders = ["Content-Type" <:> "application/json; charset=utf-8", "Content-Range" <:> "0-0/*"] @@ -608,12 +608,12 @@ spec actualPgVersion = do it "parses values in payload and formats values in return=representation" $ request methodPatch "/datarep_todos?id=lt.4" [("Prefer", "return=representation")] - [json| {"label_color": "#221100", "due_at": "2019-01-03T11:00:00+00"} |] + [json| {"label_color": "#221100", "due_at": "2019-01-03T11:00:00+00", "icon_image": "3q2+7w="} |] `shouldRespondWith` [json| [ - {"id":1, "name": "Report", "label_color": "#221100", "due_at":"2019-01-03T11:00:00+00"}, - {"id":2, "name": "Essay", "label_color": "#221100", "due_at":"2019-01-03T11:00:00+00"}, - {"id":3, "name": "Algebra", "label_color": "#221100", "due_at":"2019-01-03T11:00:00+00"} + {"id":1,"name":"Report","label_color":"#221100","due_at":"2019-01-03T11:00:00+00","icon_image":"3q2+7w==","created_at":1513213350,"budget":"12.50"}, + {"id":2,"name":"Essay","label_color":"#221100","due_at":"2019-01-03T11:00:00+00","icon_image":"3q2+7w==","created_at":1513213350,"budget":"100000000000000.13"}, + {"id":3,"name":"Algebra","label_color":"#221100","due_at":"2019-01-03T11:00:00+00","icon_image":"3q2+7w==","created_at":1513213350,"budget":"0.00"} ] |] { matchStatus = 200 , matchHeaders = ["Content-Type" <:> "application/json; charset=utf-8", @@ -625,7 +625,7 @@ spec actualPgVersion = do [json| {"due_at": "2019-01-03T11:00:00+00", "smth": "here", "label_color": "invalid", "fake_id": 13} |] `shouldRespondWith` [json| [ - {"id":2, "name": "Essay", "label_color": "#000100", "due_at":"2019-01-03T11:00:00+00"} + {"id":2,"name":"Essay","label_color":"#000100","due_at":"2019-01-03T11:00:00+00","icon_image":null,"created_at":1513213350,"budget":"100000000000000.13"} ] |] { matchStatus = 200 , 
matchHeaders = ["Content-Type" <:> "application/json; charset=utf-8", diff --git a/test/spec/fixtures/data.sql b/test/spec/fixtures/data.sql index 501beac467..22b5a2fdf4 100644 --- a/test/spec/fixtures/data.sql +++ b/test/spec/fixtures/data.sql @@ -840,8 +840,8 @@ TRUNCATE TABLE subscriptions CASCADE; INSERT INTO subscriptions(subscriber,subscribed) VALUES (3,1), (4,1), (1,2); TRUNCATE TABLE datarep_todos CASCADE; -INSERT INTO datarep_todos VALUES (1, 'Report', 0, '2018-01-02'); -INSERT INTO datarep_todos VALUES (2, 'Essay', 256, '2018-01-03'); +INSERT INTO datarep_todos VALUES (1, 'Report', 0, '2018-01-02', '\x89504e470d0a1a0a0000000d4948445200000001000000010100000000376ef924000000001049444154789c62600100000000ffff03000000060005057bfabd400000000049454e44ae426082', '2017-12-14 01:02:30', 12.50); -- smallest possible PNG +INSERT INTO datarep_todos VALUES (2, 'Essay', 256, '2018-01-03', NULL, '2017-12-14 01:02:30', 100000000000000.13); -- a number which can't be represented by a 64-bit float INSERT INTO datarep_todos VALUES (3, 'Algebra', 123456, '2018-01-01 14:12:34.123456'); INSERT INTO datarep_todos VALUES (4, 'Opus Magnum', NULL, NULL); diff --git a/test/spec/fixtures/schema.sql b/test/spec/fixtures/schema.sql index 6711ce732b..916312bc09 100644 --- a/test/spec/fixtures/schema.sql +++ b/test/spec/fixtures/schema.sql @@ -3114,7 +3114,7 @@ create table test.subscriptions( primary key(subscriber, subscribed) ); --- For formatting output and parsing input of types with custom API representations. +-- Data representations feature DROP DOMAIN IF EXISTS public.color CASCADE; CREATE DOMAIN public.color AS INTEGER CHECK (VALUE >= 0 AND VALUE <= 16777215); @@ -3157,11 +3157,75 @@ CREATE CAST (json AS public.isodate) WITH FUNCTION isodate(json) AS IMPLICIT; -- We intentionally don't have this in order to test query string parsing doesn't try to fall back on JSON parsing. 
-- CREATE CAST (text AS public.isodate) WITH FUNCTION isodate(text) AS IMPLICIT; +-- bytea_b64 is a base64-encoded binary string +DROP DOMAIN IF EXISTS public.bytea_b64 CASCADE; +CREATE DOMAIN public.bytea_b64 AS bytea; + +CREATE OR REPLACE FUNCTION bytea_b64(json) RETURNS public.bytea_b64 AS $$ + SELECT bytea_b64($1 #>> '{}'); +$$ LANGUAGE SQL IMMUTABLE; + +CREATE OR REPLACE FUNCTION bytea_b64(text) RETURNS public.bytea_b64 AS $$ + -- allow unpadded base64 + SELECT decode($1 || repeat('=', 4 - (length($1) % 4)), 'base64')::public.bytea_b64; +$$ LANGUAGE SQL IMMUTABLE; + +CREATE OR REPLACE FUNCTION json(public.bytea_b64) RETURNS json AS $$ + SELECT to_json(translate(encode($1, 'base64'), E'\n', '')); +$$ LANGUAGE SQL IMMUTABLE; + +CREATE CAST (public.bytea_b64 AS json) WITH FUNCTION json(public.bytea_b64) AS IMPLICIT; +CREATE CAST (json AS public.bytea_b64) WITH FUNCTION bytea_b64(json) AS IMPLICIT; +CREATE CAST (text AS public.bytea_b64) WITH FUNCTION bytea_b64(text) AS IMPLICIT; + +-- unixtz is a timestamptz represented as an integer number of seconds since the Unix epoch +DROP DOMAIN IF EXISTS public.unixtz CASCADE; +CREATE DOMAIN public.unixtz AS timestamp with time zone; + +CREATE OR REPLACE FUNCTION unixtz(json) RETURNS public.unixtz AS $$ + SELECT unixtz($1 #>> '{}'); +$$ LANGUAGE SQL IMMUTABLE; + +CREATE OR REPLACE FUNCTION unixtz(text) RETURNS public.unixtz AS $$ + SELECT (to_timestamp($1::numeric)::public.unixtz); +$$ LANGUAGE SQL IMMUTABLE; + +CREATE OR REPLACE FUNCTION json(public.unixtz) RETURNS json AS $$ + SELECT to_json(extract(epoch from $1)::bigint); +$$ LANGUAGE SQL IMMUTABLE; + + +CREATE CAST (public.unixtz AS json) WITH FUNCTION json(public.unixtz) AS IMPLICIT; +CREATE CAST (json AS public.unixtz) WITH FUNCTION unixtz(json) AS IMPLICIT; +CREATE CAST (text AS public.unixtz) WITH FUNCTION unixtz(text) AS IMPLICIT; + +DROP DOMAIN IF EXISTS public.monetary CASCADE; +CREATE DOMAIN public.monetary AS numeric(17,2); + +CREATE OR REPLACE FUNCTION monetary(json) RETURNS public.monetary AS $$ + SELECT monetary($1 #>> '{}'); +$$ LANGUAGE SQL IMMUTABLE; + +CREATE OR REPLACE FUNCTION monetary(text) RETURNS public.monetary AS $$ + SELECT ($1::numeric)::public.monetary; +$$ LANGUAGE SQL IMMUTABLE; + +CREATE OR REPLACE FUNCTION json(public.monetary) RETURNS json AS $$ + SELECT to_json($1::text); +$$ LANGUAGE SQL IMMUTABLE; + +CREATE CAST (public.monetary AS json) WITH FUNCTION json(public.monetary) AS IMPLICIT; +CREATE CAST (json AS public.monetary) WITH FUNCTION monetary(json) AS IMPLICIT; +CREATE CAST (text AS public.monetary) WITH FUNCTION monetary(text) AS IMPLICIT; + CREATE TABLE datarep_todos ( id bigint primary key, name text, label_color public.color default 0, - due_at public.isodate default '2018-01-01'::date + due_at public.isodate default '2018-01-01'::date, + icon_image public.bytea_b64, + created_at public.unixtz default '2017-12-14 01:02:30'::timestamptz, + budget public.monetary default 0 ); CREATE TABLE datarep_next_two_todos ( From 8fb733ae351fb72fd4aad4ab7ae06d90e929ab4d Mon Sep 17 00:00:00 2001 From: Alexander Ljungberg <aljungberg@wireload.net> Date: Mon, 23 Jan 2023 13:31:35 +0000 Subject: [PATCH 04/11] New: update CHANGELOG with data representations feature description. --- CHANGELOG.md | 17 ++++++++++++++++- 1 file changed, 16 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index ee0465174f..ec3a673e7d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,21 @@ This project adheres to [Semantic Versioning](http://semver.org/). 
## Unreleased

+ - #2523, Data representations - @aljungberg
+   + Allows for flexible API output formatting and input parsing on a per-column type basis using regular SQL functions configured in the database
+   + Enables greater flexibility in the form and shape of your APIs, both for output and input, making PostgREST a more versatile general-purpose API server
+   + Examples include base64 encode/decode your binary data (like a `bytea` column containing an image), choose whether to present a timestamp column as seconds since the Unix epoch or as an ISO 8601 string, or represent fixed precision decimals as strings, not doubles, to preserve precision
+   + ...and accept the same in `POST/PUT/PATCH` by configuring the reverse transformation(s)
+   + Other use cases include custom representation of enums, arrays, nested objects, CSS hex colour strings, gzip compressed fields, metric to imperial conversions, and much more
+   + Works when using the `select` parameter to select only a subset of columns, embedding through complex joins, renaming fields, with views and computed columns
+   + Works when filtering on a formatted column without extra indexes by parsing to the canonical representation
+   + Works for data `RETURNING` operations, such as requesting the full body in a POST/PUT/PATCH with `Prefer: return=representation`
+   + Works for batch updates and inserts
+   + Completely optional: define the functions in the database and they will be used automatically everywhere
+   + Data representations preserve the ability to write to the original column and require no extra storage or complex triggers (compared to using `GENERATED ALWAYS` columns)
+   + Note: data representations require Postgres 10 (Postgres 11 if using `IN` predicates); data representations are not implemented for RPC
+ - #2622, Consider any PostgreSQL authentication failure as fatal and exit immediately - @michivi
+
 ### Added

 - #1414, Add related orders - @steve-chavez
@@ -329,7 +344,7 @@

 ### Deprecated

 - #1348, Deprecate `.` symbol for disambiguating resource embedding(added in #918). The url-safe '!' should be used instead. We refrained from using `+` as part of our syntax because it conflicts with some http clients and proxies.

 ## [6.0.1] - 2019-07-30

From 1247c208ac5da4b7508180018ec486f8d00d99ba Mon Sep 17 00:00:00 2001
From: Alexander Ljungberg <aljungberg@wireload.net>
Date: Mon, 23 Jan 2023 14:10:05 +0000
Subject: [PATCH 05/11] Fixed failing idempotence test.
---
 test/spec/Feature/Query/ComputedRelsSpec.hs | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/test/spec/Feature/Query/ComputedRelsSpec.hs b/test/spec/Feature/Query/ComputedRelsSpec.hs
index cba71e68b6..f7d85849bf 100644
--- a/test/spec/Feature/Query/ComputedRelsSpec.hs
+++ b/test/spec/Feature/Query/ComputedRelsSpec.hs
@@ -139,6 +139,11 @@ spec = describe "computed relationships" $ do
       [("Prefer", "tx=commit")]
       [json| {"name": "Sid Meier"} |]
       `shouldRespondWith` 204
+    -- we also update the second row so the underlying row ordering doesn't change
+    request methodPatch "/designers?id=eq.2"
+      [("Prefer", "tx=commit")]
+      [json| {"name": "Hironobu Sakaguchi"} |]
+      `shouldRespondWith` 204

   it "works with self joins" $
     get "/web_content?select=name,child_web_content(name),parent_web_content(name)&id=in.(0,1)"

From 3e0ca039b4f38b30b27cbea4061a5b64df12fedc Mon Sep 17 00:00:00 2001
From: Alexander Ljungberg <aljungberg@wireload.net>
Date: Thu, 2 Feb 2023 21:23:26 +0000
Subject: [PATCH 06/11] New: replace date formatter test with one that actually exercises the formatting.

---
 test/spec/Feature/Query/InsertSpec.hs |  8 ++--
 test/spec/Feature/Query/QuerySpec.hs  | 45 +++++++++++----------
 test/spec/Feature/Query/UpdateSpec.hs | 56 +++++++++++++--------------
 test/spec/fixtures/schema.sql         |  4 +-
 4 files changed, 56 insertions(+), 57 deletions(-)

diff --git a/test/spec/Feature/Query/InsertSpec.hs b/test/spec/Feature/Query/InsertSpec.hs
index 6c7ad07db6..e6ae4d3647 100644
--- a/test/spec/Feature/Query/InsertSpec.hs
+++ b/test/spec/Feature/Query/InsertSpec.hs
@@ -690,7 +690,7 @@
         request methodPost "/datarep_todos" [("Prefer", "return=representation")]
           [json| {"id":5, "name": "party", "label_color": "#001100", "due_at": "2018-01-03T11:00:00+00", "icon_image": "3q2+7w", "created_at":-15, "budget": "-100000000000000.13"} |]
           `shouldRespondWith`
-          [json| [{"id":5,"name": "party", "label_color": "#001100", "due_at":"2018-01-03T11:00:00+00", "icon_image": "3q2+7w==", "created_at":-15, "budget": "-100000000000000.13"}] |]
+          [json| [{"id":5,"name": "party", "label_color": "#001100", "due_at":"2018-01-03T11:00:00Z", "icon_image": "3q2+7w==", "created_at":-15, "budget": "-100000000000000.13"}] |]
           { matchStatus = 201
           , matchHeaders = ["Content-Type" <:> "application/json; charset=utf-8",
                             "Content-Range"
<:> "*/*"] @@ -755,7 +755,7 @@ spec actualPgVersion = do request methodPost "/datarep_todos_computed?columns=id,label_color&select=id,name,label_color,due_at" [("Prefer", "return=representation")] [json| {"id":5, "name": "party", "label_color": "#001100", "due_at": "invalid but should be ignored"} |] `shouldRespondWith` - [json| [{"id":5, "name":null, "label_color": "#001100", "due_at": "2018-01-01T00:00:00+00"}] |] + [json| [{"id":5, "name":null, "label_color": "#001100", "due_at": "2018-01-01T00:00:00Z"}] |] { matchStatus = 201 , matchHeaders = ["Content-Type" <:> "application/json; charset=utf-8", "Content-Range" <:> "*/*"] diff --git a/test/spec/Feature/Query/QuerySpec.hs b/test/spec/Feature/Query/QuerySpec.hs index 1f99963962..86059df37f 100644 --- a/test/spec/Feature/Query/QuerySpec.hs +++ b/test/spec/Feature/Query/QuerySpec.hs @@ -1284,7 +1284,7 @@ spec actualPgVersion = do { matchHeaders = [matchContentTypeJson] } it "formats two columns with different formatters" $ get "/datarep_todos?select=id,label_color,due_at&id=lt.4" `shouldRespondWith` - [json| [{"id":1,"label_color":"#000000","due_at":"2018-01-02T00:00:00+00"},{"id":2,"label_color":"#000100","due_at":"2018-01-03T00:00:00+00"},{"id":3,"label_color":"#01E240","due_at":"2018-01-01T14:12:34.123456+00"}] |] + [json| [{"id":1,"label_color":"#000000","due_at":"2018-01-02T00:00:00Z"},{"id":2,"label_color":"#000100","due_at":"2018-01-03T00:00:00Z"},{"id":3,"label_color":"#01E240","due_at":"2018-01-01T14:12:34.123456Z"}] |] { matchHeaders = [matchContentTypeJson] } it "fails in some reasonable way when selecting fields that don't exist" $ get "/datarep_todos?select=id,label_color,banana" `shouldRespondWith` @@ -1312,52 +1312,51 @@ spec actualPgVersion = do it "formats nulls" $ -- due_at is formatted as NULL but label_color NULLs become empty strings-- it's up to the formatting function. 
get "/datarep_todos?select=id,label_color,due_at&id=gt.2&id=lt.5" `shouldRespondWith` - [json| [{"id":3,"label_color":"#01E240","due_at":"2018-01-01T14:12:34.123456+00"},{"id":4,"label_color":"","due_at":null}] |] + [json| [{"id":3,"label_color":"#01E240","due_at":"2018-01-01T14:12:34.123456Z"},{"id":4,"label_color":"","due_at":null}] |] { matchHeaders = [matchContentTypeJson] } it "formats star select" $ get "/datarep_todos?select=*&id=lt.4" `shouldRespondWith` [json| [ - {"id":1,"name":"Report","label_color":"#000000","due_at":"2018-01-02T00:00:00+00","icon_image":"iVBORw0KGgoAAAANSUhEUgAAAAEAAAABAQAAAAA3bvkkAAAAABBJREFUeJxiYAEAAAAA//8DAAAABgAFBXv6vUAAAAAASUVORK5CYII=","created_at":1513213350,"budget":"12.50"}, - {"id":2,"name":"Essay","label_color":"#000100","due_at":"2018-01-03T00:00:00+00","icon_image":null,"created_at":1513213350,"budget":"100000000000000.13"}, - {"id":3,"name":"Algebra","label_color":"#01E240","due_at":"2018-01-01T14:12:34.123456+00","icon_image":null,"created_at":1513213350,"budget":"0.00"} + {"id":1,"name":"Report","label_color":"#000000","due_at":"2018-01-02T00:00:00Z","icon_image":"iVBORw0KGgoAAAANSUhEUgAAAAEAAAABAQAAAAA3bvkkAAAAABBJREFUeJxiYAEAAAAA//8DAAAABgAFBXv6vUAAAAAASUVORK5CYII=","created_at":1513213350,"budget":"12.50"}, + {"id":2,"name":"Essay","label_color":"#000100","due_at":"2018-01-03T00:00:00Z","icon_image":null,"created_at":1513213350,"budget":"100000000000000.13"}, + {"id":3,"name":"Algebra","label_color":"#01E240","due_at":"2018-01-01T14:12:34.123456Z","icon_image":null,"created_at":1513213350,"budget":"0.00"} ] |] { matchHeaders = [matchContentTypeJson] } it "formats implicit star select" $ get "/datarep_todos?id=lt.4" `shouldRespondWith` [json| [ - {"id":1,"name":"Report","label_color":"#000000","due_at":"2018-01-02T00:00:00+00","icon_image":"iVBORw0KGgoAAAANSUhEUgAAAAEAAAABAQAAAAA3bvkkAAAAABBJREFUeJxiYAEAAAAA//8DAAAABgAFBXv6vUAAAAAASUVORK5CYII=","created_at":1513213350,"budget":"12.50"}, - {"id":2,"name":"Essay","label_color":"#000100","due_at":"2018-01-03T00:00:00+00","icon_image":null,"created_at":1513213350,"budget":"100000000000000.13"}, - {"id":3,"name":"Algebra","label_color":"#01E240","due_at":"2018-01-01T14:12:34.123456+00","icon_image":null,"created_at":1513213350,"budget":"0.00"} + {"id":1,"name":"Report","label_color":"#000000","due_at":"2018-01-02T00:00:00Z","icon_image":"iVBORw0KGgoAAAANSUhEUgAAAAEAAAABAQAAAAA3bvkkAAAAABBJREFUeJxiYAEAAAAA//8DAAAABgAFBXv6vUAAAAAASUVORK5CYII=","created_at":1513213350,"budget":"12.50"}, + {"id":2,"name":"Essay","label_color":"#000100","due_at":"2018-01-03T00:00:00Z","icon_image":null,"created_at":1513213350,"budget":"100000000000000.13"}, + {"id":3,"name":"Algebra","label_color":"#01E240","due_at":"2018-01-01T14:12:34.123456Z","icon_image":null,"created_at":1513213350,"budget":"0.00"} ] |] { matchHeaders = [matchContentTypeJson] } it "formats star and explicit mix" $ get "/datarep_todos?select=due_at,*&id=lt.4" `shouldRespondWith` [json| [ - {"due_at":"2018-01-02T00:00:00+00","id":1,"name":"Report","label_color":"#000000","due_at":"2018-01-02T00:00:00+00","icon_image":"iVBORw0KGgoAAAANSUhEUgAAAAEAAAABAQAAAAA3bvkkAAAAABBJREFUeJxiYAEAAAAA//8DAAAABgAFBXv6vUAAAAAASUVORK5CYII=","created_at":1513213350,"budget":"12.50"}, - {"due_at":"2018-01-03T00:00:00+00","id":2,"name":"Essay","label_color":"#000100","due_at":"2018-01-03T00:00:00+00","icon_image":null,"created_at":1513213350,"budget":"100000000000000.13"}, - 
{"due_at":"2018-01-01T14:12:34.123456+00","id":3,"name":"Algebra","label_color":"#01E240","due_at":"2018-01-01T14:12:34.123456+00","icon_image":null,"created_at":1513213350,"budget":"0.00"} + {"due_at":"2018-01-02T00:00:00Z","id":1,"name":"Report","label_color":"#000000","due_at":"2018-01-02T00:00:00Z","icon_image":"iVBORw0KGgoAAAANSUhEUgAAAAEAAAABAQAAAAA3bvkkAAAAABBJREFUeJxiYAEAAAAA//8DAAAABgAFBXv6vUAAAAAASUVORK5CYII=","created_at":1513213350,"budget":"12.50"}, + {"due_at":"2018-01-03T00:00:00Z","id":2,"name":"Essay","label_color":"#000100","due_at":"2018-01-03T00:00:00Z","icon_image":null,"created_at":1513213350,"budget":"100000000000000.13"}, + {"due_at":"2018-01-01T14:12:34.123456Z","id":3,"name":"Algebra","label_color":"#01E240","due_at":"2018-01-01T14:12:34.123456Z","icon_image":null,"created_at":1513213350,"budget":"0.00"} ] |] { matchHeaders = [matchContentTypeJson] } it "formats through join" $ get "/datarep_next_two_todos?select=id,name,first_item:datarep_todos!datarep_next_two_todos_first_item_id_fkey(label_color,due_at)" `shouldRespondWith` - [json| [{"id":1,"name":"school related","first_item":{"label_color":"#000100","due_at":"2018-01-03T00:00:00+00"}},{"id":2,"name":"do these first","first_item":{"label_color":"#000000","due_at":"2018-01-02T00:00:00+00"}}] |] + [json| [{"id":1,"name":"school related","first_item":{"label_color":"#000100","due_at":"2018-01-03T00:00:00Z"}},{"id":2,"name":"do these first","first_item":{"label_color":"#000000","due_at":"2018-01-02T00:00:00Z"}}] |] { matchHeaders = [matchContentTypeJson] } it "formats through join with star select" $ get "/datarep_next_two_todos?select=id,name,second_item:datarep_todos!datarep_next_two_todos_second_item_id_fkey(*)" `shouldRespondWith` [json| [ - {"id":1,"name":"school related","second_item":{"id":3,"name":"Algebra","label_color":"#01E240","due_at":"2018-01-01T14:12:34.123456+00","icon_image":null,"created_at":1513213350,"budget":"0.00"}}, - {"id":2,"name":"do these first","second_item":{"id":3,"name":"Algebra","label_color":"#01E240","due_at":"2018-01-01T14:12:34.123456+00","icon_image":null,"created_at":1513213350,"budget":"0.00"}} + {"id":1,"name":"school related","second_item":{"id":3,"name":"Algebra","label_color":"#01E240","due_at":"2018-01-01T14:12:34.123456Z","icon_image":null,"created_at":1513213350,"budget":"0.00"}}, + {"id":2,"name":"do these first","second_item":{"id":3,"name":"Algebra","label_color":"#01E240","due_at":"2018-01-01T14:12:34.123456Z","icon_image":null,"created_at":1513213350,"budget":"0.00"}} ] |] { matchHeaders = [matchContentTypeJson] } it "uses text parser on value for filter given through query parameters" $ get "/datarep_todos?select=id,due_at&label_color=eq.000100" `shouldRespondWith` - [json| [{"id":2,"due_at":"2018-01-03T00:00:00+00"}] |] + [json| [{"id":2,"due_at":"2018-01-03T00:00:00Z"}] |] { matchHeaders = [matchContentTypeJson] } it "in the absense of text parser, does not try to use the JSON parser for query parameters" $ - get "/datarep_todos?select=id,due_at&due_at=eq.T" `shouldRespondWith` - -- okay this test is a bit of a hack but we prove the parser is not used because it'd replace the T and fail a - -- different way. - [json| {"code":"22007","details":null,"hint":null,"message":"invalid input syntax for type timestamp with time zone: \"T\""} |] + get "/datarep_todos?select=id,due_at&due_at=eq.Z" `shouldRespondWith` + -- we prove the parser is not used because it'd replace the Z with `+00:00` and a different error message. 
+ [json| {"code":"22007","details":null,"hint":null,"message":"invalid input syntax for type timestamp with time zone: \"Z\""} |] { matchStatus = 400 , matchHeaders = [matchContentTypeJson] } @@ -1367,19 +1366,19 @@ spec actualPgVersion = do it "uses text parser for filter with 'IN' predicates" $ get "/datarep_todos?select=id,due_at&label_color=in.(000100,01E240)" `shouldRespondWith` [json| [ - {"id":2, "due_at": "2018-01-03T00:00:00+00"}, - {"id":3, "due_at": "2018-01-01T14:12:34.123456+00"} + {"id":2, "due_at": "2018-01-03T00:00:00Z"}, + {"id":3, "due_at": "2018-01-01T14:12:34.123456Z"} ] |] { matchHeaders = [matchContentTypeJson] } it "uses text parser for filter with 'NOT IN' predicates" $ get "/datarep_todos?select=id,due_at&label_color=not.in.(000000,01E240)" `shouldRespondWith` [json| [ - {"id":2, "due_at": "2018-01-03T00:00:00+00"} + {"id":2, "due_at": "2018-01-03T00:00:00Z"} ] |] { matchHeaders = [matchContentTypeJson] } it "uses text parser on value for filter across relations" $ get "/datarep_next_two_todos?select=id,name,datarep_todos!datarep_next_two_todos_first_item_id_fkey(label_color,due_at)&datarep_todos.label_color=neq.000100" `shouldRespondWith` - [json| [{"id":1,"name":"school related","datarep_todos":null},{"id":2,"name":"do these first","datarep_todos":{"label_color":"#000000","due_at":"2018-01-02T00:00:00+00"}}] |] + [json| [{"id":1,"name":"school related","datarep_todos":null},{"id":2,"name":"do these first","datarep_todos":{"label_color":"#000000","due_at":"2018-01-02T00:00:00Z"}}] |] { matchHeaders = [matchContentTypeJson] } -- This is not supported by data reps (would be hard to make it work with high performance). So the test just -- verifies we don't panic or add inappropriate SQL to the filters. diff --git a/test/spec/Feature/Query/UpdateSpec.hs b/test/spec/Feature/Query/UpdateSpec.hs index 429175aab6..0fafd9451a 100644 --- a/test/spec/Feature/Query/UpdateSpec.hs +++ b/test/spec/Feature/Query/UpdateSpec.hs @@ -553,7 +553,7 @@ spec actualPgVersion = do context "for a single row" $ do it "parses values in payload" $ request methodPatch "/datarep_todos?id=eq.2" [("Prefer", "return=headers-only")] - [json| {"label_color": "#221100", "due_at": "2019-01-03T11:00:00+00"} |] + [json| {"label_color": "#221100", "due_at": "2019-01-03T11:00:00Z"} |] `shouldRespondWith` "" { matchStatus = 204 @@ -563,7 +563,7 @@ spec actualPgVersion = do it "parses values in payload and formats individually selected values in return=representation" $ request methodPatch "/datarep_todos?id=eq.2&select=id,label_color" [("Prefer", "return=representation")] - [json| {"label_color": "#221100", "due_at": "2019-01-03T11:00:00+00"} |] + [json| {"label_color": "#221100", "due_at": "2019-01-03T11:00:00Z"} |] `shouldRespondWith` [json| [{"id":2, "label_color": "#221100"}] |] { matchStatus = 200 @@ -573,9 +573,9 @@ spec actualPgVersion = do it "parses values in payload and formats values in return=representation" $ request methodPatch "/datarep_todos?id=eq.2" [("Prefer", "return=representation")] - [json| {"label_color": "#221100", "due_at": "2019-01-03T11:00:20+00", "icon_image": "3q2+7w"} |] + [json| {"label_color": "#221100", "due_at": "2019-01-03T11:00:20Z", "icon_image": "3q2+7w"} |] `shouldRespondWith` - [json| [{"id":2,"name":"Essay","label_color":"#221100","due_at":"2019-01-03T11:00:20+00","icon_image":"3q2+7w==","created_at":1513213350,"budget":"100000000000000.13"}] |] + [json| 
[{"id":2,"name":"Essay","label_color":"#221100","due_at":"2019-01-03T11:00:20Z","icon_image":"3q2+7w==","created_at":1513213350,"budget":"100000000000000.13"}] |] { matchStatus = 200 , matchHeaders = ["Content-Type" <:> "application/json; charset=utf-8", "Content-Range" <:> "0-0/*"] @@ -583,10 +583,10 @@ spec actualPgVersion = do it "parses values in payload and formats star mixed selected values in return=representation" $ request methodPatch "/datarep_todos?id=eq.2&select=due_at,*" [("Prefer", "return=representation")] - [json| {"label_color": "#221100", "due_at": "2019-01-03T11:00:00+00", "created_at": 0} |] + [json| {"label_color": "#221100", "due_at": "2019-01-03T11:00:00Z", "created_at": 0} |] `shouldRespondWith` -- end up with due_at twice here but that's unrelated to data reps - [json| [{"due_at":"2019-01-03T11:00:00+00","id":2,"name":"Essay","label_color":"#221100","due_at":"2019-01-03T11:00:00+00","icon_image":null,"created_at":0,"budget":"100000000000000.13"}] |] + [json| [{"due_at":"2019-01-03T11:00:00Z","id":2,"name":"Essay","label_color":"#221100","due_at":"2019-01-03T11:00:00Z","icon_image":null,"created_at":0,"budget":"100000000000000.13"}] |] { matchStatus = 200 , matchHeaders = ["Content-Type" <:> "application/json; charset=utf-8", "Content-Range" <:> "0-0/*"] @@ -594,7 +594,7 @@ spec actualPgVersion = do context "for multiple rows" $ do it "parses values in payload and formats individually selected values in return=representation" $ request methodPatch "/datarep_todos?id=lt.4&select=id,name,label_color" [("Prefer", "return=representation")] - [json| {"label_color": "#221100", "due_at": "2019-01-03T11:00:00+00"} |] + [json| {"label_color": "#221100", "due_at": "2019-01-03T11:00:00Z"} |] `shouldRespondWith` [json| [ {"id":1, "name": "Report", "label_color": "#221100"}, @@ -608,12 +608,12 @@ spec actualPgVersion = do it "parses values in payload and formats values in return=representation" $ request methodPatch "/datarep_todos?id=lt.4" [("Prefer", "return=representation")] - [json| {"label_color": "#221100", "due_at": "2019-01-03T11:00:00+00", "icon_image": "3q2+7w="} |] + [json| {"label_color": "#221100", "due_at": "2019-01-03T11:00:00Z", "icon_image": "3q2+7w="} |] `shouldRespondWith` [json| [ - {"id":1,"name":"Report","label_color":"#221100","due_at":"2019-01-03T11:00:00+00","icon_image":"3q2+7w==","created_at":1513213350,"budget":"12.50"}, - {"id":2,"name":"Essay","label_color":"#221100","due_at":"2019-01-03T11:00:00+00","icon_image":"3q2+7w==","created_at":1513213350,"budget":"100000000000000.13"}, - {"id":3,"name":"Algebra","label_color":"#221100","due_at":"2019-01-03T11:00:00+00","icon_image":"3q2+7w==","created_at":1513213350,"budget":"0.00"} + {"id":1,"name":"Report","label_color":"#221100","due_at":"2019-01-03T11:00:00Z","icon_image":"3q2+7w==","created_at":1513213350,"budget":"12.50"}, + {"id":2,"name":"Essay","label_color":"#221100","due_at":"2019-01-03T11:00:00Z","icon_image":"3q2+7w==","created_at":1513213350,"budget":"100000000000000.13"}, + {"id":3,"name":"Algebra","label_color":"#221100","due_at":"2019-01-03T11:00:00Z","icon_image":"3q2+7w==","created_at":1513213350,"budget":"0.00"} ] |] { matchStatus = 200 , matchHeaders = ["Content-Type" <:> "application/json; charset=utf-8", @@ -622,10 +622,10 @@ spec actualPgVersion = do context "with ?columns parameter" $ do it "ignores json keys not included in ?columns; parses only the ones specified" $ request methodPatch "/datarep_todos?id=eq.2&columns=due_at" [("Prefer", "return=representation")] - [json| 
{"due_at": "2019-01-03T11:00:00+00", "smth": "here", "label_color": "invalid", "fake_id": 13} |] + [json| {"due_at": "2019-01-03T11:00:00Z", "smth": "here", "label_color": "invalid", "fake_id": 13} |] `shouldRespondWith` [json| [ - {"id":2,"name":"Essay","label_color":"#000100","due_at":"2019-01-03T11:00:00+00","icon_image":null,"created_at":1513213350,"budget":"100000000000000.13"} + {"id":2,"name":"Essay","label_color":"#000100","due_at":"2019-01-03T11:00:00Z","icon_image":null,"created_at":1513213350,"budget":"100000000000000.13"} ] |] { matchStatus = 200 , matchHeaders = ["Content-Type" <:> "application/json; charset=utf-8", @@ -634,7 +634,7 @@ spec actualPgVersion = do it "fails if at least one specified column doesn't exist" $ request methodPatch "/datarep_todos?id=eq.2&columns=label_color,helicopters" [("Prefer", "return=representation")] - [json| {"due_at": "2019-01-03T11:00:00+00", "smth": "here", "label_color": "invalid", "fake_id": 13} |] + [json| {"due_at": "2019-01-03T11:00:00Z", "smth": "here", "label_color": "invalid", "fake_id": 13} |] `shouldRespondWith` [json| {"code":"PGRST118","message":"Column 'helicopters' of relation 'datarep_todos' does not exist","details":null,"hint":null} |] { matchStatus = 400 @@ -643,13 +643,13 @@ spec actualPgVersion = do it "ignores json keys and gives 200 if no record updated" $ request methodPatch "/datarep_todos?id=eq.2001&columns=label_color" [("Prefer", "return=representation")] - [json| {"due_at": "2019-01-03T11:00:00+00", "smth": "here", "label_color": "invalid", "fake_id": 13} |] + [json| {"due_at": "2019-01-03T11:00:00Z", "smth": "here", "label_color": "invalid", "fake_id": 13} |] `shouldRespondWith` 200 context "on a view" $ do context "for a single row" $ do it "parses values in payload" $ request methodPatch "/datarep_todos_computed?id=eq.2" [("Prefer", "return=headers-only")] - [json| {"label_color": "#221100", "due_at": "2019-01-03T11:00:00+00"} |] + [json| {"label_color": "#221100", "due_at": "2019-01-03T11:00:00Z"} |] `shouldRespondWith` "" { matchStatus = 204 @@ -659,7 +659,7 @@ spec actualPgVersion = do it "parses values in payload and formats individually selected values in return=representation" $ request methodPatch "/datarep_todos_computed?id=eq.2&select=id,label_color" [("Prefer", "return=representation")] - [json| {"label_color": "#221100", "due_at": "2019-01-03T11:00:00+00"} |] + [json| {"label_color": "#221100", "due_at": "2019-01-03T11:00:00Z"} |] `shouldRespondWith` [json| [{"id":2, "label_color": "#221100"}] |] { matchStatus = 200 @@ -669,9 +669,9 @@ spec actualPgVersion = do it "parses values in payload and formats values in return=representation" $ request methodPatch "/datarep_todos_computed?id=eq.2" [("Prefer", "return=representation")] - [json| {"label_color": "#221100", "due_at": "2019-01-03T11:00:20+00"} |] + [json| {"label_color": "#221100", "due_at": "2019-01-03T11:00:20Z"} |] `shouldRespondWith` - [json| [{"id":2, "name": "Essay", "label_color": "#221100", "dark_color":"#110880", "due_at":"2019-01-03T11:00:20+00"}] |] + [json| [{"id":2, "name": "Essay", "label_color": "#221100", "dark_color":"#110880", "due_at":"2019-01-03T11:00:20Z"}] |] { matchStatus = 200 , matchHeaders = ["Content-Type" <:> "application/json; charset=utf-8", "Content-Range" <:> "0-0/*"] @@ -679,7 +679,7 @@ spec actualPgVersion = do context "for multiple rows" $ do it "parses values in payload and formats individually selected values in return=representation" $ request methodPatch 
"/datarep_todos_computed?id=lt.4&select=id,name,label_color,dark_color" [("Prefer", "return=representation")] - [json| {"label_color": "#221100", "due_at": "2019-01-03T11:00:00+00"} |] + [json| {"label_color": "#221100", "due_at": "2019-01-03T11:00:00Z"} |] `shouldRespondWith` [json| [ {"id":1, "name": "Report", "label_color": "#221100", "dark_color":"#110880"}, @@ -693,12 +693,12 @@ spec actualPgVersion = do it "parses values in payload and formats values in return=representation" $ request methodPatch "/datarep_todos_computed?id=lt.4" [("Prefer", "return=representation")] - [json| {"label_color": "#221100", "due_at": "2019-01-03T11:00:00+00"} |] + [json| {"label_color": "#221100", "due_at": "2019-01-03T11:00:00Z"} |] `shouldRespondWith` [json| [ - {"id":1, "name": "Report", "label_color": "#221100", "dark_color":"#110880", "due_at":"2019-01-03T11:00:00+00"}, - {"id":2, "name": "Essay", "label_color": "#221100", "dark_color":"#110880", "due_at":"2019-01-03T11:00:00+00"}, - {"id":3, "name": "Algebra", "label_color": "#221100", "dark_color":"#110880", "due_at":"2019-01-03T11:00:00+00"} + {"id":1, "name": "Report", "label_color": "#221100", "dark_color":"#110880", "due_at":"2019-01-03T11:00:00Z"}, + {"id":2, "name": "Essay", "label_color": "#221100", "dark_color":"#110880", "due_at":"2019-01-03T11:00:00Z"}, + {"id":3, "name": "Algebra", "label_color": "#221100", "dark_color":"#110880", "due_at":"2019-01-03T11:00:00Z"} ] |] { matchStatus = 200 , matchHeaders = ["Content-Type" <:> "application/json; charset=utf-8", @@ -707,10 +707,10 @@ spec actualPgVersion = do context "with ?columns parameter" $ do it "ignores json keys not included in ?columns; parses only the ones specified" $ request methodPatch "/datarep_todos_computed?id=eq.2&columns=due_at" [("Prefer", "return=representation")] - [json| {"due_at": "2019-01-03T11:00:00+00", "smth": "here", "label_color": "invalid", "fake_id": 13} |] + [json| {"due_at": "2019-01-03T11:00:00Z", "smth": "here", "label_color": "invalid", "fake_id": 13} |] `shouldRespondWith` [json| [ - {"id":2, "name": "Essay", "label_color": "#000100", "dark_color": "#000080", "due_at":"2019-01-03T11:00:00+00"} + {"id":2, "name": "Essay", "label_color": "#000100", "dark_color": "#000080", "due_at":"2019-01-03T11:00:00Z"} ] |] { matchStatus = 200 , matchHeaders = ["Content-Type" <:> "application/json; charset=utf-8", @@ -719,7 +719,7 @@ spec actualPgVersion = do it "fails if at least one specified column doesn't exist" $ request methodPatch "/datarep_todos_computed?id=eq.2&columns=label_color,helicopters" [("Prefer", "return=representation")] - [json| {"due_at": "2019-01-03T11:00:00+00", "smth": "here", "label_color": "invalid", "fake_id": 13} |] + [json| {"due_at": "2019-01-03T11:00:00Z", "smth": "here", "label_color": "invalid", "fake_id": 13} |] `shouldRespondWith` [json| {"code":"PGRST118","message":"Column 'helicopters' of relation 'datarep_todos_computed' does not exist","details":null,"hint":null} |] { matchStatus = 400 @@ -728,5 +728,5 @@ spec actualPgVersion = do it "ignores json keys and gives 200 if no record updated" $ request methodPatch "/datarep_todos_computed?id=eq.2001&columns=label_color" [("Prefer", "return=representation")] - [json| {"due_at": "2019-01-03T11:00:00+00", "smth": "here", "label_color": "invalid", "fake_id": 13} |] + [json| {"due_at": "2019-01-03T11:00:00Z", "smth": "here", "label_color": "invalid", "fake_id": 13} |] `shouldRespondWith` 200 diff --git a/test/spec/fixtures/schema.sql b/test/spec/fixtures/schema.sql index 
916312bc09..5ef0c4ed28 100644 --- a/test/spec/fixtures/schema.sql +++ b/test/spec/fixtures/schema.sql @@ -3145,11 +3145,11 @@ CREATE OR REPLACE FUNCTION isodate(json) RETURNS public.isodate AS $$ $$ LANGUAGE SQL IMMUTABLE; CREATE OR REPLACE FUNCTION isodate(text) RETURNS public.isodate AS $$ - SELECT (replace($1, 'T', ' ')::timestamp with time zone)::public.isodate; + SELECT (replace($1, 'Z', '+00:00')::timestamp with time zone)::public.isodate; $$ LANGUAGE SQL IMMUTABLE; CREATE OR REPLACE FUNCTION json(public.isodate) RETURNS json AS $$ - SELECT to_json(replace($1::text, ' ', 'T')); + SELECT to_json(replace(to_json($1)#>>'{}', '+00:00', 'Z')); $$ LANGUAGE SQL IMMUTABLE; CREATE CAST (public.isodate AS json) WITH FUNCTION json(public.isodate) AS IMPLICIT; From bf3d7882459a30ffc668ad0f6e64cc6d81ce8889 Mon Sep 17 00:00:00 2001 From: Alexander Ljungberg <aljungberg@wireload.net> Date: Fri, 3 Feb 2023 12:56:40 +0000 Subject: [PATCH 07/11] Fixup: inadvertent CHANGELOG change after rebase. --- CHANGELOG.md | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index ec3a673e7d..8f87d28a75 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -18,7 +18,6 @@ This project adheres to [Semantic Versioning](http://semver.org/). + Completely optional, define the functions in the database and they will be used automatically everywhere + Data representations preserve the ability to write to the original column and require no extra storage or complex triggers (compared to using `GENERATED ALWAYS` columns) + Note: data representations require Postgres 10 (Postgres 11 if using `IN` predicates); data representations are not implemented for RPC - - #2622, Consider any PostgreSQL authentication failure as fatal and exit immediately - @michivi ### Added @@ -344,7 +343,7 @@ This project adheres to [Semantic Versioning](http://semver.org/). ### Deprecated -+- #1348, Deprecate `.` symbol for disambiguating resource embedding(added in #918). The url-safe '!' should be used instead. We refrained from using `+` as part of our syntax because it conflicts with some http clients and proxies. +- #1348, Deprecate `.` symbol for disambiguating resource embedding(added in #918). The url-safe '!' should be used instead. We refrained from using `+` as part of our syntax because it conflicts with some http clients and proxies. ## [6.0.1] - 2019-07-30 From 29a3351f3f6e6596934041686c3f01be9898b2fd Mon Sep 17 00:00:00 2001 From: Alexander Ljungberg <aljungberg@wireload.net> Date: Wed, 8 Feb 2023 11:23:52 +0000 Subject: [PATCH 08/11] Cleanup: `tfName` -> `cfName` and related. --- src/PostgREST/Plan.hs | 26 +++++++++++++------------- src/PostgREST/Plan/Types.hs | 8 ++++---- src/PostgREST/Query/QueryBuilder.hs | 8 ++++---- src/PostgREST/Query/SqlFragment.hs | 20 ++++++++++---------- 4 files changed, 31 insertions(+), 31 deletions(-) diff --git a/src/PostgREST/Plan.hs b/src/PostgREST/Plan.hs index 05b42917a9..a5ee3be19f 100644 --- a/src/PostgREST/Plan.hs +++ b/src/PostgREST/Plan.hs @@ -135,10 +135,10 @@ resolveTableField table (fieldName, []) = resolveTableFieldName table fieldName -- If the field is known and a JSON path is given, always assume the JSON type. But don't assume a type for entirely unknown fields. 
resolveTableField table (fieldName, jp) = case resolveTableFieldName table fieldName of - tf@CoercibleField{tfIRType=""} -> tf{tfJsonPath=jp} - tf -> tf{tfJsonPath=jp, tfIRType="json"} + cf@CoercibleField{cfIRType=""} -> cf{cfJsonPath=jp} + cf -> cf{cfJsonPath=jp, cfIRType="json"} --- | Resolve a type within the context based on the given field name and JSON path. Although there are situations where failure to resolve a field is considered an error (see `resolveOrError`), there are also situations where we allow it (RPC calls). If it should be an error and `resolveOrError` doesn't fit, ensure to check the `tfIRType` isn't empty. +-- | Resolve a type within the context based on the given field name and JSON path. Although there are situations where failure to resolve a field is considered an error (see `resolveOrError`), there are also situations where we allow it (RPC calls). If it should be an error and `resolveOrError` doesn't fit, be sure to check that the `cfIRType` isn't empty. resolveTypeOrUnknown :: ResolverContext -> Field -> CoercibleField resolveTypeOrUnknown ResolverContext{..} field@(fn, jp) = fromMaybe (unknownField fn jp) $ HM.lookup qi tables >>= @@ -146,25 +146,25 @@ resolveTypeOrUnknown ResolverContext{..} field@(fn, jp) = -- | Install any pre-defined data representation from source to target to coerce this reference. -- --- Note that we change the IR type here. This might seem unintuitive. The short of it is that for a CoercibleField without a transformer, input type == output type. A transformer maps from a -> b, so by definition the input type will be a and the output type b after. And tfIRType is the *input* type. +-- Note that we change the IR type here. This might seem unintuitive. The short of it is that for a CoercibleField without a transformer, input type == output type. A transformer maps from a -> b, so by definition the input type will be a and the output type b after. And cfIRType is the *input* type. -- -- It might feel odd that once a transformer is added we 'forget' the target type (because now a /= b). You might also note there's no obvious way to stack transforms (even if there were a stack, you'd have erased what type you're working with, so it'd be awkward). Alas, as satisfying as it would be to engineer a layered mapping system with full type information, we just don't need it. withTransformer :: ResolverContext -> Text -> Text -> CoercibleField -> CoercibleField withTransformer ResolverContext{representations} sourceType targetType field = fromMaybe field $ HM.lookup (sourceType, targetType) representations >>= (\fieldRepresentation -> Just field{cfIRType=sourceType, cfTransform=Just (drFunction fieldRepresentation)}) -- | Map the intermediate representation type to the output type, if available. withOutputFormat :: ResolverContext -> CoercibleField -> CoercibleField withOutputFormat ctx@ResolverContext{outputType} field@CoercibleField{cfIRType} = withTransformer ctx cfIRType outputType field -- | Map text into the intermediate representation type, if available.
withTextParse :: ResolverContext -> CoercibleField -> CoercibleField -withTextParse ctx field@CoercibleField{tfIRType} = withTransformer ctx "text" tfIRType field +withTextParse ctx field@CoercibleField{cfIRType} = withTransformer ctx "text" cfIRType field -- | Map json into the intermediate representation type, if available. withJsonParse :: ResolverContext -> CoercibleField -> CoercibleField -withJsonParse ctx field@CoercibleField{tfIRType} = withTransformer ctx "json" tfIRType field +withJsonParse ctx field@CoercibleField{cfIRType} = withTransformer ctx "json" cfIRType field -- | Map the intermediate representation type to the output type defined by the resolver context (normally json), if available. resolveOutputField :: ResolverContext -> Field -> CoercibleField @@ -226,7 +226,7 @@ addDataRepresentationAliases rPlanTree = Right $ fmap (\rPlan@ReadPlan{select=se where aliasSelectItem :: (CoercibleField, Maybe Cast, Maybe Alias) -> (CoercibleField, Maybe Cast, Maybe Alias) -- If there already is an alias, don't overwrite it. - aliasSelectItem (fld@(CoercibleField{tfName=fieldName, tfTransform=(Just _)}), Nothing, Nothing) = (fld, Nothing, Just fieldName) + aliasSelectItem (fld@(CoercibleField{cfName=fieldName, cfTransform=(Just _)}), Nothing, Nothing) = (fld, Nothing, Just fieldName) aliasSelectItem fld = fld knownColumnsInContext :: ResolverContext -> [Column] @@ -249,7 +249,7 @@ expandStarsForDataRepresentations ctx@ResolverContext{qi} rPlanTree = Right $ fm expandStarsForTable :: ResolverContext -> ReadPlan -> ReadPlan expandStarsForTable ctx@ResolverContext{representations, outputType} rplan@ReadPlan{select=selectItems} = -- If we have a '*' select AND the target table has at least one data representation, expand. - if ("*" `elem` map (\(field, _, _) -> tfName field) selectItems) && any hasOutputRep knownColumns + if ("*" `elem` map (\(field, _, _) -> cfName field) selectItems) && any hasOutputRep knownColumns then rplan{select=concatMap (expandStarSelectItem knownColumns) selectItems} else rplan where @@ -259,7 +259,7 @@ expandStarsForTable ctx@ResolverContext{representations, outputType} rplan@ReadP hasOutputRep col = HM.member (colNominalType col, outputType) representations expandStarSelectItem :: [Column] -> (CoercibleField, Maybe Cast, Maybe Alias) -> [(CoercibleField, Maybe Cast, Maybe Alias)] - expandStarSelectItem columns (CoercibleField{tfName="*", tfJsonPath=[]}, b, c) = map (\col -> (withOutputFormat ctx $ resolveColumnField col, b, c)) columns + expandStarSelectItem columns (CoercibleField{cfName="*", cfJsonPath=[]}, b, c) = map (\col -> (withOutputFormat ctx $ resolveColumnField col, b, c)) columns expandStarSelectItem _ selectItem = [selectItem] -- | Enforces the `max-rows` config on the result @@ -572,7 +572,7 @@ resolveOrError :: ResolverContext -> Maybe Table -> FieldName -> Either ApiReque resolveOrError _ Nothing _ = Left NotFound resolveOrError ctx (Just table) field = case resolveTableFieldName table field of - CoercibleField{tfIRType=""} -> Left $ ColumnNotFound (tableName table) field + CoercibleField{cfIRType=""} -> Left $ ColumnNotFound (tableName table) field cf -> Right $ withJsonParse ctx cf callPlan :: ProcDescription -> ApiRequest -> ReadPlanTree -> CallPlan @@ -601,7 +601,7 @@ inferColsEmbedNeeds (Node ReadPlan{select} forest) pkCols | "*" `elem` fldNames = ["*"] | otherwise = returnings where - fldNames = tfName . (\(f, _, _) -> f) <$> select + fldNames = cfName . 
(\(f, _, _) -> f) <$> select -- Without fkCols, when a mutatePlan to -- /projects?select=name,clients(name) occurs, the RETURNING SQL part would -- be `RETURNING name`(see QueryBuilder). This would make the embedding diff --git a/src/PostgREST/Plan/Types.hs b/src/PostgREST/Plan/Types.hs index 956032b74e..31c320ddc9 100644 --- a/src/PostgREST/Plan/Types.hs +++ b/src/PostgREST/Plan/Types.hs @@ -26,10 +26,10 @@ type TransformerProc = Text -- | -- | The type value is allowed to be the empty string. The analog here is soft type checking in programming languages: sometimes we don't need a variable to have a specified type and things will work anyhow. So the empty type variant is valid when we don't know and *don't need to know* about the specific type in some context. Note that this variation should not be used if it guarantees failure: in that case you should instead raise an error at the planning stage and bail out. For example, we can't parse JSON with `json_to_recordset` without knowing the types of each recipient field, and so error out. Using the empty string for the type would be incorrect and futile. On the other hand we use the empty type for RPC calls since type resolution isn't implemented for RPC, but it's fine because the query still works with Postgres' implicit coercion. In the future, hopefully we will support data representations across the board and then the empty type may be permanently retired. data CoercibleField = CoercibleField - { tfName :: FieldName - , tfJsonPath :: JsonPath - , tfIRType :: Text -- ^ The native Postgres type of the field, the type before mapping. - , tfTransform :: Maybe TransformerProc -- ^ The optional mapping from irType -> targetType. + { cfName :: FieldName + , cfJsonPath :: JsonPath + , cfIRType :: Text -- ^ The native Postgres type of the field, the type before mapping. + , cfTransform :: Maybe TransformerProc -- ^ The optional mapping from irType -> targetType. } deriving (Eq) unknownField :: FieldName -> JsonPath -> CoercibleField diff --git a/src/PostgREST/Query/QueryBuilder.hs b/src/PostgREST/Query/QueryBuilder.hs index 29d93d6cd4..22ca050f4a 100644 --- a/src/PostgREST/Query/QueryBuilder.hs +++ b/src/PostgREST/Query/QueryBuilder.hs @@ -98,12 +98,12 @@ mutatePlanToQuery (Insert mainQi iCols body onConflct putConditions returnings _ MergeDuplicates -> if null iCols then "DO NOTHING" - else "DO UPDATE SET " <> BS.intercalate ", " ((pgFmtIdent . tfName) <> const " = EXCLUDED." <> (pgFmtIdent . tfName) <$> iCols) + else "DO UPDATE SET " <> BS.intercalate ", " ((pgFmtIdent . cfName) <> const " = EXCLUDED." <> (pgFmtIdent . cfName) <$> iCols) ) onConflct, returningF mainQi returnings ]) where - cols = BS.intercalate ", " $ pgFmtIdent . tfName <$> iCols + cols = BS.intercalate ", " $ pgFmtIdent . cfName <$> iCols -- An update without a limit is always filtered with a WHERE mutatePlanToQuery (Update mainQi uCols body logicForest range ordts returnings) @@ -138,8 +138,8 @@ mutatePlanToQuery (Update mainQi uCols body logicForest range ordts returnings) whereLogic = if null logicForest then mempty else " WHERE " <> intercalateSnippet " AND " (pgFmtLogicTree mainQi <$> logicForest) mainTbl = SQL.sql (fromQi mainQi) emptyBodyReturnedColumns = if null returnings then "NULL" else BS.intercalate ", " (pgFmtColumn (QualifiedIdentifier mempty $ qiName mainQi) <$> returnings) - nonRangeCols = BS.intercalate ", " (pgFmtIdent . tfName <> const " = _." <> pgFmtIdent . 
tfName <$> uCols) - rangeCols = BS.intercalate ", " ((\col -> pgFmtIdent (tfName col) <> " = (SELECT " <> pgFmtIdent (tfName col) <> " FROM pgrst_update_body) ") <$> uCols) + nonRangeCols = BS.intercalate ", " (pgFmtIdent . cfName <> const " = _." <> pgFmtIdent . cfName <$> uCols) + rangeCols = BS.intercalate ", " ((\col -> pgFmtIdent (cfName col) <> " = (SELECT " <> pgFmtIdent (cfName col) <> " FROM pgrst_update_body) ") <$> uCols) (whereRangeIdF, rangeIdF) = mutRangeF mainQi (fst . otTerm <$> ordts) mutatePlanToQuery (Delete mainQi logicForest range ordts returnings) diff --git a/src/PostgREST/Query/SqlFragment.hs b/src/PostgREST/Query/SqlFragment.hs index 830240d83e..3c70ed47ad 100644 --- a/src/PostgREST/Query/SqlFragment.hs +++ b/src/PostgREST/Query/SqlFragment.hs @@ -236,27 +236,27 @@ pgFmtColumn table "*" = fromQi table <> ".*" pgFmtColumn table c = fromQi table <> "." <> pgFmtIdent c pgFmtField :: QualifiedIdentifier -> CoercibleField -> SQL.Snippet -pgFmtField table CoercibleField{tfName=fn, tfJsonPath=[]} = SQL.sql (pgFmtColumn table fn) +pgFmtField table CoercibleField{cfName=fn, cfJsonPath=[]} = SQL.sql (pgFmtColumn table fn) -- Using to_jsonb instead of to_json to avoid missing operator errors when filtering: -- "operator does not exist: json = unknown" -pgFmtField table CoercibleField{tfName=fn, tfJsonPath=jp} = SQL.sql ("to_jsonb(" <> pgFmtColumn table fn <> ")") <> pgFmtJsonPath jp +pgFmtField table CoercibleField{cfName=fn, cfJsonPath=jp} = SQL.sql ("to_jsonb(" <> pgFmtColumn table fn <> ")") <> pgFmtJsonPath jp -- Select the value of a named element from a table, applying its optional coercion mapping if any. pgFmtTableCoerce :: QualifiedIdentifier -> CoercibleField -> SQL.Snippet -pgFmtTableCoerce table fld@(CoercibleField{tfTransform=(Just formatterProc)}) = pgFmtCallUnary formatterProc (pgFmtField table fld) +pgFmtTableCoerce table fld@(CoercibleField{cfTransform=(Just formatterProc)}) = pgFmtCallUnary formatterProc (pgFmtField table fld) pgFmtTableCoerce table f = pgFmtField table f -- | Like the previous but now we just have a name so no namespace or JSON paths. pgFmtCoerceNamed :: CoercibleField -> SQL.Snippet -pgFmtCoerceNamed CoercibleField{tfName=fn, tfTransform=(Just formatterProc)} = pgFmtCallUnary formatterProc (SQL.sql (pgFmtIdent fn)) <> " AS " <> SQL.sql (pgFmtIdent fn) -pgFmtCoerceNamed CoercibleField{tfName=fn} = SQL.sql (pgFmtIdent fn) +pgFmtCoerceNamed CoercibleField{cfName=fn, cfTransform=(Just formatterProc)} = pgFmtCallUnary formatterProc (SQL.sql (pgFmtIdent fn)) <> " AS " <> SQL.sql (pgFmtIdent fn) +pgFmtCoerceNamed CoercibleField{cfName=fn} = SQL.sql (pgFmtIdent fn) pgFmtSelectItem :: QualifiedIdentifier -> (CoercibleField, Maybe Cast, Maybe Alias) -> SQL.Snippet -pgFmtSelectItem table (fld, Nothing, alias) = pgFmtTableCoerce table fld <> SQL.sql (pgFmtAs (tfName fld) (tfJsonPath fld) alias) +pgFmtSelectItem table (fld, Nothing, alias) = pgFmtTableCoerce table fld <> SQL.sql (pgFmtAs (cfName fld) (cfJsonPath fld) alias) -- Ideally we'd quote the cast with "pgFmtIdent cast". However, that would invalidate common casts such as "int", "bigint", etc. -- Try doing: `select 1::"bigint"` - it'll err, using "int8" will work though. There's some parser magic that pg does that's invalidated when quoting. -- Not quoting should be fine, we validate the input on Parsers. 
-pgFmtSelectItem table (fld, Just cast, alias) = "CAST (" <> pgFmtTableCoerce table fld <> " AS " <> SQL.sql (encodeUtf8 cast) <> " )" <> SQL.sql (pgFmtAs (tfName fld) (tfJsonPath fld) alias) +pgFmtSelectItem table (fld, Just cast, alias) = "CAST (" <> pgFmtTableCoerce table fld <> " AS " <> SQL.sql (encodeUtf8 cast) <> " )" <> SQL.sql (pgFmtAs (cfName fld) (cfJsonPath fld) alias) pgFmtSelectFromJson :: [CoercibleField] -> SQL.Snippet pgFmtSelectFromJson fields = @@ -270,7 +270,7 @@ pgFmtSelectFromJson fields = ) where parsedCols = intercalateSnippet ", " $ pgFmtCoerceNamed <$> fields - typedCols = BS.intercalate ", " $ pgFmtIdent . tfName <> const " " <> encodeUtf8 . tfIRType <$> fields + typedCols = BS.intercalate ", " $ pgFmtIdent . cfName <> const " " <> encodeUtf8 . cfIRType <$> fields pgFmtOrderTerm :: QualifiedIdentifier -> OrderTerm -> SQL.Snippet pgFmtOrderTerm qi ot = @@ -291,13 +291,13 @@ pgFmtOrderTerm qi ot = -- | Interpret a literal in the way the planner indicated through the CoercibleField. pgFmtUnknownLiteralForField :: SQL.Snippet -> CoercibleField -> SQL.Snippet -pgFmtUnknownLiteralForField value CoercibleField{tfTransform=(Just parserProc)} = pgFmtCallUnary parserProc value +pgFmtUnknownLiteralForField value CoercibleField{cfTransform=(Just parserProc)} = pgFmtCallUnary parserProc value -- But when no transform is requested, we just use the literal as-is. pgFmtUnknownLiteralForField value _ = value -- | Array version of the above, used by ANY(). pgFmtArrayLiteralForField :: [Text] -> CoercibleField -> SQL.Snippet -pgFmtArrayLiteralForField values CoercibleField{tfTransform=(Just parserProc)} = SQL.sql "ARRAY[" <> intercalateSnippet ", " (pgFmtCallUnary parserProc . unknownLiteral <$> values) <> "]" +pgFmtArrayLiteralForField values CoercibleField{cfTransform=(Just parserProc)} = SQL.sql "ARRAY[" <> intercalateSnippet ", " (pgFmtCallUnary parserProc . unknownLiteral <$> values) <> "]" -- When no transformation is requested, use an array literal which should be simpler, maybe faster. pgFmtArrayLiteralForField values _ = unknownLiteral (pgBuildArrayLiteral values) From 671a97866527dc74cff59d1ffd142aac040d3f81 Mon Sep 17 00:00:00 2001 From: Alexander Ljungberg <aljungberg@wireload.net> Date: Wed, 8 Feb 2023 11:31:41 +0000 Subject: [PATCH 09/11] Document what IRType means. --- src/PostgREST/Plan/Types.hs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/PostgREST/Plan/Types.hs b/src/PostgREST/Plan/Types.hs index 31c320ddc9..6212c0dea8 100644 --- a/src/PostgREST/Plan/Types.hs +++ b/src/PostgREST/Plan/Types.hs @@ -28,7 +28,7 @@ type TransformerProc = Text data CoercibleField = CoercibleField { cfName :: FieldName , cfJsonPath :: JsonPath - , cfIRType :: Text -- ^ The native Postgres type of the field, the type before mapping. + , cfIRType :: Text -- ^ The native Postgres type of the field, the intermediate (IR) type before mapping. , cfTransform :: Maybe TransformerProc -- ^ The optional mapping from irType -> targetType. } deriving (Eq) From a4e35b67004b7c2b8af12d7969dbb57a053ad66b Mon Sep 17 00:00:00 2001 From: Alexander Ljungberg <aljungberg@wireload.net> Date: Wed, 8 Feb 2023 11:31:56 +0000 Subject: [PATCH 10/11] Formatting. 
--- src/PostgREST/Plan.hs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/PostgREST/Plan.hs b/src/PostgREST/Plan.hs index a5ee3be19f..b626a263e7 100644 --- a/src/PostgREST/Plan.hs +++ b/src/PostgREST/Plan.hs @@ -36,7 +36,7 @@ import Data.Tree (Tree (..)) import PostgREST.ApiRequest (Action (..), ApiRequest (..), - InvokeMethod (..), + InvokeMethod (..), Mutation (..), Payload (..)) import PostgREST.Config (AppConfig (..)) From 8c35fce90f1722c3696c1666acfe1dc8a5d89e36 Mon Sep 17 00:00:00 2001 From: Alexander Ljungberg <aljungberg@wireload.net> Date: Wed, 8 Feb 2023 12:51:18 +0000 Subject: [PATCH 11/11] New: use a subquery to interpret `IN` literals requiring data rep transformation. - With the previous method, very long queries such as `ANY (ARRAY[test.color('000100'), test.color('CAFE12'), test.color('01E240'), ...` could be generated. Consider the case where the parser function name is 45 characters and there are a hundred literals. That's 4.5kB of SQL just for the function name alone! - The new version uses `unnest`: `ANY (SELECT test.color(unnest('{000100,CAFE12,01E240,...}'::text[])))` to produce a much shorter query. - This is likely to be more performant and, either way, much more readable and debuggable in the logs. --- src/PostgREST/Query/SqlFragment.hs | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/src/PostgREST/Query/SqlFragment.hs b/src/PostgREST/Query/SqlFragment.hs index 3c70ed47ad..3b756641e4 100644 --- a/src/PostgREST/Query/SqlFragment.hs +++ b/src/PostgREST/Query/SqlFragment.hs @@ -297,8 +297,10 @@ pgFmtUnknownLiteralForField value _ = value -- | Array version of the above, used by ANY(). pgFmtArrayLiteralForField :: [Text] -> CoercibleField -> SQL.Snippet -pgFmtArrayLiteralForField values CoercibleField{cfTransform=(Just parserProc)} = SQL.sql "ARRAY[" <> intercalateSnippet ", " (pgFmtCallUnary parserProc . unknownLiteral <$> values) <> "]" --- When no transformation is requested, use an array literal which should be simpler, maybe faster. +-- When a transformation is requested, we need to apply the transformation to each element of the array. This could be done by just making a query with `parser(value)` for each value, but that may lead to huge query lengths. Imagine `data_representations.color_from_text('...'::text)` repeated for a hundred values. Instead we use `unnest()` to unpack a standard array literal and then apply the transformation to each element, like a map. +-- Note the literals will be treated as text, since in every case where we use ANY() the parameters are textual (coming from a query string). We want to rely on the `text->domain` parser to do the right thing. +pgFmtArrayLiteralForField values CoercibleField{cfTransform=(Just parserProc)} = SQL.sql "(SELECT " <> pgFmtCallUnary parserProc (SQL.sql "unnest(" <> unknownLiteral (pgBuildArrayLiteral values) <> "::text[])") <> ")" +-- When no transformation is requested, we don't need a subquery. pgFmtArrayLiteralForField values _ = unknownLiteral (pgBuildArrayLiteral values) pgFmtFilter :: QualifiedIdentifier -> CoercibleFilter -> SQL.Snippet
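To make the shape of the final patch concrete, here is a minimal sketch of the two SQL forms, reusing the `test.color` parser named in the commit message above; the `todos` table, `label_color` column, and literal values are illustrative only, not taken verbatim from the fixtures:

```sql
-- Previous form: one parser call per IN literal. The generated SQL grows
-- with both the number of literals and the length of the parser's
-- qualified name.
SELECT * FROM todos
WHERE label_color = ANY (ARRAY[
  test.color('000100'), test.color('CAFE12'), test.color('01E240')
]);

-- New form: a single text array literal unpacked with unnest() inside a
-- subquery, so the parser name appears exactly once no matter how many
-- literals the in.(...) filter carries.
SELECT * FROM todos
WHERE label_color = ANY (
  SELECT test.color(unnest('{000100,CAFE12,01E240}'::text[]))
);
```

Both forms match the same rows; the second keeps the query text short and easy to read in the logs. Per the CHANGELOG note earlier in the series, data representations with `IN` predicates assume PostgreSQL 11 or later.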