Merge branch 'babelfish-for-postgresql:BABEL_3_X_DEV' into jira-babel-4393
Deepesh125 authored Oct 19, 2023
2 parents c5ddb5d + d41258d commit eb02b03
Showing 36 changed files with 2,458 additions and 895 deletions.
4 changes: 2 additions & 2 deletions .github/composite-actions/dump-restore-util/action.yml
@@ -61,11 +61,11 @@ runs:
if [[ '${{ inputs.logical_database }}' == 'null' ]];then
echo 'Starting to dump whole Babelfish physical database'
- ~/${{ inputs.pg_new_dir }}/bin/pg_dumpall -h localhost --database jdbc_testdb --username jdbc_user --globals-only --quote-all-identifiers --verbose --no-role-passwords -f pg_dump_globals.sql 2>>error.log
+ ~/${{ inputs.pg_new_dir }}/bin/pg_dumpall -h localhost --database jdbc_testdb --username jdbc_user --roles-only --quote-all-identifiers --verbose --no-role-passwords -f pg_dump_globals.sql 2>>error.log
~/${{ inputs.pg_new_dir }}/bin/pg_dump -h localhost --username jdbc_user $DUMP_OPTS --quote-all-identifiers --verbose --file="pg_dump.archive" --dbname=jdbc_testdb 2>>error.log
else
echo "Starting to dump Babelfish logical database ${{ inputs.logical_database }}"
- ~/${{ inputs.pg_new_dir }}/bin/pg_dumpall -h localhost --database jdbc_testdb --username jdbc_user --globals-only --quote-all-identifiers --verbose --no-role-passwords --bbf-database-name='${{ inputs.logical_database }}' -f pg_dump_globals.sql 2>>error.log
+ ~/${{ inputs.pg_new_dir }}/bin/pg_dumpall -h localhost --database jdbc_testdb --username jdbc_user --roles-only --quote-all-identifiers --verbose --no-role-passwords --bbf-database-name='${{ inputs.logical_database }}' -f pg_dump_globals.sql 2>>error.log
~/${{ inputs.pg_new_dir }}/bin/pg_dump -h localhost --username jdbc_user $DUMP_OPTS --quote-all-identifiers --verbose --bbf-database-name='${{ inputs.logical_database }}' --file="pg_dump.archive" --dbname=jdbc_testdb 2>>error.log
fi
88 changes: 51 additions & 37 deletions contrib/babelfishpg_tds/src/backend/tds/tdstypeio.c
@@ -153,10 +153,10 @@ typedef struct FunctionCacheByTdsIdEntry
TdsIoFunctionData data;
} FunctionCacheByTdsIdEntry;

- /*
+ /*
* This is a modified copy of a function from POSTGIS to get SRID from GSERIALIZED struct
*/
- static int32_t
+ static int32_t
get_srid(uint8_t *id)
{
int32_t srid = 0;
@@ -242,9 +242,9 @@ getSendFunc(int funcId)
case TDS_SEND_DATETIMEOFFSET:
return TdsSendTypeDatetimeoffset;
case TDS_SEND_GEOMETRY:
- return TdsSendTypeGeometry;
- case TDS_SEND_GEOGRAPHY:
- return TdsSendTypeGeography;
+ return TdsSendTypeGeometry;
+ case TDS_SEND_GEOGRAPHY:
+ return TdsSendTypeGeography;
/* TODO: should Assert here once all types are implemented */
default:
return NULL;
@@ -321,8 +321,8 @@ getRecvFunc(int funcId)
case TDS_RECV_DATETIMEOFFSET:
return TdsRecvTypeDatetimeoffset;
case TDS_RECV_GEOMETRY:
- return TdsRecvTypeGeometry;
- case TDS_RECV_GEOGRAPHY:
+ return TdsRecvTypeGeometry;
+ case TDS_RECV_GEOGRAPHY:
return TdsRecvTypeGeography;
/* TODO: should Assert here once all types are implemented */
default:
@@ -2014,7 +2014,7 @@ TdsRecvTypeDatetime2(const char *message, const ParameterToken token)
Datum
TdsRecvTypeGeometry(const char *message, const ParameterToken token)
{
- Datum result = 0;
+ Datum result = 0;

/* Decode binary and convert if needed */
StringInfo buf = TdsGetStringInfoBufferFromToken(message, token);
@@ -2023,25 +2023,25 @@ TdsRecvTypeGeometry(const char *message, const ParameterToken token)

ereport(ERROR,
(errcode(ERRCODE_DATETIME_VALUE_OUT_OF_RANGE),
errmsg("Prepared Queries for Geometry DataType Currently not Supported in BabelFish")));
errmsg("Prepared Queries for Geometry DataType Currently not Supported in BabelFish")));

- pfree(buf);
- return result;
+ pfree(buf);
+ return result;
}

/* -------------------------------
* TdsRecvTypeGeography - converts external binary format to
- * Geography data type
+ * Geography data type
* --------------------------------
- */
+ */
/*
* It is a Placeholder Function for now
* TODO: Will need to address it in subsequent Code Changes
*/
Datum
TdsRecvTypeGeography(const char *message, const ParameterToken token)
{
- Datum result = 0;
+ Datum result = 0;

/* Decode binary and convert if needed */
StringInfo buf = TdsGetStringInfoBufferFromToken(message, token);
@@ -2052,8 +2052,8 @@ TdsRecvTypeGeography(const char *message, const ParameterToken token)
(errcode(ERRCODE_DATETIME_VALUE_OUT_OF_RANGE),
errmsg("Prepared Queries for Geography DataType Currently not Supported in BabelFish")));

- pfree(buf);
- return result;
+ pfree(buf);
+ return result;
}

static inline uint128
@@ -2426,8 +2426,8 @@ TdsRecvTypeTable(const char *message, const ParameterToken token)
case TDS_TYPE_SQLVARIANT:
values[i] = TdsTypeSqlVariantToDatum(temp);
break;
- case TDS_TYPE_SPATIAL:
- break;
+ case TDS_TYPE_SPATIAL:
+ break;
}
/* Build a string for bind parameters. */
if (colMetaData[currentColumn].columnTdsType != TDS_TYPE_NVARCHAR || row->isNull[currentColumn] == 'n')
@@ -2700,10 +2700,13 @@ TdsSendTypeBinary(FmgrInfo *finfo, Datum value, void *vMetaData)
maxLen = 0;
bytea *vlena = DatumGetByteaPCopy(value);
bytea *buf;
+ int copyLen = 0;
TdsColumnMetaData *col = (TdsColumnMetaData *) vMetaData;

maxLen = col->metaEntry.type7.maxSize;
- buf = (bytea *) palloc0(sizeof(bytea) * maxLen);
+ copyLen = Max((sizeof(bytea) * maxLen), VARSIZE_ANY_EXHDR(vlena));
+
+ buf = (bytea *) palloc0(copyLen);
memcpy(buf, VARDATA_ANY(vlena), VARSIZE_ANY_EXHDR(vlena));

if ((rc = TdsPutUInt16LE(maxLen)) == 0)
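The TdsSendTypeBinary hunk above sizes the destination buffer with Max(declared column max, actual datum size) instead of the declared max alone, so the memcpy of the datum can no longer overrun the allocation. Below is a standalone C sketch of that sizing logic, not the Babelfish source: plain calloc and made-up values (max_size, payload) stand in for palloc0, maxLen and VARSIZE_ANY_EXHDR.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define MAX(a, b) ((a) > (b) ? (a) : (b))

int main(void)
{
    size_t max_size = 8;                    /* declared column max (stands in for maxLen) */
    const char payload[] = "0123456789AB";  /* actual datum: 12 bytes, larger than max_size */
    size_t payload_len = sizeof(payload) - 1;

    /* Sizing by max_size alone would make the memcpy below write past the
     * allocation whenever payload_len > max_size; taking the max avoids that. */
    size_t copy_len = MAX(max_size, payload_len);

    char *buf = calloc(copy_len, 1);
    if (buf == NULL)
        return 1;
    memcpy(buf, payload, payload_len);

    printf("allocated %zu bytes for a %zu-byte payload (declared max %zu)\n",
           copy_len, payload_len, max_size);
    free(buf);
    return 0;
}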
@@ -3139,6 +3142,7 @@ TdsSendTypeNumeric(FmgrInfo *finfo, Datum value, void *vMetaData)
TdsColumnMetaData *col = (TdsColumnMetaData *) vMetaData;
uint8_t max_scale = col->metaEntry.type5.scale;
uint8_t max_precision = col->metaEntry.type5.precision;
+ int target_precision = 0;

out = OutputFunctionCall(finfo, value);
if (out[0] == '-')
@@ -3175,24 +3179,34 @@ TdsSendTypeNumeric(FmgrInfo *finfo, Datum value, void *vMetaData)
if (scale == -1)
scale = 0;

+ /* Perform the overflow check before scribbling on to decString. */
+ target_precision = precision + (max_scale - scale);
+ if (target_precision > TDS_MAX_NUM_PRECISION ||
+ target_precision > max_precision)
+ ereport(ERROR, (errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE),
+ errmsg("Arithmetic overflow error for data type numeric.")));
+
/*
- * Fill in the remaining 0's if the processed scale from out is less than
- * max_scale This is needed because the output generated by engine may not
- * always produce the same precision/scale as calculated by
- * resolve_numeric_typmod_from_exp, which is the precision/scale we have
- * sent to the client with column metadata.
- */
+ * Fill in the remaining 0's if the processed scale from out is less than
+ * max_scale This is needed because the output generated by engine may not
+ * always produce the same precision/scale as calculated by
+ * resolve_numeric_typmod_from_exp, which is the precision/scale we have
+ * sent to the client with column metadata.
+ */
while (scale++ < max_scale)
{
decString[precision++] = '0';
}
decString[precision] = '\0';
- Assert(precision <= outlen);
+ Assert(precision == target_precision);

- if (precision > TDS_MAX_NUM_PRECISION ||
- precision > max_precision)
- ereport(ERROR, (errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE),
- errmsg("Arithmetic overflow error for data type numeric.")));
+ /*
+ * Verify that we did not go beyond the memory allocated.
+ * We allow precision < outlen. Consider the case when
+ * out="123.456", max_scale=8. Then by the end, precision=11
+ * but outlen=15.
+ */
+ Assert(precision <= outlen);

if (precision >= 1 && precision < 10)
length = 4;
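The TdsSendTypeNumeric hunk above computes the final digit count up front (target_precision = precision + (max_scale - scale)) and raises the overflow error before the zero padding writes into decString. Below is a standalone C sketch of just that arithmetic, using the worked example from the new comment (out = "123.456", max_scale = 8, so padding brings precision from 6 to 11); TDS_MAX_NUM_PRECISION = 38 is an assumed value here, not taken from the source.

#include <stdio.h>
#include <string.h>

#define TDS_MAX_NUM_PRECISION 38   /* assumed for this sketch */

int main(void)
{
    const char *out = "123.456";   /* textual numeric as produced by the output function */
    int max_scale = 8;             /* scale promised to the client in column metadata */
    int max_precision = 38;

    /* precision = number of digits, scale = digits after the decimal point */
    const char *dot = strchr(out, '.');
    int scale = dot ? (int) strlen(dot + 1) : 0;
    int precision = (int) strlen(out) - (dot ? 1 : 0);

    /* Padding appends (max_scale - scale) zeroes, so the final precision is
     * known before touching the string; the overflow check can happen here. */
    int target_precision = precision + (max_scale - scale);
    if (target_precision > TDS_MAX_NUM_PRECISION || target_precision > max_precision)
        printf("Arithmetic overflow error for data type numeric.\n");
    else
        printf("precision=%d scale=%d -> target_precision=%d\n",
               precision, scale, target_precision);
    return 0;
}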
@@ -4150,16 +4164,16 @@ TdsSendTypeDatetimeoffset(FmgrInfo *finfo, Datum value, void *vMetaData)
return rc;
}

- int
+ int
TdsSendSpatialHelper(FmgrInfo *finfo, Datum value, void *vMetaData, int TdsInstr)
{
int rc = EOF,
npoints,
- len, /* number of bytes used to store the string. */
+ len, /* number of bytes used to store the string. */
actualLen; /* Number of bytes that would be needed to
* store given string in given encoding. */
- char *destBuf,
- *buf,
+ char *destBuf,
+ *buf,
*itr;

int32_t srid;
@@ -4175,7 +4189,7 @@ TdsSendSpatialHelper(FmgrInfo *finfo, Datum value, void *vMetaData, int TdsInstr
* 16 -> 2 8-Byte float coordinates (TODO: Need to change when Z and M flags are defined for N-dimension Points)
* 6 -> 4 Byte SRID + 2 Byte (01 0C)
*/
- len = npoints*16 + 6;
+ len = npoints*16 + 6;
buf = (char *) palloc0(len);

/* Driver Expects 4 Byte SRID */
@@ -4196,8 +4210,8 @@ TdsSendSpatialHelper(FmgrInfo *finfo, Datum value, void *vMetaData, int TdsInstr
* First 8 Bytes of gser->data are fixed in PostGIS:
* 4 Bytes -> Represents the Type
* 4 Bytes -> Represents the npoints
- */
- memcpy(itr, (char *) gser->data + 8, len - 6);
+ */
+ memcpy(itr, (char *) gser->data + 8, len - 6);

destBuf = TdsEncodingConversion(buf, len, PG_UTF8, col->encoding, &actualLen);

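The comments in TdsSendSpatialHelper above describe the point payload sent to the driver as a 4-byte SRID, the two fixed bytes 01 0C, and 16 bytes (two 8-byte doubles) per point, which is where len = npoints*16 + 6 comes from. Below is a standalone C sketch of that layout, not the Babelfish source; the SRID and coordinates are made-up values and the byte meanings are taken only from those comments.

#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
    int32_t srid = 4326;                    /* made-up SRID */
    int npoints = 1;
    double coords[2] = {47.651, -122.349};  /* one point: two 8-byte doubles */

    int len = npoints * 16 + 6;             /* same sizing as in the hunk above */
    char *buf = calloc(len, 1);
    if (buf == NULL)
        return 1;

    char *itr = buf;
    memcpy(itr, &srid, 4);                  /* 4-byte SRID expected by the driver */
    itr += 4;
    itr[0] = 0x01;                          /* the fixed 01 0C bytes from the comment */
    itr[1] = 0x0C;
    itr += 2;
    memcpy(itr, coords, (size_t) npoints * 16);  /* coordinate payload */

    printf("spatial payload is %d bytes for %d point(s)\n", len, npoints);
    free(buf);
    return 0;
}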
23 changes: 21 additions & 2 deletions contrib/babelfishpg_tds/test/t/004_bbfdumprestore.pl
@@ -70,7 +70,7 @@
$oldnode->start;
my @dumpall_command = (
'pg_dumpall', '--database', 'testdb', '--username', 'test_master',
- '--port', $oldnode->port, '--globals-only', '--quote-all-identifiers',
+ '--port', $oldnode->port, '--roles-only', '--quote-all-identifiers',
'--verbose', '--no-role-passwords', '--file', $dump1_file);
$newnode->command_ok(\@dumpall_command, 'Dump global objects.');
# Dump Babelfish database using pg_dump.
@@ -135,7 +135,7 @@
# need to use dump utilities from the new node here.
@dumpall_command = (
'pg_dumpall', '--database', 'testdb', '--username', 'test_master',
- '--port', $newnode2->port, '--globals-only', '--quote-all-identifiers',
+ '--port', $newnode2->port, '--roles-only', '--quote-all-identifiers',
'--verbose', '--no-role-passwords', '--file', $dump3_file);
$newnode2->command_ok(\@dumpall_command, 'Dump global objects.');
# Dump Babelfish database using pg_dump. Let's dump with the custom format
@@ -177,4 +177,23 @@
qr/Dump and restore across different migration modes is not yet supported./,
'Restore of Babelfish database failed since source and target migration modes do not match.');
$newnode->stop;
+
+ ############################################################################################
+ ########################### Test dump for non Babelfish database ###########################
+ ############################################################################################
+ $newnode->start;
+
+ # Dump global objects using pg_dumpall.
+ @dumpall_command = (
+     'pg_dumpall', '--database', 'postgres', '--port', $newnode->port,
+     '--roles-only', '--quote-all-identifiers', '--verbose',
+     '--no-role-passwords', '--file', $dump1_file);
+ $newnode->command_ok(\@dumpall_command, 'Dump global objects.');
+ # Dump Babelfish database using pg_dump.
+ @dump_command = (
+     'pg_dump', '--quote-all-identifiers', '--port', $newnode->port,
+     '--verbose', '--dbname', 'postgres',
+     '--file', $dump2_file);
+ $newnode->command_ok(\@dump_command, 'Dump non-Babelfish (postgres db) database.');
+ $newnode->stop;
done_testing();
32 changes: 15 additions & 17 deletions contrib/babelfishpg_tsql/runtime/functions.c
@@ -1520,7 +1520,7 @@ object_name(PG_FUNCTION_ARGS)
SysScanDesc tgscan;
EphemeralNamedRelation enr;
bool found = false;
- char *result = NULL;
+ text *result_text = NULL;

if (input1 < 0)
PG_RETURN_NULL();
@@ -1552,9 +1552,7 @@
enr = get_ENR_withoid(currentQueryEnv, object_id, ENR_TSQL_TEMP);
if (enr != NULL && enr->md.enrtype == ENR_TSQL_TEMP)
{
- result = enr->md.name;
-
- PG_RETURN_VARCHAR_P((VarChar *) cstring_to_text(result));
+ PG_RETURN_VARCHAR_P((VarChar *) cstring_to_text(enr->md.name));
}

/* search in pg_class by object_id */
@@ -1565,8 +1563,7 @@
if (pg_class_aclcheck(object_id, user_id, ACL_SELECT) == ACLCHECK_OK)
{
Form_pg_class pg_class = (Form_pg_class) GETSTRUCT(tuple);
- result = NameStr(pg_class->relname);
-
+ result_text = cstring_to_text(NameStr(pg_class->relname)); // make a copy before releasing syscache
schema_id = pg_class->relnamespace;
}
ReleaseSysCache(tuple);
@@ -1583,8 +1580,7 @@
if (pg_proc_aclcheck(object_id, user_id, ACL_EXECUTE) == ACLCHECK_OK)
{
Form_pg_proc procform = (Form_pg_proc) GETSTRUCT(tuple);
- result = NameStr(procform->proname);
-
+ result_text = cstring_to_text(NameStr(procform->proname));
schema_id = procform->pronamespace;
}
ReleaseSysCache(tuple);
@@ -1602,7 +1598,7 @@
if (pg_type_aclcheck(object_id, user_id, ACL_USAGE) == ACLCHECK_OK)
{
Form_pg_type pg_type = (Form_pg_type) GETSTRUCT(tuple);
- result = NameStr(pg_type->typname);
+ result_text = cstring_to_text(NameStr(pg_type->typname));
}
ReleaseSysCache(tuple);
found = true;
@@ -1630,8 +1626,7 @@
if (OidIsValid(pg_trigger->tgrelid) &&
pg_class_aclcheck(pg_trigger->tgrelid, user_id, ACL_SELECT) == ACLCHECK_OK)
{
- result = NameStr(pg_trigger->tgname);
-
+ result_text = cstring_to_text(NameStr(pg_trigger->tgname));
schema_id = get_rel_namespace(pg_trigger->tgrelid);
}
found = true;
@@ -1651,26 +1646,29 @@
/* check if user have right permission on object */
if (OidIsValid(con->conrelid) && (pg_class_aclcheck(con->conrelid, user_id, ACL_SELECT) == ACLCHECK_OK))
{
- result = NameStr(con->conname);
-
+ result_text = cstring_to_text(NameStr(con->conname));
schema_id = con->connamespace;
}
ReleaseSysCache(tuple);
found = true;
}
}

- if (result)
+ if (result_text)
{
/*
* Check if schema corresponding to found object belongs to specified
* database, schema also can be shared schema like "sys" or
* "information_schema_tsql". In case of pg_type schema_id will be
* invalid.
*/
- if (!OidIsValid(schema_id) || is_schema_from_db(schema_id, database_id)
- || (schema_id == get_namespace_oid("sys", true)) || (schema_id == get_namespace_oid("information_schema_tsql", true)))
- PG_RETURN_VARCHAR_P((VarChar *) cstring_to_text(result));
+ if (!OidIsValid(schema_id) ||
+ is_schema_from_db(schema_id, database_id) ||
+ (schema_id == get_namespace_oid("sys", true)) ||
+ (schema_id == get_namespace_oid("information_schema_tsql", true)))
+ {
+ PG_RETURN_VARCHAR_P((VarChar *) result_text);
+ }
}
PG_RETURN_NULL();
}
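The object_name() hunks above replace result = NameStr(...), a pointer into the syscache tuple, with result_text = cstring_to_text(...) taken before ReleaseSysCache(), so the returned name no longer points into a released cache entry. Below is a standalone C sketch of that general pattern, not PostgreSQL code: dup_string, lookup_name and release_cache are made-up stand-ins for cstring_to_text, the syscache lookup and ReleaseSysCache.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static char *cache_entry = NULL;          /* stands in for a syscache tuple */

static char *dup_string(const char *s)    /* portable strdup */
{
    char *copy = malloc(strlen(s) + 1);
    if (copy != NULL)
        strcpy(copy, s);
    return copy;
}

static const char *lookup_name(void)      /* returns a pointer INTO the cache */
{
    cache_entry = dup_string("my_table");
    return cache_entry;                   /* like NameStr(pg_class->relname) */
}

static void release_cache(void)           /* like ReleaseSysCache(tuple) */
{
    free(cache_entry);
    cache_entry = NULL;
}

int main(void)
{
    /* Old pattern: the result kept pointing into the released cache entry. */
    const char *dangling = lookup_name();
    release_cache();
    (void) dangling;                      /* reading it now would be use-after-free */

    /* New pattern: copy the name first, as result_text = cstring_to_text(...) does. */
    char *safe_copy = dup_string(lookup_name());
    release_cache();
    printf("still valid after release: %s\n", safe_copy);
    free(safe_copy);
    return 0;
}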
9 changes: 6 additions & 3 deletions contrib/babelfishpg_tsql/sql/ownership.sql
@@ -309,10 +309,10 @@ CAST(CAST(Base.oid as INT) as sys.varbinary(85)) AS sid,
CAST(Ext.type AS CHAR(1)) as type,
CAST(
CASE
- WHEN Ext.type = 'S' THEN 'SQL_LOGIN'
+ WHEN Ext.type = 'S' THEN 'SQL_LOGIN'
WHEN Ext.type = 'R' THEN 'SERVER_ROLE'
WHEN Ext.type = 'U' THEN 'WINDOWS_LOGIN'
- ELSE NULL
+ ELSE NULL
END
AS NVARCHAR(60)) AS type_desc,
CAST(Ext.is_disabled AS INT) AS is_disabled,
@@ -323,7 +323,10 @@ CAST(Ext.default_language_name AS SYS.SYSNAME) AS default_language_name,
CAST(CASE WHEN Ext.type = 'R' THEN NULL ELSE Ext.credential_id END AS INT) AS credential_id,
CAST(CASE WHEN Ext.type = 'R' THEN 1 ELSE Ext.owning_principal_id END AS INT) AS owning_principal_id,
CAST(CASE WHEN Ext.type = 'R' THEN 1 ELSE Ext.is_fixed_role END AS sys.BIT) AS is_fixed_role
- FROM pg_catalog.pg_roles AS Base INNER JOIN sys.babelfish_authid_login_ext AS Ext ON Base.rolname = Ext.rolname;
+ FROM pg_catalog.pg_roles AS Base INNER JOIN sys.babelfish_authid_login_ext AS Ext ON Base.rolname = Ext.rolname
+ WHERE pg_has_role(suser_id(), 'sysadmin'::TEXT, 'MEMBER')
+ OR Ext.orig_loginname = suser_name()
+ OR Ext.type = 'R';

GRANT SELECT ON sys.server_principals TO PUBLIC;

2 changes: 1 addition & 1 deletion contrib/babelfishpg_tsql/sql/sys_functions.sql
@@ -4050,7 +4050,7 @@ BEGIN
column_length := 256;
ELSIF column_data_type IS NULL THEN

- -- Check if it's a user-defined data type
+ -- Check if it ia user-defined data type
SELECT sys.translate_pg_type_to_tsql(typbasetype), typlen, typtypmod
INTO column_data_type, typelen, typemod
FROM pg_type
(Diffs for the remaining 30 changed files are not shown here.)
