Skip to content

Commit

Permalink
Merge branch 'main' into fix-service-def-expl
Browse files Browse the repository at this point in the history
  • Loading branch information
chadwcarlson authored Dec 9, 2024
2 parents ccbf98f + 929177c commit 7dbdc63
Show file tree
Hide file tree
Showing 20 changed files with 1,358 additions and 108 deletions.
4 changes: 4 additions & 0 deletions .github/actions/redirection-verification/action.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,10 @@ inputs:
environment-url:
description: 'Pull Request Environment URL'
required: true
number_retries:
description: 'The number of attempts we should make to contact the target environment URL. There is a 1 second delay between attempts.'
required: false
default: '100'

####
#outputs:
Expand Down
130 changes: 82 additions & 48 deletions .github/actions/redirection-verification/index.js
Original file line number Diff line number Diff line change
Expand Up @@ -35,55 +35,89 @@ function linkify(path,url) {
* @type {string}
*/
axios.defaults.baseURL = core.getInput('environment-url')
//axios.defaults.baseURL = 'https://httpstat.us/random/200,500-504,500-504,500-504'
const retries = Number(core.getInput('number_retries'))
//const retries = Number('100')
function sleep(ms) {
return new Promise((resolve) => {
setTimeout(resolve, ms);
});
}

try {
/**
* @todo Can we get the full workspace path to this file?
* @type {*}
*/
const yamlData = yaml.load(fs.readFileSync('./.platform/routes.yaml', 'utf8'));
/**
* @todo the key (docs.upsun.com) here should be a variable that is set somewhere else
* @type {Record<string, string[]> | _.LodashAt | ((request: string) => (string[] | null)) | string[]}
*/
const anchors = yamlData['https://docs.upsun.com/'].redirects.paths

const RedirectKeys = Object.keys(anchors).filter((path)=>{
const verifyTargetResponse = async(count = 0) => {
try {
const axiosResponse = await axios.get('/');
core.notice('Target URL finally responded with a 200. Proceeding.')
return axiosResponse;
} catch (error) {
if (error || error.status != 200) {
core.info(`At attempt ${count}, target url responded with status ${error.status}, retrying...`)
if (count++ < retries) {
await sleep(1000);
return verifyTargetResponse(count);
} else {
core.setFailed(`Max number of retries (${retries}) reached. Aborting.`)
};
} else {
core.setFailed(`Action failed with error ${error}`)
};
};
};

const verify = async () => {
let targetReady = await verifyTargetResponse();
core.info('Target URL ready. Beginning verification.')
try {
/**
* @todo the piece we're using to identify our contracts (/anchors/) should be a variable
* @todo Can we get the full workspace path to this file?
* @type {*}
*/
return path.startsWith('/anchors/')
})

const validateRedirects = RedirectKeys.map(async (path, index, array) => {
//console.log(`I'm going to test ${path} to see if it goes to ${anchors[path].to}`)

try {
const response = await axios.head(path);
//core.info(`Response for our check of ${path} is ${response.status}`)
return response
} catch (reqerr) {
//core.warning(`issue encountered with path ${path}!!! Returned status is ${reqerr.status}`)
let row = [{data: linkify(path, axios.defaults.baseURL)},{data: linkify( anchors[path].to, axios.defaults.baseURL) }]
tableData.push(row)
}
});


Promise.all(validateRedirects).then(() => {
if(tableData.length > 1) {

core.error('There was an error with one or more redirects.')

core.summary.addTable(tableData)

core.summary.write()
core.setFailed('There was an error with one or more contracted redirects.')
} else {
core.notice('All contracted redirections are valid.')
}
});

} catch (error) {
core.setFailed(`Action failed with error ${error}`)
const yamlData = yaml.load(fs.readFileSync('./.platform/routes.yaml', 'utf8'));
/**
* @todo the key (docs.upsun.com) here should be a variable that is set somewhere else
* @type {Record<string, string[]> | _.LodashAt | ((request: string) => (string[] | null)) | string[]}
*/
const anchors = yamlData['https://docs.upsun.com/'].redirects.paths

const RedirectKeys = Object.keys(anchors).filter((path)=>{
/**
* @todo the piece we're using to identify our contracts (/anchors/) should be a variable
*/
return path.startsWith('/anchors/')
})

const validateRedirects = RedirectKeys.map(async (path, index, array) => {
//console.log(`I'm going to test ${path} to see if it goes to ${anchors[path].to}`)

try {
const response = await axios.head(path);
core.debug(`Response for our check of ${path} is ${response.status}`)
return response
} catch (reqerr) {
//core.warning(`issue encountered with path ${path}!!! Returned status is ${reqerr.status}`)
let row = [{data: linkify(path, axios.defaults.baseURL)},{data: linkify( anchors[path].to, axios.defaults.baseURL) }]
tableData.push(row)
}
});


Promise.all(validateRedirects).then(() => {
if(tableData.length > 1) {

core.error('There was an error with one or more redirects.')

core.summary.addTable(tableData)

core.summary.write()
core.setFailed('There was an error with one or more contracted redirects.')
} else {
core.notice('All contracted redirections are valid.')
}
});

} catch (error) {
core.setFailed(`Action failed with error ${error}`)
}
}

verify();
95 changes: 95 additions & 0 deletions shared/data/php_extensions.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -1450,3 +1450,98 @@ grid:
- zendopcache
with-webp:
- imagick
"8.4":
available:
- amqp
- apcu
# - blackfire
# - datadog
# - enchant
# - event
# - ffi
# - geoip
# - gmp
# - gnupg
# - http
- igbinary
- imap
# - ldap
# - mailparse
# - memcached
- mongodb
# - msgpack
# - mysql
# - newrelic
# - oauth
# - odbc
# - openswoole
# - opentelemetry
- pdo_dblib
- pdo_odbc
- pdo_pgsql
# - pdo_sqlsrv
- pgsql
# - protobuf
# - pspell
# - raphf
# - rdkafka
# - readline
- redis
- shmop
# - snmp
# - sodium
- sourceguardian
# - sqlsrv
# - ssh2
# - swoole
# - sybase
# - tideways
# - tideways-xhprof
# - tidy
# - uuid
# - uv
# - xdebug
- xmlrpc
- xsl
# - yaml
default:
- bcmath
- bz2
- calendar
- ctype
- curl
- dba
- dom
- exif
- fileinfo
- ftp
- gd
- gettext
- iconv
- intl
- mbstring
- mysqli
- mysqlnd
# - opcache
- pdo
- pdo_mysql
- pdo_sqlite
- phar
- posix
- simplexml
- soap
- sockets
- sqlite3
- sysvmsg
- sysvsem
- sysvshm
- tokenizer
- xml
- xmlreader
- xmlwriter
- zip
built-in:
- json
- zendopcache
# with-webp:
# - imagick
4 changes: 3 additions & 1 deletion shared/data/registry.json
Original file line number Diff line number Diff line change
Expand Up @@ -86,7 +86,8 @@
],
"supported": [
"7.0",
"6.0"
"6.0",
"8.0"
],
"legacy": [
"3.1",
Expand Down Expand Up @@ -838,6 +839,7 @@
"5.4"
],
"supported": [
"8.4",
"8.3",
"8.2",
"8.1"
Expand Down
2 changes: 2 additions & 0 deletions sites/platform/.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -73,6 +73,7 @@ dotnet:
supported:
- '7.0'
- '6.0'
- '8.0'
legacy:
- '3.1'
- '2.2'
Expand Down Expand Up @@ -674,6 +675,7 @@ php:
- '5.5'
- '5.4'
supported:
- '8.4'
- '8.3'
- '8.2'
- '8.1'
Expand Down
4 changes: 1 addition & 3 deletions sites/platform/src/administration/cli/_index.md
Original file line number Diff line number Diff line change
Expand Up @@ -164,9 +164,7 @@ The code is only merged between environments remotely.

### Customize the CLI

You can customize how the CLI operates and what it returns with a configuration file (`~/{{% vendor/configdir %}}/config.yaml`)
or environment variables.
For details, see the [customization instructions on GitHub](https://github.com/platformsh/legacy-cli#user-content-customization).
You can customize how the CLI operates and what it returns with a configuration file (`~/.platformsh/config.yaml`) or environment variables. For details, see the [customization instructions on GitHub](https://github.com/platformsh/legacy-cli#user-content-customization).

#### Automate repetitive tasks

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -16,13 +16,13 @@ Each instance hosts the entire application stack, allowing this architecture sup

##### Build process

The build process for your application is identical for both the Grid Environment and the Dedicated Gen 2 cluster. However, because the hosts are provisioned by Platform.sh, not as a container, service configuration must be done by Platform.sh’s Customer Success team. The flexibility for DG2 and Grid can be made to be the same but only via opening a [support ticket](/learn/overview/get-support.md).
The build process for your application is identical for both the Grid Environment and the Dedicated Gen 2 cluster. However, because the hosts are provisioned by Platform.sh, not as a container, service configuration must be done by Platform.sh’s Customer Success team.

For more information, learn about [default storage settings](#storage) and how your app can [connect to services](dedicated-environments/dedicated-gen-3/overview.md#available-services).

### Split architecture

Split architecture works under Dedicated Generation 2 and allows to give more resources globally to a project. Services (data services, caching service or search engines) are split from application runtimes. Services will be running on a cluster of core nodes, and the application will be running on a cluster of web nodes.
Split architecture works under Dedicated Generation 2 and allows you to give more resources globally to a project. Services (data services, caching service or search engines) are split from application runtimes. Services will be running on a cluster of core nodes, and the application will be running on a cluster of web nodes.

This allows us to allocate more resources to either the application or the services. Both clusters can differ in size. Split architecture clusters can horizontally scale the application by adding additional nodes. 

Expand All @@ -36,7 +36,7 @@ Any defined users or environment variables are also propagated to the Dedicated

{{< note title="Note" theme="info" >}}

There is no automatic cloning of data from the Dedicated Gen 2 cluster to the development environment the way there is between branches in the development environment.
There is no automatic cloning of data from the Dedicated Gen 2 cluster to the development environments like there is between the grid-based development branches.

{{< /note >}} 

Expand All @@ -48,7 +48,7 @@ When deploying to the Dedicated Gen 2 cluster the process is slightly different

- The new application image is built in the exact same fashion as for the Grid.
- Any active background tasks on the cluster, including cron tasks, are terminated.
- The cluster (production or staging) is closed, meaning it doesn’t accept new requests. Incoming requests receive an HTTP 500 error.
- The cluster (production or staging) is closed, meaning it doesn’t accept new requests. Incoming requests receive an HTTP 503 response.
- The application image on all three servers is replaced with the new image.
- The deploy hook is run on one, and only one, of the three servers.
- The cluster is opened to allow new requests.
Expand All @@ -75,4 +75,4 @@ While your DG2 production and staging Environments are on dedicated virtual mach

### Memory

Dedicated Generation 2 includes a single node dedicated staging with 2 CPUs. This runs the same software configuration as the production cluster but only on a single node. This is usually enough for functional testing before moving to production. You can choose to upgrade your staging to a more powerful machine or add more than one dedicated staging system. Those will still be a single machine.
Dedicated Generation 2 includes a single node dedicated staging with 2 CPUs. This runs the same software configuration as the production cluster but only on a single node. This is usually enough for functional testing before moving to production. You can choose to upgrade your staging to a more powerful machine or add more than one dedicated staging system. Those will still be a single machine.
Original file line number Diff line number Diff line change
Expand Up @@ -12,7 +12,7 @@ This is not the case with [{{% names/dedicated-gen-3 %}}](/dedicated-environment
## Syncing data between environments

Because of the differences between {{% names/dedicated-gen-2 %}} and Grid Environments,
basic [syncs](/glossary.md#sync) and [merges](/glossary.md#merge) aren't available between development environments and production or staging environments. So you don't see working buttons with those options in the Console.
data [syncs](/glossary.md#sync) aren't available between development environments and production or staging environments. So you don't see working buttons with those options in the Console. However, code [syncs](/glossary.md#sync) and [merges](/glossary.md#merge) are available when the code is different between a parent and child environment.

To transfer data between environments, backup your Production/Staging data and then synchronize Development data. See how to [back up and transfer data](../../development/transfer-dedicated.md#synchronize-files-from-development-to-stagingproduction).

Expand Down Expand Up @@ -106,7 +106,6 @@ The following settings require a [support ticket](/learn/overview/get-support):
* Increasing storage
* Allocating storage among mounts and services
* [PHP extensions](../../languages/php/extensions.md)
* Web server configuration (the [`web.locations` section of your app configuration](/create-apps/app-reference/single-runtime-image.md#locations))

## Logs

Expand Down
Loading

0 comments on commit 7dbdc63

Please sign in to comment.