“I paint with a brayer and press. A color lover with monochrome moods”. -Kathleen DeMeo
A. [Course] Elastic Google Cloud Infrastructure: Scaling and Automation
B. [Quest] Deploy and Manage Cloud Environments with Google Cloud
B.1. Configuring IAM Permissions with gcloud
B.2. Hosting a Web App on Google Cloud Using Compute Engine
- Create Compute Engine instances
- Create instance templates from source instances
- Create managed instance groups
- Create and test managed instance group health checks
- Create HTTP(S) Load Balancers
- Create load balancer health checks
- Use a Content Delivery Network (CDN) for Caching
- Instance startup script `startup-script.sh`
```sh
#!/bin/bash
# Install logging monitor. The monitor will automatically pick up logs sent to
# syslog.
curl -s "https://storage.googleapis.com/signals-agents/logging/google-fluentd-install.sh" | bash
service google-fluentd restart &
# Install dependencies from apt
apt-get update
apt-get install -yq ca-certificates git build-essential supervisor psmisc
# Install nodejs
mkdir /opt/nodejs
curl https://nodejs.org/dist/v16.14.0/node-v16.14.0-linux-x64.tar.gz | tar xvzf - -C /opt/nodejs --strip-components=1
ln -s /opt/nodejs/bin/node /usr/bin/node
ln -s /opt/nodejs/bin/npm /usr/bin/npm
# Get the application source code from the Google Cloud Storage bucket.
mkdir /fancy-store
gsutil -m cp -r gs://fancy-store-$DEVSHELL_PROJECT_ID/monolith-to-microservices/microservices/* /fancy-store/
# Install app dependencies.
cd /fancy-store/
npm install
# Create a nodeapp user. The application will run as this user.
useradd -m -d /home/nodeapp nodeapp
chown -R nodeapp:nodeapp /fancy-store
# Configure supervisor to run the node app.
cat >/etc/supervisor/conf.d/node-app.conf << EOF
[program:nodeapp]
directory=/fancy-store
command=npm start
autostart=true
autorestart=true
user=nodeapp
environment=HOME="/home/nodeapp",USER="nodeapp",NODE_ENV="production"
stdout_logfile=syslog
stderr_logfile=syslog
EOF
supervisorctl reread
supervisorctl update
```
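- Verifying the startup script on an instance (a minimal sketch, assuming the defaults above: supervisor program `nodeapp`, frontend listening on 8080)
```sh
# On the VM (for example via `gcloud compute ssh frontend`):
# confirm supervisor picked up node-app.conf and the program is RUNNING.
sudo supervisorctl status nodeapp
# Confirm the app answers locally (frontend on 8080; backend on 8081/8082).
curl -s -o /dev/null -w "%{http_code}\n" http://localhost:8080
# Startup-script output and app logs are forwarded to syslog by google-fluentd.
sudo tail -n 50 /var/log/syslog
```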
- Provisioning the architecture
```sh
# Enable Compute Engine APIs
gcloud services enable compute.googleapis.com
# Create Cloud Storage bucket
gsutil mb gs://fancy-store-$DEVSHELL_PROJECT_ID
# Clone git repo to the Cloud Shell
git clone https://github.com/googlecodelabs/monolith-to-microservices.git
# Build the project locally
cd ~/monolith-to-microservices
./setup.sh
# Install the Node.js LTS version with nvm
nvm install --lts
# Start the web server
cd microservices
npm start
# Create the startup script for the Compute Engine instances locally in Cloud Shell
vi startup-script.sh
# Copy the script to Cloud Storage bucket
gsutil cp ~/monolith-to-microservices/startup-script.sh gs://fancy-store-$DEVSHELL_PROJECT_ID
# Copy the code to Cloud Storage bucket
cd ~
rm -rf monolith-to-microservices/*/node_modules
gsutil -m cp -r monolith-to-microservices gs://fancy-store-$DEVSHELL_PROJECT_ID/
# Deploy the backend instance
gcloud compute instances create backend \
--machine-type=n1-standard-1 \
--tags=backend \
--metadata=startup-script-url=https://storage.googleapis.com/fancy-store-$DEVSHELL_PROJECT_ID/startup-script.sh
# instance IPs
gcloud compute instances list
# Configure the frontend to reach the backend by IP: set these values in the frontend project's `.env` file
REACT_APP_ORDERS_URL=http://[BACKEND_ADDRESS]:8081/api/orders
REACT_APP_PRODUCTS_URL=http://[BACKEND_ADDRESS]:8082/api/products
# Rebuild frontend project
cd ~/monolith-to-microservices/react-app
npm install && npm run-script build
# Copy the code to Cloud Storage bucket
cd ~
rm -rf monolith-to-microservices/*/node_modules
gsutil -m cp -r monolith-to-microservices gs://fancy-store-$DEVSHELL_PROJECT_ID/
# Deploy frontend instance
gcloud compute instances create frontend \
--machine-type=n1-standard-1 \
--tags=frontend \
--metadata=startup-script-url=https://storage.googleapis.com/fancy-store-$DEVSHELL_PROJECT_ID/startup-script.sh
# Configure the network
gcloud compute firewall-rules create fw-fe \
--allow tcp:8080 \
--target-tags=frontend
gcloud compute firewall-rules create fw-be \
--allow tcp:8081-8082 \
--target-tags=backend
gcloud compute instances list
watch -n 2 curl http://[FRONTEND_ADDRESS]:8080
# Create instance templates so the managed instance groups can create new instances automatically
gcloud compute instances stop frontend
gcloud compute instances stop backend
gcloud compute instance-templates create fancy-fe --source-instance=frontend
gcloud compute instance-templates create fancy-be --source-instance=backend
gcloud compute instance-templates list
# Delete the backend VM to save resources (keep the frontend VM; it is reused later to update the template)
gcloud compute instances delete backend
# Create managed instance groups so the application can scale
gcloud compute instance-groups managed create fancy-fe-mig \
--base-instance-name fancy-fe \
--size 2 \
--template fancy-fe
gcloud compute instance-groups managed create fancy-be-mig \
--base-instance-name fancy-be \
--size 2 \
--template fancy-be
# Create named ports so the load balancer knows which port each service uses on the instance groups
gcloud compute instance-groups set-named-ports fancy-fe-mig \
--named-ports frontend:8080
gcloud compute instance-groups set-named-ports fancy-be-mig \
--named-ports orders:8081,products:8082
# Create health checks on the application endpoints, used by the MIGs for autohealing
gcloud compute health-checks create http fancy-fe-hc \
--port 8080 \
--check-interval 30s \
--healthy-threshold 1 \
--timeout 10s \
--unhealthy-threshold 3
gcloud compute health-checks create http fancy-be-hc \
--port 8081 \
--request-path=/api/orders \
--check-interval 30s \
--healthy-threshold 1 \
--timeout 10s \
--unhealthy-threshold 3
# Create a firewall rule that allows the health-check probe source ranges
gcloud compute firewall-rules create allow-health-check \
--allow tcp:8080-8081 \
--source-ranges 130.211.0.0/22,35.191.0.0/16 \
--network default
# Configure MIG with health check
gcloud compute instance-groups managed update fancy-fe-mig \
--health-check fancy-fe-hc \
--initial-delay 300
gcloud compute instance-groups managed update fancy-be-mig \
--health-check fancy-be-hc \
--initial-delay 300
# Create legacy HTTP health checks so the load balancer forwards requests only to healthy instances
gcloud compute http-health-checks create fancy-fe-frontend-hc --request-path / --port 8080
gcloud compute http-health-checks create fancy-be-orders-hc --request-path /api/orders --port 8081
gcloud compute http-health-checks create fancy-be-products-hc --request-path /api/products --port 8082
# Create LB backend services with healthcheck and named port
gcloud compute backend-services create fancy-fe-frontend \
--http-health-checks fancy-fe-frontend-hc \
--port-name frontend \
--global
gcloud compute backend-services create fancy-be-orders \
--http-health-checks fancy-be-orders-hc \
--port-name orders \
--global
gcloud compute backend-services create fancy-be-products \
--http-health-checks fancy-be-products-hc \
--port-name products \
--global
# Add the managed instance groups as backends of the backend services
gcloud compute backend-services add-backend fancy-fe-frontend \
--instance-group fancy-fe-mig \
--instance-group-zone us-central1-f \
--global
gcloud compute backend-services add-backend fancy-be-orders \
--instance-group fancy-be-mig \
--instance-group-zone us-central1-f \
--global
gcloud compute backend-services add-backend fancy-be-products \
--instance-group fancy-be-mig \
--instance-group-zone us-central1-f \
--global
# Create a URL map and path matcher to direct requests to the proper backend service
gcloud compute url-maps create fancy-map --default-service fancy-fe-frontend
gcloud compute url-maps add-path-matcher fancy-map \
--default-service fancy-fe-frontend \
--path-matcher-name orders \
--path-rules "/api/orders=fancy-be-orders,/api/products=fancy-be-products"
# Create an HTTP proxy tied to the URL map
gcloud compute target-http-proxies create fancy-proxy --url-map fancy-map
# Create a global forwarding rule that ties a public IP and port to the HTTP proxy
gcloud compute forwarding-rules create fancy-http-rule --global --target-http-proxy fancy-proxy --ports 80
# Get the public IP of the load balancer (forwarding rule)
gcloud compute forwarding-rules list --global
# Update the frontend project's `.env` file to point at the load balancer
REACT_APP_ORDERS_URL=http://[LB_IP]/api/orders
REACT_APP_PRODUCTS_URL=http://[LB_IP]/api/products
# rebuild the project locally on Cloud Shell
cd ~/monolith-to-microservices/react-app
npm install && npm run-script build
# update Cloud Storage bucket
cd ~
rm -rf monolith-to-microservices/*/node_modules
gsutil -m cp -r monolith-to-microservices gs://fancy-store-$DEVSHELL_PROJECT_ID/
# Roll out the update by replacing the instances (the startup script pulls the latest code on boot)
gcloud compute instance-groups managed rolling-action replace fancy-fe-mig --max-unavailable 100%
watch -n 2 gcloud compute instance-groups list-instances fancy-fe-mig
watch -n 2 gcloud compute backend-services get-health fancy-fe-frontend --global
# Scale automatically by setting an autoscaling policy on the MIGs
gcloud compute instance-groups managed set-autoscaling \
fancy-fe-mig \
--max-num-replicas 2 \
--target-load-balancing-utilization 0.60
gcloud compute instance-groups managed set-autoscaling \
fancy-be-mig \
--max-num-replicas 2 \
--target-load-balancing-utilization 0.60
# Enable Cloud CDN to cache content at the Google Front End (GFE)
gcloud compute backend-services update fancy-fe-frontend \
--enable-cdn --global
```
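- Smoke-testing the stack end to end (a sketch; `[LB_IP]` is the forwarding-rule address from `gcloud compute forwarding-rules list --global`, and the static asset path is only illustrative)
```sh
# All three backend services should eventually report HEALTHY instances.
gcloud compute backend-services get-health fancy-fe-frontend --global
gcloud compute backend-services get-health fancy-be-orders --global
gcloud compute backend-services get-health fancy-be-products --global
# Request the site and an API route through the load balancer.
curl -sI http://[LB_IP]/
curl -s http://[LB_IP]/api/orders | head -c 300; echo
# With Cloud CDN enabled, repeated requests for cacheable objects may include
# an Age header, indicating a cache hit at the Google Front End.
curl -sI http://[LB_IP]/static/js/main.js  # asset path is illustrative; build file names vary
```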
- Update the instance template
```sh
# Change the machine type of the stopped frontend instance first
gcloud compute instances set-machine-type frontend --machine-type custom-4-3840
# Create a new template from the updated instance
gcloud compute instance-templates create fancy-fe-new \
--source-instance=frontend \
--source-instance-zone=us-central1-f
# Roll out new template to instance group
gcloud compute instance-groups managed rolling-action start-update fancy-fe-mig \
--version template=fancy-fe-new
# Check the VM machine type
watch -n 2 gcloud compute instance-groups managed list-instances fancy-fe-mig
gcloud compute instances describe [VM_NAME] | grep machineType
```
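- Waiting for the rollout to converge instead of polling by hand (a sketch using the group and template names above)
```sh
# Block until every instance in the MIG runs the target version (fancy-fe-new).
gcloud compute instance-groups managed wait-until fancy-fe-mig --version-target-reached
# Then spot-check the machine type of one of the listed VMs.
gcloud compute instance-groups managed list-instances fancy-fe-mig
gcloud compute instances describe [VM_NAME] --format="value(machineType)"
```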
- Update the application code
```sh
# Switch in the updated Home page index file
cd ~/monolith-to-microservices/react-app/src/pages/Home
mv index.js.new index.js
# build it
cd ~/monolith-to-microservices/react-app
npm install && npm run-script build
# re-push to Cloud Storage bucket
cd ~
rm -rf monolith-to-microservices/*/node_modules
gsutil -m cp -r monolith-to-microservices gs://fancy-store-$DEVSHELL_PROJECT_ID/
# Roll the update out to the MIG
gcloud compute instance-groups managed rolling-action replace fancy-fe-mig \
--max-unavailable=100%
# Check
watch -n 2 gcloud compute instance-groups list-instances fancy-fe-mig
watch -n 2 gcloud compute backend-services get-health fancy-fe-frontend --global
```
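- Because Cloud CDN is enabled on the frontend backend service, cached copies of the old page can linger after a code push. A minimal sketch for invalidating the cache on the URL map created above:
```sh
# Invalidate everything under / so clients fetch the new build.
gcloud compute url-maps invalidate-cdn-cache fancy-map --path "/*"
# Invalidations run asynchronously; watch recent operations for progress.
gcloud compute operations list --filter='operationType~invalidate' --limit=5
```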
- Simulate a failure
```sh
# Get an instance name
gcloud compute instance-groups list-instances fancy-fe-mig
# SSH into the instance
gcloud compute ssh [INSTANCE_NAME]
# stop the app
sudo supervisorctl stop nodeapp; sudo killall node
exit
# Watch the MIG repair operation bring up a replacement instance
watch -n 2 gcloud compute operations list --filter='operationType~compute.instances.repair.*'
```
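- The autohealing behavior observed here comes from the health-check policy attached to the MIG earlier. A quick sketch for inspecting that policy and confirming the replacement instance turns healthy again:
```sh
# Show the autohealing policy (health check and initial delay) on the MIG.
gcloud compute instance-groups managed describe fancy-fe-mig \
  --format="yaml(autoHealingPolicies)"
# The replaced instance should return to HEALTHY in the backend service.
watch -n 2 gcloud compute backend-services get-health fancy-fe-frontend --global
```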
B.3. Orchestrating the Cloud with Kubernetes
B.4. Networking 101
- VPC
- Projects > Networks > Subnetworks
- Firewall rules: Ingress vs Egress
- Routes: automatically created by Google Cloud (see the gcloud inspection sketch below).
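- Inspecting these objects in the current project (a minimal sketch using standard list commands)
```sh
# VPC networks and their subnet mode.
gcloud compute networks list
# Subnetworks per region, with their CIDR ranges.
gcloud compute networks subnets list
# Routes, including the auto-created subnet routes and the default internet route.
gcloud compute routes list
# Firewall rules, with direction (INGRESS/EGRESS), sources/targets, and allowed protocols.
gcloud compute firewall-rules list
```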
- Custom network
```sh
# Create custom network
gcloud compute networks create taw-custom-network --subnet-mode custom
# Create subnetwork
gcloud compute networks subnets create subnet-us-central \
--network taw-custom-network \
--region us-central1 \
--range 10.0.0.0/16
gcloud compute networks subnets create subnet-europe-west \
--network taw-custom-network \
--region europe-west1 \
--range 10.1.0.0/16
gcloud compute networks subnets create subnet-asia-east \
--network taw-custom-network \
--region asia-east1 \
--range 10.2.0.0/16
# Check created subnetworks
gcloud compute networks subnets list --network taw-custom-network
# Add Firewall rules to allow Ingress access
gcloud compute firewall-rules create "nw101-allow-http" \
--allow tcp:80 --network "taw-custom-network" \
--source-ranges 0.0.0.0/0 \
--target-tags "http"
gcloud compute firewall-rules create "nw101-allow-icmp" \
--allow icmp --network "taw-custom-network" \
--source-ranges 0.0.0.0/0 \
--target-tags "rules"
gcloud compute firewall-rules create "nw101-allow-internal" \
--allow tcp:0-65535,udp:0-65535,icmp --network "taw-custom-network" \
--source-ranges "10.0.0.0/16","10.2.0.0/16","10.1.0.0/16"
gcloud compute firewall-rules create "nw101-allow-ssh" \
--allow tcp:22 --network "taw-custom-network" \
--source-ranges 0.0.0.0/0 \
--target-tags "ssh"
gcloud compute firewall-rules create "nw101-allow-rdp" \
--allow tcp:3389 --network "taw-custom-network"
# Create VM instances in each subnet
gcloud compute instances create us-test-01 \
--subnet subnet-us-central \
--zone us-central1-a \
--tags ssh,http,rules
gcloud compute instances create europe-test-01 \
--subnet subnet-europe-west \
--zone europe-west1-b \
--tags ssh,http,rules
gcloud compute instances create asia-test-01 \
--subnet subnet-asia-east \
--zone asia-east1-a \
--tags ssh,http,rules
# Check connectivity from the VMs (from Cloud Shell, SSH in with gcloud)
gcloud compute ssh us-test-01 --zone us-central1-a
ping -c 3 <europe-test-01-external-ip-address>
ping -c 3 <asia-test-01-external-ip-address>
# Check inter-region latency over the internal network, using the internal DNS name
ping -c 3 europe-test-01.europe-west1-b
# Trace networking
sudo apt-get update && sudo apt-get -y install traceroute mtr tcpdump iperf whois host dnsutils siege
traceroute www.icann.org
traceroute www.wikipedia.org
traceroute www.adcash.com
traceroute bad.horse
traceroute -m 255 bad.horse
# Measure throughput with iperf (install the tools on every VM involved)
sudo apt-get update && sudo apt-get -y install traceroute mtr tcpdump iperf whois host dnsutils siege
# On us-test-01: run iperf in server mode
gcloud compute ssh us-test-01 --zone us-central1-a
iperf -s
# On europe-test-01: run iperf in client mode, connecting to us-test-01
gcloud compute ssh europe-test-01 --zone europe-west1-b
iperf -c us-test-01.us-central1-a
# From us-test-01: UDP client sending at 2 Gbit/s towards europe-test-01
iperf -c europe-test-01.europe-west1-b -u -b 2G
# From us-test-02 (an additional instance in us-central1, not created above): 20 parallel streams
iperf -c us-test-01.us-central1-a -P 20
# https://cloud.google.com/compute/docs/network-bandwidth
```
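- Tearing down the lab resources afterwards, in dependency order (a sketch assuming the names, zones, and regions used above)
```sh
# Delete the test VMs.
gcloud compute instances delete us-test-01 --zone us-central1-a --quiet
gcloud compute instances delete europe-test-01 --zone europe-west1-b --quiet
gcloud compute instances delete asia-test-01 --zone asia-east1-a --quiet
# Delete the firewall rules attached to the custom network.
gcloud compute firewall-rules delete nw101-allow-http nw101-allow-icmp \
  nw101-allow-internal nw101-allow-ssh nw101-allow-rdp --quiet
# Delete the subnets, then the network itself.
gcloud compute networks subnets delete subnet-us-central --region us-central1 --quiet
gcloud compute networks subnets delete subnet-europe-west --region europe-west1 --quiet
gcloud compute networks subnets delete subnet-asia-east --region asia-east1 --quiet
gcloud compute networks delete taw-custom-network --quiet
```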
B.5. Using BigQuery and Cloud Logging to Analyze BigQuery Usage
Cloud Logging lets you store, search, analyze, monitor, and alert on log data and events from Google Cloud, including BigQuery.
Every BigQuery operation is logged, and you can explore those log entries in Cloud Logging and create a sink that routes the entries of interest to a destination service for further analysis.
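- For BigQuery usage analysis, the sink destination can itself be a BigQuery dataset. A minimal sketch, assuming a dataset named `bq_logs` and a simple resource-type filter (names and filter are illustrative; the sink's writer identity also needs the BigQuery Data Editor role on the dataset):
```sh
# Create a dataset to receive the exported logs (name is illustrative).
bq mk --dataset $DEVSHELL_PROJECT_ID:bq_logs
# Create a sink that routes BigQuery-related log entries into that dataset.
gcloud logging sinks create bq_usage_sink \
  bigquery.googleapis.com/projects/$DEVSHELL_PROJECT_ID/datasets/bq_logs \
  --log-filter='resource.type="bigquery_resource"'
```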
- BigQuery queries with the `bq` CLI
```sh
# Average monthly temperature for station 947680 in 2010
bq query --location=us --use_legacy_sql=false --use_cache=false \
'select month, avg(mean_temp) as avgtemp from `qwiklabs-resources.qlweather_geo.gsod` where station_number = 947680 and year = 2010 group by month order by month'
# Top 10 routes by number of flights for airlines matching "Alaska"
bq query --location=us --use_legacy_sql=false --use_cache=false \
'select CONCAT(departure_airport, "-", arrival_airport) as route, count(*) as numberflights from `bigquery-samples.airline_ontime_data.airline_id_codes` ac, `qwiklabs-resources.qlairline_ontime_data.flights` fl where ac.code = fl.airline_code and regexp_contains(ac.airline , r"Alaska") group by 1 order by 2 desc LIMIT 10'
```
B.6. Migrate to Cloud SQL for PostgreSQL using Database Migration Service
Database Migration Service provides options for one-time and continuous jobs to migrate data to Cloud SQL using different connectivity options, including IP allowlists, VPC peering, and reverse SSH tunnels (see the documentation on connectivity options at https://cloud.google.com/database-migration/docs/postgresql/configure-connectivity).
In this lab, you migrate a stand-alone PostgreSQL database (running on a virtual machine) to Cloud SQL for PostgreSQL using a continuous Database Migration Service job and VPC peering for connectivity.
Migrating a database via Database Migration Service requires some preparation of the source database: creating a dedicated user with replication rights, adding the pglogical database extension to the source databases, and granting that user rights to the schemas and tables of the databases to be migrated, as well as of the postgres database.
After you create and run the migration job, you confirm that an initial copy of your database has been successfully migrated to your Cloud SQL for PostgreSQL instance. You also explore how continuous migration jobs apply data updates from your source database to your Cloud SQL instance. To conclude the migration job, you promote the Cloud SQL instance to be a stand-alone database for reading and writing data.
- Prepare the source database for migration.
```sh
# Enable the required Cloud APIs
gcloud services enable datamigration.googleapis.com servicenetworking.googleapis.com
# Installing and configuring the pglogical database extension. https://github.com/2ndQuadrant/pglogical
sudo apt install postgresql-13-pglogical
# Configuring the stand-alone PostgreSQL database to allow access from Cloud Shell and Cloud SQL.
sudo su - postgres -c "gsutil cp gs://cloud-training/gsp918/pg_hba_append.conf ."
sudo su - postgres -c "gsutil cp gs://cloud-training/gsp918/postgresql_append.conf ."
sudo su - postgres -c "cat pg_hba_append.conf >> /etc/postgresql/13/main/pg_hba.conf"
sudo su - postgres -c "cat postgresql_append.conf >> /etc/postgresql/13/main/postgresql.conf"
sudo systemctl restart postgresql@13-main
# Adding the pglogical database extension to the postgres, orders and gmemegen_db databases on the stand-alone server.
sudo su - postgres
psql
\c postgres;
CREATE EXTENSION pglogical;
\c orders;
CREATE EXTENSION pglogical;
\c gmemegen_db;
CREATE EXTENSION pglogical;
\l
# Creating a migration_admin user (with Replication permissions) for database migration and
CREATE USER migration_admin PASSWORD 'DMS_1s_cool!';
ALTER DATABASE orders OWNER TO migration_admin;
ALTER ROLE migration_admin WITH REPLICATION;
# granting the required permissions to schemata and relations to that user.
\c postgres;
GRANT USAGE ON SCHEMA pglogical TO migration_admin;
GRANT ALL ON SCHEMA pglogical TO migration_admin;
GRANT SELECT ON pglogical.tables TO migration_admin;
GRANT SELECT ON pglogical.depend TO migration_admin;
GRANT SELECT ON pglogical.local_node TO migration_admin;
GRANT SELECT ON pglogical.local_sync_status TO migration_admin;
GRANT SELECT ON pglogical.node TO migration_admin;
GRANT SELECT ON pglogical.node_interface TO migration_admin;
GRANT SELECT ON pglogical.queue TO migration_admin;
GRANT SELECT ON pglogical.replication_set TO migration_admin;
GRANT SELECT ON pglogical.replication_set_seq TO migration_admin;
GRANT SELECT ON pglogical.replication_set_table TO migration_admin;
GRANT SELECT ON pglogical.sequence_state TO migration_admin;
GRANT SELECT ON pglogical.subscription TO migration_admin;
\c orders;
GRANT USAGE ON SCHEMA pglogical TO migration_admin;
GRANT ALL ON SCHEMA pglogical TO migration_admin;
GRANT SELECT ON pglogical.tables TO migration_admin;
GRANT SELECT ON pglogical.depend TO migration_admin;
GRANT SELECT ON pglogical.local_node TO migration_admin;
GRANT SELECT ON pglogical.local_sync_status TO migration_admin;
GRANT SELECT ON pglogical.node TO migration_admin;
GRANT SELECT ON pglogical.node_interface TO migration_admin;
GRANT SELECT ON pglogical.queue TO migration_admin;
GRANT SELECT ON pglogical.replication_set TO migration_admin;
GRANT SELECT ON pglogical.replication_set_seq TO migration_admin;
GRANT SELECT ON pglogical.replication_set_table TO migration_admin;
GRANT SELECT ON pglogical.sequence_state TO migration_admin;
GRANT SELECT ON pglogical.subscription TO migration_admin;
GRANT USAGE ON SCHEMA public TO migration_admin;
GRANT ALL ON SCHEMA public TO migration_admin;
GRANT SELECT ON public.distribution_centers TO migration_admin;
GRANT SELECT ON public.inventory_items TO migration_admin;
GRANT SELECT ON public.order_items TO migration_admin;
GRANT SELECT ON public.products TO migration_admin;
GRANT SELECT ON public.users TO migration_admin;
\c gmemegen_db;
GRANT USAGE ON SCHEMA pglogical TO migration_admin;
GRANT ALL ON SCHEMA pglogical TO migration_admin;
GRANT SELECT ON pglogical.tables TO migration_admin;
GRANT SELECT ON pglogical.depend TO migration_admin;
GRANT SELECT ON pglogical.local_node TO migration_admin;
GRANT SELECT ON pglogical.local_sync_status TO migration_admin;
GRANT SELECT ON pglogical.node TO migration_admin;
GRANT SELECT ON pglogical.node_interface TO migration_admin;
GRANT SELECT ON pglogical.queue TO migration_admin;
GRANT SELECT ON pglogical.replication_set TO migration_admin;
GRANT SELECT ON pglogical.replication_set_seq TO migration_admin;
GRANT SELECT ON pglogical.replication_set_table TO migration_admin;
GRANT SELECT ON pglogical.sequence_state TO migration_admin;
GRANT SELECT ON pglogical.subscription TO migration_admin;
GRANT USAGE ON SCHEMA public TO migration_admin;
GRANT ALL ON SCHEMA public TO migration_admin;
GRANT SELECT ON public.meme TO migration_admin;
\c orders;
\dt
ALTER TABLE public.distribution_centers OWNER TO migration_admin;
ALTER TABLE public.inventory_items OWNER TO migration_admin;
ALTER TABLE public.order_items OWNER TO migration_admin;
ALTER TABLE public.products OWNER TO migration_admin;
ALTER TABLE public.users OWNER TO migration_admin;
\dt
\q
exit
```
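- Once the migration job is running, the initial load can be spot-checked against the Cloud SQL replica. A minimal sketch, assuming the instance IP is known (placeholder below) and the default postgres user is used; table names follow the orders schema prepared above:
```sh
# From Cloud Shell or a VM that can reach the Cloud SQL instance.
psql -h [CLOUD_SQL_IP] -U postgres -d orders -c "\dt"
psql -h [CLOUD_SQL_IP] -U postgres -d orders \
  -c "select count(*) from public.distribution_centers;"
```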
- Create a profile for a source connection to a PostgreSQL instance (e.g., stand-alone PostgreSQL). `Database Migration` > `Connection profiles` > `+ Create Profile`.
- Configure connectivity between the source and destination database instances using VPC peering. `Database Migration` > `Migration jobs` > `+ Create Migration Job`
- Configure firewall and database access rules to allow access to the source database for migration.
- Create, run, and verify a continuous migration job using Database Migration Service.
- Promote the destination instance (Cloud SQL for PostgreSQL) to be a stand-alone database for reading and writing data (a gcloud sketch for monitoring and promoting the job follows below).
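- The console steps above also have gcloud equivalents in the `gcloud database-migration` command group. A minimal sketch for checking and promoting a job; the job name and region are placeholders:
```sh
# List migration jobs and inspect the state of a specific job.
gcloud database-migration migration-jobs list --region=[REGION]
gcloud database-migration migration-jobs describe [MIGRATION_JOB] --region=[REGION]
# Once replication has caught up, promote the Cloud SQL instance to stand-alone.
gcloud database-migration migration-jobs promote [MIGRATION_JOB] --region=[REGION]
```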