new exam files

Author: Sayed Abubaker Hashimi, 2025-05-07 13:02:50 +02:00
parent 18f4edbf1b
commit 00bcc3dadd
31 changed files with 343 additions and 233 deletions

sk1/.gitignore (new vendored file, 62 lines)

@@ -0,0 +1,62 @@
# Node.js dependencies
node_modules/
npm-debug.log
yarn-debug.log
yarn-error.log
package-lock.json
yarn.lock
# Environment variables and secrets
.env
.env.local
.env.development
.env.test
.env.production
*.env
# Google Cloud and Kubernetes
gcloud.json
key.json
*.kubeconfig
# Log files
logs/
*.log
# Build files
dist/
build/
coverage/
.next/
# Docker
.docker/
docker-compose.override.yml
# IDE and editor files
.idea/
.vscode/
*.swp
*.swo
.DS_Store
Thumbs.db
*~
# Generated Kubernetes files
k8s/ingress-with-domain.yaml
k8s/*-updated.yaml
k8s/generated/
# Windows WSL file identifiers
*Zone.Identifier
# Temporary files
*.tmp
*.bak
.cache/
*.tgz
.npm/
# Testing
coverage/
.nyc_output/

z2/Dockerfile → sk1/Dockerfile (renamed, mode Normal file → Executable file, 0 changes)


@@ -1,4 +1,4 @@
apiVersion: v1
kind: Namespace
metadata:
  name: contact-app

sk1/k8s/app-deployment.yaml (as applied by prepare-app.sh)

@@ -1,24 +1,29 @@
-apiVersion: apps/v1
-kind: Deployment
-metadata:
-  name: contact-app
-  namespace: contact-app
-spec:
-  replicas: 1
-  selector:
-    matchLabels:
-      app: contact-app
-  template:
-    metadata:
-      labels:
-        app: contact-app
-    spec:
-      containers:
-      - name: contact-app
-        image: contact-app:latest
-        imagePullPolicy: IfNotPresent
-        ports:
-        - containerPort: 3000
-        env:
-        - name: MONGODB_URI
-          value: "mongodb://mongo:27017/contacts"
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: contact-app
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: contact-app
+  template:
+    metadata:
+      labels:
+        app: contact-app
+    spec:
+      containers:
+      - name: contact-app
+        image: <YOUR_GCR_IMAGE>
+        ports:
+        - containerPort: 3000
+        env:
+        - name: MONGODB_URI
+          value: "mongodb://mongo:27017/contactlist"
+        resources:
+          requests:
+            cpu: "100m"
+            memory: "128Mi"
+          limits:
+            cpu: "200m"
+            memory: "256Mi"

sk1/k8s/app-service.yaml (as applied by prepare-app.sh)

@@ -1,13 +1,13 @@
-apiVersion: v1
-kind: Service
-metadata:
-  name: contact-app
-  namespace: contact-app
-spec:
-  selector:
-    app: contact-app
-  ports:
-  - protocol: TCP
-    port: 3000
-    targetPort: 3000
-  type: NodePort
+apiVersion: v1
+kind: Service
+metadata:
+  name: contact-app
+spec:
+  type: ClusterIP
+  ports:
+  - port: 80
+    targetPort: 3000
+    protocol: TCP
+    name: http
+  selector:
+    app: contact-app
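
With this change the contact-app Service is ClusterIP-only: it listens on port 80 inside the cluster and is reached from outside through the LoadBalancer Service and Ingress defined below. For a quick check that bypasses the load balancer, a port-forward works; a minimal sketch, assuming the contact-app namespace that prepare-app.sh creates:

```bash
# Forward local port 8080 to the Service (port 80 -> container port 3000)
kubectl -n contact-app port-forward svc/contact-app 8080:80
# Then open http://localhost:8080 in a browser
```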

sk1/k8s/contact-app-lb.yaml (as applied by prepare-app.sh)

@@ -0,0 +1,13 @@
apiVersion: v1
kind: Service
metadata:
  name: contact-app-lb
spec:
  type: LoadBalancer
  selector:
    app: contact-app
  ports:
  - port: 80
    targetPort: 3000
    protocol: TCP
    name: http

sk1/k8s/global-ip.yaml (new file, 9 lines)

@@ -0,0 +1,9 @@
apiVersion: v1
kind: Service
metadata:
  name: contact-app-ip
spec:
  type: LoadBalancer
  ports:
  - port: 80
    targetPort: 80
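
The Ingress below points at contact-app-ip through the kubernetes.io/ingress.global-static-ip-name annotation, which expects the name of a reserved global static address rather than a Service. If such an address is meant to exist (an assumption; it is not created anywhere in this commit), it would be reserved along these lines:

```bash
# Reserve a global static IP named contact-app-ip for the GCE Ingress (assumed step)
gcloud compute addresses create contact-app-ip --global
```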

sk1/k8s/ingress.yaml (new file, 28 lines)

@@ -0,0 +1,28 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: contact-app-ingress
  annotations:
    networking.gke.io/managed-certificates: contact-app-cert
    kubernetes.io/ingress.global-static-ip-name: contact-app-ip
spec:
  ingressClassName: gce
  rules:
  - http:
      paths:
      - path: /
        pathType: Prefix
        backend:
          service:
            name: contact-app
            port:
              number: 80
---
apiVersion: networking.gke.io/v1
kind: ManagedCertificate
metadata:
  name: contact-app-cert
spec:
  domains:
  # This will be populated with the automatic domain from GKE
  - DOMAIN_PLACEHOLDER
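
prepare-app.sh below does not apply this file directly; it generates k8s/ingress-with-domain.yaml with the real nip.io host instead. If this manifest were applied on its own, the placeholder would first have to be replaced, for example (hypothetical host shown):

```bash
# Substitute the placeholder domain before applying (203.0.113.10.nip.io is only an example)
sed "s|DOMAIN_PLACEHOLDER|203.0.113.10.nip.io|" k8s/ingress.yaml | kubectl apply -n contact-app -f -
```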

sk1/k8s/mongo-deployment.yaml (as applied by prepare-app.sh)

@@ -0,0 +1,33 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: mongo
spec:
  replicas: 1
  selector:
    matchLabels:
      app: mongo
  template:
    metadata:
      labels:
        app: mongo
    spec:
      containers:
      - name: mongo
        image: mongo:4.4
        ports:
        - containerPort: 27017
        volumeMounts:
        - name: mongo-storage
          mountPath: /data/db
        resources:
          requests:
            memory: "256Mi"
            cpu: "100m"
          limits:
            memory: "512Mi"
            cpu: "300m"
      volumes:
      - name: mongo-storage
        persistentVolumeClaim:
          claimName: mongo-pvc

sk1/k8s/mongo-pvc.yaml (new file, 10 lines)

@@ -0,0 +1,10 @@
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: mongo-pvc
spec:
  accessModes:
  - ReadWriteOnce
  resources:
    requests:
      storage: 1Gi

sk1/k8s/mongo-service.yaml (as applied by prepare-app.sh)

@@ -0,0 +1,11 @@
apiVersion: v1
kind: Service
metadata:
  name: mongo
spec:
  clusterIP: None
  selector:
    app: mongo
  ports:
  - port: 27017
    targetPort: 27017
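
Because clusterIP is None, this is a headless Service: the DNS name mongo resolves straight to the MongoDB pod, which is what the mongodb://mongo:27017/contactlist connection string in the app Deployment relies on. A quick way to confirm resolution from inside the cluster, sketched with a throwaway pod:

```bash
# Resolve the headless service name from a temporary busybox pod in the same namespace
kubectl -n contact-app run dns-test --rm -it --image=busybox:1.36 --restart=Never -- nslookup mongo
```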

z2/package.json → sk1/package.json (renamed, mode Normal file → Executable file, 0 changes)

sk1/prepare-app.sh (new executable file, 97 lines)

@@ -0,0 +1,97 @@
#!/bin/bash
# filepath: prepare-app.sh

set -e

# Set variables
PROJECT_ID=$(gcloud config get-value project)
REGION=europe-central2
ZONE=europe-central2-a
CLUSTER_NAME=sayed-cluster-1

# Build and push the Docker image
echo "[1/4] Building and pushing Docker image..."
docker build -t gcr.io/$PROJECT_ID/contact-app:latest .
gcloud auth configure-docker
docker push gcr.io/$PROJECT_ID/contact-app:latest

# Check if cluster exists, if not create it
echo "[2/4] Checking for GKE cluster..."
if ! gcloud container clusters describe $CLUSTER_NAME --region $REGION --project $PROJECT_ID &>/dev/null; then
  echo "Cluster $CLUSTER_NAME not found. Creating cluster..."
  gcloud container clusters create-auto $CLUSTER_NAME \
    --region $REGION \
    --project $PROJECT_ID
  echo "Cluster created successfully."
else
  echo "Cluster $CLUSTER_NAME already exists."
fi

# Connect to the cluster
echo "[3/4] Connecting to GKE cluster..."
gcloud container clusters get-credentials $CLUSTER_NAME --region $REGION --project $PROJECT_ID
kubectl config set-context --current --namespace=default

# Create namespace and apply MongoDB resources
echo "[4/4] Deploying Kubernetes resources..."
kubectl create namespace contact-app --dry-run=client -o yaml | kubectl apply -f -
kubectl apply -n contact-app -f k8s/mongo-pvc.yaml
kubectl apply -n contact-app -f k8s/mongo-deployment.yaml
kubectl apply -n contact-app -f k8s/mongo-service.yaml

# Apply app with image substitution and create LoadBalancer service
echo "Deploying application..."
sed "s|<YOUR_GCR_IMAGE>|gcr.io/$PROJECT_ID/contact-app:latest|g" k8s/app-deployment.yaml | kubectl apply -n contact-app -f -
kubectl apply -n contact-app -f k8s/app-service.yaml
kubectl apply -n contact-app -f k8s/contact-app-lb.yaml

# Wait for LoadBalancer to get external IP
echo "Waiting for LoadBalancer to get external IP..."
while [[ -z $(kubectl get service contact-app-lb -n contact-app -o jsonpath='{.status.loadBalancer.ingress[0].ip}' 2>/dev/null) ]]; do
  echo -n "."
  sleep 5
done

# Get the external IP
EXTERNAL_IP=$(kubectl get service contact-app-lb -n contact-app -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
echo "External IP: $EXTERNAL_IP"

# Create a domain using nip.io
DOMAIN="${EXTERNAL_IP}.nip.io"
echo "Domain: $DOMAIN"

# Create and apply ingress with the domain
cat <<EOF > k8s/ingress-with-domain.yaml
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: contact-app-ingress
  annotations:
    networking.gke.io/managed-certificates: contact-app-cert
    kubernetes.io/ingress.class: "gce"
spec:
  rules:
  - host: ${DOMAIN}
    http:
      paths:
      - path: /
        pathType: Prefix
        backend:
          service:
            name: contact-app
            port:
              number: 80
---
apiVersion: networking.gke.io/v1
kind: ManagedCertificate
metadata:
  name: contact-app-cert
spec:
  domains:
  - ${DOMAIN}
EOF

kubectl apply -n contact-app -f k8s/ingress-with-domain.yaml

echo "Deployment started. It may take a few minutes for the HTTPS certificate to be provisioned."
echo "Once provisioned, your application will be available at:"
echo "https://${DOMAIN}"

sk1/remove-app.sh (new executable file, 33 lines)

@@ -0,0 +1,33 @@
#!/bin/bash
# filepath: remove-app.sh

set -e

# Set variables
PROJECT_ID=$(gcloud config get-value project)
REGION=europe-central2
ZONE=europe-central2-a
CLUSTER_NAME=sayed-cluster-1

# Get cluster credentials
gcloud container clusters get-credentials $CLUSTER_NAME --region $REGION --project $PROJECT_ID

echo "Removing application resources..."

# Delete Ingress and certificates
kubectl delete ingress contact-app-ingress -n contact-app --ignore-not-found=true
kubectl delete managedcertificate contact-app-cert -n contact-app --ignore-not-found=true

# Delete app deployment and services
kubectl delete deployment contact-app -n contact-app --ignore-not-found=true
kubectl delete service contact-app -n contact-app --ignore-not-found=true
kubectl delete service contact-app-lb -n contact-app --ignore-not-found=true

# Delete MongoDB deployment, service, and PVC
kubectl delete deployment mongo -n contact-app --ignore-not-found=true
kubectl delete service mongo -n contact-app --ignore-not-found=true
kubectl delete pvc mongo-pvc -n contact-app --ignore-not-found=true

# Delete namespace
kubectl delete namespace contact-app --ignore-not-found=true

echo "Application and associated resources have been removed."


@@ -49,16 +49,4 @@ router.delete('/:id', async (req, res) => {
   }
 });
 
 module.exports = router;
-
-router.post('/', async (req, res) => { /* ... */ });
-// Get all contacts
-router.get('/', async (req, res) => { /* ... */ });
-// Update a contact
-router.put('/:id', async (req, res) => { /* ... */ });
-// Delete a contact
-router.delete('/:id', async (req, res) => { /* ... */ });
-module.exports = router;


@@ -26,6 +26,6 @@ mongoose.connect(MONGODB_URI, {
 app.use('/api/contacts', contactsRouter);
 
 // Start the server
-app.listen(PORT, () => {
+app.listen(PORT, '0.0.0.0', () => {
   console.log(`Server is running on http://localhost:${PORT}`);
 });


@@ -1,81 +0,0 @@
# Contact List Web Application on Kubernetes

## Description
A simple web-based contact list app with name and number fields, using Node.js/Express and MongoDB. Data is persisted in MongoDB.

## Containers
- **contact-app**: Node.js/Express web server serving the frontend and API.
- **mongo**: Official MongoDB container for data storage.

## Kubernetes Objects
- **Namespace**: `contact-app` — all resources are grouped here.
- **Deployment**: Runs the Node.js web app.
- **StatefulSet**: Runs MongoDB with persistent storage.
- **PersistentVolume/PersistentVolumeClaim**: Stores MongoDB data.
- **Service**:
  - `contact-app`: Exposes the web app on a NodePort.
  - `mongo`: Headless service for MongoDB.

## Virtual Networks and Volumes
- **Headless Service**: For MongoDB pod DNS discovery.
- **Named Volume**: `/data/mongo` on the host, mounted to MongoDB for persistence.

## Container Configuration
- The Node.js app uses the `MONGODB_URI` environment variable to connect to MongoDB.
- MongoDB uses a persistent volume for `/data/db`.

## Instructions

### Prepare
```bash
chmod +x prepare-app.sh start-app.sh stop-app.sh
./prepare-app.sh
```

### Run
```bash
./start-app.sh
```

### Access the App
1. Get the NodePort:
   ```bash
   kubectl -n contact-app get service contact-app
   ```
2. Open in browser:
   `http://<minikube_ip>:<node_port>`
   - Get Minikube IP: `minikube ip`

### Pause/Stop
```bash
./stop-app.sh
```

### Delete Volumes (optional)
```bash
rm -rf /data/mongo
```

---

## View the Application
- Open the NodePort URL in your browser.
- Add and view contacts via the web UI.

---

## Notes
- Make sure Docker is running and Minikube is started.
- All kubectl commands assume your context is set to Minikube.


@@ -1,11 +0,0 @@
#!/bin/bash
# filepath: prepare-app.sh

# Use Minikube's Docker daemon
eval $(minikube docker-env)

# Build Docker image for the Node.js app
docker build -t contact-app:latest .

# Create local directory for MongoDB data if not exists
sudo mkdir -p /data/mongo


@@ -1,7 +0,0 @@
#!/bin/bash
# filepath: start-app.sh

kubectl apply -f namespace.yaml
kubectl apply -f statefulset.yaml
kubectl apply -f deployment.yaml
kubectl apply -f service.yaml


@@ -1,73 +0,0 @@
apiVersion: v1
kind: Service
metadata:
  name: mongo
  namespace: contact-app
  labels:
    app: mongo
spec:
  ports:
  - port: 27017
    name: mongo
  clusterIP: None
  selector:
    app: mongo
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: mongo-pv
  namespace: contact-app
spec:
  capacity:
    storage: 1Gi
  accessModes:
  - ReadWriteOnce
  hostPath:
    path: "/data/mongo"
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: mongo-pvc
  namespace: contact-app
spec:
  accessModes:
  - ReadWriteOnce
  resources:
    requests:
      storage: 1Gi
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: mongo
  namespace: contact-app
spec:
  serviceName: "mongo"
  replicas: 1
  selector:
    matchLabels:
      app: mongo
  template:
    metadata:
      labels:
        app: mongo
    spec:
      containers:
      - name: mongo
        image: mongo:6.0
        ports:
        - containerPort: 27017
        volumeMounts:
        - name: mongo-storage
          mountPath: /data/db
  volumeClaimTemplates:
  - metadata:
      name: mongo-storage
      namespace: contact-app
    spec:
      accessModes: [ "ReadWriteOnce" ]
      resources:
        requests:
          storage: 1Gi


@@ -1,7 +0,0 @@
#!/bin/bash
# filepath: stop-app.sh

kubectl delete -f service.yaml
kubectl delete -f deployment.yaml
kubectl delete -f statefulset.yaml
kubectl delete -f namespace.yaml