Below you will find pages that utilize the taxonomy term “Jsonnet”
Deploying to K3s
A simple deployment of cAdvisor to K3s to confirm the ability to expose Ingresses using Tailscale Kubernetes Operator (TLS) and – since it’s already installed with K3s – Traefik (non-TLS).
// cAdvisor container image.
// NOTE(review): "latest" is mutable — consider pinning a tag or digest for reproducible deploys.
local image = "gcr.io/cadvisor/cadvisor:latest";
// Label set shared by every resource below; also used as the pod selector.
local labels = {
app: "cadvisor",
};
// NAME and NODE_IP are supplied at evaluation time (e.g. jsonnet --ext-str NAME=... --ext-str NODE_IP=...).
local name = std.extVar("NAME");
local node_ip = std.extVar("NODE_IP");
// cAdvisor's listen port; referenced by the Service and the Ingress backends.
local port = 8080;
// Single-replica Deployment running a locked-down cAdvisor container.
local deployment = {
  apiVersion: "apps/v1",
  kind: "Deployment",
  metadata: {
    name: name,
    labels: labels,
  },
  spec: {
    replicas: 1,
    selector: {
      matchLabels: labels,
    },
    template: {
      metadata: {
        labels: labels,
      },
      spec: {
        containers: [
          {
            name: name,
            image: image,
            ports: [
              {
                name: "http",
                // Reference the shared `port` local (was a hard-coded 8080) so the
                // container port cannot drift from the Service/Ingress port.
                containerPort: port,
                protocol: "TCP",
              },
            ],
            resources: {
              limits: {
                memory: "500Mi",
              },
              requests: {
                cpu: "250m",
                memory: "250Mi",
              },
            },
            // Defense-in-depth: unprivileged, non-root, read-only root filesystem.
            securityContext: {
              allowPrivilegeEscalation: false,
              privileged: false,
              readOnlyRootFilesystem: true,
              runAsGroup: 1000,
              runAsNonRoot: true,
              runAsUser: 1000,
            },
          },
        ],
      },
    },
  },
};
// Two Ingresses fronting the cAdvisor Service.
local ingresses = [
  {
    // Tailscale: TLS, reachable only on the tailnet (non-public).
    apiVersion: "networking.k8s.io/v1",
    kind: "Ingress",
    metadata: {
      name: "tailscale",
      labels: labels,
    },
    spec: {
      ingressClassName: "tailscale",
      // No host rules: everything routes to the cAdvisor Service.
      defaultBackend: {
        service: {
          name: name,
          port: {
            number: port,
          },
        },
      },
      tls: [
        {
          hosts: [
            name,
          ],
        },
      ],
    },
  },
  {
    // Traefik: plain HTTP via a nip.io wildcard-DNS hostname (non-public).
    apiVersion: "networking.k8s.io/v1",
    kind: "Ingress",
    metadata: {
      name: "traefik",
      labels: labels,
    },
    spec: {
      ingressClassName: "traefik",
      rules: [
        {
          // `%` on a string is shorthand for std.format.
          host: "%(name)s.%(node_ip)s.nip.io" % { name: name, node_ip: node_ip },
          http: {
            paths: [
              {
                path: "/",
                pathType: "Prefix",
                backend: {
                  service: {
                    name: name,
                    port: {
                      number: port,
                    },
                  },
                },
              },
            ],
          },
        },
      ],
    },
  },
];
// PrometheusRule that alerts when the test cAdvisor instance stops reporting.
local prometheusrule = {
  // Alert window in minutes; alternatively parameterize via an extVar.
  local duration = 10,
  apiVersion: "monitoring.coreos.com/v1",
  kind: "PrometheusRule",
  metadata: {
    name: name,
    labels: labels,
  },
  spec: {
    groups: [
      {
        name: name,
        rules: [
          {
            // Fixed typo: was "TestcAdvsiorDown".
            alert: "TestcAdvisorDown",
            annotations: {
              summary: "Test cAdvisor instance is down or not scraping metrics.",
              description: std.format(
                "The cAdvisor instance {{ $labels.instance }} has not been scraping metrics for more than %(duration)s minutes.", {
                  duration: duration,
                },
              ),
            },
            // Fires when the version metric is absent or carries an unexpected value.
            expr: "absent(cadvisor_version_info{namespace=\"k3s-test\"}) or (cadvisor_version_info{namespace=\"k3s-test\"}!=1)",
            // "for" is a Jsonnet keyword, hence the quoted field name.
            "for": std.format(
              "%(duration)sm", {
                duration: duration,
              },
            ),
            // Merge the shared labels with a rule-specific severity.
            labels: labels {
              severity: "warning",
            },
          },
        ],
      },
    ],
  },
};
// ClusterIP Service fronting the cAdvisor pod on the shared `port`.
local service = {
apiVersion: "v1",
kind: "Service",
metadata: {
name: name,
labels: labels,
},
spec: {
// Matches the pod labels applied by the Deployment's template.
selector: labels,
ports: [
{
// Named "http" so the ServiceMonitor endpoint can reference it by name.
name: "http",
port: port,
targetPort: port,
protocol: "TCP",
},
],
},
};
// Dedicated ServiceAccount for cAdvisor.
// NOTE(review): the Deployment's pod spec does not set serviceAccountName, so pods
// run under the namespace's "default" account — confirm whether this should be wired in.
local serviceaccount = {
apiVersion: "v1",
kind: "ServiceAccount",
metadata: {
name: name,
labels: labels,
},
};
// ServiceMonitor telling the Prometheus Operator to scrape the Service above.
local servicemonitor = {
apiVersion: "monitoring.coreos.com/v1",
kind: "ServiceMonitor",
metadata: {
name: name,
labels: labels,
},
spec: {
// Selects the Service by the shared label set.
selector: {
matchLabels: labels,
},
endpoints: [
{
interval: "120s",
path: "/metrics",
// Refers to the Service's named port ("http"), not a number.
port: "http",
scrapeTimeout: "30s",
},
],
},
};
// Output: a v1 List combining every manifest defined above.
// std.flattenArrays preserves item order, matching the original `+` concatenation.
{
  apiVersion: "v1",
  kind: "List",
  items: std.flattenArrays([
    [
      deployment,
      prometheusrule,
      service,
      serviceaccount,
      servicemonitor,
    ],
    ingresses,
  ]),
}
I like to have a script that applies Jsonnet to the file:
Gemini CLI (3/3)
Update 2025-07-08
Gemini CLI supports HTTP-based MCP server integration
So, it’s possible to replace the .gemini/settings.json included in the original post with:
{
"theme": "Default",
"mcpServers": {
"ackal-mcp-server": {
"httpUrl": "http://localhost:7777/mcp",
"timeout": 5000
},
"prometheus-mcp-server": {
"httpUrl": "https://prometheus.{tailnet}/mcp",
"timeout": 5000
}
},
"selectedAuthType": "gemini-api-key"
}
This solution also permits the addition of headers, e.g. for including an Authorization header.
Original
Okay, so not “Gemini Code Assist” but sufficiently similar that I think it warrants the “3/3” appellation.
Configuring Envoy to proxy Google Cloud Run v2
I’m building an emulator for Cloud Run. As I considered the solution, I assumed (more later) that I could implement Google’s gRPC interface for Cloud Run and use Envoy to proxy HTTP/REST requests to the gRPC service using Envoy’s gRPC-JSON transcoder.
Google calls this process Transcoding HTTP/JSON to gRPC, which I think is a better description.
Google’s Cloud Run v2 (v1 is no longer published to the googleapis repo) service.proto includes the following Services definition for CreateService:
Migrating Prometheus Exporters to Kubernetes
I have built Prometheus Exporters for multiple cloud platforms to track resources deployed across clouds:
- Prometheus Exporter for Azure
- Prometheus Exporter for crt.sh
- Prometheus Exporter for Fly.io
- Prometheus Exporter for GoatCounter
- Prometheus Exporter for Google Analytics
- Prometheus Exporter for Google Cloud
- Prometheus Exporter for Koyeb
- Prometheus Exporter for Linode
- Prometheus Exporter for PorkBun
- Prometheus Exporter for updown.io
- Prometheus Exporter for Vultr
Additionally, I’ve written two status service exporters:
These exporters are all derived from an exemplar DigitalOcean Exporter written by metalmatze for which I maintain a fork.