320 lines
10 KiB
Jsonnet
320 lines
10 KiB
Jsonnet
|
|
local kube = import "k8s/configs/base.libsonnet";
|
||
|
|
local images = import "k8s/configs/images.libsonnet";
|
||
|
|
local templates = import "k8s/configs/templates/templates.libsonnet";
|
||
|
|
|
||
|
|
// Default container environment for linuxserver.io-style images.
// PUID/PGID map the in-container user/group; 65534 is "nobody"/"nogroup".
local Env = {
  puid: 65534,
  pgid: 65534,
  tz: "America/Los_Angeles",
  // Type: [kube.NameVal] — extra env vars; sequenceEnv() places these
  // BEFORE the standard PUID/PGID/TZ entries.
  others: [],
};
|
||
|
|
|
||
|
|
// Declares one k8s Service attachment for an app.
// `suffix`: appended to the app name ("<app>-<suffix>") and used as the
// output key by unpackServices.
// `spec`: merged into SERVICE.spec; its selector is always overwritten.
local Service = kube.simpleFieldStruct(["suffix", "spec"]);
|
||
|
|
|
||
|
|
// Declares one k8s ServiceAccount attachment for an app.
// `suffix`: "" yields the bare app name, otherwise "<app>-<suffix>"
// (see unpackServiceAccounts).
// `spec`: type SERVICE_ACCOUNT.spec.
local ServiceAccount = kube.simpleFieldStruct(["suffix", "spec"]);
|
||
|
|
|
||
|
|
// PersistentVolumeClaim attachment: `name` is the volume name, `bindName`
// the claim to bind (via kube.DeployUtil.VolumeClaimRef), `mountPath` the
// container mount point.
local Pvc = kube.simpleFieldStruct(["name", "bindName", "mountPath"]) {
  // Optional subPath within the volume (forwarded by sequenceVolumeMounts).
  mountSubPath: null,
};
|
||
|
|
|
||
|
|
// ConfigMap attachment: `name` is the volume name, `bindName` the ConfigMap
// to reference, `mountPath` the container mount point.
local ConfigMap = kube.simpleFieldStruct(["name", "bindName", "mountPath"]) {
  // Optional subPath within the volume (forwarded by sequenceVolumeMounts).
  mountSubPath: null,
  // Passed straight through as the configMap volume's `items` field
  // (key-to-path projections); null means mount all keys.
  items: null,
};
|
||
|
|
|
||
|
|
// Secret attachment: `name` is the volume name, `secretName` the Secret to
// reference.
local Secret = kube.simpleFieldStruct(["name", "secretName"]) {
  // When null, the secret volume is still created (sequenceVolumes) but no
  // volumeMount is emitted (sequenceVolumeMounts filters on mountPath).
  mountPath: null,
};
|
||
|
|
|
||
|
|
// hostPath attachment: binds node path `hostPath` to container `mountPath`.
local HostPath = kube.simpleFieldStruct(["name", "hostPath", "mountPath"]) {};
|
||
|
|
|
||
|
|
// emptyDir attachment: `medium` is forwarded as emptyDir.medium (e.g. "Memory").
local EmptyDir = kube.simpleFieldStruct(["name", "medium", "mountPath"]) {};
|
||
|
|
|
||
|
|
// Parameters for one linuxserver.io-style app. Required (via simpleFieldStruct):
// name, baseAppName, imageName, schedule. `imageName` must be a key of
// images.Prod; `schedule` is only consumed by Cron().
local AppParams = kube.simpleFieldStruct(["name", "baseAppName", "imageName", "schedule"]) {
  namespace: "",
  replicaCount: 1,
  env: Env,
  args: [],
  // Optional sidecar bundle { SidecarSpec, params }; when non-null its
  // Container/Volume/Service/ConfigMap/Ingress hooks are invoked.
  gatekeeperSidecar: null,
  // Pull-secret NAMES; wrapped into { name: ... } objects at use sites.
  imagePullSecrets: [],
  nodeSelector: null,
  command: null,
  // type: [kube.DeployUtil.ContainerPort]
  ports: [],
  // type: [Pvc]
  pvcs: [],
  // type: [ConfigMap]
  configMaps: [],
  // type: [HostPath]
  hostPaths: [],
  // type: [EmptyDir]
  emptyDirs: [],
  // type: [Secret]
  secrets: [],
  // type: [Service]
  services: [],
  // type: [ServiceAccount]
  serviceAccounts: [],
  // type: DEPLOYMENT.spec.template.spec.containers.livenessProbe
  livenessProbe: null,
  // type: DEPLOYMENT.spec.template.spec.containers.readinessProbe
  readinessProbe: null,
  // Source file path, consumed by linuxserverAnnotations via
  // templates.annotations.
  filePath: null,
  // Recorded as the infra.linuxserver.templatePath annotation.
  templatePath: null,
  isPrivileged: false,
  // type: securityContext.capabilities
  scCapabilities: {},
  // Extra labels, merged last by linuxserverLabels (they win on conflict).
  labels: {
  },
  resources: {
    requests: {
      cpu: "50m",
      memory: "300Mi",
    },
    limits: {
      cpu: "100m",
      memory: "600Mi",
    },
  },
};
|
||
|
|
|
||
|
|
// Pod selector labels, shared by Deployment.spec.selector.matchLabels,
// the pod template labels, and each Service's spec.selector.
// NOTE(review): `phase` is hard-coded to "prod" — confirm this is intended
// for every deployment of this template.
local selector(params) = {
  name: params.name,
  phase: "prod",
};
|
||
|
|
|
||
|
|
// Common object annotations: provenance annotations from templates.annotations
// (built from params.filePath and this file) plus the workspace-relative
// template path.
local linuxserverAnnotations(params) = templates.annotations(params.filePath, std.thisFile)
{
  "infra.linuxserver.templatePath": kube.asWorkspacePath(params.templatePath),
};
|
||
|
|
|
||
|
|
// Common object labels. params.labels is merged LAST, so caller-supplied
// labels override the appName label on key conflict — do not reorder.
local linuxserverLabels(params) = {
  "infra.linuxserver.appName": params.name,
} + params.labels;
|
||
|
|
|
||
|
|
// Emits one k8s Service per entry in params.services, keyed by the service's
// suffix. Each Service is named "<app>-<suffix>".
local unpackServices(params) = {
  local nskube = kube.UsingNamespace(params.namespace),
  [service.suffix]: nskube.Service(params.name + "-" + service.suffix) {
    metadata+: {
      annotations+: linuxserverAnnotations(params),
      labels+: linuxserverLabels(params),
    },
    // Merge the caller's spec, but always force the selector so the Service
    // targets this app's pods.
    spec+: service.spec {
      selector: selector(params),
    }
  } for service in params.services
};
|
||
|
|
|
||
|
|
// Emits one ServiceAccount per entry in params.serviceAccounts, keyed by
// suffix. An empty suffix produces a ServiceAccount named exactly after the
// app; otherwise the name is "<app>-<suffix>".
local unpackServiceAccounts(params) = {
  local nskube = kube.UsingNamespace(params.namespace),
  [serviceAccount.suffix]: nskube.ServiceAccount(if serviceAccount.suffix == "" then params.name else params.name + "-" + serviceAccount.suffix) {
    metadata+: {
      annotations+: linuxserverAnnotations(params),
      labels+: linuxserverLabels(params),
    },
    // TODO: Does this need to be parameterizeable?
    automountServiceAccountToken: true
  } for serviceAccount in params.serviceAccounts
};
|
||
|
|
|
||
|
|
// Flattens an Env record into the container `env` list. Entries from
// `env.others` come first, followed by the standard linuxserver.io
// PUID/PGID/TZ variables (order preserved from the original).
local sequenceEnv(env) =
  local standard = [
    kube.NameVal("PUID", std.toString(env.puid)),
    kube.NameVal("PGID", std.toString(env.pgid)),
    kube.NameVal("TZ", env.tz),
  ];
  env.others + standard;
|
||
|
|
|
||
|
|
// Builds the container volumeMounts list from all attachment kinds, in the
// fixed order pvcs, secrets, configMaps, hostPaths, emptyDirs (must match
// the volume names produced by sequenceVolumes). Secrets with a null
// mountPath get a volume but no mount.
local sequenceVolumeMounts(pvcs, secrets, configMaps, hostPaths, emptyDirs) = [kube.DeployUtil.VolumeMount(pvc.name, pvc.mountPath) {
  subPath: pvc.mountSubPath,
} for pvc in pvcs] + [kube.DeployUtil.VolumeMount(secret.name, secret.mountPath) {
} for secret in secrets if secret.mountPath != null] + [kube.DeployUtil.VolumeMount(configMap.name, configMap.mountPath) {
  subPath: configMap.mountSubPath,
} for configMap in configMaps] + [kube.DeployUtil.VolumeMount(hostPath.name, hostPath.mountPath) {
} for hostPath in hostPaths] + [kube.DeployUtil.VolumeMount(emptyDir.name, emptyDir.mountPath) {
} for emptyDir in emptyDirs ];
|
||
|
|
|
||
|
|
// Builds the pod `volumes` list from all attachment kinds. Volume names must
// line up with the mounts emitted by sequenceVolumeMounts.
local sequenceVolumes(pvcs, secrets, configMaps, hostPaths, emptyDirs) = [kube.DeployUtil.VolumeClaimRef(pvc.name, pvc.bindName) for pvc in pvcs] + [{
  name: secret.name,
  secret: {
    secretName: secret.secretName,
  },
} for secret in secrets] + [{
  name: configMap.name,
  configMap: {
    // bindName is the referenced ConfigMap's object name.
    name: configMap.bindName,
    items: configMap.items,
  },
} for configMap in configMaps] + [{
  name: hostPath.name,
  hostPath: {
    path: hostPath.hostPath,
  },
} for hostPath in hostPaths] + [{
  name: emptyDir.name,
  emptyDir: {
    medium: emptyDir.medium,
  },
} for emptyDir in emptyDirs];
|
||
|
|
|
||
|
|
// Containers contributed by the optional gatekeeper sidecar; empty list when
// no sidecar is configured.
local SidecarContainers(params) =
  if params.gatekeeperSidecar == null
  then []
  else [params.gatekeeperSidecar.SidecarSpec.Container(params, params.gatekeeperSidecar.params)];
|
||
|
|
|
||
|
|
// Volumes contributed by the optional gatekeeper sidecar; empty list when
// no sidecar is configured.
local SidecarVolumes(params) =
  if params.gatekeeperSidecar == null
  then []
  else [params.gatekeeperSidecar.SidecarSpec.Volume(params, params.gatekeeperSidecar.params)];
|
||
|
|
|
||
|
|
// Builds the full manifest set for a long-running app: declared
// ServiceAccounts and Services, optional gatekeeper sidecar objects, and a
// Deployment.
local App(params) = unpackServiceAccounts(params) + unpackServices(params) + {
  local nskube = kube.UsingNamespace(params.namespace),
  // Sidecar objects collapse to {} when no sidecar is configured.
  gatekeeperService: if params.gatekeeperSidecar == null then {} else params.gatekeeperSidecar.SidecarSpec.Service(params, params.gatekeeperSidecar.params),
  gatekeeperConfigMap: if params.gatekeeperSidecar == null then {} else params.gatekeeperSidecar.SidecarSpec.ConfigMap(params, params.gatekeeperSidecar.params),
  // NOTE(review): field name has a typo ("Ingresss"); renaming would break
  // anything referencing this output key, so it is documented rather than fixed.
  gatekeeperIngresss: if params.gatekeeperSidecar == null then {} else params.gatekeeperSidecar.SidecarSpec.Ingress(params, params.gatekeeperSidecar.params),
  deployment: nskube.Deployment(params.name) {
    metadata+: {
      annotations+: linuxserverAnnotations(params),
      labels+: linuxserverLabels(params),
    },
    spec+: {
      strategy: kube.DeployUtil.SimpleRollingUpdate(),
      replicas: params.replicaCount,
      selector: {
        matchLabels: selector(params),
      },
      template: {
        metadata: {
          labels+: linuxserverLabels(params) + selector(params),
          // NOTE(review): this is the legacy alpha seccomp annotation;
          // newer clusters use securityContext.seccompProfile — confirm
          // before upgrading the cluster.
          annotations+: linuxserverAnnotations(params) {
            "seccomp.security.alpha.kubernetes.io/pod": 'docker/default',
          },
        },
        spec+: {
          // NOTE(review): hard-coded host alias — presumably the LAN address
          // for the cluster's ingress host; verify it is still current.
          hostAliases: [
            {
              ip: "192.168.0.120",
              hostnames: [
                "k8s.dominion.lan"
              ],
            },
          ],
          nodeSelector: params.nodeSelector,
          // Wrap bare secret names into { name: ... } references.
          imagePullSecrets: [
            {
              name: imagePullSecret,
            },
          for imagePullSecret in params.imagePullSecrets],
          containers: [
            {
              name: params.baseAppName,
              image: images.Prod[params.imageName],
              securityContext: {
                privileged: params.isPrivileged,
                capabilities: params.scCapabilities,
              },
              env: sequenceEnv(params.env),
              command: params.command,
              // Debug override: uncomment to keep the container idle.
              //command: ["/bin/sh"],
              //args: ["-c", "sleep 3600"],
              args: params.args,
              ports: params.ports,
              livenessProbe: params.livenessProbe,
              readinessProbe: params.readinessProbe,
              resources: params.resources,
              volumeMounts: sequenceVolumeMounts(params.pvcs, params.secrets, params.configMaps, params.hostPaths, params.emptyDirs)
            },
          ] + SidecarContainers(params),
          volumes: sequenceVolumes(params.pvcs, params.secrets, params.configMaps, params.hostPaths, params.emptyDirs) + SidecarVolumes(params),
        }
      },
    },
  }
};
|
||
|
|
|
||
|
|
// Builds the manifest set for a scheduled job: declared Services, optional
// gatekeeper sidecar objects, and a CronJob driven by params.schedule.
// Unlike App(), no ServiceAccounts are unpacked and no hostAliases are set.
local Cron(params) = unpackServices(params) + {
  local nskube = kube.UsingNamespace(params.namespace),
  // Sidecar objects collapse to {} when no sidecar is configured.
  gatekeeperService: if params.gatekeeperSidecar == null then {} else params.gatekeeperSidecar.SidecarSpec.Service(params, params.gatekeeperSidecar.params),
  gatekeeperConfigMap: if params.gatekeeperSidecar == null then {} else params.gatekeeperSidecar.SidecarSpec.ConfigMap(params, params.gatekeeperSidecar.params),
  // NOTE(review): field name has a typo ("Ingresss"), kept for compatibility
  // with the matching field in App().
  gatekeeperIngresss: if params.gatekeeperSidecar == null then {} else params.gatekeeperSidecar.SidecarSpec.Ingress(params, params.gatekeeperSidecar.params),
  cron: nskube.CronJob(params.name) {
    metadata+: {
      annotations+: linuxserverAnnotations(params),
      labels+: linuxserverLabels(params),
    },
    spec+: {
      schedule: params.schedule,
      jobTemplate: {
        spec: {
          //strategy: kube.DeployUtil.SimpleRollingUpdate(),
          //replicas: params.replicaCount,
          template: {
            metadata: {
              labels+: linuxserverLabels(params) + selector(params),
              // NOTE(review): legacy alpha seccomp annotation — see App().
              annotations+: linuxserverAnnotations(params) {
                "seccomp.security.alpha.kubernetes.io/pod": 'docker/default',
              },
            },
            spec+: {
              restartPolicy: "OnFailure",
              nodeSelector: params.nodeSelector,
              // Wrap bare secret names into { name: ... } references.
              imagePullSecrets: [
                {
                  name: imagePullSecret,
                },
              for imagePullSecret in params.imagePullSecrets],
              containers: [
                {
                  name: params.baseAppName,
                  image: images.Prod[params.imageName],
                  securityContext: {
                    privileged: params.isPrivileged,
                    capabilities: params.scCapabilities,
                  },
                  env: sequenceEnv(params.env),
                  command: params.command,
                  // Debug override: uncomment to keep the container idle.
                  //command: ["/bin/sh"],
                  //args: ["-c", "sleep 3600"],
                  args: params.args,
                  // Unlike App(), ports are rebuilt here with protocol
                  // hard-coded to TCP.
                  ports: [
                    {
                      name: port.name,
                      containerPort: port.containerPort,
                      protocol: "TCP",
                    }
                    for port in params.ports
                  ],
                  livenessProbe: params.livenessProbe,
                  readinessProbe: params.readinessProbe,
                  resources: params.resources,
                  volumeMounts: sequenceVolumeMounts(params.pvcs, params.secrets, params.configMaps, params.hostPaths, params.emptyDirs)
                },
              ] + SidecarContainers(params),
              volumes: sequenceVolumes(params.pvcs, params.secrets, params.configMaps, params.hostPaths, params.emptyDirs) + SidecarVolumes(params),
            }
          },
        },
      },
    },
  },
};
|
||
|
|
|
||
|
|
|
||
|
|
// Public module interface: the param/attachment structs plus the App and
// Cron manifest builders.
{
  AppParams: AppParams,
  App(params): App(params),
  Cron(params): Cron(params),
  Pvc: Pvc,
  ConfigMap: ConfigMap,
  ServiceAccount: ServiceAccount,
  HostPath: HostPath,
  EmptyDir: EmptyDir,
  Secret: Secret,
  Service: Service,
  Env: Env,
}
|