commit a68a4b1f9a8ea05ddee16dd398bfbbefa44a94a2
Author: Andreas Gruhler <andreas.gruhler@adfinis-sygroup.ch>
Date: Tue, 13 Aug 2019 21:51:14 +0200
init
Diffstat:
11 files changed, 838 insertions(+), 0 deletions(-)
diff --git a/.gitignore b/.gitignore
@@ -0,0 +1,8 @@
+**/*_override.tf
+.env
+.terraform
+terraform.tfstate*
+inventory
+id_rsa
+ssh_config
+qemu-config.yml
diff --git a/Readme.md b/Readme.md
@@ -0,0 +1,79 @@
+# Proxmox VE (PVE) Terraform and Ansible Workflow
+
+## 1 Preparation: Unicast MAC Address and TF Files
+Generate a unicast MAC for each VM:
+```
+network {
+ ..
+ macaddr = "A2:15:1C:5C:D8:31"
+ ..
+}
+```
+
+For instance, use a bash script to do so:
+```
+#!/bin/sh
+macaddr() {
+ echo $RANDOM | md5sum | sed 's/^\(..\)\(..\)\(..\)\(..\)\(..\).*$/02:\1:\2:\3:\4:\5/'
+}
+```
+
+Also, make any changes to the `.tf` files for your infrastructure.
+
+## 2 Run Terraform
+
+Run Terraform:
+```
+terraform plan
+terraform apply
+```
+
+## 3 Terraform Outputs
+
+Save the following outputs:
+```
+terraform output inventory > inventory
+terraform output qemu_config > qemu-config.yml
+
+# inspect the name of the key file, see instructions below
+terraform output ssh_keyfile
+```
+
+Adjust variables in `group_vars/all.yml`:
+* `ssh.identity_file`: Output of `terraform output ssh_keyfile`
+* also, set `ssh.proxy_jump` and `user` if needed
+* make sure `pve_api` points to your compiled pve api binary (https://github.com/Telmate/proxmox-api-go)
+
+Make sure that `id_rsa` matches both:
+1. the name of `identity_file` in the file `ssh_config`
+2. and `terraform output ssh_keyfile`
+
+## 4 Ansible
+
+Run playbook:
+```
+ansible-playbook playbook.yml -i inventory
+```
+
+## 5 Troubleshooting, Tips & Tricks
+
+### 5.1 Recreate
+To recreate some of the infra:
+```
+terraform apply -target="proxmox_vm_qemu.mongodb2" -target="proxmox_vm_qemu.mongodb1"
+```
+
+### 5.2 Delete
+To delete some of the infra:
+```
+~/gocode/src/github.com/Telmate/proxmox-api-go/proxmox-api-go destroy 116
+terraform refresh
+```
+
+### 5.3 Retrieve private key without running Terraform
+If needed, retrieve the SSH key (again) without re-applying changes:
+```
+terraform output ssh_key > id_rsa
+```
+
+Terraform takes care of writing this private key file the first time you run `terraform apply`, however, you might want to retrieve the key again without re-running Terraform.
+\ No newline at end of file
diff --git a/defaults/all.yml b/defaults/all.yml
@@ -0,0 +1,8 @@
+---
+ pve_api: 'proxmox-api-go'
+
+ ssh:
+ user: root
+ identity_file: '~/ssh/id_rsa'
+ proxy_jump: proxyhost
+ include_config: '~/.ssh/config'
+\ No newline at end of file
diff --git a/defaults/qemu.yml b/defaults/qemu.yml
@@ -0,0 +1,2 @@
+---
+ansible_ssh_common_args: "-F ssh_config"
diff --git a/get-ips.yml b/get-ips.yml
@@ -0,0 +1,26 @@
+---
+
+- name: remove vm config from global qemu config
+ set_fact:
+ qemu_config: '{{ qemu_config | difference([item]) }}'
+
+- name: get qemu network interfaces from Proxmox api
+ command: '{{ pve_api }} getNetworkInterfaces {{ item.id }}'
+ register: network_interfaces
+ check_mode: no
+
+- name: extract public interface
+ set_fact: network_interfaces_list='{{ network_interfaces.stdout | from_json }}'
+
+- debug: var=network_interfaces_list
+
+- name: extract ip4 from public/second interface
+ set_fact: ip4_addr='{{ network_interfaces_list[1]["IPAddresses"][0] }}'
+
+- name: combine ip4 address with vm config item
+ set_fact:
+ item_with_ip: '{{ item | combine({"ip4": ip4_addr}) }}'
+
+- name: append item to global qemu config
+ set_fact:
+ qemu_config: '{{ qemu_config + [item_with_ip] }}'
+\ No newline at end of file
diff --git a/group_vars/all.yml b/group_vars/all.yml
@@ -0,0 +1,8 @@
+---
+ pve_api: '/home/andi/gocode/src/github.com/Telmate/proxmox-api-go/proxmox-api-go'
+
+ ssh:
+ user: root
+ identity_file: './id_rsa'
+ proxy_jump: proxmox
+ include_config: '~/.ssh/config'
+\ No newline at end of file
diff --git a/inventory.tpl b/inventory.tpl
@@ -0,0 +1,7 @@
+[local]
+localhost ansible_connection=local
+
+[qemu]
+%{ for host in hosts ~}
+${host.fqdn}
+%{ endfor ~}
+\ No newline at end of file
diff --git a/playbook.yml b/playbook.yml
@@ -0,0 +1,54 @@
+---
+
+- hosts: local
+ vars:
+ qemu_config: "{{ lookup('file', 'qemu-config.yml') | from_yaml }}"
+ tasks:
+
+ - name: get ips for inventory hostnames
+ include_tasks: 'get-ips.yml'
+ loop_control:
+ index_var: i
+ loop: '{{ qemu_config }}'
+
+ - name: copy ip4 address to qemu config file
+ copy:
+ content: '{{ qemu_config | to_nice_yaml }}'
+ dest: 'qemu-config.yml'
+
+ - name: create ssh config
+ template:
+ src: 'templates/ssh_config.j2'
+ dest: 'ssh_config'
+
+ - name: create group_vars folder
+ file:
+ path: 'group_vars'
+ state: directory
+
+ - name: add ssh_config to qemu vm group_vars
+ copy:
+ content: |
+ ---
+ ansible_ssh_common_args: "-F ssh_config"
+ dest: 'group_vars/qemu.yml'
+
+- hosts: qemu
+ tasks:
+ - name: set hostname
+ command: 'hostnamectl set-hostname {{ inventory_hostname }}'
+ register: hostname_update
+
+ - name: restart network to register hostname on dns server
+ service:
+ name: network
+ state: restarted
+ when: hostname_update.changed
+
+ - name: set ssh private key
+ copy:
+ src: '{{ ssh.identity_file }}'
+ dest: '/root/.ssh/id_rsa'
+ owner: '{{ ssh.user }}'
+ group: '{{ ssh.user }}'
+ mode: '0600'
+\ No newline at end of file
diff --git a/qemu-config.yml.tpl b/qemu-config.yml.tpl
@@ -0,0 +1,12 @@
+---
+
+# qemu_hosts:
+# %{ for host in hosts ~}
+# - fqdn: ${host.fqdn}
+# id: ${split("/",host.id)[2]}
+# %{ endfor ~}
+
+%{ for host in hosts ~}
+- fqdn: ${host.fqdn}
+ id: ${split("/",host.id)[2]}
+%{ endfor ~}
+\ No newline at end of file
diff --git a/templates/ssh_config.j2 b/templates/ssh_config.j2
@@ -0,0 +1,17 @@
+{% if ssh.include_config | default(false, true) %}
+Include {{ ssh.include_config }}
+{% endif %}
+
+{% for host in qemu_config %}
+Host {{ host.fqdn }}
+ HostName {{ host.ip4 }}
+{% if ssh.proxy_jump | default(false, true) %}
+ ProxyJump {{ ssh.proxy_jump }}
+{% endif %}
+{% if ssh.identity_file | default(false, true) %}
+ IdentityFile {{ ssh.identity_file }}
+{% endif %}
+{% if ssh.user | default(false, true) %}
+ User {{ ssh.user }}
+{% endif %}
+{% endfor %}
+\ No newline at end of file
diff --git a/vms.tf b/vms.tf
@@ -0,0 +1,608 @@
+# variable "mac_id" {
+# default = "0"
+# }
+
+# resource "random_id" "macaddr" {
+# keepers = {
+# # Generate a new id each time we switch to a new MAC id
+# mac_id = "${var.mac_id}"
+# }
+# byte_length = 1
+# }
+
+# network {
+# # Read the MAC id "through" the mac_id resource to ensure that
+# # both will change together.
+# # macaddr = "${random_id.macaddr.hex}"
+# }
+
+
+
+# Can we use "count" for multiple instances and seed the Mac addr
+# from a pool of mac addresses ?
+
+
+resource "tls_private_key" "id_rsa" {
+ algorithm = "RSA"
+}
+
+resource "local_file" "ssh_key" {
+ sensitive_content = "${tls_private_key.id_rsa.private_key_pem}"
+ filename = "${path.module}/id_rsa"
+ provisioner "local-exec" {
+ command = "chmod 600 ${path.module}/id_rsa"
+ }
+}
+
+resource "proxmox_vm_qemu" "bastion0" {
+ name = "bastion0"
+ desc = "Dacadoo jumphost"
+
+ cores = 2
+ sockets = 2
+ memory = 2048
+
+ bootdisk = "scsi0"
+ scsihw = "virtio-scsi-pci"
+ disk {
+ id = 0
+ size = 40
+ type = "scsi"
+ storage = "local-lvm"
+ storage_type = "lvm"
+ }
+
+ network {
+ id = 0
+ model = "virtio"
+ bridge = "vmbr0"
+ macaddr = "02:6f:c0:8f:1e:4a"
+ }
+
+ target_node = "wolke4"
+ pool = "dacadoo"
+ clone = "CentOS7-GenericCloud"
+ agent = 1
+
+ os_type = "cloud-init"
+ ipconfig0 = "ip=dhcp"
+ ciuser = "root"
+ cipassword = "root"
+ sshkeys = "${tls_private_key.id_rsa.public_key_openssh}"
+
+ # provisioner "file" {
+ # content = "${tls_private_key.id_rsa.private_key_pem}"
+ # destination = "/home/${self.ciuser}/.ssh/id_rsa"
+ # }
+
+ # provisioner "remote-exec" {
+ # inline = [
+ # "chmod 600 /home/${self.ciuser}/.ssh/id_rsa"
+ # ]
+ # }
+}
+
+resource "proxmox_vm_qemu" "elastic0" {
+ name = "elastic0"
+ desc = "Elasticsearch with Logstash and Kibana"
+
+ cores = 2
+ sockets = 2
+ memory = 4096
+
+ bootdisk = "scsi0"
+ scsihw = "virtio-scsi-pci"
+ disk {
+ id = 0
+ size = 100
+ type = "scsi"
+ storage = "local-lvm"
+ storage_type = "lvm"
+ }
+
+ network {
+ id = 0
+ model = "virtio"
+ bridge = "vmbr0"
+ macaddr = "02:0e:1e:34:c7:23"
+ }
+
+ target_node = "wolke4"
+ pool = "dacadoo"
+ clone = "CentOS7-GenericCloud"
+ agent = 1
+
+ os_type = "cloud-init"
+ ipconfig0 = "ip=dhcp"
+ ciuser = "root"
+ cipassword = "root"
+ sshkeys = "${tls_private_key.id_rsa.public_key_openssh}"
+}
+
+resource "proxmox_vm_qemu" "kubernetes0" {
+ name = "kubernetes0"
+ desc = "Kubernets master" # typo: should read "Kubernetes master"
+
+ cores = 2
+ sockets = 2
+ memory = 2048
+
+ bootdisk = "scsi0"
+ scsihw = "virtio-scsi-pci"
+ disk {
+ id = 0
+ size = 40
+ type = "scsi"
+ storage = "local-lvm"
+ storage_type = "lvm"
+ }
+
+ network {
+ id = 0
+ model = "virtio"
+ bridge = "vmbr0"
+ macaddr = "02:0c:21:fc:c7:9b"
+ }
+
+ target_node = "wolke4"
+ pool = "dacadoo"
+ clone = "CentOS7-GenericCloud"
+ agent = 1
+
+ os_type = "cloud-init"
+ ipconfig0 = "ip=dhcp"
+ ciuser = "root"
+ cipassword = "root"
+ sshkeys = "${tls_private_key.id_rsa.public_key_openssh}"
+}
+
+resource "proxmox_vm_qemu" "kubernetes1" {
+ name = "kubernetes1"
+ desc = "Kubernets worker" # typo: should read "Kubernetes worker"
+
+ cores = 2
+ sockets = 2
+ memory = 2048
+
+ bootdisk = "scsi0"
+ scsihw = "virtio-scsi-pci"
+ disk {
+ id = 0
+ size = 40
+ type = "scsi"
+ storage = "local-lvm"
+ storage_type = "lvm"
+ }
+
+ network {
+ id = 0
+ model = "virtio"
+ bridge = "vmbr0"
+ macaddr = "02:4f:82:ed:7a:27"
+ }
+
+ target_node = "wolke4"
+ pool = "dacadoo"
+ clone = "CentOS7-GenericCloud"
+ agent = 1
+
+ os_type = "cloud-init"
+ ipconfig0 = "ip=dhcp"
+ ciuser = "root"
+ cipassword = "root"
+ sshkeys = "${tls_private_key.id_rsa.public_key_openssh}"
+}
+
+resource "proxmox_vm_qemu" "mongodb0" {
+ name = "mongodb0"
+ desc = "MongoDB Node 0"
+
+ cores = 2
+ sockets = 2
+ memory = 2048
+
+ bootdisk = "scsi0"
+ scsihw = "virtio-scsi-pci"
+ disk {
+ id = 0
+ size = 40
+ type = "scsi"
+ storage = "local-lvm"
+ storage_type = "lvm"
+ }
+
+ network {
+ id = 0
+ model = "virtio"
+ bridge = "vmbr0"
+ macaddr = "02:d1:b5:ed:45:4f"
+ }
+
+ target_node = "wolke4"
+ pool = "dacadoo"
+ clone = "CentOS7-GenericCloud"
+ agent = 1
+
+ os_type = "cloud-init"
+ ipconfig0 = "ip=dhcp"
+ ciuser = "root"
+ cipassword = "root"
+ sshkeys = "${tls_private_key.id_rsa.public_key_openssh}"
+}
+
+resource "proxmox_vm_qemu" "mongodb1" {
+ name = "mongodb1"
+ desc = "MongoDB Node 1"
+
+ cores = 2
+ sockets = 2
+ memory = 2048
+
+ bootdisk = "scsi0"
+ scsihw = "virtio-scsi-pci"
+ disk {
+ id = 0
+ size = 40
+ type = "scsi"
+ storage = "local-lvm"
+ storage_type = "lvm"
+ }
+
+ network {
+ id = 0
+ model = "virtio"
+ bridge = "vmbr0"
+ macaddr = "02:f5:99:e8:9c:5c"
+ }
+
+ target_node = "wolke4"
+ pool = "dacadoo"
+ clone = "CentOS7-GenericCloud"
+ agent = 1
+
+ os_type = "cloud-init"
+ ipconfig0 = "ip=dhcp"
+ ciuser = "root"
+ cipassword = "root"
+ sshkeys = "${tls_private_key.id_rsa.public_key_openssh}"
+}
+
+resource "proxmox_vm_qemu" "mongodb2" {
+ name = "mongodb2"
+ desc = "MongoDB Node 2"
+
+ cores = 2
+ sockets = 2
+ memory = 2048
+
+ bootdisk = "scsi0"
+ scsihw = "virtio-scsi-pci"
+ disk {
+ id = 0
+ size = 40
+ type = "scsi"
+ storage = "local-lvm"
+ storage_type = "lvm"
+ }
+
+ network {
+ id = 0
+ model = "virtio"
+ bridge = "vmbr0"
+ macaddr = "02:94:38:0f:05:3d"
+ }
+
+ target_node = "wolke4"
+ pool = "dacadoo"
+ clone = "CentOS7-GenericCloud"
+ agent = 1
+
+ os_type = "cloud-init"
+ ipconfig0 = "ip=dhcp"
+ ciuser = "root"
+ cipassword = "root"
+ sshkeys = "${tls_private_key.id_rsa.public_key_openssh}"
+}
+
+resource "proxmox_vm_qemu" "consul0" {
+ name = "consul0"
+ desc = "Consul Node 0"
+
+ cores = 2
+ sockets = 2
+ memory = 2048
+
+ bootdisk = "scsi0"
+ scsihw = "virtio-scsi-pci"
+ disk {
+ id = 0
+ size = 40
+ type = "scsi"
+ storage = "local-lvm"
+ storage_type = "lvm"
+ }
+
+ network {
+ id = 0
+ model = "virtio"
+ bridge = "vmbr0"
+ macaddr = "02:65:50:da:ae:af"
+ }
+
+ target_node = "wolke4"
+ pool = "dacadoo"
+ clone = "CentOS7-GenericCloud"
+ agent = 1
+
+ os_type = "cloud-init"
+ ipconfig0 = "ip=dhcp"
+ ciuser = "root"
+ cipassword = "root"
+ sshkeys = "${tls_private_key.id_rsa.public_key_openssh}"
+}
+
+resource "proxmox_vm_qemu" "consul1" {
+ name = "consul1"
+ desc = "Consul Node 1"
+
+ cores = 2
+ sockets = 2
+ memory = 2048
+
+ bootdisk = "scsi0"
+ scsihw = "virtio-scsi-pci"
+ disk {
+ id = 0
+ size = 40
+ type = "scsi"
+ storage = "local-lvm"
+ storage_type = "lvm"
+ }
+
+ network {
+ id = 0
+ model = "virtio"
+ bridge = "vmbr0"
+ macaddr = "02:7a:5b:7a:25:f3"
+ }
+
+ target_node = "wolke4"
+ pool = "dacadoo"
+ clone = "CentOS7-GenericCloud"
+ agent = 1
+
+ os_type = "cloud-init"
+ ipconfig0 = "ip=dhcp"
+ ciuser = "root"
+ cipassword = "root"
+ sshkeys = "${tls_private_key.id_rsa.public_key_openssh}"
+}
+
+resource "proxmox_vm_qemu" "consul2" {
+ name = "consul2"
+ desc = "Consul Node 2"
+
+ cores = 2
+ sockets = 2
+ memory = 2048
+
+ bootdisk = "scsi0"
+ scsihw = "virtio-scsi-pci"
+ disk {
+ id = 0
+ size = 40
+ type = "scsi"
+ storage = "local-lvm"
+ storage_type = "lvm"
+ }
+
+ network {
+ id = 0
+ model = "virtio"
+ bridge = "vmbr0"
+ macaddr = "02:20:34:63:b8:1e"
+ }
+
+ target_node = "wolke4"
+ pool = "dacadoo"
+ clone = "CentOS7-GenericCloud"
+ agent = 1
+
+ os_type = "cloud-init"
+ ipconfig0 = "ip=dhcp"
+ ciuser = "root"
+ cipassword = "root"
+ sshkeys = "${tls_private_key.id_rsa.public_key_openssh}"
+}
+
+resource "proxmox_vm_qemu" "vault0" {
+ name = "vault0"
+ desc = "Vault Node 0"
+
+ cores = 2
+ sockets = 2
+ memory = 2048
+
+ bootdisk = "scsi0"
+ scsihw = "virtio-scsi-pci"
+ disk {
+ id = 0
+ size = 40
+ type = "scsi"
+ storage = "local-lvm"
+ storage_type = "lvm"
+ }
+
+ network {
+ id = 0
+ model = "virtio"
+ bridge = "vmbr0"
+ macaddr = "02:05:4e:c7:b6:9d"
+ }
+
+ target_node = "wolke4"
+ pool = "dacadoo"
+ clone = "CentOS7-GenericCloud"
+ agent = 1
+
+ os_type = "cloud-init"
+ ipconfig0 = "ip=dhcp"
+ ciuser = "root"
+ cipassword = "root"
+ sshkeys = "${tls_private_key.id_rsa.public_key_openssh}"
+}
+
+resource "proxmox_vm_qemu" "vault1" {
+ name = "vault1"
+ desc = "Vault Node 1"
+
+ cores = 2
+ sockets = 2
+ memory = 2048
+
+ bootdisk = "scsi0"
+ scsihw = "virtio-scsi-pci"
+ disk {
+ id = 0
+ size = 40
+ type = "scsi"
+ storage = "local-lvm"
+ storage_type = "lvm"
+ }
+
+ network {
+ id = 0
+ model = "virtio"
+ bridge = "vmbr0"
+ macaddr = "02:79:99:60:ba:ab"
+ }
+
+ target_node = "wolke4"
+ pool = "dacadoo"
+ clone = "CentOS7-GenericCloud"
+ agent = 1
+
+ os_type = "cloud-init"
+ ipconfig0 = "ip=dhcp"
+ ciuser = "root"
+ cipassword = "root"
+ sshkeys = "${tls_private_key.id_rsa.public_key_openssh}"
+}
+
+resource "proxmox_vm_qemu" "vault2" {
+ name = "vault2"
+ desc = "Vault Node 2"
+
+ cores = 2
+ sockets = 2
+ memory = 2048
+
+ bootdisk = "scsi0"
+ scsihw = "virtio-scsi-pci"
+ disk {
+ id = 0
+ size = 40
+ type = "scsi"
+ storage = "local-lvm"
+ storage_type = "lvm"
+ }
+
+ network {
+ id = 0
+ model = "virtio"
+ bridge = "vmbr0"
+ macaddr = "02:c2:ea:eb:f0:b9"
+ }
+
+ target_node = "wolke4"
+ pool = "dacadoo"
+ clone = "CentOS7-GenericCloud"
+ agent = 1
+
+ os_type = "cloud-init"
+ ipconfig0 = "ip=dhcp"
+ ciuser = "root"
+ cipassword = "root"
+ sshkeys = "${tls_private_key.id_rsa.public_key_openssh}"
+}
+
+# locals {
+# ids = "[]"
+# }
+
+# data "templatefile"${file("${path.module}/inventory.tpl")}"
+# vars = {
+# ids =
+# }
+# }inventory_hostname
+
+
+
+# resource "null_resource" "update_inventory" {
+
+# triggers = {
+# template = "${data.template_file.inventory.rendered}"
+# }
+
+# provisioner "local-exec" {
+# command = "echo '${templatefile("${path.module}/inventory.tpl", { ids = [proxmox_vm_qemu.bastion0.id, proxmox_vm_qemu.elastic0.id] })}' > inventory"
+# }
+# }
+
+locals {
+ qemu_hosts = [
+ {
+ id: proxmox_vm_qemu.bastion0.id,
+ fqdn: proxmox_vm_qemu.bastion0.name },
+ {
+ id: proxmox_vm_qemu.elastic0.id,
+ fqdn: proxmox_vm_qemu.elastic0.name },
+ {
+ id: proxmox_vm_qemu.kubernetes0.id,
+ fqdn: proxmox_vm_qemu.kubernetes0.name },
+ {
+ id: proxmox_vm_qemu.kubernetes1.id,
+ fqdn: proxmox_vm_qemu.kubernetes1.name },
+ {
+ id: proxmox_vm_qemu.mongodb0.id,
+ fqdn: proxmox_vm_qemu.mongodb0.name },
+ {
+ id: proxmox_vm_qemu.mongodb1.id,
+ fqdn: proxmox_vm_qemu.mongodb1.name },
+ {
+ id: proxmox_vm_qemu.mongodb2.id,
+ fqdn: proxmox_vm_qemu.mongodb2.name },
+ {
+ id: proxmox_vm_qemu.consul0.id,
+ fqdn: proxmox_vm_qemu.consul0.name },
+ {
+ id: proxmox_vm_qemu.consul1.id,
+ fqdn: proxmox_vm_qemu.consul1.name },
+ {
+ id: proxmox_vm_qemu.consul2.id,
+ fqdn: proxmox_vm_qemu.consul2.name },
+ {
+ id: proxmox_vm_qemu.vault0.id,
+ fqdn: proxmox_vm_qemu.vault0.name },
+ {
+ id: proxmox_vm_qemu.vault1.id,
+ fqdn: proxmox_vm_qemu.vault1.name },
+ ]
+}
+
+output "inventory" {
+ value = "${templatefile("${path.module}/inventory.tpl", { hosts = local.qemu_hosts })}"
+}
+
+output "qemu_config" {
+ value = "${templatefile("${path.module}/qemu-config.yml.tpl", { hosts = local.qemu_hosts })}"
+}
+
+output "ssh_key" {
+ value = "${tls_private_key.id_rsa.private_key_pem}"
+ sensitive = true
+}
+
+output "ssh_keyfile" {
+ value = "${local_file.ssh_key.filename}"
+}
+\ No newline at end of file