Created February 10, 2016
Create a VMware VCSA appliance under KVM with libvirt and ansible
---
- name: Generate a KVM enabled VMWare VCSA VM under libvirt
  hosts: 127.0.0.1
  connection: local
  vars:
  - reqpkgs_apt:
    - bsdtar
    - libguestfs-tools
    - qemu-utils
    - virtinst
  - reqpkgs_yum:
    - bsdtar
    - libguestfs-tools
    - qemu-img
    - virt-install
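  # The ISO filename below and the download URL in the get_url task further down
  # are environment-specific; point them at wherever your copy of the VCSA ISO is
  # hosted.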
  - vcsaiso: 'VMware-VCSA-all-6.0.0-3040890.iso'
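  # These key/value pairs are written out as /tmp/settings.json and copied into
  # the appliance at /var/install/settings.json by the guestfish task below,
  # where the VCSA firstboot process is expected to pick them up. Change the
  # passwords, addresses and domain/site names to suit your environment.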
  - vcsasettings: '{
      "deployment.node.type": "embedded",
      "appliance.ssh.enabled": "True",
      "appliance.root.passwd": "VMWare123!",
      "vmdir.domain-name": "vsphere.local",
      "vmdir.site-name": "vagrant",
      "vmdir.password": "VMWare123!",
      "appliance.net.addr.family": "ipv4",
      "appliance.net.addr": "10.99.99.254",
      "appliance.net.pnid": "10.99.99.254",
      "appliance.net.prefix": "24",
      "appliance.net.mode": "static",
      "appliance.net.dns.servers": "10.99.99.1",
      "appliance.net.gateway": "10.99.99.1"
    }'
  - libvirt:
      name: 'vcsa'
      pool: 'vagrant'
      network: 'vagrant-libvirt'
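  # All eleven volumes are created empty; disk1 and disk2 are subsequently
  # overwritten with the converted OVA disks via vol-upload, the rest stay blank.
  # Sizes are in MiB (they are passed to vol-create-as with the M suffix) and are
  # intended to mirror the disk layout of the stock VCSA 6.0 appliance.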
  - disks:
      disk1: 12288
      disk2: 1420
      disk3: 25600
      disk4: 25600
      disk5: 10240
      disk6: 10240
      disk7: 5120
      disk8: 10240
      disk9: 1024
      disk10: 10240
      disk11: 5120
  tasks:
  - name: Ensure required packages are installed (Debian)
    apt: pkg={{item}} state=present
    become: true
    become_user: root
    with_items: reqpkgs_apt
    when: ansible_pkg_mgr == "apt"
  - name: Ensure required packages are installed (CentOS)
    yum: name={{item}} state=present
    become: true
    become_user: root
    with_items: reqpkgs_yum
    when: ansible_pkg_mgr == "yum"
  - name: Download VMWare VCSA iso to /tmp/
    get_url: url=https://localhost/vagrant/{{ vcsaiso }}
             dest=/tmp/{{ vcsaiso }}
             force=no
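  # bsdtar can read the ISO directly, so the vcsa/vmware-vcsa member (the OVA,
  # which is itself a tar archive) is streamed to stdout and piped into tar,
  # which extracts the contained VMDK files straight into /tmp without mounting
  # anything.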
  - name: Extract vcsa/vmware-vcsa to stdout and untar *vmdk directly into /tmp
    shell: bsdtar -xvOf /tmp/{{ vcsaiso }} vcsa/vmware-vcsa | tar -C /tmp/ -xvf -
  - name: Convert VMware-vCenter-Server-Appliance-6.0.0.10000-3018521_OVF10-disk1.vmdk to {{libvirt.name}}-disk1.qcow2
    shell: qemu-img convert -O qcow2 /tmp/VMware-vCenter-Server-Appliance-6.0.0.10000-3018521_OVF10-disk1.vmdk /tmp/{{libvirt.name}}-disk1.qcow2
  - name: Convert VMware-vCenter-Server-Appliance-6.0.0.10000-3018521_OVF10-disk2.vmdk to {{libvirt.name}}-disk2.qcow2
    shell: qemu-img convert -O qcow2 /tmp/VMware-vCenter-Server-Appliance-6.0.0.10000-3018521_OVF10-disk2.vmdk /tmp/{{libvirt.name}}-disk2.qcow2
  - name: Generate a vcsa settings.json file
    copy: content='{{vcsasettings}}' dest=/tmp/settings.json
    tags: json
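  # guestfish mounts the root and /boot filesystems of the converted system
  # disk, swaps the VMware-specific driver names in /etc/sysconfig/kernel for
  # ones KVM provides (ahci, e1000, libata), rebuilds the initrd with mkinitrd,
  # and drops the generated settings.json into /var/install inside the guest.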
  - name: Update the initrd inside the VCSA OS disk (-A is overkill but will work) and inject settings.json
    shell: 'guestfish add /tmp/{{libvirt.name}}-disk1.qcow2 : run : mount /dev/sda3 / : mount /dev/sda1 /boot : command "sed -i s/mptspi/ahci/ /etc/sysconfig/kernel" : command "sed -i s/vmxnet/e1000/ /etc/sysconfig/kernel" : command "sed -i s/ata_piix/libata/ /etc/sysconfig/kernel" : command "mkinitrd" : mkdir /var/install : copy-in /tmp/settings.json /var/install'
    tags: guestfish
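  # The next three tasks tear down any VM and volumes left over from a previous
  # run so the playbook can be re-run from scratch; ignore_errors covers the
  # first run, when there is nothing to remove yet.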
  - name: Destroy the existing VCSA VM
    shell: virsh -c qemu:///system destroy {{libvirt.name}}
    ignore_errors: true
    tags: libvirt
  - name: Undefine the existing VCSA VM
    shell: virsh -c qemu:///system undefine {{libvirt.name}}
    ignore_errors: true
    tags: libvirt
  - name: Delete any existing volumes for the VCSA VM in the storage pool
    shell: virsh -c qemu:///system vol-delete --pool {{libvirt.pool}} {{libvirt.name}}-disk{{item}}
    with_sequence: count=11
    ignore_errors: true
    tags: libvirt
  - name: Create empty disk images
    shell: virsh -c qemu:///system vol-create-as {{libvirt.pool}} {{libvirt.name}}-{{item.key}} {{item.value}}M --format qcow2
    with_dict: disks
    tags: libvirt
  - name: Import disk1 into the libvirt pool
    shell: virsh -c qemu:///system vol-upload --pool {{libvirt.pool}} {{libvirt.name}}-disk1 /tmp/{{libvirt.name}}-disk1.qcow2
    tags: libvirt
  - name: Import disk2 into the libvirt pool
    shell: virsh -c qemu:///system vol-upload --pool {{libvirt.pool}} {{libvirt.name}}-disk2 /tmp/{{libvirt.name}}-disk2.qcow2
    tags: libvirt
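  # The VM is defined with all eleven volumes on a SATA bus and an e1000 NIC,
  # matching the driver substitutions made inside the guest above; --os-variant
  # sles11 reflects the SUSE-based guest OS of VCSA 6.0.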
  - name: Create the VCSA VM
    shell: virt-install --connect qemu:///system -n {{libvirt.name}} --ram 8192 --vcpus 2 --cpu host-passthrough --import --disk vol={{libvirt.pool}}/{{libvirt.name}}-disk1,bus=sata --disk vol={{libvirt.pool}}/{{libvirt.name}}-disk2,bus=sata --disk vol={{libvirt.pool}}/{{libvirt.name}}-disk3,bus=sata --disk vol={{libvirt.pool}}/{{libvirt.name}}-disk4,bus=sata --disk vol={{libvirt.pool}}/{{libvirt.name}}-disk5,bus=sata --disk vol={{libvirt.pool}}/{{libvirt.name}}-disk6,bus=sata --disk vol={{libvirt.pool}}/{{libvirt.name}}-disk7,bus=sata --disk vol={{libvirt.pool}}/{{libvirt.name}}-disk8,bus=sata --disk vol={{libvirt.pool}}/{{libvirt.name}}-disk9,bus=sata --disk vol={{libvirt.pool}}/{{libvirt.name}}-disk10,bus=sata --disk vol={{libvirt.pool}}/{{libvirt.name}}-disk11,bus=sata --os-variant sles11 --network model=e1000,network={{libvirt.network}} --wait 0
    tags: libvirt
Hi @infernix, do you know if it is possible to disable the splash screen that appears during the early installation phase and says something like "detected KVM hypervisor instead of ESXi, please confirm by pressing a button"?

That's SUSE's hypervisor compliance checker. To disable it, log in to the VCSA and run:

chkconfig --set boot.compliance off

Just wanted to say, this was super helpful! While I am not using ansible directly, I was able to use the steps here as a starting point and adapt the workflow to bring up VCSA v7.0.3 under libvirt/KVM (on AlmaLinux 8.10). There were a few things I had to adapt (namely the guestfish commands; the filesystem layout changed in the newer VCSA versions since this playbook was written). I've created a separate full-length gist describing my entire workflow. Thanks again!