packer not taking kickstart file #18

Closed
Gokul-C opened this issue Sep 29, 2023 · 2 comments
Gokul-C commented Sep 29, 2023

Hi, I am trying to auto-install Ubuntu 22.04 Live Server with the Packer hyperv-iso builder.
It worked a few times without any issues, but now it has suddenly stopped working: the installer is not picking up the kickstart file and pops up the GUI asking me to select a language. When Packer starts, it serves the HTTP directory automatically, and if I open IP:PORT in a browser I can see that the user-data and meta-data files are present. I don't know what went wrong. Can you please help me here?

Packer version: 1.9.4

Operating system and environment details:
Windows 10
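
For reference, the served seed files can also be checked from the host with curl instead of a browser; this is only a sanity check of the HTTP side, and the address below is a placeholder for whatever Packer prints when it starts the server:

# both requests should return the file contents;
# replace <HTTPIP> and <HTTPPort> with the values from Packer's output
curl -v http://<HTTPIP>:<HTTPPort>/user-data
curl -v http://<HTTPIP>:<HTTPPort>/meta-data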

The variables file:

boot_command=["c<wait>","linux /casper/vmlinuz --- autoinstall ds=\"nocloud-net;seedfrom=http://{{.HTTPIP}}:{{.HTTPPort}}/\"","<enter><wait>","initrd /casper/initrd","<enter><wait>","boot","<enter>"]
disk_additional_size=["150000"]
disk_size="70000"
http_directory="cidata"
iso_checksum_type="sha256"
iso_checksum="a4acfda10b18da50e2ec50ccaf860d7f20b389df8765611142305c0e911d16fd"
iso_url="https://releases.ubuntu.com/jammy/ubuntu-22.04.3-live-server-amd64.iso"
output_directory="output-ubuntu2204"
#output_vagrant="./vbox/packer-ubuntu2204-g2.box"
provision_script_options="-z false -h true -p false"
ssh_password="ubuntu"
ssh_username="ubuntu"
switch_name="packer-hyperv-iso"
#vagrantfile_template="./vagrant/hv_ubuntu2204_g2.template"
vlan_id=""
vm_name="packer-ubuntu2204-g2"
uefi_file="./Boot-scripts/uefi.sh"
provision_file="./Boot-scripts/provision.sh"
motd_file="./Boot-scripts/motd.sh"
zeroing_file="./Boot-scripts/zeroing.sh"
neofetch_file="./Boot-scripts/prepare_neofetch.sh"
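
The boot_command above boots the installer kernel with autoinstall and a nocloud-net datasource whose seedfrom URL points at Packer's built-in HTTP server, which serves the http_directory (cidata here). The datasource fetches user-data and meta-data from the root of that URL, so the directory is expected to look roughly like this (directory name taken from the http_directory value above; meta-data may be an empty file, but it should exist):

# layout of the directory served over HTTP during the build
ls cidata/
# expected output: meta-data  user-data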

Template file:

variable "ansible_override" {
  type    = string
  default = ""
}

variable "boot_command" {
}

variable "disk_size" {
  type    = string
  default = "70000"
}

variable "disk_additional_size" {
  type    = list(number)
  default = ["1024"]
}

variable "memory" {
  type    = string
  default = "1024"
}

variable "cpus" {
  type    = string
  default = "1"
}

variable "iso_checksum" {
  type    = string
  default = ""
}

variable "iso_checksum_type" {
  type    = string
  default = "none"
}

variable "iso_url" {
  type    = string
  default = ""
}

variable "output_directory" {
  type    = string
  default = ""
}

variable "provision_script_options" {
  type    = string
  default = ""
}

variable "output_vagrant" {
  type    = string
  default = ""
}

variable "ssh_password" {
  type    = string
  default = ""
  sensitive = true
}

variable "switch_name" {
  type    = string
  default = ""
}

variable "vagrantfile_template" {
  type    = string
  default = ""
}

variable "vlan_id" {
  type    = string
  default = ""
}

variable "vm_name" {
  type    = string
  default = ""
}

variable "http_directory" {
  type    = string
  default = ""
}

variable "ssh_username" {
  type    = string
  default = "ubuntu"
}

variable "uefi_file" {
  type    = string
  default = ""
}

variable "provision_file" {
  type    = string
  default = ""
}

variable "motd_file" {
  type    = string
  default = ""
}

variable "neofetch_file" {
  type    = string
  default = ""
}

variable "zeroing_file" {
  type    = string
  default = ""
}

source "hyperv-iso" "vm" {
  boot_command          = "${var.boot_command}"
  boot_wait             = "1s"
  communicator          = "ssh"
  cpus                  = "${var.cpus}"
  disk_block_size       = "1"
  disk_size             = "${var.disk_size}"
  enable_dynamic_memory = "true"
  enable_secure_boot    = false
  generation            = 2
  guest_additions_mode  = "disable"
  http_directory        = "${var.http_directory}"
  iso_checksum          = "${var.iso_checksum_type}:${var.iso_checksum}"
  iso_url               = "${var.iso_url}"
  memory                = "${var.memory}"
  output_directory      = "${var.output_directory}"
  shutdown_command      = "echo 'ubuntu' | sudo -S shutdown -P now"
  shutdown_timeout      = "30m"
  ssh_password          = "${var.ssh_password}"
  ssh_timeout           = "4h"
  ssh_username          = "${var.ssh_username}"
  switch_name           = "${var.switch_name}"
  temp_path             = "."
  vlan_id               = "${var.vlan_id}"
  vm_name               = "${var.vm_name}"
}

build {
  sources = ["source.hyperv-iso.vm"]

  
  provisioner "file" {
    destination = "/tmp/uefi.sh"
    source      = "${var.uefi_file}"
  }

  provisioner "file" {
    destination = "/tmp/provision.sh"
    source      = "${var.provision_file}"
  }

  provisioner "file" {
    destination = "/tmp/scvmmguestagent.1.0.3.1028.x64.tar"
    source      = "./Boot-scripts/scvmmguestagent.1.0.3.1028.x64.tar"
  }

  provisioner "file" {
    destination = "/tmp/install"
    source      = "./Boot-scripts/install.sh"
  }

  provisioner "shell" {
    execute_command   = "chmod +x {{ .Path }}; {{ .Vars }} sudo -E sh '{{ .Path }}'"
    expect_disconnect = true
    inline            = ["chmod +x /tmp/provision.sh", "chmod +x /tmp/uefi.sh", "mv /tmp/uefi.sh /usr/local/bin/uefi.sh", "/tmp/provision.sh ${var.provision_script_options}", "sync;sync;reboot"]
    inline_shebang    = "/bin/sh -x"
  }

  provisioner "file" {
    destination = "/tmp/motd.sh"
    source      = "${var.motd_file}"
  }

  provisioner "file" {
    destination = "/tmp/prepare_neofetch.sh"
    source      = "${var.neofetch_file}"
  }

  provisioner "file" {
    destination = "/tmp/zeroing.sh"
    source      = "${var.zeroing_file}"
  }

  provisioner "shell" {
    execute_command = "chmod +x {{ .Path }}; {{ .Vars }} sudo -E sh '{{ .Path }}'"
    inline          = ["echo Last Phase",
    "chmod +x /tmp/prepare_neofetch.sh",
    "chmod +x /usr/local/bin/uefi.sh",
    "chmod +x /tmp/zeroing.sh",
    "/tmp/prepare_neofetch.sh",
    "/tmp/zeroing.sh",
    "/bin/rm -rfv /tmp/*",
    "/bin/rm -f /etc/ssh/*key*",
    "/usr/bin/ssh-keygen -A",
    "echo 'packerVersion: ${packer.version}' >>/etc/packerinfo"]
    inline_shebang  = "/bin/sh -x"
  }
  provisioner "shell" {
    execute_command = "echo '${var.ssh_password}' | {{.Vars}} sudo -S -E bash '{{.Path}}'"
    script = "./Shell-Scripts/azure.sh"
  }
  provisioner "shell" {
    execute_command = "echo '${var.ssh_password}' | {{.Vars}} sudo -S -E bash '{{.Path}}'"
    script = "./Shell-Scripts/cleanup.sh"
  }
  

}
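
For completeness, assuming the template above is saved as ubuntu.pkr.hcl and the variables as ubuntu.pkrvars.hcl (both filenames are placeholders, not taken from the report), the build would be invoked along these lines:

# validate first, then build; -var-file points Packer at the variables file shown earlier
packer validate -var-file=ubuntu.pkrvars.hcl ubuntu.pkr.hcl
packer build -var-file=ubuntu.pkrvars.hcl ubuntu.pkr.hcl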

Kickstart/cloud-init file:

#cloud-config
autoinstall:
  version: 1
  identity:
    hostname: ubuntu2204
    password: $6$rounds=4096$JFVw4zdHh/ahPcjO$NFG4XVjVbcB7kX9KR7nf9zSXTHZnfVqztfC3wPQJbR6tLkFmxEEKuk7lr8Kp6p7z6cdb9qsqINnEtteJYk5h21
    username: ubuntu
  early-commands:
    - systemctl stop ssh # otherwise Packer tries to connect and exceeds max attempts
  network:
    network:
      version: 2
      ethernets:
        eth0:
          dhcp4: yes
          dhcp-identifier: mac
  update: no
  apt:
    geoip: true
    preserve_sources_list: false
    primary:
      - arches: [amd64]
        uri: "http://archive.ubuntu.com/ubuntu/"
  packages:
    - mc
    - curl
    - wget
    - sudo
    - tar
    - bzip2
    - build-essential
    - linux-image-virtual
    - linux-tools-virtual
#    - linux-cloud-tools-virtual
    - net-tools
    - qemu-guest-agent
  locale: en_US.UTF-8
  timezone: Europe/Copenhagen
  ssh:
    install-server: true
    allow-pw: true
  user-data:
    disable_root: false
    lock-passwd: false
    ssh_pwauth: true
    hostname: ubuntu2204
    password: $6$rounds=4096$JFVw4zdHh/ahPcjO$NFG4XVjVbcB7kX9KR7nf9zSXTHZnfVqztfC3wPQJbR6tLkFmxEEKuk7lr8Kp6p7z6cdb9qsqINnEtteJYk5h21
    username: ubuntu
  late-commands:
    #- echo 'ubuntu ALL=(ALL) NOPASSWD:ALL' > /target/etc/sudoers.d/ubuntu
    - sed -ie 's/GRUB_CMDLINE_LINUX=.*/GRUB_CMDLINE_LINUX="net.ifnames=0 ipv6.disable=1 biosdevname=0 elevator=noop"/' /target/etc/default/grub
    - sed -ie 's/#PermitRootLogin prohibit-password/PermitRootLogin yes/' /target/etc/ssh/sshd_config
    - sed -ie 's/#PubkeyAuthentication yes/PubkeyAuthentication yes/' /target/etc/ssh/sshd_config
    - sed -ie 's/#AllowAgentForwarding yes/AllowAgentForwarding yes/' /target/etc/ssh/sshd_config
    - curtin in-target -- update-grub2
    # virtual package is required for some cloud images. Lack of it causes failure in communication.
    - curtin in-target -- apt-get -y install linux-cloud-tools-virtual||true
    - curtin in-target -- systemctl enable qemu-guest-agent
  storage:
    config:
      - type: disk
        id: disk0
        ptable: gpt
        wipe: superblock
        grub_device: true
        match:
          size: largest
      - id: efi-partition # create partitions on disk (like sda1)
        type: partition
        device: disk0
        size: 512MB
        flag: boot # EFI system partition needs boot flag
      - type: partition
        id: boot-partition
        device: disk0
        size: 2GB
      - type: partition
        device: disk0
        id: root-partition
        size: -1
      - id: efi-partition-fs # format partitions on disk
        type: format
        volume: efi-partition
        fstype: fat32
        label: EFI
      - id: boot-partition-fs
        type: format
        fstype: ext4
        volume: boot-partition
      - id: root-partition-fs
        type: format
        fstype: ext4
        volume: root-partition
      - id: efi-partition-fs-mount # mount partitions
        type: mount
        device: efi-partition-fs
        path: /boot/efi
      - id: root-partition-fs-mount
        type: mount
        path: /
        device: root-partition-fs
      - id: boot-partition-fs-mount
        type: mount
        path: /boot
        device: boot-partition-fs
marcinbojko (Owner) commented:

Could you check with the latest release?

Souldiv commented Nov 17, 2023

I had the same issue. In my case it was because I was running Packer in WSL: when I ran packer build, the HTTP server was started on the WSL network, which was not accessible from the Hyper-V Packer VM. I see that you are using a custom switch; if it is an internal switch, forwarding may not be enabled, which restricts access between the networks. My suggestion is to try using the Default Switch instead. Check whether IP:PORT is accessible from the network where the Packer VM is created!
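
A quick way to confirm this is to test reachability from inside the installer VM rather than from the host browser: the live-server installer offers a shell (for example via its Help menu), and if the seed URL is not reachable from there, the VM never sees the autoinstall data even though the host can fetch it. A sketch, with the address again being whatever Packer prints, and curl or wget used depending on what the live environment provides:

# run from a shell inside the installer VM; if this fails while the host browser works,
# the switch is isolating the VM from Packer's HTTP server
curl -v http://<HTTPIP>:<HTTPPort>/user-data

If that turns out to be the case, switching to the default NAT switch is a one-line change in the variables file: switch_name="Default Switch".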
