+ echo '====== automated-ec2-builds [/tmp/automated-ec2-builds] ======'
====== automated-ec2-builds [/tmp/automated-ec2-builds] ======
+ cd /tmp/automated-ec2-builds
+ bzr info
Standalone tree (format: 2a)
Location:
  branch root: .

Related branches:
  parent branch: http://bazaar.launchpad.net/~ubuntu-on-ec2/vmbuilder/automated-ec2-builds/
+ bzr version-info
revision-id: ben.howard@canonical.com-20150406195524-oz463net2e0pd6n6
date: 2015-04-06 13:55:24 -0600
build-date: 2015-04-08 21:54:22 +0000
revno: 662
branch-nick: automated-ec2-builds
+ bzr log -p -r-1
------------------------------------------------------------
revno: 662 [merge]
committer: Ben Howard
branch nick: auto.merge
timestamp: Mon 2015-04-06 13:55:24 -0600
message:
  Build OVA images suitable for import into VMware, Virtualbox, Citrix, etc
diff:
=== modified file 'build-ec2-image'
--- build-ec2-image	2015-03-30 15:30:22 +0000
+++ build-ec2-image	2015-04-06 19:55:24 +0000
@@ -89,6 +89,8 @@
 floppy=""
 ovf_create=0
 ovf_cmd=( )
+ova_create=0
+ova_cmd=( )
 package_img_tgz=1
 create_root_tgz=0
 purge_roottgz_pkgs=( )
@@ -118,8 +120,8 @@
 unpacked_d="${pub_d}/unpacked"
 mkdir -p "${unpacked_d}" || fail "failed to make ${unpacked_d}"
 ovf_d="${pub_d}"
-if [ ${ovf_create:-0} -ne 0 ]; then
-    mkdir -p "${ovf_d}" || fail "failed to make ovf dir ${ovf_d}"
+if [ ${ovf_create:-0} -ne 0 -o ${ova_create:-0} -ne 1 ]; then
+    mkdir -p "${ovf_d}" || fail "failed to make ovf/ova dir ${ovf_d}"
 fi
 
 for arch in ${arches}
@@ -328,7 +330,7 @@
     disk_img="${pub_d}/${img_base}-filesystem.img"
     if [ "${qcow2_create:-0}" -ne 0 -o "${vmdk_create:-0}" -ne 0 -o \
          "${ovf_create:-0}" -ne 0 -o "${raw_img:-0}" -ne 0 -o \
-         "${qemu_img:-0}" -ne 0 ]; then
+         "${qemu_img:-0}" -ne 0 -o "${ova_create:-0}" ]; then
         debug "creating raw partitioned disk image"
         part2disk_opts="-v"
         [ "${nogrub:-0}" -ne 1 ] && part2disk_opts="${part2disk_opts} -G"
@@ -444,16 +446,35 @@
         [ "${raw_disk:-0}" -ne 0 ] || { [ -e "${gpt_img}" ] && rm ${gpt_img}; }
     fi
 
+    # create the OVA container
+    if [ "${ova_create:-0}" -eq 1 ]; then
+        prefix="${img_base}"
+        debug "Creating OVA for ${disk_name}"
+        debug " OVA Filename: ${ovf_d}/${prefix}.ova"
+        cmd=( "${ova_cmd[@]}" )
+        cmd=( "${cmd[@]//%d/${base_d}}" )
+        cmd=( "${cmd[@]//%o/${ovf_d}}" )
+        cmd=( "${cmd[@]//%p/${prefix}}" )
+        cmd=( "${cmd[@]//%r/${disk_img}}" )
+        cmd=( "${cmd[@]//%s/${img_size}}" )
+
+        debug "creating ova with ${cmd[@]}"
+        "${cmd[@]}" || fail "failed to run ova_cmd: ${cmd[@]}"
+        sudo chown -R "$(id -u):$(id -g)" "${ovf_d}"
+        chmod go+r "${ovf_d}/"*
+    fi
+
     # create OVF containers
     for disk_type in ${ovf_disks}; do
-        [ "${ovf_default:-vmdk}" = "${disk_type}" ] && prefix="${img_base}" || prefix="${img_base}-${disk_type}"
+        [ "${ovf_default:-vmdk}" = "${disk_type}" ] &&
+            prefix="${img_base}" ||
+            prefix="${img_base}-${disk_type}"
         disk_name="${img_base}-disk1.${disk_type}"
         debug "Creating OVF for ${disk_type} disk: ${disk_name}"
         debug " OVF Filename: ${ovf_d}/${prefix}.ovf"
         cmd=( "${ovf_cmd[@]}" )
-        # %o -> outputdir, %i -> input image, %d -> base_dir, %p -> prefix, %r -> disk image file
         cmd=( "${cmd[@]//%o/${ovf_d}}" )
         cmd=( "${cmd[@]//%i/${unpacked_d}/${image}}" )
         cmd=( "${cmd[@]//%d/${base_d}}" )
@@ -461,9 +482,9 @@
         cmd=( "${cmd[@]//%r/${disk_name}}" )
         cmd=( "${cmd[@]//%s/${img_size}}" )
         cmd=( "${cmd[@]//%t/${disk_type}}" )
+
         debug "creating ovf with ${cmd[@]}"
-        "${cmd[@]}" ||
-            fail "failed to run ovf_cmd: ${cmd[@]}"
+        "${cmd[@]}" || fail "failed to run ovf_cmd: ${cmd[@]}"
         sudo chown -R "$(id -u):$(id -g)" "${ovf_d}"
         chmod go+r "${ovf_d}/"*
     done
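
The new OVA block reuses the %-placeholder convention that build-ec2-image already applies to ovf_cmd: the command array configured in the suite conf file is expanded element by element with bash pattern substitution and then executed. A minimal sketch of that expansion follows, with made-up paths and names standing in for the values the script computes at run time (the ova_cmd line itself is taken from the conf changes shown next).

    #!/bin/bash
    # Hypothetical ova_cmd, as configured in a suite conf file (see the conf diffs below).
    ova_cmd=( sudo "%d/ovf/diskimg2ova" "--prefix=%p" "--disk=%r" "--size=10G" "--out=%o" )

    # Made-up stand-ins for the per-image values build-ec2-image would compute.
    base_d=/srv/ec2-images
    ovf_d=/srv/ec2-images/publish
    prefix=trusty-server-cloudimg-amd64
    disk_img=/srv/ec2-images/publish/trusty-server-cloudimg-amd64-filesystem.img
    img_size=2G

    cmd=( "${ova_cmd[@]}" )
    cmd=( "${cmd[@]//%d/${base_d}}" )    # %d -> base directory of the branch
    cmd=( "${cmd[@]//%o/${ovf_d}}" )     # %o -> output directory
    cmd=( "${cmd[@]//%p/${prefix}}" )    # %p -> image name prefix
    cmd=( "${cmd[@]//%r/${disk_img}}" )  # %r -> raw disk image
    cmd=( "${cmd[@]//%s/${img_size}}" )  # %s -> image size (no effect here; --size is fixed at 10G)

    printf '%s\n' "${cmd[@]}"
    # sudo
    # /srv/ec2-images/ovf/diskimg2ova
    # --prefix=trusty-server-cloudimg-amd64
    # --disk=/srv/ec2-images/publish/trusty-server-cloudimg-amd64-filesystem.img
    # --size=10G
    # --out=/srv/ec2-images/publish
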
=== modified file 'conf/trusty-server.conf'
--- conf/trusty-server.conf	2014-02-17 19:47:30 +0000
+++ conf/trusty-server.conf	2015-04-06 16:36:49 +0000
@@ -3,6 +3,7 @@
 copy_out_kernels=1
 ovf_cmd=( sudo "%d/ovf/diskimg2ovf" --conf=trusty-server.conf --template=ubuntu-ovf-v1-%t.tmpl "--prefix=%p" "--disk=%r" "--size=%s" "--out=%o" )
+ova_cmd=( sudo "%d/ovf/diskimg2ova" "--prefix=%p" "--disk=%r" "--size=10G" "--out=%o" )
 lb_conf=( sudo "LB_BASE=%L" "PATH=%x" "%P/lb_config" "--distribution=%d" "--mode=ubuntu" "--build-with-chroot=true" "--chroot-filesystem=plain" "--system=normal" "--package-list=ubuntu-cloud-t" "--hdd-fs-label=%l" "--hdd-new-fstab" "--root-mount-by=LABEL" "--binary-images=binary-hdd-raw" "--bootloader=none" "--hdd-size=%s" "--binary-filesystem=ext4"
@@ -30,6 +31,7 @@
 qcow2_uefi_create=1
 vmdk_uefi_create=0
 ovf_default=qcow2
+ova_create=1
 img_size=2G
 create_root_tgz=1
 purge_roottgz_pkgs=( "linux-*" "grub-*" )

=== modified file 'conf/utopic-server.conf'
--- conf/utopic-server.conf	2014-04-24 16:29:40 +0000
+++ conf/utopic-server.conf	2015-04-06 16:36:49 +0000
@@ -3,6 +3,7 @@
 copy_out_kernels=1
 ovf_cmd=( sudo "%d/ovf/diskimg2ovf" --conf=utopic-server.conf --template=ubuntu-ovf-v1-%t.tmpl "--prefix=%p" "--disk=%r" "--size=%s" "--out=%o" )
+ova_cmd=( sudo "%d/ovf/diskimg2ova" "--prefix=%p" "--disk=%r" "--size=10G" "--out=%o" )
 lb_conf=( sudo "LB_BASE=%L" "PATH=%x" "%P/lb_config" "--distribution=%d" "--mode=ubuntu" "--build-with-chroot=true" "--chroot-filesystem=plain" "--system=normal" "--package-list=ubuntu-cloud-t" "--hdd-fs-label=%l" "--hdd-new-fstab" "--root-mount-by=LABEL" "--binary-images=binary-hdd-raw" "--bootloader=none" "--hdd-size=%s" "--binary-filesystem=ext4"
@@ -30,6 +31,7 @@
 qcow2_uefi_create=1
 vmdk_uefi_create=0
 ovf_default=qcow2
+ova_create=1
 img_size=2G
 create_root_tgz=1
 purge_roottgz_pkgs=( "linux-*" "grub-*" )

=== modified file 'conf/vivid-server.conf'
--- conf/vivid-server.conf	2014-10-31 14:03:07 +0000
+++ conf/vivid-server.conf	2015-04-06 16:36:49 +0000
@@ -3,6 +3,7 @@
 copy_out_kernels=1
 ovf_cmd=( sudo "%d/ovf/diskimg2ovf" --conf=vivid-server.conf --template=ubuntu-ovf-v1-%t.tmpl "--prefix=%p" "--disk=%r" "--size=%s" "--out=%o" )
+ova_cmd=( sudo "%d/ovf/diskimg2ova" "--prefix=%p" "--disk=%r" "--size=10G" "--out=%o" )
 lb_conf=( sudo "LB_BASE=%L" "PATH=%x" "%P/lb_config" "--distribution=%d" "--mode=ubuntu" "--build-with-chroot=true" "--chroot-filesystem=plain" "--system=normal" "--package-list=ubuntu-cloud-t" "--hdd-fs-label=%l" "--hdd-new-fstab" "--root-mount-by=LABEL" "--binary-images=binary-hdd-raw" "--bootloader=none" "--hdd-size=%s" "--binary-filesystem=ext4"
@@ -30,6 +31,7 @@
 qcow2_uefi_create=1
 vmdk_uefi_create=0
 ovf_default=qcow2
+ova_create=1
 img_size=2G
 create_root_tgz=1
 purge_roottgz_pkgs=( "linux-*" "grub-*" )

=== added file 'ovf/VMDKstream.py'
--- ovf/VMDKstream.py	1970-01-01 00:00:00 +0000
+++ ovf/VMDKstream.py	2015-04-06 16:36:49 +0000
@@ -0,0 +1,315 @@
+#!/usr/bin/env python
+# encoding: utf-8
+
+# Copyright (C) 2011 Red Hat, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
+# MA 02110-1301, USA. A copy of the GNU General Public License is
+# also available at http://www.gnu.org/copyleft/gpl.html.
+#
+# First cut module to convert raw disk images into stream-optimized VMDK files
+#
+# See the "Specification Document" referenced in Wikipedia for more details:
+#
+# http://en.wikipedia.org/wiki/VMDK
+#
+# Divergence from spec noted below
+#
+# The stream-optimized format is required for importing images via the vSphere
+# SOAP API
+
+import struct
+import sys
+import os
+import math
+import string
+import zlib
+
+class VMDKStreamException(Exception):
+    def __init__(self, msg):
+        self.msg = msg
+    def __str__(self):
+        return self.msg
+
+# Header Constants
+MAGIC_NUMBER = 0x564D444B # 'V' 'M' 'D' 'K'
+
+# Marker Constants
+MARKER_EOS = 0     # end of stream
+MARKER_GT = 1      # grain table
+MARKER_GD = 2      # grain directory
+MARKER_FOOTER = 3  # footer (repeat of header with final info)
+
+# Other Constants
+SECTOR_SIZE = 512
+
+# Descriptor Template
+image_descriptor_template='''# Description file created by VMDK stream converter
+version=1
+# Believe this is random
+CID=7e5b80a7
+# Indicates no parent
+parentCID=ffffffff
+createType="streamOptimized"
+
+# Extent description
+RDONLY #SECTORS# SPARSE "call-me-stream.vmdk"
+
+# The Disk Data Base
+#DDB
+
+ddb.adapterType = "lsilogic"
+ddb.geometry.cylinders = "#CYLINDERS#"
+ddb.geometry.heads = "255"
+ddb.geometry.sectors = "63"
+ddb.toolsVersion = "2147483647"
+ddb.longContentID = "8f15b3d0009d9a3f456ff7b28d324d2a"
+ddb.virtualHWVersion = "7"'''
+
+
+def create_sparse_header(inFileSectors, descriptorSize,
+                         gdOffset = 0xFFFFFFFFFFFFFFFF):
+    # While theoretically variable we set these based on current VMWare
+    # convention
+    grainSize = 128
+    numGTEsPerGT = 512
+    overHead = 128
+    formatVersion = 3 # NOTE: Conflicts with VMWare docs - determined by trial/error
+
+    descriptorOffset = 1
+
+    # The following are always fixed in the "stream-optimized" format we are
+    # creating
+    compressAlgorithm = 1
+    flags = 0x30001
+    rgdOffset = 0
+
+    # We are building from scratch so an unclean shutdown is not possible
+    uncleanShutdown = 0
+
+    # Build the struct
+    header_list = [ MAGIC_NUMBER, formatVersion, flags, inFileSectors,
+                    grainSize, descriptorOffset, descriptorSize, numGTEsPerGT,
+                    rgdOffset, gdOffset, overHead, uncleanShutdown,
+                    '\n', ' ', '\r', '\n', compressAlgorithm ]
+    for i in range(433):
+        header_list.append(0)
+    header_struct = "=IIIQQQQIQQQBccccH433B"
+    return struct.pack(header_struct, *header_list)
+
+def create_marker(numSectors, size, marker_type):
+    marker_list = [ numSectors, size, marker_type ]
+    for i in range(496):
+        marker_list.append(0)
+    marker_struct = "=QII496B"
+    return struct.pack(marker_struct, *marker_list)
+
+def create_grain_marker(location, size):
+    # The grain marker is special in that the data follows immediately after it
+    # without a pad
+    return struct.pack("=QI", location, size)
+
+def divro(num, den):
+    # Divide always rounding up and returning an integer
+    # Is there some nicer way to do this?
+    return int(math.ceil((1.0*num)/(1.0*den)))
+
+def pad_to_sector(stringlen):
+    # create a pad that, when concated onto a string of stringlen
+    # makes it an integer number of sectors
+    # return pad and length of input_string + pad in sectors
+    pad = ""
+    if stringlen % SECTOR_SIZE:
+        # This does need padding
+        for i in range(SECTOR_SIZE - (stringlen % SECTOR_SIZE)):
+            pad += '\0'
+    finallen = (stringlen + len(pad))/SECTOR_SIZE
+    return pad, finallen
+
+def sector_pointer(file_object):
+    # return file point in sectors
+    # raise an exception if not sector aligned
+    file_location = file_object.tell()
+    if file_location % SECTOR_SIZE:
+        raise VMDKStreamException("Asked for a sector pointer on a file whose r/w pointer is not sector aligned")
+    else:
+        return file_location / SECTOR_SIZE
+
+
+def write_grain_table(outfile, grain_table, gtes_per_gt = 512):
+    # Write grain_table to outfile including header
+    # return the sector on which the table starts
+
+    zero_grain_table = [ ]
+    for i in range(gtes_per_gt):
+        zero_grain_table.append(0)
+
+    if grain_table == zero_grain_table:
+        # We don't need to write this and can put zeros in the directory
+        return 0
+    else:
+        grain_table_marker = create_marker(numSectors = (gtes_per_gt * 4) / SECTOR_SIZE,
+                                           size = 0, marker_type = MARKER_GT)
+        outfile.write(grain_table_marker)
+        table_location = sector_pointer(outfile)
+        outfile.write(struct.pack("%dI" % gtes_per_gt, *grain_table))
+        return table_location
+
+def debug_print(message):
+    #print message
+    pass
+
+def convert_to_stream(infilename, outfilename):
+    debug_print("DEBUG: opening %s to write to %s" % (infilename, outfilename))
+
+    infileSize = os.path.getsize(infilename)
+    infileSectors = divro(infileSize, 512)
+    debug_print("DEBUG: input file is (%s) bytes - (%s) sectors long" % (infileSize, infileSectors))
+
+    # Fixed by convention
+    # TODO: Make variable here and in header fuction
+    grainSectors=128
+    totalGrains=divro(infileSectors, grainSectors)
+    debug_print("DEBUG: total grains will be (%s)" % (totalGrains))
+
+    # Fixed by convention
+    # TODO: Make variable here and in header fuction
+    numGTEsPerGT = 512
+    totalGrainTables=divro(totalGrains, numGTEsPerGT)
+    debug_print("DEBUG: total Grain Tables needed will be (%s)" % (totalGrainTables))
+
+    grainDirectorySectors=divro(totalGrainTables*4, SECTOR_SIZE)
+    debug_print("DEBUG: sectors in Grain Directory will be (%s)" % (grainDirectorySectors))
+
+    grainDirectoryEntries=grainDirectorySectors*128
+    debug_print("DEBUG: Number of entries in Grain Directory - (%s)" % (grainDirectoryEntries))
+
+    infileCylinders=divro(infileSectors, (63*255))
+    debug_print("DEBUG: Cylinders (%s)" % infileCylinders)
+
+    # Populate descriptor
+    tmpl = image_descriptor_template
+    tmpl = string.replace(tmpl, "#SECTORS#", str(infileSectors))
+    tmpl = string.replace(tmpl, "#CYLINDERS#", str(infileCylinders))
+    image_descriptor = tmpl
+
+    image_descriptor_pad, desc_sectors = pad_to_sector(len(image_descriptor))
+    debug_print("DEBUG: Descriptor takes up (%s) sectors" % desc_sectors)
+    image_descriptor += image_descriptor_pad
+
+    image_header = create_sparse_header(inFileSectors = infileSectors,
+                                        descriptorSize = desc_sectors)
+
+    outfile = open(outfilename, "wb")
+    outfile.write(image_header)
+    outfile.write(image_descriptor)
+
+    # Fixed by convention
+    # TODO: Make variable here and in header function
+    overHead = 128
+
+    # Pad the output file to fill the overHead
+    for i in range((overHead-sector_pointer(outfile)) * SECTOR_SIZE):
+        outfile.write('\0')
+
+    # grainDirectory - list of integers representing the global level 0 grain
+    # directory
+    grainDirectory = [ ]
+
+    # currentGrainTable - list that can grow to numGTEsPerGT integers
+    # representing the active grain table
+    currentGrainTable = [ ]
+
+    # For slightly more efficient comparison
+    grainSize = grainSectors * SECTOR_SIZE
+    zeroChunk = ""
+    for i in range(grainSize):
+        zeroChunk += '\0'
+
+    # We are ready to start reading
+    infile = open(infilename, "rb")
+
+    try:
+        inputSectorPointer = sector_pointer(infile)
+        inChunk = infile.read(grainSize)
+        while inChunk != "":
+            if inChunk == zeroChunk:
+                # All zeros - no need to create a grain - just mark zero in GTE
+                currentGrainTable.append(0)
+            else:
+                # Create a compressed grain
+                currentGrainTable.append(sector_pointer(outfile))
+                compChunk = zlib.compress(inChunk)
+                grain_marker = create_grain_marker(inputSectorPointer,
+                                                   len(compChunk))
+                grainPad, writeSectors = pad_to_sector(len(compChunk) + len(grain_marker))
+                outfile.write(grain_marker)
+                outfile.write(compChunk)
+                outfile.write(grainPad)
+
+            if len(currentGrainTable) == numGTEsPerGT:
+                # Table is full
+                table_location = write_grain_table(outfile, currentGrainTable,
+                                                   gtes_per_gt = numGTEsPerGT)
+                # function does zero check so we don't have to
+                grainDirectory.append(table_location)
+                currentGrainTable = [ ]
+            # do not update pointer unless we read a full grain last time
+            # incomplete grain read indicates EOF and may result in non-sector
+            # alignment
+            if len(inChunk) == grainSize:
+                inputSectorPointer = sector_pointer(infile)
+            # read the next chunk
+            inChunk = infile.read(grainSize)
+    finally:
+        # Write out the final grain table if needed
+        if len(currentGrainTable):
+            debug_print("Partial grain table present - padding and adding it to dir")
+            for i in range(numGTEsPerGT-len(currentGrainTable)):
+                currentGrainTable.append(0)
+            table_location = write_grain_table(outfile, currentGrainTable,
+                                               gtes_per_gt = numGTEsPerGT)
+            grainDirectory.append(table_location)
+        else:
+            debug_print("Current grain table is empty so we need not write it out")
+
+    # pad out grain directory then write it
+    for i in range(grainDirectoryEntries - totalGrainTables):
+        grainDirectory.append(0)
+    grain_directory_marker = create_marker(grainDirectorySectors, 0,
+                                           MARKER_GD)
+    outfile.write(grain_directory_marker)
+    gdLocation = sector_pointer(outfile)
+    grainDirectoryStruct = "%dI" % grainDirectoryEntries
+    debug_print("Grain directory length (%d)" % (len(grainDirectory)))
+    debug_print("Grain directory: ")
+    debug_print(grainDirectory)
+    outfile.write(struct.pack(grainDirectoryStruct, *grainDirectory))
+
+    # footer marker
+    outfile.write(create_marker(1, 0, MARKER_FOOTER))
+
+    # footer
+    footer = create_sparse_header(inFileSectors = infileSectors,
+                                  descriptorSize = desc_sectors,
+                                  gdOffset = gdLocation)
+    outfile.write(footer)
+
+    # EOS marker
+    outfile.write(create_marker(0, 0, MARKER_EOS))
+    outfile.close()
+    infile.close()
+
+if __name__ == '__main__':
+    convert_to_stream(sys.argv[1], sys.argv[2])

=== added file 'ovf/conf/ubuntu-ova-v1-vmdk.tmpl'
--- ovf/conf/ubuntu-ova-v1-vmdk.tmpl	1970-01-01 00:00:00 +0000
+++ ovf/conf/ubuntu-ova-v1-vmdk.tmpl	2015-04-06 16:36:49 +0000
@@ -0,0 +1,144 @@
+
+Virtual disk information
+
+The list of logical networks
+
+The VM Network network
+
+A virtual machine
+@@NAME@@
+
+The kind of installed guest operating system
+
+Virtual hardware requirements
+
+Virtual Hardware Family
+0
+@@NAME@@
+vmx-10
+
+hertz * 10^6
+Number of Virtual CPUs
+@@NUM_CPUS@@ virtual CPU(s)
+1
+3
+@@NUM_CPUS@@
+
+byte * 2^20
+Memory Size
+@@MEM_SIZE@@MB of memory
+2
+4
+@@MEM_SIZE@@
+
+0
+SCSI Controller
+SCSI Controller 0
+3
+VirtualSCSI
+6
+
+1
+IDE Controller
+VirtualIDEController 1
+4
+5
+
+0
+IDE Controller
+VirtualIDEController 0
+5
+5
+
+false
+VirtualVideoCard
+6
+24
+
+false
+VirtualVMCIDevice
+7
+vmware.vmci
+1
+
+0
+false
+CD-ROM 1
+8
+4
+vmware.cdrom.remotepassthrough
+15
+
+0
+Hard Disk 1
+ovf:/disk/vmdisk1
+9
+3
+17
+
+0
+false
+Floppy Drive
+Floppy 1
+10
+vmware.floppy.remotedevice
+14
+
+7
+true
+VM Network
+VmxNet3 ethernet adapter on "VM Network"
+Ethernet 1
+11
+VmxNet3
+10
+

=== added file 'ovf/diskimg2ova'
--- ovf/diskimg2ova	1970-01-01 00:00:00 +0000
+++ ovf/diskimg2ova	2015-04-06 19:55:24 +0000
@@ -0,0 +1,158 @@
+#!/bin/bash
+#
+# Use VMware's open-vmdk tools to create an OVA image
+# This is a little hacky, but qemu and even virtualbox cannot
+# produce a proper vmware-portable OVA.
+#
+error() { echo "$@" 1>&2; }
+debug() { error "$(date -R):" "$@"; }
+cleanup() {
+    [ -z "${TEMP_D}" -o ! -d "${TEMP_D}" ] || rm -Rf "${TEMP_D}"
+}
+fail() { [ $# -eq 0 ] || error "$@"; exit 1; }
+Usage() {
+    cat <&2; fail "$@"; }
+
+# fullpath(file)
+fp() {
+    case "$1" in
+        /*) _RET=${1};;
+        *) _RET="${2:-${PWD}}/${1}";;
+    esac
+    return 0
+}
+
+short_opts="h"
+long_opts="help,config:,prefix:,size:,template:,disk:,out:"
+getopt_out=$(getopt --name "${0##*/}" \
+    --options "${short_opts}" --long "${long_opts}" -- "$@") &&
+    eval set -- "${getopt_out}" ||
+    bad_Usage
+
+# Set some pathing
+my_dir="$(readlink -f $(dirname ${0}))"
+conf=""
+tmpl="${my_dir}/conf/ubuntu-ova-v1-vmdk.tmpl"
+pref="sample"
+size="10"
+
+while [ $# -ne 0 ]; do
+    cur=${1}; next=${2};
+    case "$cur" in
+        -h|--help) Usage; exit 0;;
+        --config) conf=${next}; shift;;
+        --template) tmpl=${next}; shift;;
+        --prefix) prefix=${next}; shift;;
+        --size) size=${next}; shift;;
+        --disk) disk=${next}; shift;;
+        --out) out_d=${next}; shift;;
+        --) shift; break;;
+    esac
+    shift;
+done
+
+[ ! -f "${tmpl}" ] && fail "did not find ${tmpl}"
+
+trap cleanup EXIT
+TEMP_D=$(mktemp -d)
+cd ${TEMP_D}
+
+debug "Working in ${TEMP_D}"
+name="${prefix}"
+vmdk_base_f="${name}-disk1.vmdk"
+vmdk_f="${TEMP_D}/${vmdk_base_f}"
+work_disk_f="${TEMP_D}/${name}.raw"
+final_ova="${out_d}/${name}.ova"
+
+# Converting the disk to the write format
+debug "converting ${work_disk_f} to"
+debug " ${vmdk_f}"
+debug " please be patient...this takes a bit"
+
+# Resize the disk
+debug "creating transitional copy"
+cp -au "${disk}" "${vmdk_f}.raw" ||
+    fail "failed to create transitional image"
+
+debug "resizing disk"
+qemu-img resize \
+    "${vmdk_f}.raw" "${size}G" ||
+    fail "failed to rewize transitional disk"
+
+debug "converting disk to vmdk...this will take a while"
+python2.7 ${my_dir}/VMDKstream.py \
+    "${vmdk_f}.raw" "${vmdk_f}" ||
+    fail "failed to convert disk to VMDK"
+
+# Get the information that we need out of it
+debug "getting file size information"
+vmdk_size=$(du -b "${vmdk_f}" | cut -f1) ||
+    fail "failed to get vmdk_size for ${vmdk_f}"
+vmdk_capacity=$(qemu-img info "${vmdk_f}" \
+    | awk '-F[( ]' '$1 ~ /virtual/ && $NF ~ /bytes)/ {print$(NF-1)}') ||
+    fail "failed to get vmdk capacity for ${vmdk_f}"
+
+debug "generating sha1 for ${vmdk_f}"
+vmdk_sha1=$(sha1sum ${vmdk_f} | cut -d' ' -f1) ||
+    fail "failed to get vmdk SHA1"
+
+ovf="${TEMP_D}/${prefix}.ovf"
+cp "${tmpl}" "${ovf}" ||
+    fail "failed placement of OVF to ${ovf}"
+
+sed -i "${ovf}" \
+    -e "s/@@NAME@@/$name/g" \
+    -e "s/@@VMDK_FILE_SIZE@@/$vmdk_size/g" \
+    -e "s/@@VMDK_CAPACITY@@/$vmdk_capacity/g" \
+    -e "s/@@NUM_CPUS@@/2/g" \
+    -e "s/@@MEM_SIZE@@/1024/g" ||
+    fail "failed to populate OVF values"
+
+debug "generating sha1 for ${TEMP_D}/${prefix}.ovf"
+ovf_sha1=$(sha1sum ${ovf} | cut -d' ' -f1) ||
+    fail "failed to get ${ovf} SHA1"
+
+manifest="${TEMP_D}/${prefix}.mf"
+cat > "${manifest}" <
branch nick: master
timestamp: Mon 2015-04-06 17:16:31 -0600
message:
  Due to changes in the AWS eco-system, 32-bit AMI's are not supported on new
  instance types and in new regions (i.e. eu-cerntral-1). AWS is actively
  encouraging users to use 64-bit instance types and to migrate their
  workloads to newer instance types.

  Starting with Ubuntu 15.04 and later, we will no longer be publishing
  32-bit AMI's to AWS.
diff:
=== modified file 'publish-build'
--- publish-build	2014-06-16 16:44:43 +0000
+++ publish-build	2015-04-06 23:16:31 +0000
@@ -151,9 +151,10 @@
     *) ebs_size=8; publish_ebs=1;;
 esac
 
-publish_hvm=0
-publish_uefi=0
-use_sriov_hvm=0
+publish_hvm=0    # publish HVM images
+publish_uefi=0   # use uefi images for HVM
+use_sriov_hvm=0  # Register with sriov simple
+publish_i386=1   # Publish i386 images
 case "${suite}" in
     hardy|intrepid|jaunty|karmic|maverick) publish_hvm=0;;
     lucid)
@@ -164,9 +165,14 @@
         publish_hvm=1;;
     saucy)
         publish_hvm=1; use_sriov_hvm=1;;
-    *) publish_hvm=1; publish_uefi=1; use_sriov_hvm=1;;
+    trusty|utopic)
+        publish_hvm=1; publish_uefi=1; use_sriov_hvm=1;;
+    vivid|*)
+        # For Vivid and later, we only publish 64-bit AMIs.
+        publish_i386=0; publish_hvm=1; publish_uefi=1; use_sriov_hvm=1;;
 esac
+[ "${publish_i386:-0}" -eq 0 ] && arches="${5:-amd64}"
 
 [ "${use_sriov_hvm:-0}" -eq 1 ] && sriov_arg="--sriov"
 
 # Just output some stuff
+ echo ''

+ echo '====== live-build [/tmp/live-build] ======'
====== live-build [/tmp/live-build] ======
+ cd /tmp/live-build
+ bzr info
Standalone tree (format: 1.9-rich-root)
Location:
  branch root: .

Related branches:
  parent branch: http://bazaar.launchpad.net/~ubuntu-on-ec2/live-build/cloud-images/
+ bzr version-info
revision-id: ben.howard@canonical.com-20141208180246-js9ir7isckza6dli
date: 2014-12-08 11:02:46 -0700
build-date: 2015-04-08 21:54:22 +0000
revno: 1881
branch-nick: live-build
+ bzr log -p -r-1
------------------------------------------------------------
revno: 1881
committer: Ben Howard
branch nick: live-build
timestamp: Mon 2014-12-08 11:02:46 -0700
message:
  Allow for 2TB resizes
diff:
=== modified file 'scripts/build/lb_binary_virtual-hdd-raw'
--- scripts/build/lb_binary_virtual-hdd-raw	2014-02-25 20:46:27 +0000
+++ scripts/build/lb_binary_virtual-hdd-raw	2014-12-08 18:02:46 +0000
@@ -81,7 +81,7 @@
     ext2|ext3|ext4)
         Echo_message "Creating virtual disk image..."
         Chroot chroot "dd if=/dev/zero of=binary-raw.img bs=1024k count=0 seek=${LB_HDD_SIZE}"
-        Chroot chroot "mkfs.${LB_BINARY_FILESYSTEM} -F /binary-raw.img -L ${LB_HDD_FILESYSTEM_LABEL} -U ${FS_UUID}"
+        Chroot chroot "mkfs.${LB_BINARY_FILESYSTEM} -F /binary-raw.img -L ${LB_HDD_FILESYSTEM_LABEL} -U ${FS_UUID} -E resize=536870912"
         ;;
 esac
 
@@ -97,7 +97,7 @@
     ext2|ext3|ext4)
         Echo_message "Creating virtual filesystem...(created outside chroot)"
         dd if=/dev/zero of=binary-raw.img bs=1024k count=0 seek=${LB_HDD_SIZE}
-        ${LB_ROOT_COMMAND} mkfs.${LB_BINARY_FILESYSTEM} -F binary-raw.img -L ${LB_HDD_FILESYSTEM_LABEL} -U ${FS_UUID}
+        ${LB_ROOT_COMMAND} mkfs.${LB_BINARY_FILESYSTEM} -F binary-raw.img -L ${LB_HDD_FILESYSTEM_LABEL} -U ${FS_UUID} -E resize=536870912
         ${LB_ROOT_COMMAND} mount -o loop binary-raw.img binary-raw.tmp
         rsync -aXHAS \
             --one-file-system \
+ echo ''

+ echo '====== vmbuilder-0.11 [/tmp/vmbuilder-0.11] ======'
====== vmbuilder-0.11 [/tmp/vmbuilder-0.11] ======
+ cd /tmp/vmbuilder-0.11
+ bzr info
Standalone tree (format: 2a)
Location:
  branch root: .

Related branches:
  parent branch: http://bazaar.launchpad.net/~ubuntu-on-ec2/vmbuilder/0.11a/
+ bzr version-info
revision-id: ben.howard@canonical.com-20120605221454-crv9cc4612f907lh
date: 2012-06-05 16:14:54 -0600
build-date: 2015-04-08 21:54:23 +0000
revno: 398
branch-nick: vmbuilder-0.11
+ bzr log -p -r-1
------------------------------------------------------------
revno: 398
committer: Ben Howard
branch nick: vmbuilder
timestamp: Tue 2012-06-05 16:14:54 -0600
message:
  Fix for allow vmbuilder to run on 12.04 LTS.
diff:
=== modified file 'VMBuilder/plugins/ubuntu/dapper.py'
--- VMBuilder/plugins/ubuntu/dapper.py	2011-05-18 20:49:25 +0000
+++ VMBuilder/plugins/ubuntu/dapper.py	2012-06-05 22:14:54 +0000
@@ -157,18 +157,15 @@
         self.vm.addpkg += ['openssh-server']
 
     def mount_dev_proc(self):
-        run_cmd('mount', '--bind', '/dev', '%s/dev' % self.destdir)
-        self.vm.add_clean_cmd('umount', '%s/dev' % self.destdir, ignore_fail=True)
-
-        run_cmd('mount', '--bind', '/dev/pts', '%s/dev/pts' % self.destdir)
+        run_cmd('mkdir', '-p', '%s/dev/pts' % self.destdir)
+        run_cmd('mount', '-t', 'devpts', 'devpts-live', '%s/dev/pts' % self.destdir)
         self.vm.add_clean_cmd('umount', '%s/dev/pts' % self.destdir, ignore_fail=True)
 
-        self.run_in_target('mount', '-t', 'proc', 'proc', '/proc')
+        run_cmd('mount', '-t', 'proc', 'proc-live', '%s/proc' % self.destdir)
         self.vm.add_clean_cmd('umount', '%s/proc' % self.destdir, ignore_fail=True)
 
     def unmount_dev_proc(self):
         run_cmd('umount', '%s/dev/pts' % self.destdir)
-        run_cmd('umount', '%s/dev' % self.destdir)
         run_cmd('sh', '-c', 'grep -q "$1" /proc/mounts || exit 0; umount "$1"', 'umount_binfmt', "%s/proc/sys/fs/binfmt_misc" % self.destdir)
         run_cmd('umount', '%s/proc' % self.destdir)
+ echo ''
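
The dapper.py change above stops bind-mounting the host's /dev into the build chroot and instead mounts private devpts and proc instances inside it; the commit message ties this to letting vmbuilder run on a 12.04 LTS host. A standalone sketch of that mount sequence follows, using a hypothetical chroot path (the real plugin drives these through vmbuilder's run_cmd and registers matching umount cleanup commands):

    #!/bin/bash
    # DESTDIR is a made-up chroot path for illustration; run as root.
    DESTDIR=/tmp/vmbuilder-chroot

    # Mount fresh devpts and proc instances inside the chroot instead of
    # bind-mounting the host's /dev.
    mkdir -p "${DESTDIR}/dev/pts"
    mount -t devpts devpts-live "${DESTDIR}/dev/pts"
    mount -t proc proc-live "${DESTDIR}/proc"

    # ... chroot work happens here ...

    umount "${DESTDIR}/dev/pts"
    umount "${DESTDIR}/proc"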