#!/bin/echo "This file is sourced, not run"

# Lots of reusable functions. This file is sourced, not run.
# NOTE(review): this file is in the Oil/OSH translated dialect (proc,
# setglobal, $[...], shExpr), not POSIX sh — it cannot run under bash.

# Output path to cross compiler.
# Prints "DIR:" (colon-terminated) on stdout so callers can prepend it to PATH.
# Honors MY_CROSS_PATH/MY_CC_PREFIX overrides; otherwise prefers the real
# cross compiler over the simple one.
proc cc_path {
  var i = ''

  # Explicit override: MY_CROSS_PATH requires MY_CC_PREFIX to be set too.
  if test ! -z $MY_CROSS_PATH {
    setglobal CC_PREFIX = $MY_CC_PREFIX
    test -z $CC_PREFIX && echo "MY_CROSS_PATH without MY_CC_PREFIX" > !2 && dienow
    echo -n "$MY_CROSS_PATH:"
    return
  }

  # Output cross it if exists, else simple. If neither exists, output simple.
  for i in []"$BUILD"/{,simple-}cross-compiler-"$1/bin" {
    test -e "$i/$1-cc" && break
  }
  echo -n "$i:"
}

# Set $ARCH and source the architecture's config file from $CONFIG_DIR.
proc base_architecture {
  setglobal ARCH = $1
  source "$CONFIG_DIR/$1"
}

# Load target configuration for architecture $1: sets ARCH/ARCH_NAME, WORK,
# STAGE_DIR, CROSS_HOST/CROSS_TARGET, and puts the cross compiler on $PATH.
proc load_target {
  # Get target platform from first command line argument.
  setglobal ARCH_NAME = $1
  setglobal CONFIG_DIR = ""$SOURCES/targets""

  # Read the relevant config file.
  if test -f "$CONFIG_DIR/$1" {
    # Simple target: the file itself is the base architecture config.
    base_architecture $ARCH_NAME
    setglobal CONFIG_DIR = ''
  } elif test -f "$CONFIG_DIR/$1/settings" {
    # Derivative target: its settings file must call base_architecture.
    source "$CONFIG_DIR/$1/settings"
    test -z $ARCH && dienow "No base_architecture"
  } else {
    echo "Supported architectures: "
    ls $CONFIG_DIR
    exit 1
  }

  # Which platform are we building for?
  export WORK="$(BUILD)/temp-$ARCH_NAME"

  # Say "unknown" in two different ways so it doesn't assume we're NOT
  # cross compiling when the host and target are the same processor. (If host
  # and target match, the binutils/gcc/make builds won't use the cross compiler
  # during root-filesystem.sh, and the host compiler links binaries against the
  # wrong libc.)
  export_if_blank CROSS_HOST=$[uname -m]-walrus-linux
  export_if_blank CROSS_TARGET=$(ARCH)-unknown-linux

  # Setup directories and add the cross compiler to the start of the path.
  setglobal STAGE_DIR = ""$BUILD/$(STAGE_NAME)-$(ARCH_NAME)""
  if test -z $KEEP_STAGEDIR {
    blank_tempdir $STAGE_DIR
  } else {
    mkdir -p $STAGE_DIR || dienow
  }
  env NO_CLEANUP=$(NO_CLEANUP/temp//) blank_tempdir $WORK

  export PATH="$[cc_path $ARCH]$PATH"
  test ! -z $HOST_ARCH && test $HOST_ARCH != $ARCH &&
    setglobal PATH = ""$[cc_path $HOST_ARCH]$PATH""
  export_if_blank CC_PREFIX="$(ARCH)-"
  setglobal DO_CROSS = ""CROSS_COMPILE=$CC_PREFIX""

  return 0
}

# Note that this sources the file, rather than calling it as a separate
# process. That way it can set environment variables if it wants to.

# Build one section $1: either setupfor + sections/$1.build + cleanup, or
# announce + sections/$1.sh. Temporarily forces CPUS=1 and NO_CLEANUP=1 for
# packages listed in DEBUG_PACKAGE.
proc build_section {
  # Don't build anything statically in host-tools, glibc is broken.
  # See http://people.redhat.com/drepper/no_static_linking.html for
  # insane rant from the glibc maintainer about why he doesn't care.
  # FIX(review): original had mismatched quotes '"--static'" — literal intended.
  is_in_list $1 $BUILD_STATIC && test ! -z $ARCH && setglobal STATIC_FLAGS = '--static'

  setglobal OLDCPUS = $CPUS
  setglobal OLDNOCLEAN = $NO_CLEANUP
  is_in_list $1 $DEBUG_PACKAGE && setglobal CPUS = '1' && setglobal NO_CLEANUP = '1'

  if test -e "$SOURCES/sections/$1".build {
    setupfor $1
    source "$SOURCES/sections/$1".build
    cleanup
  } else {
    announce $1
    source "$SOURCES"/sections/"$1".sh
  }
  setglobal CPUS = $OLDCPUS
  setglobal NO_CLEANUP = $OLDNOCLEAN
}

# Find appropriate miniconfig file
# Looks for $CONFIG_DIR/{$ARCH_NAME,$ARCH}/miniconfig-$1 first; falls back to
# baseconfig-$1 plus the $1_CONFIG variable (package name uppercased).
proc getconfig {
  for i in []{$ARCH_NAME,$ARCH}/miniconfig-$1 {
    test -f "$CONFIG_DIR/$i" && cat "$CONFIG_DIR/$i" && return
  }

  # Output baseconfig, then append $1_CONFIG (converting $1 to uppercase)
  cat "$SOURCES/baseconfig-$1"
  eval "echo \"\${$[echo $1 | tr a-z A-Z]_CONFIG}\""
}

# Find all files in $STAGE_DIR newer than $CURSRC.
# Emits NUL-terminated paths (files only — directories that merely contain a
# newer file are filtered out by the prefix-length comparison).
proc recent_binary_files {
  setglobal PREVIOUS = ''
  shell {
    cd $STAGE_DIR || dienow
    # find -depth lists children before parents; sed strips the leading "./"
    # and drops empty lines.
    find . -depth -newer "$CURSRC/BUILD-TIMESTAMP" \
      | sed -e 's/^.//' -e 's/^.//' -e '/^$/d'
  } | while read i {
    setglobal TEMP = $(PREVIOUS##"$i"/)

    # Skip $i when it is just the parent directory of the previous entry.
    # FIX(review): original arithmetic was truncated — '${#TEMP' lacked the
    # closing brace and '${#i}+' lacked the 1 operand.
    if test $shExpr('${#PREVIOUS}-${#TEMP}') -ne $shExpr('${#i}+1') {
      # Because the expanded $i might have \ chars in it, that's why.
      echo -n $i
      echo -ne '\0'
    }
    setglobal PREVIOUS = $i
  }
}

# Delete a working copy of source code once the build's done.
# Optionally tars up freshly-installed files first (BINARY_PACKAGE_TARBALLS).
proc cleanup {
  # If package build exited with an error, do not continue.
  test $Status -ne 0 && dienow

  if test ! -z $BINARY_PACKAGE_TARBALLS {
    setglobal TARNAME = ""$PACKAGE-$STAGE_NAME-$(ARCH_NAME)".tar.gz"
    test ! -z $[recent_binary_files] && echo -n Creating $TARNAME &&
      do {
        recent_binary_files | xargs -0 tar -czvf \
          "$BUILD/$(TARNAME)" -C $STAGE_DIR || dienow
      } | dotprogress
  }

  if test ! -z $NO_CLEANUP {
    echo "skip cleanup $PACKAGE $ifsjoin(Argv)"
    return
  }

  # Loop deleting directories
  cd $WORK || dienow
  for i in [$WORKDIR_LIST] {
    echo "cleanup $i"
    rm -rf $i || dienow
  }
  setglobal WORKDIR_LIST = ''
}

# Create a working directory under TMPDIR, deleting existing contents (if any),
# and tracking created directories so cleanup can delete them automatically.
proc blank_workdir {
  setglobal WORKDIR_LIST = ""$1 $WORKDIR_LIST""
  env NO_CLEANUP= blank_tempdir "$WORK/$1"
  cd "$WORK/$1" || dienow
}

# Extract package $1
# Extracts/updates the package cache, snapshots it into a fresh work dir
# (hardlink, then symlink, then plain copy), and cds into it as $CURSRC.
proc setupfor {
  export WRAPPY_LOGPATH="$BUILD/logs/cmdlines.$(ARCH_NAME).$(STAGE_NAME).setupfor"

  # Make sure the source is already extracted and up-to-date.
  extract_package $1 || exit 1
  setglobal SNAPFROM = $[package_cache $1]

  # Delete old working copy (even in the NO_CLEANUP case) then make a new
  # tree of links to the package cache.
  echo "Snapshot '$PACKAGE'..."

  # Try hardlink, then symlink, then normal (noclobber) copy
  for LINKTYPE in [l s n] {
    if test -z $REUSE_CURSRC {
      blank_workdir $PACKAGE
      setglobal CURSRC = $[pwd]
    }
    cp -$(LINKTYPE)fR "$SNAPFROM/"* $CURSRC && break
  }

  cd $CURSRC || dienow
  export WRAPPY_LOGPATH="$BUILD/logs/cmdlines.$(ARCH_NAME).$(STAGE_NAME).$1"

  # Ugly bug workaround: timestamp granularity in a lot of filesystems is only
  # 1 second, so find -newer misses things installed in the same second, so we
  # make sure it's a new second before we start actually doing anything.
  if test ! -z $BINARY_PACKAGE_TARBALLS {
    touch "$CURSRC/BUILD-TIMESTAMP" || dienow
    setglobal TIME = $[date +%s]
    while true {
      test $TIME != $[date +%s] && break
      sleep .1
    }
  }
}

# Given a filename.tar.ext, return the version number.
# Strip package name and .tar.* extension from $1, printing the version
# (optionally prefixed by $2, e.g. "linux-2.6.39.tar.bz2" -> "2.6.39").
proc getversion {
  echo $1 | sed -e 's/.*-\(\([0-9\.]\)*\([_-]rc\)*\(-pre\)*\([0-9][a-zA-Z]\)*\)*\(\.tar\..z2*\)$/'"$2"'\1/'
}

# Figure out what version of a package we last built
# Greps the URL= lines of download.sh for package $1 and extracts its version.
proc get_download_version {
  getversion $[sed -n 's@URL=.*/\(.[^ ]*\).*@\1@p' "$TOP/download.sh" | grep $(1)-]
}

# Identify subversion or mercurial revision, or release number
# Prints "git/hg/svn ID" for a source checkout in $SRCDIR/$1, otherwise the
# release version taken from download.sh.
proc identify_release {
  setglobal DIR = ""$SRCDIR/$1""
  if test -d $DIR {
    shell {
      cd $DIR || dienow
      setglobal ID = $[git log -1 --format=%H !2 >/dev/null]
      test ! -z $ID && echo git $ID && return
      setglobal ID = $[hg identify -n !2 >/dev/null]
      test ! -z $ID && echo hg $ID && return
      # FIX(review): the sed script was missing its closing double quote.
      setglobal ID = $[svn info !2 >/dev/null | sed -n "s/^Revision: //p"]
      test ! -z $ID && echo svn $ID && return
    }
  }

  echo release version $[get_download_version $1]
}

# Create a README identifying package versions in current build.
# NOTE(review): the heredoc's line layout was destroyed when this file was
# collapsed onto one line; the layout below is reconstructed — verify against
# upstream sources/functions.sh.
proc do_manifest {
  # Grab build script version number
  test -z $SCRIPT_VERS &&
    setglobal SCRIPT_VERS = ""mercurial rev $[cd $TOP; hg identify -n !2 >/dev/null]""

  cat << """
Built on $[date +%F] from:

  Build script:
    Aboriginal Linux (http://landley.net/aboriginal) $SCRIPT_VERS

  Base packages:
    uClibc (http://uclibc.org) $[identify_release uClibc]
    BusyBox (http://busybox.net) $[identify_release busybox]
    Linux (http://kernel.org/pub/linux/kernel) $[identify_release linux]
    toybox (http://landley.net/toybox) $[identify_release toybox]

  Toolchain packages:
    Binutils (http://www.gnu.org/software/binutils/) $[identify_release binutils]
    GCC (http://gcc.gnu.org) $[identify_release gcc-core]
    gmake (http://www.gnu.org/software/make) $[identify_release make]
    bash (ftp://ftp.gnu.org/gnu/bash) $[identify_release bash]

  Optional packages:
    distcc (http://distcc.samba.org) $[identify_release distcc]
    uClibc++ (http://cxx.uclibc.org) $[identify_release uClibc++]
"""
}

# When building with a base architecture, symlink to the base arch name.
# Replace $BUILD/$2 with a symlink to $1. No-op when ARCH == ARCH_NAME
# (i.e. not building a derivative target).
proc link_arch_name {
  test $ARCH == $ARCH_NAME && return 0

  rm -rf "$BUILD/$2" && ln -s $1 "$BUILD/$2" || dienow
}

# Check if this target has a base architecture that's already been built.
# If so, link to it and exit now.
# Returns 1 (so the caller can skip the build) when the base arch stage
# directory already exists.
proc check_for_base_arch {
  # If we're building something with a base architecture, symlink to actual
  # target.
  if test $ARCH != $ARCH_NAME {
    link_arch_name $STAGE_NAME-{"$ARCH","$ARCH_NAME"}
    test -e $STAGE_NAME-"$ARCH".tar.gz &&
      link_arch_name $STAGE_NAME-{"$ARCH","$ARCH_NAME"}.tar.gz

    if test -e "$BUILD/$STAGE_NAME-$ARCH" {
      announce "Using existing $(STAGE_NAME)-$ARCH"
      return 1
    } else {
      mkdir -p "$BUILD/$STAGE_NAME-$ARCH" || dienow
    }
  }
}

# Tar up the stage directory (unless NO_STAGE_TARBALLS is set) and maintain
# the derivative-target symlinks for both the directory and the tarball.
proc create_stage_tarball {
  # Remove the temporary directory, if empty
  rmdir $WORK !2 >/dev/null

  # Handle linking to base architecture if we just built a derivative target.
  cd $BUILD || dienow
  link_arch_name $STAGE_NAME-{$ARCH,$ARCH_NAME}

  if test -z $NO_STAGE_TARBALLS {
    echo -n creating "$STAGE_NAME-$(ARCH)".tar.gz
    do {
      tar czvf "$STAGE_NAME-$(ARCH)".tar.gz "$STAGE_NAME-$(ARCH)" || dienow
    } | dotprogress

    link_arch_name $STAGE_NAME-{$ARCH,$ARCH_NAME}.tar.gz
  }
}

# Create colon-separated path for $HOSTTOOLS and all fallback directories
# (Fallback directories are to support ccache and distcc on the host.)
proc hosttools_path {
  var X = ''

  echo -n $HOSTTOOLS
  set X = '1'
  while [ -e "$HOSTTOOLS/fallback-$X" ] {
    echo -n ":$HOSTTOOLS/fallback-$X"
    # FIX(review): increment was truncated to '$X+' — must add 1 each pass
    # or this loop never terminates.
    set X = $shExpr('$X+1')
  }
}

# Archive directory $1 to file $2 (plus extension), type SYSIMAGE_TYPE
# Supported types: cpio/rootfs (gzipped initramfs), ext2/ext3, squashfs.
proc image_filesystem {
  echo "make $SYSIMAGE_TYPE $2"

  # Embed an initramfs cpio
  if test $SYSIMAGE_TYPE == "cpio" || test $SYSIMAGE_TYPE == "rootfs" {
    # Borrow gen_init_cpio.c out of package cache copy of Linux source
    extract_package linux &&
      $CC "$[package_cache $PACKAGE]/usr/gen_init_cpio.c" \
        -o "$WORK"/my_gen_init_cpio || dienow

    # Feed gen_init_cpio a file list; make sure /init, /dev, and the console
    # node exist even if the root directory doesn't provide them.
    # FIX(review): the last echo was missing its closing double quote.
    "$WORK"/my_gen_init_cpio $[
      "$SOURCES"/toys/gen_initramfs_list.sh $1 || dienow
      test ! -e "$1"/init && echo "slink /init /sbin/init.sh 755 0 0"
      test ! -d "$1"/dev && echo "dir /dev 755 0 0"
      echo "nod /dev/console 660 0 0 c 5 1"
    ] | gzip -9 > "$2.cpio.gz" || dienow
    echo Initramfs generated.
  } elif test $SYSIMAGE_TYPE == "ext2" || test $SYSIMAGE_TYPE == "ext3" {
    # Generate an ext2 filesystem image from the $1 directory, with a
    # temporary file defining the /dev nodes for the new filesystem.
    test -z $SYSIMAGE_HDA_MEGS && setglobal SYSIMAGE_HDA_MEGS = '64'

    # Produce a filesystem with the currently used space plus 20% for filesystem
    # overhead, which should always be big enough.
    # FIX(review): the arithmetic expression was missing its final ')'.
    setglobal BLOCKS = $shExpr('1024*(($(du -m -s "$1" | awk '{print $1}')*12)/10)')
    test $BLOCKS -lt 4096 && setglobal BLOCKS = '4096'

    setglobal FILE = ""$2.$SYSIMAGE_TYPE""

    echo "/dev d 755 0 0 - - - - -" > "$WORK/devs" &&
      echo "/dev/console c 640 0 0 5 1 0 0 -" >> "$WORK/devs" &&
      genext2fs -z -D "$WORK/devs" -d $1 -b $BLOCKS -i 1024 $FILE &&
      rm "$WORK/devs" || dienow

    # Extend image size to HDA_MEGS if necessary, keeping it sparse. (Feeding
    # a larger -b size to genext2fs is insanely slow, and not particularly
    # sparse.)
    if test ! -z $SYSIMAGE_HDA_MEGS &&
       test $shExpr('`stat -c %s "$FILE"` / (1024*1024) ') -lt $SYSIMAGE_HDA_MEGS {
      echo resizing image to $SYSIMAGE_HDA_MEGS
      resize2fs $FILE $(SYSIMAGE_HDA_MEGS)M || dienow
    }

    # FIX(review): was $SYS_IMAGE_TYPE (typo), so ext3 never got a journal (-j).
    tune2fs -c 0 -i 0 $[test $SYSIMAGE_TYPE = "ext3" && echo -j] $FILE || dienow
    echo $SYSIMAGE_TYPE generated
  } elif test $SYSIMAGE_TYPE == "squashfs" {
    mksquashfs $1 "$2.sqf" -noappend -all-root $(FORK:+-no-progress) || dienow
  } else {
    echo "Unknown image type $SYSIMAGE_TYPE" > !2
    dienow
  }
}