aboutsummaryrefslogtreecommitdiffstats
path: root/tests/files/ansible
diff options
context:
space:
mode:
authorobscuren <geffobscura@gmail.com>2015-03-02 23:43:12 +0800
committerobscuren <geffobscura@gmail.com>2015-03-02 23:43:12 +0800
commit0823254c3bc2560f8e0dd6f19d05f471f8af94f4 (patch)
tree56188e6c00336c34971839a90ece54a576483606 /tests/files/ansible
parent4f3f881c9be0b590e6e28fda295257e8d15d5f02 (diff)
parentce7204fa17560f6a76c7592bf5f23d29f22042d6 (diff)
downloaddexon-0823254c3bc2560f8e0dd6f19d05f471f8af94f4.tar
dexon-0823254c3bc2560f8e0dd6f19d05f471f8af94f4.tar.gz
dexon-0823254c3bc2560f8e0dd6f19d05f471f8af94f4.tar.bz2
dexon-0823254c3bc2560f8e0dd6f19d05f471f8af94f4.tar.lz
dexon-0823254c3bc2560f8e0dd6f19d05f471f8af94f4.tar.xz
dexon-0823254c3bc2560f8e0dd6f19d05f471f8af94f4.tar.zst
dexon-0823254c3bc2560f8e0dd6f19d05f471f8af94f4.zip
Add 'tests/files/' from commit 'ce7204fa17560f6a76c7592bf5f23d29f22042d6'
git-subtree-dir: tests/files git-subtree-mainline: 4f3f881c9be0b590e6e28fda295257e8d15d5f02 git-subtree-split: ce7204fa17560f6a76c7592bf5f23d29f22042d6
Diffstat (limited to 'tests/files/ansible')
-rw-r--r--tests/files/ansible/README.md5
-rw-r--r--tests/files/ansible/Vagrantfile78
-rw-r--r--tests/files/ansible/ec2-setup.yml9
-rw-r--r--tests/files/ansible/ec2-terminate.yml10
-rw-r--r--tests/files/ansible/ec2.ini95
-rwxr-xr-xtests/files/ansible/ec2.py727
-rw-r--r--tests/files/ansible/host-config.yml10
-rw-r--r--tests/files/ansible/roles/common/handlers/main.yml4
-rw-r--r--tests/files/ansible/roles/common/tasks/main.yml13
-rw-r--r--tests/files/ansible/roles/docker/handlers/main.yml4
-rw-r--r--tests/files/ansible/roles/docker/tasks/main.yml40
-rw-r--r--tests/files/ansible/roles/ec2/tasks/setup.yml33
-rw-r--r--tests/files/ansible/roles/ec2/tasks/terminate.yml8
-rw-r--r--tests/files/ansible/roles/ec2/vars/main.yml21
-rw-r--r--tests/files/ansible/roles/testrunner/tasks/main.yml34
-rw-r--r--tests/files/ansible/site.yml3
-rwxr-xr-xtests/files/ansible/test-files/create-docker-images.sh7
-rw-r--r--tests/files/ansible/test-files/docker-cpp/Dockerfile32
-rw-r--r--tests/files/ansible/test-files/docker-cppjit/Dockerfile46
-rw-r--r--tests/files/ansible/test-files/docker-go/Dockerfile47
-rw-r--r--tests/files/ansible/test-files/docker-python/Dockerfile23
-rwxr-xr-xtests/files/ansible/test-files/testrunner.sh56
-rw-r--r--tests/files/ansible/testrunner-config.yml12
23 files changed, 1317 insertions, 0 deletions
diff --git a/tests/files/ansible/README.md b/tests/files/ansible/README.md
new file mode 100644
index 000000000..cef407986
--- /dev/null
+++ b/tests/files/ansible/README.md
@@ -0,0 +1,5 @@
+# Automatic deployment of the random test generator
+
+Testing is done in a Vagrant virtual machine
+
+Install vagrant, virtualbox, ansible, then do `vagrant up`. It should provision a basic machine. `vagrant ssh` to verify the machine is working as expected. `vagrant terminate` to reset machine to clean state.
diff --git a/tests/files/ansible/Vagrantfile b/tests/files/ansible/Vagrantfile
new file mode 100644
index 000000000..ce6ac3dde
--- /dev/null
+++ b/tests/files/ansible/Vagrantfile
@@ -0,0 +1,78 @@
+# -*- mode: ruby -*-
+# vi: set ft=ruby :
+
+# Vagrantfile API/syntax version. Don't touch unless you know what you're doing!
+VAGRANTFILE_API_VERSION ||= "2"
+
+Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
+ # All Vagrant configuration is done here. The most common configuration
+ # options are documented and commented below. For a complete reference,
+ # please see the online documentation at vagrantup.com.
+
+ # Every Vagrant virtual environment requires a box to build off of.
+ config.vm.box = "ubuntu/trusty64"
+ config.vm.define "random-test"
+
+ # Disable automatic box update checking. If you disable this, then
+ # boxes will only be checked for updates when the user runs
+ # `vagrant box outdated`. This is not recommended.
+ # config.vm.box_check_update = false
+
+ # Create a forwarded port mapping which allows access to a specific port
+ # within the machine from a port on the host machine. In the example below,
+ # accessing "localhost:8080" will access port 80 on the guest machine.
+ # config.vm.network "forwarded_port", guest: 80, host: 8080
+
+ # Create a private network, which allows host-only access to the machine
+ # using a specific IP.
+ # config.vm.network "private_network", ip: "192.168.33.10"
+
+ # Create a public network, which generally matches a bridged network.
+ # Bridged networks make the machine appear as another physical device on
+ # your network.
+ # config.vm.network "public_network"
+
+ # If true, then any SSH connections made will enable agent forwarding.
+ # Default value: false
+ # config.ssh.forward_agent = true
+
+ # Share an additional folder to the guest VM. The first argument is
+ # the path on the host to the actual folder. The second argument is
+ # the path on the guest to mount the folder. And the optional third
+ # argument is a set of non-required options.
+ # config.vm.synced_folder "../data", "/vagrant_data"
+
+ # Provider-specific configuration so you can fine-tune various
+ # backing providers for Vagrant. These expose provider-specific options.
+ # Example for VirtualBox:
+ #
+ # config.vm.provider "virtualbox" do |vb|
+ # # Don't boot with headless mode
+ # vb.gui = true
+ #
+ # # Use VBoxManage to customize the VM. For example to change memory:
+ # vb.customize ["modifyvm", :id, "--memory", "1024"]
+ # end
+
+
+ config.vm.provider "virtualbox" do |vb|
+ # Ubuntu / Virtualbox workaround.
+ # see http://askubuntu.com/questions/238040/how-do-i-fix-name-service-for-vagrant-client
+ vb.customize ["modifyvm", :id, "--natdnshostresolver1", "on"]
+
+ # cpp client needs a lot of RAM to build
+ vb.customize ["modifyvm", :id, "--memory", "2048"]
+ end
+
+ #
+ # View the documentation for the provider you're using for more
+ # information on available options.
+
+
+ # Ansible
+ config.vm.provision "ansible" do |ansible|
+ ansible.playbook = "site.yml"
+ end
+
+end
+
diff --git a/tests/files/ansible/ec2-setup.yml b/tests/files/ansible/ec2-setup.yml
new file mode 100644
index 000000000..787d8670e
--- /dev/null
+++ b/tests/files/ansible/ec2-setup.yml
@@ -0,0 +1,9 @@
+---
+- name: Provision EC2 instances
+ hosts: local
+ gather_facts: false
+ roles:
+ - ec2
+
+ tasks:
+ - include: roles/ec2/tasks/setup.yml
diff --git a/tests/files/ansible/ec2-terminate.yml b/tests/files/ansible/ec2-terminate.yml
new file mode 100644
index 000000000..8a8edc536
--- /dev/null
+++ b/tests/files/ansible/ec2-terminate.yml
@@ -0,0 +1,10 @@
+---
+- name: Terminate all ec2 instances
+ hosts: security_group_client-tests
+ remote_user: ubuntu # private key defined via ansible.cfg
+ gather_facts: false
+ roles:
+ - ec2
+
+ tasks:
+ - include: roles/ec2/tasks/terminate.yml
diff --git a/tests/files/ansible/ec2.ini b/tests/files/ansible/ec2.ini
new file mode 100644
index 000000000..064920410
--- /dev/null
+++ b/tests/files/ansible/ec2.ini
@@ -0,0 +1,95 @@
+# Ansible EC2 external inventory script settings
+#
+
+[ec2]
+
+# to talk to a private eucalyptus instance uncomment these lines
+# and edit eucalyptus_host to be the host name of your cloud controller
+#eucalyptus = True
+#eucalyptus_host = clc.cloud.domain.org
+
+# AWS regions to make calls to. Set this to 'all' to make request to all regions
+# in AWS and merge the results together. Alternatively, set this to a comma
+# separated list of regions. E.g. 'us-east-1,us-west-1,us-west-2'
+regions = us-east-1
+regions_exclude = us-gov-west-1,cn-north-1
+
+# When generating inventory, Ansible needs to know how to address a server.
+# Each EC2 instance has a lot of variables associated with it. Here is the list:
+# http://docs.pythonboto.org/en/latest/ref/ec2.html#module-boto.ec2.instance
+# Below are 2 variables that are used as the address of a server:
+# - destination_variable
+# - vpc_destination_variable
+
+# This is the normal destination variable to use. If you are running Ansible
+# from outside EC2, then 'public_dns_name' makes the most sense. If you are
+# running Ansible from within EC2, then perhaps you want to use the internal
+# address, and should set this to 'private_dns_name'.
+destination_variable = public_dns_name
+
+# For server inside a VPC, using DNS names may not make sense. When an instance
+# has 'subnet_id' set, this variable is used. If the subnet is public, setting
+# this to 'ip_address' will return the public IP address. For instances in a
+# private subnet, this should be set to 'private_ip_address', and Ansible must
+# be run from within EC2.
+vpc_destination_variable = ip_address
+
+# To tag instances on EC2 with the resource records that point to them from
+# Route53, uncomment and set 'route53' to True.
+route53 = False
+
+# To exclude RDS instances from the inventory, uncomment and set to False.
+#rds = False
+
+# Additionally, you can specify the list of zones to exclude looking up in
+# 'route53_excluded_zones' as a comma-separated list.
+# route53_excluded_zones = samplezone1.com, samplezone2.com
+
+# By default, only EC2 instances in the 'running' state are returned. Set
+# 'all_instances' to True to return all instances regardless of state.
+all_instances = False
+
+# By default, only RDS instances in the 'available' state are returned. Set
+# 'all_rds_instances' to True to return all RDS instances regardless of state.
+all_rds_instances = False
+
+# API calls to EC2 are slow. For this reason, we cache the results of an API
+# call. Set this to the path you want cache files to be written to. Two files
+# will be written to this directory:
+# - ansible-ec2.cache
+# - ansible-ec2.index
+cache_path = ~/.ansible/tmp
+
+# The number of seconds a cache file is considered valid. After this many
+# seconds, a new API call will be made, and the cache file will be updated.
+# To disable the cache, set this value to 0
+cache_max_age = 300
+
+# Organize groups into a nested/hierarchy instead of a flat namespace.
+nested_groups = False
+
+# If you only want to include hosts that match a certain regular expression
+# pattern_include = stage-*
+
+# If you want to exclude any hosts that match a certain regular expression
+# pattern_exclude = stage-*
+
+# Instance filters can be used to control which instances are retrieved for
+# inventory. For the full list of possible filters, please read the EC2 API
+# docs: http://docs.aws.amazon.com/AWSEC2/latest/APIReference/ApiReference-query-DescribeInstances.html#query-DescribeInstances-filters
+# Filters are key/value pairs separated by '=', to list multiple filters use
+# a list separated by commas. See examples below.
+
+# Retrieve only instances with (key=value) env=stage tag
+# instance_filters = tag:env=stage
+
+# Retrieve only instances with role=webservers OR role=dbservers tag
+# instance_filters = tag:role=webservers,tag:role=dbservers
+
+# Retrieve only t1.micro instances OR instances with tag env=stage
+# instance_filters = instance-type=t1.micro,tag:env=stage
+
+# You can use wildcards in filter values also. Below will list instances which
+# tag Name value matches webservers1*
+# (ex. webservers15, webservers1a, webservers123 etc)
+# instance_filters = tag:Name=webservers1*
diff --git a/tests/files/ansible/ec2.py b/tests/files/ansible/ec2.py
new file mode 100755
index 000000000..f1b3524e1
--- /dev/null
+++ b/tests/files/ansible/ec2.py
@@ -0,0 +1,727 @@
+#!/usr/bin/env python
+
+'''
+EC2 external inventory script
+=================================
+
+Generates inventory that Ansible can understand by making API request to
+AWS EC2 using the Boto library.
+
+NOTE: This script assumes Ansible is being executed where the environment
+variables needed for Boto have already been set:
+ export AWS_ACCESS_KEY_ID='AK123'
+ export AWS_SECRET_ACCESS_KEY='abc123'
+
+This script also assumes there is an ec2.ini file alongside it. To specify a
+different path to ec2.ini, define the EC2_INI_PATH environment variable:
+
+ export EC2_INI_PATH=/path/to/my_ec2.ini
+
+If you're using eucalyptus you need to set the above variables and
+you need to define:
+
+ export EC2_URL=http://hostname_of_your_cc:port/services/Eucalyptus
+
+For more details, see: http://docs.pythonboto.org/en/latest/boto_config_tut.html
+
+When run against a specific host, this script returns the following variables:
+ - ec2_ami_launch_index
+ - ec2_architecture
+ - ec2_association
+ - ec2_attachTime
+ - ec2_attachment
+ - ec2_attachmentId
+ - ec2_client_token
+ - ec2_deleteOnTermination
+ - ec2_description
+ - ec2_deviceIndex
+ - ec2_dns_name
+ - ec2_eventsSet
+ - ec2_group_name
+ - ec2_hypervisor
+ - ec2_id
+ - ec2_image_id
+ - ec2_instanceState
+ - ec2_instance_type
+ - ec2_ipOwnerId
+ - ec2_ip_address
+ - ec2_item
+ - ec2_kernel
+ - ec2_key_name
+ - ec2_launch_time
+ - ec2_monitored
+ - ec2_monitoring
+ - ec2_networkInterfaceId
+ - ec2_ownerId
+ - ec2_persistent
+ - ec2_placement
+ - ec2_platform
+ - ec2_previous_state
+ - ec2_private_dns_name
+ - ec2_private_ip_address
+ - ec2_publicIp
+ - ec2_public_dns_name
+ - ec2_ramdisk
+ - ec2_reason
+ - ec2_region
+ - ec2_requester_id
+ - ec2_root_device_name
+ - ec2_root_device_type
+ - ec2_security_group_ids
+ - ec2_security_group_names
+ - ec2_shutdown_state
+ - ec2_sourceDestCheck
+ - ec2_spot_instance_request_id
+ - ec2_state
+ - ec2_state_code
+ - ec2_state_reason
+ - ec2_status
+ - ec2_subnet_id
+ - ec2_tenancy
+ - ec2_virtualization_type
+ - ec2_vpc_id
+
+These variables are pulled out of a boto.ec2.instance object. There is a lack of
+consistency with variable spellings (camelCase and underscores) since this
+just loops through all variables the object exposes. It is preferred to use the
+ones with underscores when multiple exist.
+
+In addition, if an instance has AWS Tags associated with it, each tag is a new
+variable named:
+ - ec2_tag_[Key] = [Value]
+
+Security groups are comma-separated in 'ec2_security_group_ids' and
+'ec2_security_group_names'.
+'''
+
+# (c) 2012, Peter Sankauskas
+#
+# This file is part of Ansible,
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+######################################################################
+
+import sys
+import os
+import argparse
+import re
+from time import time
+import boto
+from boto import ec2
+from boto import rds
+from boto import route53
+import ConfigParser
+from collections import defaultdict
+
+try:
+ import json
+except ImportError:
+ import simplejson as json
+
+
+class Ec2Inventory(object):
+ def _empty_inventory(self):
+ return {"_meta" : {"hostvars" : {}}}
+
+ def __init__(self):
+ ''' Main execution path '''
+
+ # Inventory grouped by instance IDs, tags, security groups, regions,
+ # and availability zones
+ self.inventory = self._empty_inventory()
+
+ # Index of hostname (address) to instance ID
+ self.index = {}
+
+ # Read settings and parse CLI arguments
+ self.read_settings()
+ self.parse_cli_args()
+
+ # Cache
+ if self.args.refresh_cache:
+ self.do_api_calls_update_cache()
+ elif not self.is_cache_valid():
+ self.do_api_calls_update_cache()
+
+ # Data to print
+ if self.args.host:
+ data_to_print = self.get_host_info()
+
+ elif self.args.list:
+ # Display list of instances for inventory
+ if self.inventory == self._empty_inventory():
+ data_to_print = self.get_inventory_from_cache()
+ else:
+ data_to_print = self.json_format_dict(self.inventory, True)
+
+ print data_to_print
+
+
+ def is_cache_valid(self):
+ ''' Determines if the cache files have expired, or if it is still valid '''
+
+ if os.path.isfile(self.cache_path_cache):
+ mod_time = os.path.getmtime(self.cache_path_cache)
+ current_time = time()
+ if (mod_time + self.cache_max_age) > current_time:
+ if os.path.isfile(self.cache_path_index):
+ return True
+
+ return False
+
+
+ def read_settings(self):
+ ''' Reads the settings from the ec2.ini file '''
+
+ config = ConfigParser.SafeConfigParser()
+ ec2_default_ini_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'ec2.ini')
+ ec2_ini_path = os.environ.get('EC2_INI_PATH', ec2_default_ini_path)
+ config.read(ec2_ini_path)
+
+ # is eucalyptus?
+ self.eucalyptus_host = None
+ self.eucalyptus = False
+ if config.has_option('ec2', 'eucalyptus'):
+ self.eucalyptus = config.getboolean('ec2', 'eucalyptus')
+ if self.eucalyptus and config.has_option('ec2', 'eucalyptus_host'):
+ self.eucalyptus_host = config.get('ec2', 'eucalyptus_host')
+
+ # Regions
+ self.regions = []
+ configRegions = config.get('ec2', 'regions')
+ configRegions_exclude = config.get('ec2', 'regions_exclude')
+ if (configRegions == 'all'):
+ if self.eucalyptus_host:
+ self.regions.append(boto.connect_euca(host=self.eucalyptus_host).region.name)
+ else:
+ for regionInfo in ec2.regions():
+ if regionInfo.name not in configRegions_exclude:
+ self.regions.append(regionInfo.name)
+ else:
+ self.regions = configRegions.split(",")
+
+ # Destination addresses
+ self.destination_variable = config.get('ec2', 'destination_variable')
+ self.vpc_destination_variable = config.get('ec2', 'vpc_destination_variable')
+
+ # Route53
+ self.route53_enabled = config.getboolean('ec2', 'route53')
+ self.route53_excluded_zones = []
+ if config.has_option('ec2', 'route53_excluded_zones'):
+ self.route53_excluded_zones.extend(
+ config.get('ec2', 'route53_excluded_zones', '').split(','))
+
+ # Include RDS instances?
+ self.rds_enabled = True
+ if config.has_option('ec2', 'rds'):
+ self.rds_enabled = config.getboolean('ec2', 'rds')
+
+ # Return all EC2 and RDS instances (if RDS is enabled)
+ if config.has_option('ec2', 'all_instances'):
+ self.all_instances = config.getboolean('ec2', 'all_instances')
+ else:
+ self.all_instances = False
+ if config.has_option('ec2', 'all_rds_instances') and self.rds_enabled:
+ self.all_rds_instances = config.getboolean('ec2', 'all_rds_instances')
+ else:
+ self.all_rds_instances = False
+
+ # Cache related
+ cache_dir = os.path.expanduser(config.get('ec2', 'cache_path'))
+ if not os.path.exists(cache_dir):
+ os.makedirs(cache_dir)
+
+ self.cache_path_cache = cache_dir + "/ansible-ec2.cache"
+ self.cache_path_index = cache_dir + "/ansible-ec2.index"
+ self.cache_max_age = config.getint('ec2', 'cache_max_age')
+
+ # Configure nested groups instead of flat namespace.
+ if config.has_option('ec2', 'nested_groups'):
+ self.nested_groups = config.getboolean('ec2', 'nested_groups')
+ else:
+ self.nested_groups = False
+
+ # Do we need to just include hosts that match a pattern?
+ try:
+ pattern_include = config.get('ec2', 'pattern_include')
+ if pattern_include and len(pattern_include) > 0:
+ self.pattern_include = re.compile(pattern_include)
+ else:
+ self.pattern_include = None
+ except ConfigParser.NoOptionError, e:
+ self.pattern_include = None
+
+ # Do we need to exclude hosts that match a pattern?
+ try:
+ pattern_exclude = config.get('ec2', 'pattern_exclude');
+ if pattern_exclude and len(pattern_exclude) > 0:
+ self.pattern_exclude = re.compile(pattern_exclude)
+ else:
+ self.pattern_exclude = None
+ except ConfigParser.NoOptionError, e:
+ self.pattern_exclude = None
+
+ # Instance filters (see boto and EC2 API docs)
+ self.ec2_instance_filters = defaultdict(list)
+ if config.has_option('ec2', 'instance_filters'):
+ for x in config.get('ec2', 'instance_filters', '').split(','):
+ filter_key, filter_value = x.split('=')
+ self.ec2_instance_filters[filter_key].append(filter_value)
+
+ def parse_cli_args(self):
+ ''' Command line argument processing '''
+
+ parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on EC2')
+ parser.add_argument('--list', action='store_true', default=True,
+ help='List instances (default: True)')
+ parser.add_argument('--host', action='store',
+ help='Get all the variables about a specific instance')
+ parser.add_argument('--refresh-cache', action='store_true', default=False,
+ help='Force refresh of cache by making API requests to EC2 (default: False - use cache files)')
+ self.args = parser.parse_args()
+
+
+ def do_api_calls_update_cache(self):
+ ''' Do API calls to each region, and save data in cache files '''
+
+ if self.route53_enabled:
+ self.get_route53_records()
+
+ for region in self.regions:
+ self.get_instances_by_region(region)
+ if self.rds_enabled:
+ self.get_rds_instances_by_region(region)
+
+ self.write_to_cache(self.inventory, self.cache_path_cache)
+ self.write_to_cache(self.index, self.cache_path_index)
+
+
+ def get_instances_by_region(self, region):
+ ''' Makes an AWS EC2 API call to the list of instances in a particular
+ region '''
+
+ try:
+ if self.eucalyptus:
+ conn = boto.connect_euca(host=self.eucalyptus_host)
+ conn.APIVersion = '2010-08-31'
+ else:
+ conn = ec2.connect_to_region(region)
+
+ # connect_to_region will fail "silently" by returning None if the region name is wrong or not supported
+ if conn is None:
+ print("region name: %s likely not supported, or AWS is down. connection to region failed." % region)
+ sys.exit(1)
+
+ reservations = []
+ if self.ec2_instance_filters:
+ for filter_key, filter_values in self.ec2_instance_filters.iteritems():
+ reservations.extend(conn.get_all_instances(filters = { filter_key : filter_values }))
+ else:
+ reservations = conn.get_all_instances()
+
+ for reservation in reservations:
+ for instance in reservation.instances:
+ self.add_instance(instance, region)
+
+ except boto.exception.BotoServerError, e:
+ if not self.eucalyptus:
+ print "Looks like AWS is down again:"
+ print e
+ sys.exit(1)
+
+ def get_rds_instances_by_region(self, region):
+ ''' Makes an AWS API call to the list of RDS instances in a particular
+ region '''
+
+ try:
+ conn = rds.connect_to_region(region)
+ if conn:
+ instances = conn.get_all_dbinstances()
+ for instance in instances:
+ self.add_rds_instance(instance, region)
+ except boto.exception.BotoServerError, e:
+ if not e.reason == "Forbidden":
+ print "Looks like AWS RDS is down: "
+ print e
+ sys.exit(1)
+
+ def get_instance(self, region, instance_id):
+ ''' Gets details about a specific instance '''
+ if self.eucalyptus:
+ conn = boto.connect_euca(self.eucalyptus_host)
+ conn.APIVersion = '2010-08-31'
+ else:
+ conn = ec2.connect_to_region(region)
+
+ # connect_to_region will fail "silently" by returning None if the region name is wrong or not supported
+ if conn is None:
+ print("region name: %s likely not supported, or AWS is down. connection to region failed." % region)
+ sys.exit(1)
+
+ reservations = conn.get_all_instances([instance_id])
+ for reservation in reservations:
+ for instance in reservation.instances:
+ return instance
+
+ def add_instance(self, instance, region):
+ ''' Adds an instance to the inventory and index, as long as it is
+ addressable '''
+
+ # Only want running instances unless all_instances is True
+ if not self.all_instances and instance.state != 'running':
+ return
+
+ # Select the best destination address
+ if instance.subnet_id:
+ dest = getattr(instance, self.vpc_destination_variable)
+ else:
+ dest = getattr(instance, self.destination_variable)
+
+ if not dest:
+ # Skip instances we cannot address (e.g. private VPC subnet)
+ return
+
+ # if we only want to include hosts that match a pattern, skip those that don't
+ if self.pattern_include and not self.pattern_include.match(dest):
+ return
+
+ # if we need to exclude hosts that match a pattern, skip those
+ if self.pattern_exclude and self.pattern_exclude.match(dest):
+ return
+
+ # Add to index
+ self.index[dest] = [region, instance.id]
+
+ # Inventory: Group by instance ID (always a group of 1)
+ self.inventory[instance.id] = [dest]
+ if self.nested_groups:
+ self.push_group(self.inventory, 'instances', instance.id)
+
+ # Inventory: Group by region
+ if self.nested_groups:
+ self.push_group(self.inventory, 'regions', region)
+ else:
+ self.push(self.inventory, region, dest)
+
+ # Inventory: Group by availability zone
+ self.push(self.inventory, instance.placement, dest)
+ if self.nested_groups:
+ self.push_group(self.inventory, region, instance.placement)
+
+ # Inventory: Group by instance type
+ type_name = self.to_safe('type_' + instance.instance_type)
+ self.push(self.inventory, type_name, dest)
+ if self.nested_groups:
+ self.push_group(self.inventory, 'types', type_name)
+
+ # Inventory: Group by key pair
+ if instance.key_name:
+ key_name = self.to_safe('key_' + instance.key_name)
+ self.push(self.inventory, key_name, dest)
+ if self.nested_groups:
+ self.push_group(self.inventory, 'keys', key_name)
+
+ # Inventory: Group by VPC
+ if instance.vpc_id:
+ self.push(self.inventory, self.to_safe('vpc_id_' + instance.vpc_id), dest)
+
+ # Inventory: Group by security group
+ try:
+ for group in instance.groups:
+ key = self.to_safe("security_group_" + group.name)
+ self.push(self.inventory, key, dest)
+ if self.nested_groups:
+ self.push_group(self.inventory, 'security_groups', key)
+ except AttributeError:
+ print 'Package boto seems a bit older.'
+ print 'Please upgrade boto >= 2.3.0.'
+ sys.exit(1)
+
+ # Inventory: Group by tag keys
+ for k, v in instance.tags.iteritems():
+ key = self.to_safe("tag_" + k + "=" + v)
+ self.push(self.inventory, key, dest)
+ if self.nested_groups:
+ self.push_group(self.inventory, 'tags', self.to_safe("tag_" + k))
+ self.push_group(self.inventory, self.to_safe("tag_" + k), key)
+
+ # Inventory: Group by Route53 domain names if enabled
+ if self.route53_enabled:
+ route53_names = self.get_instance_route53_names(instance)
+ for name in route53_names:
+ self.push(self.inventory, name, dest)
+ if self.nested_groups:
+ self.push_group(self.inventory, 'route53', name)
+
+ # Global Tag: instances without tags
+ if len(instance.tags) == 0:
+ self.push(self.inventory, 'tag_none', dest)
+
+ # Global Tag: tag all EC2 instances
+ self.push(self.inventory, 'ec2', dest)
+
+ self.inventory["_meta"]["hostvars"][dest] = self.get_host_info_dict_from_instance(instance)
+
+
+ def add_rds_instance(self, instance, region):
+ ''' Adds an RDS instance to the inventory and index, as long as it is
+ addressable '''
+
+ # Only want available instances unless all_rds_instances is True
+ if not self.all_rds_instances and instance.status != 'available':
+ return
+
+ # Select the best destination address
+ #if instance.subnet_id:
+ #dest = getattr(instance, self.vpc_destination_variable)
+ #else:
+ #dest = getattr(instance, self.destination_variable)
+ dest = instance.endpoint[0]
+
+ if not dest:
+ # Skip instances we cannot address (e.g. private VPC subnet)
+ return
+
+ # Add to index
+ self.index[dest] = [region, instance.id]
+
+ # Inventory: Group by instance ID (always a group of 1)
+ self.inventory[instance.id] = [dest]
+ if self.nested_groups:
+ self.push_group(self.inventory, 'instances', instance.id)
+
+ # Inventory: Group by region
+ if self.nested_groups:
+ self.push_group(self.inventory, 'regions', region)
+ else:
+ self.push(self.inventory, region, dest)
+
+ # Inventory: Group by availability zone
+ self.push(self.inventory, instance.availability_zone, dest)
+ if self.nested_groups:
+ self.push_group(self.inventory, region, instance.availability_zone)
+
+ # Inventory: Group by instance type
+ type_name = self.to_safe('type_' + instance.instance_class)
+ self.push(self.inventory, type_name, dest)
+ if self.nested_groups:
+ self.push_group(self.inventory, 'types', type_name)
+
+ # Inventory: Group by security group
+ try:
+ if instance.security_group:
+ key = self.to_safe("security_group_" + instance.security_group.name)
+ self.push(self.inventory, key, dest)
+ if self.nested_groups:
+ self.push_group(self.inventory, 'security_groups', key)
+
+ except AttributeError:
+ print 'Package boto seems a bit older.'
+ print 'Please upgrade boto >= 2.3.0.'
+ sys.exit(1)
+
+ # Inventory: Group by engine
+ self.push(self.inventory, self.to_safe("rds_" + instance.engine), dest)
+ if self.nested_groups:
+ self.push_group(self.inventory, 'rds_engines', self.to_safe("rds_" + instance.engine))
+
+ # Inventory: Group by parameter group
+ self.push(self.inventory, self.to_safe("rds_parameter_group_" + instance.parameter_group.name), dest)
+ if self.nested_groups:
+ self.push_group(self.inventory, 'rds_parameter_groups', self.to_safe("rds_parameter_group_" + instance.parameter_group.name))
+
+ # Global Tag: all RDS instances
+ self.push(self.inventory, 'rds', dest)
+
+ self.inventory["_meta"]["hostvars"][dest] = self.get_host_info_dict_from_instance(instance)
+
+
+ def get_route53_records(self):
+ ''' Get and store the map of resource records to domain names that
+ point to them. '''
+
+ r53_conn = route53.Route53Connection()
+ all_zones = r53_conn.get_zones()
+
+ route53_zones = [ zone for zone in all_zones if zone.name[:-1]
+ not in self.route53_excluded_zones ]
+
+ self.route53_records = {}
+
+ for zone in route53_zones:
+ rrsets = r53_conn.get_all_rrsets(zone.id)
+
+ for record_set in rrsets:
+ record_name = record_set.name
+
+ if record_name.endswith('.'):
+ record_name = record_name[:-1]
+
+ for resource in record_set.resource_records:
+ self.route53_records.setdefault(resource, set())
+ self.route53_records[resource].add(record_name)
+
+
+ def get_instance_route53_names(self, instance):
+ ''' Check if an instance is referenced in the records we have from
+ Route53. If it is, return the list of domain names pointing to said
+ instance. If nothing points to it, return an empty list. '''
+
+ instance_attributes = [ 'public_dns_name', 'private_dns_name',
+ 'ip_address', 'private_ip_address' ]
+
+ name_list = set()
+
+ for attrib in instance_attributes:
+ try:
+ value = getattr(instance, attrib)
+ except AttributeError:
+ continue
+
+ if value in self.route53_records:
+ name_list.update(self.route53_records[value])
+
+ return list(name_list)
+
+
+ def get_host_info_dict_from_instance(self, instance):
+ instance_vars = {}
+ for key in vars(instance):
+ value = getattr(instance, key)
+ key = self.to_safe('ec2_' + key)
+
+ # Handle complex types
+ # state/previous_state changed to properties in boto in https://github.com/boto/boto/commit/a23c379837f698212252720d2af8dec0325c9518
+ if key == 'ec2__state':
+ instance_vars['ec2_state'] = instance.state or ''
+ instance_vars['ec2_state_code'] = instance.state_code
+ elif key == 'ec2__previous_state':
+ instance_vars['ec2_previous_state'] = instance.previous_state or ''
+ instance_vars['ec2_previous_state_code'] = instance.previous_state_code
+ elif type(value) in [int, bool]:
+ instance_vars[key] = value
+ elif type(value) in [str, unicode]:
+ instance_vars[key] = value.strip()
+ elif type(value) == type(None):
+ instance_vars[key] = ''
+ elif key == 'ec2_region':
+ instance_vars[key] = value.name
+ elif key == 'ec2__placement':
+ instance_vars['ec2_placement'] = value.zone
+ elif key == 'ec2_tags':
+ for k, v in value.iteritems():
+ key = self.to_safe('ec2_tag_' + k)
+ instance_vars[key] = v
+ elif key == 'ec2_groups':
+ group_ids = []
+ group_names = []
+ for group in value:
+ group_ids.append(group.id)
+ group_names.append(group.name)
+ instance_vars["ec2_security_group_ids"] = ','.join([str(i) for i in group_ids])
+ instance_vars["ec2_security_group_names"] = ','.join([str(i) for i in group_names])
+ else:
+ pass
+ # TODO Product codes if someone finds them useful
+ #print key
+ #print type(value)
+ #print value
+
+ return instance_vars
+
+ def get_host_info(self):
+ ''' Get variables about a specific host '''
+
+ if len(self.index) == 0:
+ # Need to load index from cache
+ self.load_index_from_cache()
+
+ if not self.args.host in self.index:
+ # try updating the cache
+ self.do_api_calls_update_cache()
+ if not self.args.host in self.index:
+ # host might not exist anymore
+ return self.json_format_dict({}, True)
+
+ (region, instance_id) = self.index[self.args.host]
+
+ instance = self.get_instance(region, instance_id)
+ return self.json_format_dict(self.get_host_info_dict_from_instance(instance), True)
+
+ def push(self, my_dict, key, element):
+ ''' Push an element onto an array that may not have been defined in
+ the dict '''
+ group_info = my_dict.setdefault(key, [])
+ if isinstance(group_info, dict):
+ host_list = group_info.setdefault('hosts', [])
+ host_list.append(element)
+ else:
+ group_info.append(element)
+
+ def push_group(self, my_dict, key, element):
+ ''' Push a group as a child of another group. '''
+ parent_group = my_dict.setdefault(key, {})
+ if not isinstance(parent_group, dict):
+ parent_group = my_dict[key] = {'hosts': parent_group}
+ child_groups = parent_group.setdefault('children', [])
+ if element not in child_groups:
+ child_groups.append(element)
+
+ def get_inventory_from_cache(self):
+ ''' Reads the inventory from the cache file and returns it as a JSON
+ object '''
+
+ cache = open(self.cache_path_cache, 'r')
+ json_inventory = cache.read()
+ return json_inventory
+
+
+ def load_index_from_cache(self):
+ ''' Reads the index from the cache file sets self.index '''
+
+ cache = open(self.cache_path_index, 'r')
+ json_index = cache.read()
+ self.index = json.loads(json_index)
+
+
+ def write_to_cache(self, data, filename):
+ ''' Writes data in JSON format to a file '''
+
+ json_data = self.json_format_dict(data, True)
+ cache = open(filename, 'w')
+ cache.write(json_data)
+ cache.close()
+
+
+ def to_safe(self, word):
+ ''' Converts 'bad' characters in a string to underscores so they can be
+ used as Ansible groups '''
+
+ return re.sub("[^A-Za-z0-9\-]", "_", word)
+
+
+ def json_format_dict(self, data, pretty=False):
+ ''' Converts a dict to a JSON object and dumps it as a formatted
+ string '''
+
+ if pretty:
+ return json.dumps(data, sort_keys=True, indent=2)
+ else:
+ return json.dumps(data)
+
+
+# Run the script
+Ec2Inventory()
+
diff --git a/tests/files/ansible/host-config.yml b/tests/files/ansible/host-config.yml
new file mode 100644
index 000000000..c4c8e6eb9
--- /dev/null
+++ b/tests/files/ansible/host-config.yml
@@ -0,0 +1,10 @@
+---
+- name: Provision the operation system for tests
+ # testing
+ # hosts: all
+ # live
+ hosts: tag_Name_test_runner
+ remote_user: ubuntu
+ roles:
+ - docker
+ - common
diff --git a/tests/files/ansible/roles/common/handlers/main.yml b/tests/files/ansible/roles/common/handlers/main.yml
new file mode 100644
index 000000000..767fc7ba6
--- /dev/null
+++ b/tests/files/ansible/roles/common/handlers/main.yml
@@ -0,0 +1,4 @@
+---
+- name: restart sshd
+ sudo: true
+ service: name=ssh state=restarted
diff --git a/tests/files/ansible/roles/common/tasks/main.yml b/tests/files/ansible/roles/common/tasks/main.yml
new file mode 100644
index 000000000..6c0c7a119
--- /dev/null
+++ b/tests/files/ansible/roles/common/tasks/main.yml
@@ -0,0 +1,13 @@
+---
+- name: install package dependencies
+ sudo: true
+ apt: name={{ item }}
+ with_items:
+ - parallel
+ - htop
+
+- name: checkout test repo
+ git:
+ repo: https://github.com/ethereum/tests.git
+ version: develop
+ dest: git
diff --git a/tests/files/ansible/roles/docker/handlers/main.yml b/tests/files/ansible/roles/docker/handlers/main.yml
new file mode 100644
index 000000000..767fc7ba6
--- /dev/null
+++ b/tests/files/ansible/roles/docker/handlers/main.yml
@@ -0,0 +1,4 @@
+---
+- name: restart sshd
+ sudo: true
+ service: name=ssh state=restarted
diff --git a/tests/files/ansible/roles/docker/tasks/main.yml b/tests/files/ansible/roles/docker/tasks/main.yml
new file mode 100644
index 000000000..c434b34a5
--- /dev/null
+++ b/tests/files/ansible/roles/docker/tasks/main.yml
@@ -0,0 +1,40 @@
+---
+- name: update package list
+ sudo: true
+ apt: update_cache=true
+
+- name: install docker dependencies
+ sudo: true
+ apt: name={{ item }} install_recommends=false
+ with_items:
+ # Docker has serious problems on EC2: http://www.danstutzman.com/2014/07/speed-up-docker-on-ec2
+ # and https://github.com/docker/docker/issues/4036
+ - linux-generic
+ - python-pip
+
+- name: Kernel update needs a restart
+ sudo: true
+ command: shutdown -r now
+ async: 0
+ poll: 0
+ ignore_errors: true
+
+- name: waiting for server to come back
+ local_action: wait_for host={{ inventory_hostname }} port=22
+ state=started
+ sudo: false
+
+- name: install docker
+ sudo: true
+ # install script from https://docs.docker.com/installation/ubuntulinux/
+ # TODO this is not idempotent
+ shell: curl -sSL https://get.docker.com/ubuntu/ | sudo sh
+
+- name: install docker python API
+ sudo: true
+ pip: name=docker-py
+
+- name: enable docker for standard user
+ sudo: true
+ user: name={{ ansible_ssh_user }} groups=docker append=yes
+ notify: restart sshd
diff --git a/tests/files/ansible/roles/ec2/tasks/setup.yml b/tests/files/ansible/roles/ec2/tasks/setup.yml
new file mode 100644
index 000000000..0876d8d2d
--- /dev/null
+++ b/tests/files/ansible/roles/ec2/tasks/setup.yml
@@ -0,0 +1,33 @@
+---
+- name: create default security group
+ ec2_group:
+ name: "{{ security_group }}"
+ region: "{{ region }}"
+ description: "{{ project_description }}"
+ rules:
+ # ssh
+ - proto: tcp
+ from_port: 22
+ to_port: 22
+ cidr_ip: "{{ ip_access_range }}"
+ rules_egress:
+ - proto: all
+ cidr_ip: "{{ ip_access_range }}"
+
+
+- name: start ec2 instances
+ ec2:
+ group: "{{ security_group }}"
+ instance_type: "{{ instance_type }}"
+ image: "{{ image }}"
+ wait: true
+ region: "{{ region }}"
+ key_name: "{{ keypair }}"
+ instance_tags:
+ Name: test_runner
+ count_tag:
+ Name: test_runner
+ exact_count: "{{ total_no_instances }}"
+# volumes:
+# - device_name: /dev/xvda
+# volume_size: "{{ volume_size_gb }}"
diff --git a/tests/files/ansible/roles/ec2/tasks/terminate.yml b/tests/files/ansible/roles/ec2/tasks/terminate.yml
new file mode 100644
index 000000000..e3d3df672
--- /dev/null
+++ b/tests/files/ansible/roles/ec2/tasks/terminate.yml
@@ -0,0 +1,8 @@
+---
+- name: Terminate ec2 instances
+ local_action: ec2
+ state=absent
+ instance_ids={{ ec2_id }}
+ region={{ region }}
+ wait=true
+
diff --git a/tests/files/ansible/roles/ec2/vars/main.yml b/tests/files/ansible/roles/ec2/vars/main.yml
new file mode 100644
index 000000000..ea260c927
--- /dev/null
+++ b/tests/files/ansible/roles/ec2/vars/main.yml
@@ -0,0 +1,21 @@
+---
+# default config for ec2 instances
+
+instance_type: c4.xlarge
+security_group: client-tests
+
+# image: ami-d6e7c084
+image: ami-9eaa1cf6
+# region: ap-southeast-1
+region: us-east-1
+keypair: christoph
+# keypair: client-tests
+volume_size_gb: 50
+
+# limit access to AWS to these clients in CIDR notation
+ip_access_range: 0.0.0.0/0
+
+
+project_description: https://github.com/ethereum/tests
+
+total_no_instances: 1
diff --git a/tests/files/ansible/roles/testrunner/tasks/main.yml b/tests/files/ansible/roles/testrunner/tasks/main.yml
new file mode 100644
index 000000000..41c195f24
--- /dev/null
+++ b/tests/files/ansible/roles/testrunner/tasks/main.yml
@@ -0,0 +1,34 @@
+---
+- name: update C++ client
+ docker_image:
+ path: /home/{{ ansible_ssh_user }}/git/ansible/test-files/docker-cppjit
+ name: ethereum/cppjit-testrunner
+ state: build
+ async: 1200
+ poll: 5
+
+- name: update Go client
+ docker_image:
+ path: /home/{{ ansible_ssh_user }}/git/ansible/test-files/docker-go
+ name: ethereum/go-testrunner
+ state: build
+ async: 1200
+ poll: 5
+
+- name: update Python client
+ docker_image:
+ path: /home/{{ ansible_ssh_user }}/git/ansible/test-files/docker-python
+ name: ethereum/python-testrunner
+ state: build
+ async: 1200
+ poll: 5
+
+- name: Run infinite tests
+ shell: seq {{ ansible_processor_vcpus }} | parallel --max-args=0 /home/{{ ansible_ssh_user }}/git/ansible/test-files/testrunner.sh
+ # run for two months
+ async: "{{ 60 * 60 * 24 * 30 * 2 }}"
+ poll: 0
+ register: log_runner
+
+- name: verify previous task
+ async_status: jid={{ log_runner.ansible_job_id }}
diff --git a/tests/files/ansible/site.yml b/tests/files/ansible/site.yml
new file mode 100644
index 000000000..cc04daa94
--- /dev/null
+++ b/tests/files/ansible/site.yml
@@ -0,0 +1,3 @@
+---
+- include: host-config.yml
+- include: testrunner-config.yml
diff --git a/tests/files/ansible/test-files/create-docker-images.sh b/tests/files/ansible/test-files/create-docker-images.sh
new file mode 100755
index 000000000..06728c6d7
--- /dev/null
+++ b/tests/files/ansible/test-files/create-docker-images.sh
@@ -0,0 +1,7 @@
+#!/bin/bash -x
+
+# creates the necessary docker images to run testrunner.sh locally
+
+docker build --tag="ethereum/cppjit-testrunner" docker-cppjit
+docker build --tag="ethereum/python-testrunner" docker-python
+docker build --tag="ethereum/go-testrunner" docker-go
diff --git a/tests/files/ansible/test-files/docker-cpp/Dockerfile b/tests/files/ansible/test-files/docker-cpp/Dockerfile
new file mode 100644
index 000000000..a3b0e4ca6
--- /dev/null
+++ b/tests/files/ansible/test-files/docker-cpp/Dockerfile
@@ -0,0 +1,32 @@
+# adjusted from https://github.com/ethereum/cpp-ethereum/blob/develop/docker/Dockerfile
+FROM ubuntu:14.04
+
+ENV DEBIAN_FRONTEND noninteractive
+RUN apt-get update
+RUN apt-get upgrade -y
+
+# Ethereum dependencies
+RUN apt-get install -qy build-essential g++-4.8 git cmake libboost-all-dev libcurl4-openssl-dev wget
+RUN apt-get install -qy automake unzip libgmp-dev libtool libleveldb-dev yasm libminiupnpc-dev libreadline-dev scons
+RUN apt-get install -qy libjsoncpp-dev libargtable2-dev
+
+# NCurses based GUI (not optional though for a successful compilation, see https://github.com/ethereum/cpp-ethereum/issues/452 )
+RUN apt-get install -qy libncurses5-dev
+
+# Qt-based GUI
+# RUN apt-get install -qy qtbase5-dev qt5-default qtdeclarative5-dev libqt5webkit5-dev
+
+# Ethereum PPA
+RUN apt-get install -qy software-properties-common
+RUN add-apt-repository ppa:ethereum/ethereum
+RUN apt-get update
+RUN apt-get install -qy libcryptopp-dev libjson-rpc-cpp-dev
+
+# Build Ethereum (HEADLESS)
+RUN git clone --depth=1 --branch develop https://github.com/ethereum/cpp-ethereum
+RUN mkdir -p cpp-ethereum/build
+RUN cd cpp-ethereum/build && cmake .. -DCMAKE_BUILD_TYPE=Release -DHEADLESS=1 && make -j $(cat /proc/cpuinfo | grep processor | wc -l) && make install
+RUN ldconfig
+
+ENTRYPOINT ["/cpp-ethereum/build/test/createRandomTest"]
+
diff --git a/tests/files/ansible/test-files/docker-cppjit/Dockerfile b/tests/files/ansible/test-files/docker-cppjit/Dockerfile
new file mode 100644
index 000000000..b9b9b68ba
--- /dev/null
+++ b/tests/files/ansible/test-files/docker-cppjit/Dockerfile
@@ -0,0 +1,46 @@
+# adjusted from https://github.com/ethereum/cpp-ethereum/blob/develop/docker/Dockerfile
+FROM ubuntu:14.04
+
+ENV DEBIAN_FRONTEND noninteractive
+RUN apt-get update
+RUN apt-get upgrade -y
+
+# Ethereum dependencies
+RUN apt-get install -qy build-essential g++-4.8 git cmake libboost-all-dev libcurl4-openssl-dev wget
+RUN apt-get install -qy automake unzip libgmp-dev libtool libleveldb-dev yasm libminiupnpc-dev libreadline-dev scons
+RUN apt-get install -qy libjsoncpp-dev libargtable2-dev
+
+# NCurses based GUI (not optional though for a successful compilation, see https://github.com/ethereum/cpp-ethereum/issues/452 )
+RUN apt-get install -qy libncurses5-dev
+
+# Qt-based GUI
+# RUN apt-get install -qy qtbase5-dev qt5-default qtdeclarative5-dev libqt5webkit5-dev
+
+RUN sudo apt-get -y install software-properties-common
+
+# LLVM-3.5
+RUN wget -O - http://llvm.org/apt/llvm-snapshot.gpg.key|sudo apt-key add -
+RUN echo "deb http://llvm.org/apt/trusty/ llvm-toolchain-trusty-3.5 main\ndeb-src http://llvm.org/apt/trusty/ llvm-toolchain-trusty-3.5 main" > /etc/apt/sources.list.d/llvm-trusty.list
+RUN apt-get update
+RUN apt-get install -qy llvm-3.5 libedit-dev
+
+# Fix llvm-3.5 cmake paths
+RUN mkdir -p /usr/lib/llvm-3.5/share/llvm && ln -s /usr/share/llvm-3.5/cmake /usr/lib/llvm-3.5/share/llvm/cmake
+
+
+# Ethereum PPA
+RUN apt-get install -qy software-properties-common
+RUN add-apt-repository ppa:ethereum/ethereum
+RUN apt-get update
+RUN apt-get install -qy libcryptopp-dev libjson-rpc-cpp-dev
+
+# this is a workaround, to make sure that docker's cache is invalidated whenever the git repo changes
+ADD https://api.github.com/repos/ethereum/cpp-ethereum/git/refs/heads/develop unused.txt
+
+# Build Ethereum (HEADLESS)
+RUN git clone --depth=1 --branch develop https://github.com/ethereum/cpp-ethereum
+RUN mkdir -p cpp-ethereum/build
+RUN cd cpp-ethereum/build && cmake .. -DCMAKE_BUILD_TYPE=Debug -DVMTRACE=1 -DPARANOIA=1 -DEVMJIT=1 && make -j $(cat /proc/cpuinfo | grep processor | wc -l) && make install
+RUN ldconfig
+
+ENTRYPOINT ["/cpp-ethereum/build/test/checkRandomTest"]
diff --git a/tests/files/ansible/test-files/docker-go/Dockerfile b/tests/files/ansible/test-files/docker-go/Dockerfile
new file mode 100644
index 000000000..a5a2f0f23
--- /dev/null
+++ b/tests/files/ansible/test-files/docker-go/Dockerfile
@@ -0,0 +1,47 @@
+# Adjusted from https://github.com/ethereum/go-ethereum/blob/develop/Dockerfile
+FROM ubuntu:14.04
+
+## Environment setup
+ENV HOME /root
+ENV GOPATH /root/go
+ENV PATH /golang/bin:/root/go/bin:/usr/local/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games
+ENV PKG_CONFIG_PATH /opt/qt54/lib/pkgconfig
+
+RUN mkdir -p /root/go
+ENV DEBIAN_FRONTEND noninteractive
+
+## Install base dependencies
+RUN apt-get update && apt-get upgrade -y
+RUN apt-get install -y git mercurial build-essential software-properties-common pkg-config libgmp3-dev libreadline6-dev libpcre3-dev libpcre++-dev mesa-common-dev libglu1-mesa-dev
+
+## Install Qt5.4 dependencies from PPA
+RUN add-apt-repository ppa:beineri/opt-qt54-trusty -y
+RUN apt-get update -y
+RUN apt-get install -y qt54quickcontrols qt54webengine
+
+## Build and install latest Go
+RUN git clone https://go.googlesource.com/go golang
+RUN cd golang && git checkout go1.4.1
+RUN cd golang/src && ./make.bash && go version
+
+## Fetch and install QML
+RUN go get -u -v -d github.com/obscuren/qml
+WORKDIR $GOPATH/src/github.com/obscuren/qml
+RUN git checkout v1
+RUN go install -v
+
+# this is a workaround, to make sure that docker's cache is invalidated whenever the git repo changes
+ADD https://api.github.com/repos/ethereum/go-ethereum/git/refs/heads/develop unused.txt
+
+## Fetch and install go-ethereum
+RUN go get -u -v -d github.com/ethereum/go-ethereum/...
+WORKDIR $GOPATH/src/github.com/ethereum/go-ethereum
+
+RUN git checkout develop
+RUN git pull
+
+
+RUN ETH_DEPS=$(go list -f '{{.Imports}} {{.TestImports}} {{.XTestImports}}' github.com/ethereum/go-ethereum/... | sed -e 's/\[//g' | sed -e 's/\]//g' | sed -e 's/C //g'); if [ "$ETH_DEPS" ]; then go get $ETH_DEPS; fi
+RUN go install -v ./cmd/ethtest
+
+ENTRYPOINT ["ethtest"]
diff --git a/tests/files/ansible/test-files/docker-python/Dockerfile b/tests/files/ansible/test-files/docker-python/Dockerfile
new file mode 100644
index 000000000..e83faf3d6
--- /dev/null
+++ b/tests/files/ansible/test-files/docker-python/Dockerfile
@@ -0,0 +1,23 @@
+FROM ubuntu:14.04
+
+ENV DEBIAN_FRONTEND noninteractive
+RUN apt-get update
+RUN apt-get upgrade -y
+
+RUN apt-get install -qy curl git python2.7 python-pip python-dev
+
+# this is a workaround, to make sure that docker's cache is invalidated whenever the git repo changes
+ADD https://api.github.com/repos/ethereum/pyethereum/git/refs/heads/develop unused.txt
+
+RUN git clone --branch develop --recursive https://github.com/ethereum/pyethereum.git
+
+RUN cd pyethereum && curl https://bootstrap.pypa.io/bootstrap-buildout.py | python
+
+RUN cd pyethereum && bin/buildout
+
+#default port for incoming requests
+EXPOSE 30303
+
+WORKDIR /pyethereum
+
+ENTRYPOINT ["bin/python", "tests/test_vm.py"]
diff --git a/tests/files/ansible/test-files/testrunner.sh b/tests/files/ansible/test-files/testrunner.sh
new file mode 100755
index 000000000..30a813e12
--- /dev/null
+++ b/tests/files/ansible/test-files/testrunner.sh
@@ -0,0 +1,56 @@
+#!/bin/bash
+
+# create random virtual machine test
+
+mkdir --parents ~/testout
+cd ~/testout
+while [ 1 ]
+do
+ TEST="$(docker run --rm --entrypoint=\"/cpp-ethereum/build/test/createRandomTest\" ethereum/cppjit-testrunner)"
+ # echo "$TEST"
+
+ # test pyethereum
+ OUTPUT_PYTHON="$(docker run --rm ethereum/python-testrunner "$TEST")"
+ RESULT_PYTHON=$?
+
+ # test go
+ OUTPUT_GO="$(docker run --rm ethereum/go-testrunner "$TEST")"
+ RESULT_GO=$?
+
+ # test cpp-jit
+ OUTPUT_CPPJIT="$(docker run --rm ethereum/cppjit-testrunner "$TEST")"
+ RESULT_CPPJIT=$?
+
+ # go fails
+ if [ "$RESULT_GO" -ne 0 ]; then
+ echo Failed:
+ echo Output_GO:
+ echo $OUTPUT_GO
+ echo Test:
+ echo "$TEST"
+ echo "$TEST" > FailedTest.json
+ mv FailedTest.json $(date -d "today" +"%Y%m%d%H%M")GO.json # replace with scp to central server
+ fi
+
+ # python fails
+ if [ "$RESULT_PYTHON" -ne 0 ]; then
+ echo Failed:
+ echo Output_PYTHON:
+ echo $OUTPUT_PYTHON
+ echo Test:
+ echo "$TEST"
+ echo "$TEST" > FailedTest.json
+ mv FailedTest.json $(date -d "today" +"%Y%m%d%H%M")PYTHON.json
+ fi
+
+ # cppjit fails
+ if [ "$RESULT_CPPJIT" -ne 0 ]; then
+ echo Failed:
+ echo Output_CPPJIT:
+ echo $OUTPUT_CPPJIT
+ echo Test:
+ echo "$TEST"
+ echo "$TEST" > FailedTest.json
+ mv FailedTest.json $(date -d "today" +"%Y%m%d%H%M")CPPJIT.json
+ fi
+done
diff --git a/tests/files/ansible/testrunner-config.yml b/tests/files/ansible/testrunner-config.yml
new file mode 100644
index 000000000..e9cf497f6
--- /dev/null
+++ b/tests/files/ansible/testrunner-config.yml
@@ -0,0 +1,12 @@
+---
+- name: preparing and running tests
+ # testing
+ # hosts: all
+ # live
+ hosts: tag_Name_test_runner
+
+ # TODO use the right user for configuring, until credentials set, stay with default vagrant user
+ remote_user: ubuntu
+
+ roles:
+ - testrunner