diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..4f760d1
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,4 @@
+.idea/
+build/
+dist/
+conf.json
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..8b9a999
--- /dev/null
+++ b/README.md
@@ -0,0 +1,46 @@
+# Client tool for collecting statistics from multiple accounts #
+
+Create a config file e.g. named `conf.json` with the following contents:
+
+ {
+ "alias_one": {"username": "user@company.com", "password": "secret", "api_endpoint":"https://wdc.cloudsigma.com/api/2.0/"},
+ "alias_two": {"username": "another@company.com", "password": "secret2", "api_endpoint":"https://wdc.cloudsigma.com/api/2.0/"}
+ }
+
+Please verify the accounts are pointed to the right cloud location URL. Here is a list of the current cloud location API endpoints:
+
+ * https://wdc.cloudsigma.com/api/2.0/
+ * https://lvs.cloudsigma.com/api/2.0/
+ * https://zrh.cloudsigma.com/api/2.0/
+
+Then you can invoke the stats collection using the command:
+
+ $ cloudsigma_stats conf.json
+ {
+ "Metrics": {
+ "NumberOfVirtualMachines": {
+ "alias_one": 0,
+ "alias_two": 0
+ },
+ "NumberOfUsedCPUCores": {
+ "alias_one": 0,
+ "alias_two": 0
+ },
+ "MemoryUsage": {
+ "alias_one": 0,
+ "alias_two": 0
+ },
+ "NumberOfVMImages": {
+ "alias_one": 1,
+ "alias_two": 0
+ }
+ },
+ "Cloud name": "CloudSigma"
+ }
+
+If the exit code of the command is 0, the standard output will contain a valid JSON output.
+
+If the `cloudsigma_stats` command is executed without parameters, it prints a template for the config file.
+
+If any error occurs (invalid config file, wrong URL, wrong credentials, problem with connection), the command
+will exit with a non-zero return code and print an error trace on the standard error.
diff --git a/cloudsigma_stats.py b/cloudsigma_stats.py
new file mode 100644
index 0000000..3a48847
--- /dev/null
+++ b/cloudsigma_stats.py
@@ -0,0 +1,39 @@
+import json
+import cloudsigma
+import sys
+
+
+if len(sys.argv) <= 1:
+ print >>sys.stderr, "Pass the config file as the first argument. \n" \
+ "Here is an example template for a config file. Please specify account email, password and cloud location:"
+ print >>sys.stderr, """
+ {
+ "alias_one": {"username": "user@company.com", "password": "secret", "api_endpoint":"https://wdc.cloudsigma.com/api/2.0/"},
+ "alias_two": {"username": "another@company.com", "password": "secret2", "api_endpoint":"https://wdc.cloudsigma.com/api/2.0/"}
+ }
+ """
+ sys.exit(-1)
+
+users = json.load(open(sys.argv[1], 'r'))
+
+metrics = {
+ 'MemoryUsage': {},
+ 'NumberOfUsedCPUCores': {},
+ 'NumberOfVMImages': {},
+ 'NumberOfVirtualMachines': {},
+}
+
+for alias, user in users.iteritems():
+ started_servers = [server for server in cloudsigma.resource.Server(**user).list_detail() if server['status'] == 'running']
+ drives = cloudsigma.resource.Drive(**user).list()
+ metrics['MemoryUsage'][alias] = sum(guest['mem'] for guest in started_servers)
+ metrics['NumberOfUsedCPUCores'][alias] = sum(guest['smp'] for guest in started_servers)
+ metrics['NumberOfVirtualMachines'][alias] = len(started_servers)
+ metrics['NumberOfVMImages'][alias] = len(drives)
+
+dump = {
+ 'Cloud name': 'CloudSigma',
+ 'Metrics': metrics,
+}
+
+print json.dumps(dump, indent=True)
diff --git a/cloudsigma_stats.spec b/cloudsigma_stats.spec
new file mode 100644
index 0000000..7b63661
--- /dev/null
+++ b/cloudsigma_stats.spec
@@ -0,0 +1,17 @@
+# -*- mode: python -*-
+a = Analysis(['cloudsigma_stats.py', 'cloudsigma_stats.spec'],
+ pathex=['/home/nikola/src/cloudsme_stats'],
+ hiddenimports=[],
+ hookspath=None,
+ runtime_hooks=None)
+pyz = PYZ(a.pure)
+exe = EXE(pyz,
+ a.scripts,
+ a.binaries + [('requests/cacert.pem', '/home/nikola/.virtualenvs/cloudsme_stats/lib/python2.7/site-packages/requests/cacert.pem', 'DATA'), ],
+ a.zipfiles,
+ a.datas,
+ name='cloudsigma_stats',
+ debug=False,
+ strip=None,
+ upx=True,
+ console=True )
diff --git a/pycloudsigma-master/.gitignore b/pycloudsigma-master/.gitignore
new file mode 100644
index 0000000..ed7dd55
--- /dev/null
+++ b/pycloudsigma-master/.gitignore
@@ -0,0 +1,5 @@
+*.pyc
+*.swp
+build/
+dist/
+src/cloudsigma.egg-info
diff --git a/pycloudsigma-master/MANIFEST.in b/pycloudsigma-master/MANIFEST.in
new file mode 100644
index 0000000..f9bd145
--- /dev/null
+++ b/pycloudsigma-master/MANIFEST.in
@@ -0,0 +1 @@
+include requirements.txt
diff --git a/pycloudsigma-master/README.md b/pycloudsigma-master/README.md
new file mode 100644
index 0000000..1322b0a
--- /dev/null
+++ b/pycloudsigma-master/README.md
@@ -0,0 +1,295 @@
+# CloudSigma's Python Library
+
+## Config file
+
+In order for the CloudSigma library to interact with the API, you need to provide your credentials. These are set in the file `~/.cloudsigma.conf`. Here's a sample version of the file that talks to the Las Vegas datacenter. If you instead want to use the Zürich datacenter, simply replace 'lvs' with 'zrh' in the api_endpoint and ws_endpoint. Please note that this is not required in order to read back meta data on the server.
+
+
+```python
+api_endpoint = https://lvs.cloudsigma.com/api/2.0/
+ws_endpoint = wss://direct.lvs.cloudsigma.com/websocket
+username = user@domain.com
+password = secret
+
+# Only needed for the integration/unit tests.
+persistent_drive_name=foobar
+persistent_drive_ssh_password=sshsecret
+```
+
+Since this file includes credentials, it is highly recommended that you set the permission of the file to 600 (`chmod 600 ~/.cloudsigma.conf`)
+
+
+## Installation
+
+### Ubuntu
+
+```bash
+sudo apt-get -y install python-pip
+pip install cloudsigma
+```
+
+### CentOS / RHEL
+
+In order to install the CloudSigma module, you first need to install the [EPEL](https://fedoraproject.org/wiki/EPEL) repository, which provides PIP. The instructions below are for RHEL 6.x / CentOS 6.x. For details on installing the repository, please visit the EPEL site.
+
+```bash
+yum install -y wget
+wget http://mirrors.servercentral.net/fedora/epel/6/i386/epel-release-6-8.noarch.rpm
+rpm -Uvh epel-release-6-8.noarch.rpm
+yum install -y python-pip
+pip install cloudsigma
+```
+
+## Using pycloudsigma
+
+### Imports and definitions
+
+```python
+import cloudsigma
+from pprint import pprint
+
+drive = cloudsigma.resource.Drive()
+server = cloudsigma.resource.Server()
+```
+
+#### Create a drive
+
+```python
+test_disk = { 'name': 'test_drive', 'size': 1073741824 * 1, 'media': 'disk'}
+my_test_disk = drive.create(test_disk)
+```
+
+Print back the result:
+
+```python
+pprint(my_test_disk)
+{u'affinities': [],
+ u'allow_multimount': False,
+ u'jobs': [],
+ u'licenses': [],
+ u'media': u'disk',
+ u'meta': {},
+ u'mounted_on': [],
+ u'name': u'test_drive',
+ u'owner': {u'resource_uri': u'/api/2.0/user/b4b9XXX-ba52-4ad0-9837-a2672652XXX/',
+ u'uuid': u'b4b9XXX-ba52-4ad0-9837-a2672652XXX'},
+ u'resource_uri': u'/api/2.0/drives/5c33e407-23b9-XXX-b007-3a302eXXXX/',
+ u'size': 1073741824,
+ u'status': u'creating',
+ u'storage_type': None,
+ u'tags': [],
+ u'uuid': u'5c33e407-23b9-XXX-b007-3a302eXXXX'}
+```
+
+### Create a server without a drive
+
+```python
+test_server = { 'name': 'My Test Server', 'cpu': 1000, 'mem': 512 * 1024 ** 2, 'vnc_password': 'test_server' }
+my_test_server = server.create(test_server)
+```
+
+Print back the result:
+
+```python
+pprint(my_test_server)
+{u'context': True,
+ u'cpu': 1000,
+ u'cpus_instead_of_cores': False,
+ u'drives': [],
+ u'enable_numa': False,
+ u'hv_relaxed': False,
+ u'hv_tsc': False,
+ u'mem': 536870912,
+ u'meta': {},
+ u'name': u'My Test Server',
+ u'nics': [],
+ u'owner': {u'resource_uri': u'/api/2.0/user/b4b9XXX-ba52-4ad0-9837-a2672652XXX/',
+ u'uuid': u'b4b9XXX-ba52-4ad0-9837-a2672652XXX'},
+ u'requirements': [],
+ u'resource_uri': u'/api/2.0/servers/4d5bXXX-4da0-43cd-83aY-18b047014XXXX/',
+ u'runtime': None,
+ u'smp': 1,
+ u'status': u'stopped',
+ u'tags': [],
+ u'uuid': u'4d5bXXX-4da0-43cd-83aY-18b047014XXXX',
+ u'vnc_password': u'test_server'}
+```
+
+### Attach the drive and a NIC to the server
+
+We could of course have attached this above, but in order to keep things simple, let's do this in two phases.
+
+Attach the drive:
+
+```python
+my_test_server['drives'] = [ { 'boot_order': 1, 'dev_channel': '0:0', 'device': 'virtio', 'drive': my_test_disk['uuid'] } ]
+```
+
+Attach a public network interface:
+
+```python
+my_test_server['nics'] = [ { 'ip_v4_conf': { 'conf': 'dhcp', 'ip': None }, 'model': 'virtio', 'vlan': None} ]
+```
+
+**Optional**: Add a user-defined SSH key:
+
+```python
+my_test_server['meta'] = { 'ssh_public_key': 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDoHuFV7Skbu9G1iVokXBdB+zN4wEbqGKijlExUPmxuB6MXDBWCmXUEmMRLerTm3a8QMA+8Vyech0/TWQscYvs8xzM/HkRAqKwhhjPMRlfHgy+QKjRD8P989AYMnNcSYe8DayElFXoLYKwsDmoUcsnbf5H+f6agiBkWqz5odb8fvc2rka0X7+p3tDyKFJRt2OugPqLR9fhWddie65DBxAcycnScoqLW0+YAxakfWlKDUqwerIjuRN2VJ7T7iHywcXhvAU060CEtpWW7bE9T/PIoj/N753QDLYrmqtvqAQqU0Ss5rIqS8bYJXyM0zTKwIuncek+k+b9ButBf/Nx5ehjN vagrant@precise64'}
+```
+
+Push the settings:
+
+```python
+server.update(my_test_server['uuid'], my_test_server)
+```
+
+### Start the server
+
+```python
+server.start(my_test_server['uuid'])
+```
+
+### Stop the server
+
+```python
+server.stop(my_test_server['uuid'])
+```
+
+### Reading Meta Data
+
+CloudSigma supports the notion of exposing meta data to guests. Using the Python library, this can be done very easily. **Please note** that you do not need `~/.cloudsigma.conf` in order to use this feature, as the data is read directly from `/dev/ttyS1`. More information on how this works can be found [here](https://lvs.cloudsigma.com/docs/server_context.html#setting-up-the-virtual-serial-port).
+
+By default, various system information is exposed, but it is also possible to push user-defined data, such as an SSH-key to the guest.
+
+Here's a snippet that demonstrates how to read the meta data from a given server using the Python library:
+
+```python
+import cloudsigma
+metadata = cloudsigma.metadata.GetServerMetadata().get()
+
+from pprint import pprint
+pprint(metadata)
+{u'cpu': 1000,
+ u'cpus_instead_of_cores': False,
+ u'drives': [{u'boot_order': 1,
+ u'dev_channel': u'0:0',
+ u'device': u'virtio',
+ u'drive': {u'affinities': [],
+ u'allow_multimount': False,
+ u'licenses': [],
+ u'media': u'disk',
+ u'meta': {u'description': u'This is my test disk.'},
+ u'name': u'SomeName',
+ u'size': 21474836480,
+ u'tags': [],
+ u'uuid': u'19757XXX-8173-46ba-8822-YYYYc6bZZZZ'}}],
+ u'enable_numa': False,
+ u'hv_relaxed': False,
+ u'hv_tsc': False,
+ u'mem': 536870912,
+ u'meta': {u'description': u'This is my test server.'},
+ u'name': u'MyTestServer',
+ u'nics': [{u'boot_order': None,
+ u'ip_v4_conf': {u'conf': u'dhcp',
+ u'ip': {u'gateway': u'123.123.123.123',
+ u'meta': {},
+ u'nameservers': [u'123.123.123.123',
+ u'123.123.123.123',
+ u'8.8.8.8'],
+ u'netmask': 24,
+ u'tags': [],
+ u'uuid': u'123.123.123.123'}},
+ u'ip_v6_conf': None,
+ u'mac': u'22:bd:c4:XX:XX:XX',
+ u'model': u'virtio',
+ u'vlan': None}],
+ u'requirements': [],
+ u'smp': 1,
+ u'tags': [],
+ u'uuid': u'6cc0XXX-d024-4ecf-b0de-83dbc29ZZZ',
+ u'vnc_password': u'NotMyPassword'}
+```
+
+If you get a permission error while running the above command, make sure you have access to read from `/dev/ttyS1`. For instance, on Ubuntu, the default owner is `root` and the group is set to `dialout`. Hence you need to either change the permission, or add your user to the group `sudo usermod -a -G dialout $(whoami)`. Please note that you will need to logout and log in again for the permission change to take effect.
+
+For more examples on how to read and write meta data, please visit our [API documentation](https://autodetect.cloudsigma.com/docs/meta.html#examples).
+
+## Sample: Install SSH key from meta data
+
+In the example above, we pushed an SSH key as meta data to a server. That's great, but what if we want to put this to use? Don't worry, we've got you covered.
+
+The code snippet below assumes that you have installed your SSH key into the server's meta data with the key 'ssh_public_key'.
+
+```python
+import cloudsigma
+import os
+import stat
+
+metadata = cloudsigma.metadata.GetServerMetadata().get()
+ssh_key = metadata['meta']['ssh_public_key']
+
+# Define paths
+home = os.path.expanduser("~")
+ssh_path = os.path.join(home, '.ssh')
+authorized_keys = os.path.join(ssh_path, 'authorized_keys')
+
+
+def get_permission(path):
+ return oct(os.stat(ssh_path)[stat.ST_MODE])[-4:]
+
+if not os.path.isdir(ssh_path):
+ print 'Creating folder %s' % ssh_path
+ os.makedirs(ssh_path)
+
+if get_permission(ssh_path) != 0700:
+ print 'Setting permission for %s' % ssh_path
+ os.chmod(ssh_path, 0700)
+
+# We'll have to assume that there might be other keys installed.
+# We could do something fancy, like checking if the key is installed already,
+# but in order to keep things simple, we'll simply append the key.
+with open(authorized_keys, 'a') as auth_file:
+ auth_file.write(ssh_key + '\n')
+
+if get_permission(authorized_keys) != 0600:
+ print 'Setting permission for %s' % authorized_keys
+ os.chmod(authorized_keys, 0600)
+```
+
+[Download](https://raw.github.com/cloudsigma/pycloudsigma/master/samples/set_ssh_key.py)
+
+## Sample: Monitor websocket activity
+
+Here's a sample application that listens to activity on the websocket. You can run this application to see activity from the web interface.
+
+```python
+from cloudsigma.generic import GenericClient
+from cloudsigma.resource import Websocket
+from cloudsigma.errors import ClientError, PermissionError
+
+ws = Websocket(timeout=None)
+client = GenericClient()
+
+print "Display Websocket activity.\nExit with ^C."
+
+while True:
+ try:
+ get_action = ws.ws.recv()
+ action_uri = get_action['resource_uri']
+ print 'Received Action: %s' % get_action
+ print 'Result:\n%s' % client.get(action_uri)
+ except ClientError as e:
+ if e.args[0] == 404:
+ print "Resource %s was deleted" % action_uri
+ else:
+ print 'Error retrieving: %s' % e
+ except PermissionError as e:
+ print "No permissions for resource %s" % action_uri
+```
+
+[Download](https://raw.github.com/cloudsigma/pycloudsigma/master/samples/monitor_websocket_activity.py)
+
+## Running the tests
+
+There must be a VM available by the name that matches `persistent_drive_name`. This VM should be a server with SSH installed, where one can log in as `root` with the password set in `persistent_drive_ssh_password`.
+
diff --git a/pycloudsigma-master/cloudsigma.conf b/pycloudsigma-master/cloudsigma.conf
new file mode 100644
index 0000000..4dedf09
--- /dev/null
+++ b/pycloudsigma-master/cloudsigma.conf
@@ -0,0 +1,7 @@
+api_endpoint = https://lvs.cloudsigma.com/api/2.0/
+ws_endpoint = wss://direct.lvs.cloudsigma.com/websocket
+username = user@domain.com
+password = secret
+
+persistent_drive_name=foobar
+persistent_drive_ssh_password=sshsecret
diff --git a/pycloudsigma-master/requirements.txt b/pycloudsigma-master/requirements.txt
new file mode 100644
index 0000000..1841896
--- /dev/null
+++ b/pycloudsigma-master/requirements.txt
@@ -0,0 +1,6 @@
+configobj>=4.7
+requests>=1.2.3
+websocket-client>=0.9.0
+simplejson>=2.5.2
+nose>=1.1.2
+pyserial
diff --git a/pycloudsigma-master/samples/monitor_websocket_activity.py b/pycloudsigma-master/samples/monitor_websocket_activity.py
new file mode 100644
index 0000000..3ed8f1b
--- /dev/null
+++ b/pycloudsigma-master/samples/monitor_websocket_activity.py
@@ -0,0 +1,22 @@
+from cloudsigma.generic import GenericClient
+from cloudsigma.resource import Websocket
+from cloudsigma.errors import ClientError, PermissionError
+
+ws = Websocket(timeout=None)
+client = GenericClient()
+
+print "Display Websocket activity.\nExit with ^C."
+
+while True:
+ try:
+ get_action = ws.ws.recv()
+ action_uri = get_action['resource_uri']
+ print 'Received Action: %s' % get_action
+ print 'Result:\n%s' % client.get(action_uri)
+ except ClientError as e:
+ if e.args[0] == 404:
+ print "Resource %s was deleted" % action_uri
+ else:
+ print 'Error retrieving: %s' % e
+ except PermissionError as e:
+ print "No permissions for resource %s" % action_uri
diff --git a/pycloudsigma-master/samples/server_bill.py b/pycloudsigma-master/samples/server_bill.py
new file mode 100644
index 0000000..a59272b
--- /dev/null
+++ b/pycloudsigma-master/samples/server_bill.py
@@ -0,0 +1,77 @@
+import bisect
+from collections import defaultdict
+from datetime import timedelta, datetime
+import dateutil.parser
+from decimal import Decimal
+import re
+
+import cloudsigma.resource as cr
+
+start_time = datetime(2014, 01, 1)
+end_time = datetime(2014, 02, 1)
+
+
+
+
+def get_usages(usage_client, ledger_time, start_date, usage_list, bisect_list):
+ if not usage_list:
+ return usage_client.list(dict(poll_time__lt=ledger_time, poll_time__gt=start_date.isoformat()))
+ else:
+ i = bisect.bisect_left(bisect_list, start_date)
+ res = []
+ while i != len(bisect_list):
+ if usage_list[i]['poll_time'] >= ledger_time:
+ break
+ res.append(usage_list[i])
+ i += 1
+ return res
+
+
+
+def get_per_server_usage(start_time, end_time):
+ server_client = cr.Server()
+ server_list = server_client.list_detail()
+ server_resources = {}
+ for server in server_list:
+ server_resources[server['uuid']] = server['uuid']
+ for drive in server['drives']:
+ server_resources[drive['drive']['uuid']] = server['uuid']
+ usage_client = cr.Usage()
+ ledger_client = cr.Ledger()
+ server_billing = defaultdict(int)
+ interval = (end_time - start_time).days
+
+ ledger_list = ledger_client.list(dict(time__gt=end_time - timedelta(days=interval), time__lt=end_time))
+ usage_list = []
+ i = 0
+ for i in range(7, interval, 7):
+ usage_list.extend(usage_client.list(dict(poll_time__gt=end_time - timedelta(days=i),
+ poll_time__lt=end_time - timedelta(days=i - 7))))
+ if interval % 7 != 0:
+ usage_list.extend(usage_client.list(dict(poll_time__gt=end_time - timedelta(days=interval),
+ poll_time__lt=end_time - timedelta(days=i))))
+ usage_list = list(sorted(usage_list, key=lambda x:x['poll_time']))
+ bisect_list = [dateutil.parser.parse(u['poll_time']) for u in usage_list]
+ for ledger in ledger_list:
+ if not ledger['billing_cycle']:
+ continue
+
+ match = re.search('Burst: .* of ([^ ]*) .*', ledger['reason'])
+ if not match:
+ continue
+ ledger['resource'] = match.group(1)
+ poll_time = dateutil.parser.parse(ledger['time'])
+ start_date = poll_time - timedelta(seconds=ledger['interval'] - 1)
+ usages = get_usages(usage_client, ledger['time'], start_date, usage_list, bisect_list)
+ for usage in usages:
+ if usage['resource'] != ledger['resource']:
+ continue
+ server = server_resources.get(usage['uuid'])
+ if server:
+ server_billing[server] += Decimal(usage['amount']) / Decimal(ledger['resource_amount']) * Decimal(ledger['amount'])
+
+ return server_billing
+
+if __name__ == '__main__':
+ for server, amount in get_per_server_usage(start_time, end_time).iteritems():
+ print "%s - %.2f" % (server, amount)
diff --git a/pycloudsigma-master/samples/set_ssh_key.py b/pycloudsigma-master/samples/set_ssh_key.py
new file mode 100644
index 0000000..38b5181
--- /dev/null
+++ b/pycloudsigma-master/samples/set_ssh_key.py
@@ -0,0 +1,33 @@
+import cloudsigma
+import os
+import stat
+
+metadata = cloudsigma.metadata.GetServerMetadata().get()
+ssh_key = metadata['meta']['ssh_public_key']
+
+# Define paths
+home = os.path.expanduser("~")
+ssh_path = os.path.join(home, '.ssh')
+authorized_keys = os.path.join(ssh_path, 'authorized_keys')
+
+
+def get_permission(path):
+ return oct(os.stat(ssh_path)[stat.ST_MODE])[-4:]
+
+if not os.path.isdir(ssh_path):
+ print 'Creating folder %s' % ssh_path
+ os.makedirs(ssh_path)
+
+if get_permission(ssh_path) != 0700:
+ print 'Setting permission for %s' % ssh_path
+ os.chmod(ssh_path, 0700)
+
+# We'll have to assume that there might be other keys installed.
+# We could do something fancy, like checking if the key is installed already,
+# but in order to keep things simple, we'll simply append the key.
+with open(authorized_keys, 'a') as auth_file:
+ auth_file.write(ssh_key + '\n')
+
+if get_permission(authorized_keys) != 0600:
+ print 'Setting permission for %s' % authorized_keys
+ os.chmod(authorized_keys, 0600)
diff --git a/pycloudsigma-master/setup.py b/pycloudsigma-master/setup.py
new file mode 100755
index 0000000..206dbdf
--- /dev/null
+++ b/pycloudsigma-master/setup.py
@@ -0,0 +1,67 @@
+#!/usr/bin/env python
+
+from setuptools import setup
+
+execfile('src/cloudsigma/version.py')
+
+
+with open('requirements.txt') as f:
+ required = f.read().splitlines()
+
+
+setup(
+ name='cloudsigma',
+ version=__version__,
+ packages=[
+ 'cloudsigma',
+ ],
+ package_dir={
+ '': 'src'
+ },
+ package_data={
+ 'templates': [
+ 'request_template',
+ 'response_template',
+ ]
+ },
+ author='CloudSigma AG',
+ author_email='dev-support@cloudsigma.com',
+ url='https://github.com/cloudsigma/pycloudsigma',
+ install_requires=required,
+ description="CloudSigma's official python library.",
+ keywords=[
+ 'cloud',
+ 'cloudsigma',
+ 'api',
+ 'cloud computing'
+ ],
+ classifiers=[
+ "Programming Language :: Python",
+ "Development Status :: 4 - Beta",
+ "Intended Audience :: System Administrators",
+ "Intended Audience :: Developers",
+ ],
+ long_description="""\
+====================================
+CloudSigma's official python library
+====================================
+
+pycloudsigma allows you to easily manage your entire infrastructure from within Python.
+
+Creating a server is a simple as:
+
+::
+
+ import cloudsigma
+ server = cloudsigma.resource.Server()
+ test_server = { 'name': 'My Test Server', 'cpu': 1000, 'mem': 512 * 1024 ** 2, 'vnc_password': 'test_server' }
+ my_test_server = server.create(test_server)
+
+
+For more examples, please visit pycloudsigma_.
+
+For more detailed information about CloudSigma's, please visit the `API documentation `_.
+
+.. _pycloudsigma: https://github.com/cloudsigma/pycloudsigma/blob/master/README.md
+ """
+)
diff --git a/pycloudsigma-master/src/cloudsigma/__init__.py b/pycloudsigma-master/src/cloudsigma/__init__.py
new file mode 100644
index 0000000..c31f399
--- /dev/null
+++ b/pycloudsigma-master/src/cloudsigma/__init__.py
@@ -0,0 +1,8 @@
+from cloudsigma.version import __version__
+from cloudsigma import bulk
+from cloudsigma import conf
+from cloudsigma import errors
+from cloudsigma import generic
+from cloudsigma import resource
+from cloudsigma import scenarios
+from cloudsigma import metadata
diff --git a/pycloudsigma-master/src/cloudsigma/bulk.py b/pycloudsigma-master/src/cloudsigma/bulk.py
new file mode 100644
index 0000000..c02abef
--- /dev/null
+++ b/pycloudsigma-master/src/cloudsigma/bulk.py
@@ -0,0 +1,188 @@
+import logging
+import time
+
+from .conf import config
+from .resource import Drive, Server, LibDrive
+from .generic import GenericClient
+
+LOG = logging.getLogger(__name__)
+
+class BulkBase(object):
+ """
+ Common base class for all stress operations.
+ """
+ def __init__(self, id_prefix):
+ """
+ @param id_prefix: a string prefix that is used in created artifacts names
+ """
+ self.id_prefix = id_prefix
+ self.id_counter = 0
+
+ self.c = GenericClient()
+ self.c_drive = Drive()
+ self.c_libdrive = LibDrive()
+ self.c_guest = Server()
+
+ def get_name(self):
+ """
+ Generates name for an artifact
+ """
+ self.id_counter += 1
+ return "%s-%.5d" % (self.id_prefix, self.id_counter)
+
+ def filter_by_name_uuid(self, resp, name_or_uuid):
+ def _filter(d):
+ return (d['uuid'] == name_or_uuid) or (name_or_uuid in d['name'])
+
+ candidates = filter(_filter, resp)
+ return candidates
+
+class DrivesBulk(BulkBase):
+ CREATE_DRIVE_MEDIA = config.get('CREATE_DRIVE_MEDIA', 'disk')
+ CREATE_DRIVE_SIZE = config.get('CREATE_DRIVE_SIZE', 10*1024**3)
+ CREATE_DRIVE_DESCRIPTION = config.get('CREATE_DRIVE_DESCRIPTION', 'some descr')
+
+ def __init__(self, media=CREATE_DRIVE_MEDIA, size=CREATE_DRIVE_SIZE,
+ description=CREATE_DRIVE_DESCRIPTION,
+ *args, **kwargs):
+ super(DrivesBulk, self).__init__(*args, **kwargs)
+
+ self.media = media
+ self.size = size
+ self.description = description
+
+ def generate_definition(self):
+ return {
+ "media": self.media,
+ "name": self.get_name(),
+ "size": self.size,
+ "meta": {
+ "description": self.description,
+ }
+ }
+
+ def create(self, count):
+ """Creates a number of new drives
+
+ @param count: the amount to be created
+ """
+
+ drives = []
+ for _ in range(count):
+ d = self.generate_definition()
+ req = {
+ "objects": [d,],
+ }
+ resp = self.c_drive.create(req)
+ LOG.info('Created drive %r', resp['name'])
+ drives.append(resp)
+ return drives
+
+ def delete(self, uuid, name):
+ self.c_drive.delete(uuid)
+ LOG.info('Deleted drive %r', name)
+
+ def wipe(self):
+ """Deletes all artifacts created by this identification prefix
+ """
+ resp = self.get_list()
+ for d in resp:
+ self.delete(d['uuid'], d['name'])
+
+ def clone(self, count, source_name_or_uuid):
+ """Creates a number of new drives, cloning from the given original.
+
+ The source drive is first looked-up in the drives of the current account and then in the drives library
+
+ @param count: the amount to be created
+ @param source_name_or_uuid: either the UUID of the source or substring match of its name
+ """
+ source_drive = self.lookup(source_name_or_uuid)
+
+ drives = []
+ for _ in range(count):
+ d = {
+ "media": source_drive['media'],
+ "name": self.get_name(),
+ "size": source_drive['size'],
+ "meta": source_drive['meta'],
+ "affinities": source_drive['affinities'],
+ }
+ resp = self.c_drive.clone(source_drive['uuid'], d)
+ LOG.info('Cloned drive %r from %r', resp['name'], source_drive['name'])
+ drives.append(resp)
+
+ # Wait for all drives to finish clonning
+ drives_uuids = [d['uuid'] for d in drives]
+
+ def is_clonning_finished():
+
+ existing_drives = self.get_detail()
+ current_scenario_drives = [d for d in existing_drives if d['uuid'] in drives_uuids]
+ current_scenario_drives_statuses = [d['status'] for d in current_scenario_drives]
+
+ return current_scenario_drives_statuses
+
+ statuses = is_clonning_finished()
+ while 'cloning_dst' in statuses:
+ time.sleep(10)
+ drives_statuses_string = '\n'.join(['{}: {}'.format(uuid, status) for uuid, status in zip(drives_uuids, statuses)])
+ LOG.info('Waiting for all drives cloning from {} to finish cloning:\n{}'.format(source_drive['uuid'],
+ drives_statuses_string))
+ statuses = is_clonning_finished()
+
+ # All finished print final statuses
+ drives_statuses_string = '\n'.join(['{}: {}'.format(uuid, status) for uuid, status in zip(drives_uuids, statuses)])
+ LOG.info('Finished cloning {} to drives:\n{}'.format(source_drive['uuid'], drives_statuses_string))
+
+ return drives
+
+ def clone_all(self, count=1):
+ src_drives = self.get_detail()
+ drives = []
+ for drv in src_drives:
+ if drv['status'] == 'unavailable':
+ continue
+ for i in range(int(count)):
+ d = {
+ "media": drv['media'],
+ "name": 'clone_%s_%i' % (drv['name'], i),
+ "size": drv['size'],
+ "meta": drv['meta'],
+ "affinities": drv['affinities'],
+ }
+ resp = self.c_drive.clone(drv['uuid'], d)
+ LOG.info('Cloned drive %r from %r', resp['name'], drv['name'])
+ drives.append(resp)
+ return drives
+
+ def get_list(self):
+ """Queries the drives in this account with the given prefix
+ """
+ resp = self.c_drive.list(query_params={"fields":'name,uuid'})
+ resp = filter(lambda x: x['name'].startswith(self.id_prefix), resp)
+ return resp
+
+ def get_detail(self):
+ resp = self.c_drive.list_detail()
+ resp = filter(lambda x: x['name'].startswith(self.id_prefix), resp)
+ return resp
+
+ def lookup(self, name_or_uuid):
+ resp = self.c_drive.list_detail()
+ candidates = self.filter_by_name_uuid(resp, name_or_uuid)
+ if not candidates:
+ resp = self.c_drive.list_library_drives()
+ candidates = self.filter_by_name_uuid(resp, name_or_uuid)
+ if len(candidates) == 0:
+ raise Exception("Could not find %s with lookup key %s" % (
+ self.__class__.__name__, name_or_uuid))
+ return candidates[0]
+
+ def get_by_uuids(self, uuids):
+ """Queries the drives in this account with the given prefix
+ """
+ resp = self.c_drive.list_detail(query_params={"fields":'name,uuid,status'})
+ resp = filter(lambda x: x['uuid'] in uuids, resp)
+ return resp
+
diff --git a/pycloudsigma-master/src/cloudsigma/conf.py b/pycloudsigma-master/src/cloudsigma/conf.py
new file mode 100644
index 0000000..4de10f8
--- /dev/null
+++ b/pycloudsigma-master/src/cloudsigma/conf.py
@@ -0,0 +1,9 @@
+"""Configuration of the client."""
+import os
+
+CONFIG_LOCATION = os.getenv('CLOUDSIGMA_CONFIG',
+ os.path.join(os.path.expanduser('~'), '.cloudsigma.conf')
+ )
+
+from configobj import ConfigObj
+config = ConfigObj(CONFIG_LOCATION)
diff --git a/pycloudsigma-master/src/cloudsigma/errors.py b/pycloudsigma-master/src/cloudsigma/errors.py
new file mode 100644
index 0000000..ec3fd09
--- /dev/null
+++ b/pycloudsigma-master/src/cloudsigma/errors.py
@@ -0,0 +1,18 @@
+class ApiClientError(Exception):
+ pass
+
+
+class PermissionError(ApiClientError):
+ pass
+
+
+class ClientError(ApiClientError):
+ pass
+
+
+class ServerError(ApiClientError):
+ pass
+
+
+class AuthError(ApiClientError):
+ pass
diff --git a/pycloudsigma-master/src/cloudsigma/generic.py b/pycloudsigma-master/src/cloudsigma/generic.py
new file mode 100644
index 0000000..32d76a4
--- /dev/null
+++ b/pycloudsigma-master/src/cloudsigma/generic.py
@@ -0,0 +1,136 @@
+import requests
+import urlparse
+import simplejson
+import logging
+
+from websocket import create_connection
+from .conf import config
+from . import errors
+
+LOG = logging.getLogger(__name__)
+
+
+class GenericClient(object):
+ """Handles all low level HTTP, authentication, parsing and error handling.
+ """
+ LOGIN_METHOD_BASIC = 'basic'
+ LOGIN_METHOD_SESSION = 'session'
+ LOGIN_METHODS = (
+ LOGIN_METHOD_BASIC,
+ LOGIN_METHOD_SESSION,
+ )
+
+ def __init__(self, api_endpoint=None, username=None, password=None, login_method=LOGIN_METHOD_BASIC):
+ self.api_endpoint = api_endpoint if api_endpoint else config['api_endpoint']
+ self.username = username if username else config['username']
+ self.password = password if password else config['password']
+ self.login_method = config.get('login_method', login_method)
+ assert self.login_method in self.LOGIN_METHODS, 'Invalid value %r for login_method' % (login_method,)
+
+ self._session = None
+ self.resp = None
+ self.response_hook = None
+
+ def _login_session(self):
+ raise NotImplementedError()
+
+ def _get_full_url(self, url):
+ api_endpoint = urlparse.urlparse(self.api_endpoint)
+ if url.startswith(api_endpoint.path):
+ full_url = list(api_endpoint)
+ full_url[2] = url
+ full_url = urlparse.urlunparse(full_url)
+ else:
+ if url[0] == '/':
+ url = url[1:]
+ full_url = urlparse.urljoin(self.api_endpoint, url)
+
+ if not full_url.endswith("/"):
+ full_url += "/"
+
+ return full_url
+
+ def _process_response(self, resp, return_list=False):
+ resp_data = None
+ if resp.status_code in (200, 201, 202):
+ resp_data = resp.json().copy()
+ if 'objects' in resp_data:
+ resp_data = resp_data['objects']
+ if len(resp_data) == 1 and not return_list:
+ resp_data = resp_data[0]
+ elif resp.status_code == 401:
+ raise errors.AuthError()
+ elif resp.status_code == 403:
+ raise errors.PermissionError(resp.text)
+ elif resp.status_code / 100 == 4:
+ raise errors.ClientError(resp.status_code, resp.text)
+ elif resp.status_code / 100 == 5:
+ raise errors.ServerError(resp.status_code, resp.text)
+
+ return resp_data
+
+ def _get_req_args(self, body=None, query_params=None):
+ kwargs = {}
+ if self.login_method == self.LOGIN_METHOD_BASIC:
+ kwargs['auth'] = (self.username, self.password)
+
+ kwargs['headers'] = {
+ 'content-type': 'application/json',
+ 'user-agent': 'CloudSigma turlo client',
+ }
+
+ if query_params:
+ if 'params' not in kwargs:
+ kwargs['params'] = {}
+ kwargs['params'].update(query_params)
+
+ if self.response_hook is not None:
+ kwargs['hooks'] = {
+ 'response': self.response_hook
+ }
+
+ return kwargs
+
+ @property
+ def http(self):
+ if self._session:
+ return self._session
+ return requests
+
+ def get(self, url, query_params=None, return_list=False):
+ kwargs = self._get_req_args(query_params=query_params)
+ self.resp = self.http.get(self._get_full_url(url), **kwargs)
+ return self._process_response(self.resp, return_list)
+
+ def put(self, url, data, query_params=None, return_list=False):
+ kwargs = self._get_req_args(body=data, query_params=query_params)
+ self.resp = self.http.put(self._get_full_url(url), data=simplejson.dumps(data), **kwargs)
+ return self._process_response(self.resp, return_list)
+
+ def post(self, url, data, query_params=None, return_list=False):
+ kwargs = self._get_req_args(body=data, query_params=query_params)
+ self.resp = self.http.post(self._get_full_url(url), data=simplejson.dumps(data), **kwargs)
+ return self._process_response(self.resp, return_list)
+
+ def delete(self, url, query_params=None):
+ self.resp = self.http.delete(self._get_full_url(url), **self._get_req_args(query_params=query_params))
+ return self._process_response(self.resp)
+
+
+class WebsocketClient(object):
+ def __init__(self, cookie, timeout=10):
+ self.conn = create_connection(config['ws_endpoint'], timeout=timeout, header=['Cookie: async_auth=%s' % (cookie,)])
+
+ def recv(self, timeout=None, return_raw=False):
+ if timeout is None:
+ ret = self.conn.recv()
+ else:
+ old_timeout = self.conn.gettimeout()
+ self.conn.settimeout(timeout)
+ try:
+ ret = self.conn.recv()
+ finally:
+ self.conn.settimeout(old_timeout)
+ if not return_raw:
+ ret = simplejson.loads(ret)
+ return ret
diff --git a/pycloudsigma-master/src/cloudsigma/metadata.py b/pycloudsigma-master/src/cloudsigma/metadata.py
new file mode 100644
index 0000000..f1abeb7
--- /dev/null
+++ b/pycloudsigma-master/src/cloudsigma/metadata.py
@@ -0,0 +1,23 @@
+import serial
+import json
+
+
class GetServerMetadata:
    """Read the guest's metadata from the virtual serial port.

    Writing the trigger marker to the first serial device (``/dev/ttyS1``)
    makes the host respond with the metadata as a JSON payload on the same
    device.
    """
    result = None

    def __init__(self):
        # Open serial device (should always be ttyS1)
        ser = serial.Serial('/dev/ttyS1', timeout=1)
        try:
            # Trigger a read from the serial device
            ser.write('<\n\n>')

            # Read the data and convert it to json.
            # NOTE(review): assumes the whole payload arrives as the first
            # line returned by readlines() - confirm against the device docs.
            data = ser.readlines()
            self.result = json.loads(data[0])
        finally:
            # BUG FIX: previously the port was only closed on success; a
            # read error or invalid JSON would leak the serial device.
            ser.close()

    def get(self):
        """Return the metadata dict captured at construction time."""
        return self.result
diff --git a/pycloudsigma-master/src/cloudsigma/resource.py b/pycloudsigma-master/src/cloudsigma/resource.py
new file mode 100644
index 0000000..65f340e
--- /dev/null
+++ b/pycloudsigma-master/src/cloudsigma/resource.py
@@ -0,0 +1,489 @@
+import socket
+import time
+from cloudsigma.generic import GenericClient, WebsocketClient
+
+
class ResourceBase(object):
    """Base class for CloudSigma API resources.

    Subclasses set ``resource_name`` to the URL fragment of the resource
    (e.g. ``'drives'``) and inherit the generic CRUD helpers implemented
    on top of :class:`GenericClient`.
    """
    resource_name = None

    def __init__(self, *args, **kwargs):
        # All constructor arguments are forwarded verbatim to the HTTP client.
        self.c = GenericClient(*args, **kwargs)

    def attach_response_hook(self, func):
        """Install *func* as the client's response hook."""
        self.c.response_hook = func

    def detach_response_hook(self):
        """Remove any previously attached response hook."""
        self.c.response_hook = None

    def _get_url(self):
        """Return the base URL path of this resource, e.g. ``'/drives/'``."""
        assert self.resource_name, 'Descendant class must set the resource_name field'
        return '/%s/' % (self.resource_name,)

    def get(self, uuid=None):
        """Fetch the resource root, or a single object when *uuid* is given."""
        url = self._get_url()
        if uuid is not None:
            url += uuid
        return self.c.get(url, return_list=False)

    def get_schema(self):
        """Return the schema description of this resource."""
        url = self._get_url() + 'schema'
        return self.c.get(url)

    def get_from_url(self, url):
        """Fetch an object by an explicit resource URI."""
        return self.c.get(url, return_list=False)

    def list(self, query_params=None):
        """Return the short listing of all objects of this resource."""
        url = self._get_url()
        _query_params = {
            'limit': 0,  # get all results, do not use pagination
        }
        if query_params:
            _query_params.update(query_params)
        return self.c.get(url, query_params=_query_params, return_list=True)

    def list_detail(self, query_params=None):
        """Return the detailed listing of all objects of this resource."""
        url = self._get_url() + 'detail/'
        _query_params = {
            'limit': 0,  # get all results, do not use pagination
        }
        if query_params:
            _query_params.update(query_params)
        return self.c.get(url, query_params=_query_params, return_list=True)

    def _pepare_data(self, data):
        """Normalize *data* to the ``{'objects': [...]}`` envelope the API expects.

        :param data: a single object dict, a list/tuple of them, or an
            already-wrapped ``{'objects': [...]}`` dict.
        :raises TypeError: if *data* is not a list, tuple or dict.
        """
        res_data = data
        if isinstance(data, (list, tuple)):
            res_data = {'objects': data}
        elif isinstance(data, dict):
            # BUG FIX: was ``data.has_key('objects')`` - removed in Python 3
            # and unidiomatic in Python 2; ``in`` works everywhere.
            if 'objects' not in data:
                res_data = {'objects': [data]}
        else:
            # BUG FIX: message read "is not should be of type"; also wrap the
            # argument in a tuple so %-formatting is safe for any value.
            raise TypeError('%r should be of type list, tuple or dict' % (data,))
        return res_data

    def create(self, data, query_params=None):
        """POST *data* (normalized via :meth:`_pepare_data`) to create objects."""
        query_params = query_params or {}
        url = self._get_url()
        return self.c.post(url, self._pepare_data(data), return_list=False, query_params=query_params)

    def update(self, uuid, data):
        """PUT *data* to the object identified by *uuid*."""
        url = self._get_url() + uuid + '/'
        return self.c.put(url, data, return_list=False)

    def delete(self, uuid, query_params=None):
        """DELETE the object identified by *uuid*."""
        url = self._get_url() + uuid
        return self.c.delete(url, query_params=query_params)

    def _action(self, uuid, action, data=None, query_params=None):
        """POST an action (``?do=<action>``) on one object, or on the resource
        root when *uuid* is None."""
        query_params = query_params or {}
        q_params = {'do': action}
        q_params.update(query_params)

        if uuid is None:
            url = self._get_url() + 'action/'
        else:
            url = self._get_url() + uuid + '/action/'
        return self.c.post(url,
                           data,
                           query_params=q_params,
                           return_list=False
                           )
+
+
class Profile(ResourceBase):
    """The account profile (``/profile/``) - a singleton, so no uuid arguments."""
    resource_name = 'profile'

    def get(self):
        """Return the profile object."""
        return self.c.get(self._get_url(), return_list=False)

    def update(self, data):
        """Update the profile with *data* via PUT."""
        return self.c.put(self._get_url(), data, return_list=False)
+
+
class GlobalContext(ResourceBase):
    """The account-wide server context (``/global_context/``) - a singleton."""
    resource_name = 'global_context'

    def get(self):
        """Return the global context object."""
        return self.c.get(self._get_url(), return_list=False)

    def update(self, data):
        # NOTE(review): this endpoint is updated with POST, unlike Profile
        # which uses PUT - presumably intentional; confirm against the API docs.
        return self.c.post(self._get_url(), data, return_list=False)
+
+
class LibDrive(ResourceBase):
    """Library drive images exposed under ``/libdrives/``."""
    resource_name = 'libdrives'
+
+
class Drive(ResourceBase):
    """User drives (``/drives/``)."""
    resource_name = 'drives'

    @staticmethod
    def _avoid_query_params(avoid):
        """Normalize the *avoid* placement hint into query parameters.

        Consolidates the handling that was previously duplicated between
        :meth:`clone` and :meth:`create`.

        :param avoid:
            A single drive/server uuid, a list of them, or None.
        :return:
            ``{}`` when *avoid* is empty, otherwise ``{'avoid': 'uuid1,uuid2,...'}``.
        """
        query_params = {}
        if avoid:
            if isinstance(avoid, basestring):
                avoid = [avoid]
            query_params['avoid'] = ','.join(avoid)
        return query_params

    def clone(self, uuid, data=None, avoid=None):
        """
        Clone a drive.

        :param uuid:
            Source drive for the clone.
        :param data:
            Clone drive options. Refer to API docs for possible options.
        :param avoid:
            A list of drive or server uuids to avoid for the clone. Avoid attempts to put the clone on a different
            physical storage host from the drives in *avoid*. If a server uuid is in *avoid* it is internally expanded
            to the drives attached to the server.
        :return:
            Cloned drive definition.
        """
        data = data or {}
        return self._action(uuid, 'clone', data, query_params=self._avoid_query_params(avoid))

    def resize(self, uuid, data=None):
        """
        Resize a drive. Raises an error if drive is mounted on a running server or unavailable.

        :param uuid:
            UUID of the drive.
        :param data:
            Drive definition containing the new size.
        :return:
        """
        data = data or {}
        return self._action(uuid, 'resize', data)

    def create(self, data, avoid=None):
        """
        Create a drive.

        :param data:
            Drive definition.
        :param avoid:
            A list of drive or server uuids to avoid for the new drive. Avoid attempts to put the drive on a different
            physical storage host from the drives in *avoid*. If a server uuid is in *avoid* it is internally expanded
            to the drives attached to the server.
        :return:
            New drive definition.
        """
        return super(Drive, self).create(data, query_params=self._avoid_query_params(avoid))
+
class Server(ResourceBase):
    """Virtual servers (``/servers/``) with lifecycle, VNC and clone actions."""
    resource_name = 'servers'

    def start(self, uuid, allocation_method=None):
        """Invoke the 'start' action, optionally passing an allocation method."""
        payload = {'allocation_method': str(allocation_method)} if allocation_method else {}
        return self._action(uuid, 'start', payload)

    def stop(self, uuid):
        """Invoke the 'stop' action."""
        # Explicit empty body works around an API issue - see TUR-1346.
        return self._action(uuid, 'stop', data={})

    def restart(self, uuid):
        """Invoke the 'restart' action."""
        # Explicit empty body works around an API issue - see TUR-1346.
        return self._action(uuid, 'restart', data={})

    def shutdown(self, uuid):
        """Invoke the 'shutdown' action."""
        # Explicit empty body works around an API issue - see TUR-1346.
        return self._action(uuid, 'shutdown', data={})

    def runtime(self, uuid):
        """Fetch the runtime sub-resource of the server."""
        runtime_url = '%s%s/runtime/' % (self._get_url(), uuid)
        return self.c.get(runtime_url, return_list=False)

    def open_vnc(self, uuid):
        """Invoke the 'open_vnc' action."""
        return self._action(uuid, 'open_vnc', data={})

    def close_vnc(self, uuid):
        """Invoke the 'close_vnc' action."""
        return self._action(uuid, 'close_vnc', data={})

    def clone(self, uuid, data=None, avoid=None):
        """
        Clone a server. Attached disk drives get cloned and attached to the new server, and attached cdroms get
        attached to the new server (without cloning).

        :param uuid:
            Source server for the clone.
        :param data:
            Clone server options. Refer to API docs for possible options.
        :param avoid:
            A list of drive or server uuids to avoid for the clone. Avoid attempts to put the cloned drives on a
            different physical storage host from the drives in *avoid*. If a server uuid is in *avoid* it is internally
            expanded to the drives attached to the server.
        :return:
            Cloned server definition.
        """
        avoid_params = {}
        if avoid:
            uuid_list = [avoid] if isinstance(avoid, basestring) else avoid
            avoid_params['avoid'] = ','.join(uuid_list)
        return self._action(uuid, 'clone', data=data or {}, query_params=avoid_params)

    def delete(self, uuid, recurse=None):
        """
        Deletes a server.

        :param uuid:
            uuid of the server to delete
        :param recurse:
            option to recursively delete attached drives. Possible values are 'all_drives', 'disks'. It is
            possible to use one of the supplied convenience functions: delete_with_all_drives, delete_with_disks,
            delete_with_cdroms
        :return:
        """
        recurse_params = {'recurse': recurse} if recurse is not None else None
        return super(Server, self).delete(uuid, query_params=recurse_params)

    def delete_with_all_drives(self, uuid):
        """Delete the server together with all attached drives."""
        return self.delete(uuid, recurse='all_drives')

    def delete_with_disks(self, uuid):
        """Delete the server together with attached drives where media='disk'."""
        return self.delete(uuid, recurse='disks')

    def delete_with_cdroms(self, uuid):
        """Delete the server together with attached drives where media='cdrom'."""
        return self.delete(uuid, recurse='cdroms')
+
class VLAN(ResourceBase):
    """Private VLANs (``/vlans/``)."""
    resource_name = 'vlans'


class IP(ResourceBase):
    """IP addresses (``/ips/``)."""
    resource_name = 'ips'


class FirewallPolicy(ResourceBase):
    """Firewall policies (``/fwpolicies/``)."""
    resource_name = 'fwpolicies'


class Subscriptions(ResourceBase):
    """Resource subscriptions (``/subscriptions/``)."""
    resource_name = 'subscriptions'

    def extend(self, uuid, data=None):
        """Invoke the 'extend' action on the subscription identified by *uuid*."""
        return self._action(uuid, 'extend', data or {})

class SubscriptionCalculator(Subscriptions):
    """Subscription price calculator; same interface as Subscriptions."""
    resource_name = 'subscriptioncalculator'

class Ledger(ResourceBase):
    """Account ledger entries (``/ledger/``)."""
    resource_name = 'ledger'

class Usage(ResourceBase):
    """Usage records (``/usage/``)."""
    resource_name = 'usage'


class Balance(ResourceBase):
    """Account balance (``/balance/``)."""
    resource_name = 'balance'


class Discount(ResourceBase):
    """Discounts (``/discount/``)."""
    resource_name = 'discount'


class Pricing(ResourceBase):
    """Pricing information (``/pricing/``)."""
    resource_name = 'pricing'


class AuditLog(ResourceBase):
    """Audit log entries (``/logs/``)."""
    resource_name = 'logs'


class Licenses(ResourceBase):
    """Licenses (``/licenses/``)."""
    resource_name = 'licenses'

class Capabilites(ResourceBase):
    # NOTE(review): class name is a historic misspelling of 'Capabilities';
    # renaming it would break existing callers (e.g. test_capabilities.py).
    """Cloud capabilities (``/capabilities/``)."""
    resource_name = 'capabilities'

class Accounts(ResourceBase):
    """Account actions (``/accounts/``)."""
    resource_name = 'accounts'

    def authenticate_asynchronous(self):
        """Obtain the async_auth cookie used by the websocket helper."""
        return self._action(None, 'authenticate_asynchronous', data={})  # data empty see TUR-1346


class CurrentUsage(ResourceBase):
    """Current usage snapshot (``/currentusage/``)."""
    resource_name = 'currentusage'
+
+
class Snapshot(ResourceBase):
    """Drive snapshots (``/snapshots/``).

    BUG FIX: this class was previously defined twice, byte for byte; the
    second definition silently shadowed the first. The duplicate has been
    removed.
    """
    resource_name = 'snapshots'

    def clone(self, uuid, data=None, avoid=None):
        """
        Clone a snapshot into a drive.

        :param uuid:
            Source snapshot for the clone.
        :param data:
            Clone drive options. Refer to API docs for possible options.
        :param avoid:
            A list of drive or server uuids to avoid for the clone. Avoid attempts to put the clone on a different
            physical storage host from the drives in *avoid*. If a server uuid is in *avoid* it is internally expanded
            to the drives attached to the server.
        :return:
            Cloned drive definition.
        """
        data = data or {}
        query_params = {}
        if avoid:
            if isinstance(avoid, basestring):
                avoid = [avoid]
            query_params['avoid'] = ','.join(avoid)

        return self._action(uuid, 'clone', data, query_params=query_params)
+
class Tags(ResourceBase):
    """Tags (``/tags/``) and listings of the resources they are attached to."""
    resource_name = 'tags'

    def list_resource(self, uuid, resource_name):
        """List all objects of *resource_name* carrying the tag *uuid*."""
        tagged_url = '%s%s/%s/' % (self._get_url(), uuid, resource_name)
        return self.c.get(tagged_url, return_list=True)

    def drives(self, uuid):
        """List drives carrying the tag *uuid*."""
        return self.list_resource(uuid, 'drives')

    def servers(self, uuid):
        """List servers carrying the tag *uuid*."""
        return self.list_resource(uuid, 'servers')

    def ips(self, uuid):
        """List IPs carrying the tag *uuid*."""
        return self.list_resource(uuid, 'ips')

    def vlans(self, uuid):
        """List VLANs carrying the tag *uuid*."""
        return self.list_resource(uuid, 'vlans')
+
+
class WebsocketTimeoutError(Exception):
    """Raised when no matching event arrives on the websocket in time."""
    pass
+
+
class Websocket(object):
    """Helper for waiting on asynchronous account events over the websocket."""

    def __init__(self, timeout=10):
        self.timeout = timeout
        accounts = Accounts()
        accounts.authenticate_asynchronous()
        cookie = accounts.c.resp.cookies['async_auth']
        self.ws = WebsocketClient(cookie, self.timeout)

    def wait(self, message_filter=None, timeout=None):
        """Collect frames until one matches *message_filter* or *timeout* expires.

        Example filters::

            {'resource_type': ['drives']}
            {'resource_uri': ['/api/2.0/balance/']}

        :return: all frames received, the matching one last.
        :raises WebsocketTimeoutError: if no frame matched within *timeout*.
        """
        events = []
        if message_filter is not None:
            # Normalize scalar filter values to single-element lists.
            for key in message_filter:
                if isinstance(message_filter[key], basestring):
                    message_filter[key] = [message_filter[key]]
        if timeout is None:
            timeout = self.timeout
        while timeout > 0:
            start_t = time.time()
            try:
                frame = self.ws.recv(timeout)
            except socket.timeout:
                raise WebsocketTimeoutError('Timeout reached when waiting for events')
            events.append(frame)
            if self.filter_frame(message_filter, frame):
                return events
            timeout = timeout - (time.time() - start_t)
        raise WebsocketTimeoutError('Timeout reached when waiting for events')

    def filter_frame(self, message_filter, frame):
        """Return True when *frame* matches *message_filter* (or the filter is empty)."""
        if not message_filter:
            return True
        for key in message_filter:
            if key in frame:
                for value in message_filter[key]:
                    if frame[key] == value:
                        return True
        return False

    def wait_obj_type(self, resource_type, cls, timeout=None):
        """Wait for an event on *resource_type* and fetch the referenced object."""
        # BUG FIX: the timeout argument was previously accepted but ignored.
        ret = self.wait({"resource_type": resource_type}, timeout=timeout)[-1]
        return cls().get_from_url(ret['resource_uri'])

    def wait_obj_uri(self, resource_uri, cls, timeout=None):
        """Wait for an event on *resource_uri* and fetch the object afresh."""
        # BUG FIX: the timeout argument was previously accepted but ignored,
        # so wait_for_status()'s timeout never reached the actual wait.
        self.wait({"resource_uri": resource_uri}, timeout=timeout)
        return cls().get_from_url(resource_uri)

    def wait_obj_wrapper(self, waiter, args, kwargs=None, timeout=None, extra_filter=lambda x: True):
        """Repeatedly call *waiter* until *extra_filter* accepts its result.

        The remaining time budget is passed down to *waiter* on every call.
        """
        if timeout is None:
            timeout = self.timeout
        if kwargs is None:
            kwargs = {}
        while timeout > 0:
            start_t = time.time()
            kwargs['timeout'] = timeout
            frame = waiter(*args, **kwargs)
            if extra_filter(frame):
                return frame
            timeout = timeout - (time.time() - start_t)
        raise WebsocketTimeoutError('Timeout reached when waiting for events')

    def wait_for_status(self, uri, resource, status, timeout=30):
        """Wait until the object at *uri* reports the given *status*."""
        return self.wait_obj_wrapper(self.wait_obj_uri, (uri, resource), timeout=timeout,
                                     extra_filter=lambda x: x['status'] == status)
\ No newline at end of file
diff --git a/pycloudsigma-master/src/cloudsigma/resumable_upload.py b/pycloudsigma-master/src/cloudsigma/resumable_upload.py
new file mode 100644
index 0000000..7687f13
--- /dev/null
+++ b/pycloudsigma-master/src/cloudsigma/resumable_upload.py
@@ -0,0 +1,200 @@
+import requests
+import os
+import datetime
+import Queue
+import threading
+import time
+from logging import getLogger
+
+from .resource import Drive, ResourceBase
+
+LOG = getLogger(__name__)
+
class Upload(ResourceBase):
    """Resumable drive-image upload client.

    Implements the resumable.js protocol: the image is split into chunks
    which are uploaded by parallel worker threads, skipping chunks the
    server reports as already present.
    """
    resource_name = 'initupload'

    def __init__(self, image_path, drive_uuid=None, chunk_size=5 * 1024 ** 2, n_threads=4,
                 drive_name=None, drive_media='disk', progress_callback=None, progress_report_interval=1,
                 generic_client_kwargs=None):
        """
        A python implementation of the resumable.js protocol.

        :param image_path:
            An absolute path to the drive image to be uploaded
        :param drive_uuid:
            If given will try to resume the upload to the given drive uuid
        :param chunk_size:
            The size of the chunk in bytes. Default is 5MB.
        :param n_threads:
            Number of parallel upload threads. Default is 4.
        :param drive_name:
            The name of the uploaded drive. If not given it is set to "Upload_<utc timestamp>".
        :param drive_media:
            The media of the uploaded drive. If not given it will be set to "disk"
        :param progress_callback:
            A callback to be called every *progress_report_interval* seconds with the current progress:
            progress_callback(self.uploaded_size, self.file_size)
        :param progress_report_interval:
            Seconds between *progress_callback* calls. Default is 1 second.
        :param generic_client_kwargs:
            Keyword arguments for the GenericClient __init__
        :return:
        """
        self.generic_client_kwargs = generic_client_kwargs or {}
        super(Upload, self).__init__(**self.generic_client_kwargs)
        self.drive_uuid = drive_uuid
        self._drive_size = None
        self.image_path = image_path
        self.chunk_size = chunk_size
        self.n_threads = n_threads
        self.file_size = os.path.getsize(self.image_path)
        self.create_data = {
            'name': drive_name or 'Upload_{:%Y-%m-%d %H:%M:%S}'.format(datetime.datetime.utcnow()),
            'media': drive_media,
            'size': self.file_size
        }
        self.dc = Drive(**self.generic_client_kwargs)
        self.queue = Queue.Queue()
        self.finished = False
        self.progress_lock = threading.RLock()
        self.uploaded_size = 0
        self.progress_callback = progress_callback
        self.progress_report_interval = progress_report_interval

    @property
    def remote_size(self):
        """Size of the remote drive, fetched once and cached."""
        if self._drive_size:
            return self._drive_size

        drive = self.dc.get(self.drive_uuid)
        self._drive_size = drive['size']

        return self._drive_size

    def upload(self):
        """Create the drive if needed and upload all chunks, blocking until done.

        :raises ValueError: if the local file size differs from the remote drive size.
        """
        if not self.drive_uuid:
            drive = self.create(self.create_data)
            self.drive_uuid = drive['uuid']

        if self.remote_size != self.file_size:
            raise ValueError('File {} has different size from remote drive {}:'
                             ' {} != {}'.format(self.image_path, self.drive_uuid, self.file_size, self.remote_size))

        self.enqueue_chunks()

        # The watcher thread blocks on queue.join() so the main thread can
        # poll it and report progress in the meantime.
        watcher_t = threading.Thread(target=self.queue.join)
        watcher_t.daemon = True
        watcher_t.start()

        self.start_threads()

        LOG.debug('waiting for queue to finish')
        while watcher_t.is_alive():
            self.report_progress()
            time.sleep(self.progress_report_interval)
        self.report_progress()

        LOG.debug('queue to finished')

    def retry(self):
        """Restart the upload from scratch, resetting the progress counter."""
        self.uploaded_size = 0
        self.upload()

    def file_chunks(self):
        """
        Yields tuples (chunk_number, chunk_offset, real_chunk_size).

        ``chunk_number`` is the number of the chunk. Numbering starts from 1.
        ``chunk_offset`` can be used to seek in the file.
        ``real_chunk_size`` is necessary because the last chunk is bigger.

        :return: yields (chunk_number, chunk_offset, real_chunk_size) tuples
        """
        n_chunks = self.file_size // self.chunk_size
        if n_chunks > 0:
            for chunk in range(n_chunks - 1):  # excludes last chunk and starts from 1. last chunk is bigger
                offset = chunk * self.chunk_size
                yield chunk + 1, offset, self.chunk_size

            last_chunk = n_chunks - 1
            last_offset = last_chunk * self.chunk_size
            last_chunk_size = self.file_size - last_offset

            yield last_chunk + 1, last_offset, last_chunk_size
        else:  # chunk size bigger than file size
            yield 1, 0, self.file_size

    def enqueue_chunks(self):
        """Put every chunk descriptor on the work queue."""
        for chunk_number, chunk_offset, real_chunk_size in self.file_chunks():
            self.queue.put((chunk_number, chunk_offset, real_chunk_size))

    def start_threads(self):
        """Start the daemon worker threads that drain the queue."""
        for _ in range(self.n_threads):
            upload_thread = threading.Thread(target=self.upload_enqueued)
            upload_thread.daemon = True
            upload_thread.start()

    def upload_enqueued(self):
        """Worker loop: take chunks off the queue and upload them, requeueing failures."""
        while not self.finished:
            chunk_number, chunk_offset, real_chunk_size = self.queue.get()
            try:
                LOG.debug('Uploading chunk {}:{}:{}'.format(chunk_number, chunk_offset, real_chunk_size))
                self.upload_chunk(chunk_number, chunk_offset, real_chunk_size)
                self.update_progress(real_chunk_size)
            except Exception:
                # BUG FIX: was a bare except, which also caught SystemExit and
                # KeyboardInterrupt and requeued the chunk forever.
                LOG.exception('Error ocurred for chunk {}'.format(chunk_number))
                self.queue.put((chunk_number, chunk_offset, real_chunk_size))
            finally:
                # Always call task_done even on fail because in order to finish the number of put calls should be
                # equal to task_done calls
                self.queue.task_done()

    def upload_chunk(self, chunk_number, chunk_offset, real_chunk_size):
        """Upload a single chunk, skipping it if the server already has it.

        :raises Exception: if the upload POST returns a non-2xx status.
        """
        upload_url = self.c._get_full_url('/{}/{}/upload/'.format('drives', self.drive_uuid))
        # BUG FIX: open in binary mode - drive images are binary data and text
        # mode would corrupt them on platforms with newline translation.
        with open(self.image_path, 'rb') as f:
            f.seek(chunk_offset)
            file_data = f.read(real_chunk_size)
        # do str() on numbers because requests multipart encoding assumes integers are file descriptors
        resumable_js_data = {'resumableChunkNumber': str(chunk_number),
                             'resumableChunkSize': str(self.chunk_size),
                             'resumableTotalSize': str(self.file_size),
                             'resumableIdentifier': os.path.split(self.image_path)[1],
                             'resumableFilename': os.path.split(self.image_path)[1],
                             }

        kwargs = {
            'auth': (self.c.username, self.c.password),
            'headers': {
                'user-agent': 'CloudSigma turlo client',
            }
        }

        # resumable.js "test" request: ask whether the server already has this chunk.
        res = requests.get(upload_url, params=resumable_js_data, **kwargs)

        if 199 < res.status_code < 300:
            LOG.debug('Chunk {}:{}:{} already uploaded'.format(chunk_number, chunk_offset, real_chunk_size))
            return

        resumable_js_data_multipart = resumable_js_data.items() + [('file', str(file_data))]

        res = requests.post(upload_url, files=resumable_js_data_multipart, **kwargs)
        if 199 < res.status_code < 300:
            LOG.debug('Chunk {}:{}:{} finished uploading'.format(chunk_number, chunk_offset, real_chunk_size))
            return
        else:
            raise Exception('Wrong status {} returned for request '
                            '{}:{}:{}. Response body is:'
                            '\n{}'.format(res.status_code, chunk_number, chunk_offset, real_chunk_size, res.text))

    def update_progress(self, uploaded_size):
        """Thread-safely add *uploaded_size* to the progress counter."""
        # Use the lock as a context manager so it is released even on error.
        with self.progress_lock:
            self.uploaded_size += uploaded_size

    def report_progress(self):
        """Invoke the progress callback, if one was supplied."""
        if self.progress_callback:
            self.progress_callback(self.uploaded_size, self.file_size)
diff --git a/pycloudsigma-master/src/cloudsigma/scenarios.py b/pycloudsigma-master/src/cloudsigma/scenarios.py
new file mode 100644
index 0000000..c3de156
--- /dev/null
+++ b/pycloudsigma-master/src/cloudsigma/scenarios.py
@@ -0,0 +1,50 @@
+from cloudsigma.bulk import DrivesBulk
+
+
def drives_create(id_prefix, count):
    """Create *count* drives in the DrivesBulk batch named by *id_prefix*."""
    count = int(count)

    stress_drives = DrivesBulk(id_prefix=id_prefix)
    stress_drives.create(count)


def drives_clone(id_prefix, count, source_drive):
    """Create *count* clones of *source_drive* in the *id_prefix* batch."""
    count = int(count)

    stress_drives = DrivesBulk(id_prefix=id_prefix)
    stress_drives.clone(count, source_drive)


def drives_clone_all(id_prefix, count):
    """Clone the batch's drives and print the result.

    NOTE(review): unlike the other entry points, *count* is not coerced with
    int() here - presumably DrivesBulk.clone_all handles it; confirm.
    """
    stress_drives = DrivesBulk(id_prefix=id_prefix)
    res = stress_drives.clone_all(count)
    print res


def drives_list(id_prefix):
    """Print the short listing of the batch's drives."""
    stress_drives = DrivesBulk(id_prefix=id_prefix)
    ds = stress_drives.get_list()
    print ds


def _drives_get_by_uuids(id_prefix, uuids):
    """Print the batch's drives matching *uuids* (internal helper)."""
    stress_drives = DrivesBulk(id_prefix=id_prefix)
    ds = stress_drives.get_by_uuids(uuids)
    print ds


def drives_detail(id_prefix):
    """Print the detailed listing of the batch's drives."""
    stress_drives = DrivesBulk(id_prefix=id_prefix)
    ds = stress_drives.get_detail()
    print ds


def drives_wipe(id_prefix):
    """Delete the batch's drives."""
    stress_drives = DrivesBulk(id_prefix=id_prefix)
    stress_drives.wipe()


def drives_number(id_prefix):
    """Print how many drives the batch currently has."""
    stress_drives = DrivesBulk(id_prefix=id_prefix)
    ds = stress_drives.get_list()
    print "Number of DRIVES: %r" % len(ds)
diff --git a/pycloudsigma-master/src/cloudsigma/version.py b/pycloudsigma-master/src/cloudsigma/version.py
new file mode 100644
index 0000000..6a35e85
--- /dev/null
+++ b/pycloudsigma-master/src/cloudsigma/version.py
@@ -0,0 +1 @@
+__version__ = "0.3"
diff --git a/pycloudsigma-master/src/testing/__init__.py b/pycloudsigma-master/src/testing/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/pycloudsigma-master/src/testing/acceptance/__init__.py b/pycloudsigma-master/src/testing/acceptance/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/pycloudsigma-master/src/testing/acceptance/common.py b/pycloudsigma-master/src/testing/acceptance/common.py
new file mode 100644
index 0000000..9bc0737
--- /dev/null
+++ b/pycloudsigma-master/src/testing/acceptance/common.py
@@ -0,0 +1,247 @@
+import cloudsigma.resource as cr
+from cloudsigma import errors
+import unittest
+from nose.plugins.attrib import attr
+import time
+import logging
+from cloudsigma.conf import config
+from unittest import SkipTest
+
+LOG = logging.getLogger(__name__)
+
+
@attr('acceptance_test')
class StatefulResourceTestBase(unittest.TestCase):
    """Shared setup/teardown and polling helpers for acceptance tests.

    Servers and drives whose name contains 'test' are removed before and
    after every test so runs start from a clean account.
    """
    TIMEOUT_DRIVE_CREATED = 2*60
    TIMEOUT_DRIVE_CLONING = 20*60
    TIMEOUT_DRIVE_DELETED = 3*60

    def setUp(self):
        unittest.TestCase.setUp(self)
        self.client = cr.ResourceBase()  # create a resource handle object
        self._clean_servers()
        self._clean_drives()

    def tearDown(self):
        self._clean_servers()
        self._clean_drives()

    def _get_persistent_image_uuid_and_pass(self):
        """Return (uuid, password) of a usable persistent test image, cloning
        it from the drive library if the account does not have one yet."""
        # Get a good persistent test image
        p_name = config.get('persistent_drive_name')
        p_pass = config.get('persistent_drive_ssh_password')

        if p_name is None:
            raise SkipTest('A persistent_drive_name must be stated in the client configuration to execute this test')

        def _filter_drives(av_drives):
            for drive in av_drives:
                if p_name in drive['name'] and drive['status'] in ('mounted', 'unmounted', 'cloning_src', ):
                    return drive['uuid']
            return None

        puuid = _filter_drives(cr.Drive().list_detail())
        if puuid is None:
            puuid = _filter_drives(cr.LibDrive().list_detail())
            if puuid is not None:
                client_drives = cr.Drive()
                clone_drive_def = {
                    'name': p_name,
                }
                cloned_drive = client_drives.clone(puuid, clone_drive_def)
                self._wait_for_status(cloned_drive['uuid'], 'unmounted', timeout=self.TIMEOUT_DRIVE_CLONING, client=client_drives)
                puuid = cloned_drive['uuid']

        if puuid is None:
            raise SkipTest("There is no drive matching {}".format(p_name))

        return puuid, p_pass

    def _verify_list(self, resource, should_be_found, client=None):
        """Poll the detailed listing until *resource* appears (or disappears,
        when *should_be_found* is False), failing after a fixed timeout."""
        TIMEOUT = 20
        WAIT_STEP = 3

        if client is None:
            client = self.client

        count_waited = 0
        while True:
            resources = client.list_detail()
            if should_be_found:
                self.assertGreaterEqual(len(resources), 1, 'Resource listing fails')
            resource_found = False
            for x in resources:
                if x['uuid'] == resource['uuid']:
                    self.assertDictContainsSubset(resource, x)
                    resource_found = True
            if should_be_found == resource_found:
                break

            self.assertLessEqual(count_waited, TIMEOUT/WAIT_STEP, 'Resource list didn\'t update as expected for %d seconds' % (TIMEOUT,))
            time.sleep(WAIT_STEP)
            count_waited += 1

    def _wait_for_status(self, uuid, status, client=None, timeout=40):
        """Poll the object until it reaches *status*, failing after *timeout* seconds."""
        WAIT_STEP = 3

        if client is None:
            client = self.client

        count_waited = 0
        while True:
            resource = client.get(uuid)
            if resource['status'] == status:
                break
            self.assertLessEqual(count_waited, timeout/WAIT_STEP, 'Resource didn\'t reach state "%s" for %d seconds' % (status, timeout))
            time.sleep(WAIT_STEP)
            count_waited += 1

    def _wait_deleted(self, uuid, client=None, timeout=TIMEOUT_DRIVE_DELETED):
        """Poll until GET on the object returns 404, failing after *timeout* seconds."""
        WAIT_STEP = 3

        if client is None:
            client = self.client

        count_waited = 0
        while True:
            try:
                client.get(uuid)
            except errors.ClientError as exc:
                # NOTE(review): exc[0] is assumed to be the HTTP status code of
                # this client's error type - confirm against errors.ClientError.
                if exc[0] == 404:
                    break
                else:
                    raise
            self.assertLessEqual(count_waited, timeout/WAIT_STEP, 'Resource did not delete %d seconds' % (timeout))
            time.sleep(WAIT_STEP)
            count_waited += 1

    def _wait_for_open_socket(self, host, port, timeout=15, close_on_success=False):
        """Wait until a TCP connection to host:port succeeds; return the socket."""
        import socket
        import time

        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        sock.settimeout(3)

        now = time.time()
        connected = False
        while now + timeout >= time.time():
            try:
                # Check if we can connect to socket
                sock.connect((host, port))
            except Exception:
                # BUG FIX: was a bare except, which also swallowed
                # KeyboardInterrupt/SystemExit.
                time.sleep(1)
            else:
                connected = True
                break

        self.assertTrue(connected, "Socket to {}:{} failed to open in {} seconds".format(
            host,
            port,
            timeout,
        ))

        if close_on_success:
            sock.close()

        return sock

    def _wait_socket_close(self, host, port, timeout=15):
        """Wait until TCP connections to host:port start failing."""
        import socket
        import time

        now = time.time()
        closed = False
        while now + timeout >= time.time():
            try:
                # Check if we can connect to socket
                sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
                sock.settimeout(3)
                sock.connect((host, port))
            except Exception:
                # BUG FIX: was a bare except - a Ctrl-C here would have been
                # misreported as "socket closed".
                closed = True
                break
            else:
                sock.close()
                time.sleep(1)
        self.assertTrue(closed, "We can still open connection to {}:{} after {} seconds".format(
            host,
            port,
            timeout,
        ))

    def _clean_servers(self):
        """
        Removes all the servers in the acceptance test account ( containing 'test' keyword )

        :return:
        """
        server_client = cr.Server()

        stopping = []
        deleting = []
        inter = []
        for server in server_client.list_detail():
            if 'test' in server['name']:
                status = server['status']
                if status == 'running':
                    server_client.stop(server['uuid'])
                    stopping.append(server['uuid'])
                elif status == 'stopped':
                    server_client.delete(server['uuid'])
                    deleting.append(server['uuid'])
                else:
                    inter.append(server['uuid'])

        for uuid in stopping:
            try:
                self._wait_for_status(uuid, 'stopped', client=server_client)
            except Exception:
                # BUG FIX: was a bare except.
                LOG.exception("Server {} did not stop in time".format(uuid))
            else:
                server_client.delete(uuid)
                deleting.append(uuid)

        for uuid in deleting:
            try:
                self._wait_deleted(uuid, client=server_client)
            except Exception:
                # BUG FIX: was a bare except.
                LOG.exception("Server {} did not delete in time".format(uuid))

        if len(inter) != 0:
            LOG.error('The servers {} are stuck in intermediate states. Cannot remove them.'.format(inter))

    def _clean_drives(self):
        """
        Removes all the drives in the acceptance test account ( containing 'test' keyword )

        :return:
        """
        drive_client = cr.Drive()

        mounted = []
        deleting = []
        inter = []
        for drive in drive_client.list_detail():
            if 'test' in drive['name']:
                status = drive['status']
                if status == 'mounted':
                    mounted.append(drive['uuid'])
                elif status in ('unmounted', 'uploading'):
                    drive_client.delete(drive['uuid'])
                    deleting.append(drive['uuid'])
                else:
                    inter.append(drive['uuid'])

        for uuid in deleting:
            try:
                self._wait_deleted(uuid, client=drive_client)
            except Exception:
                # BUG FIX: was a bare except.
                LOG.exception("Drive {} did not delete in time".format(uuid))

        if mounted:
            LOG.error('The drives {} are still mounted and cannot be deleted'.format(mounted))

        if inter:
            LOG.error('The drives {} are stuck in intermediate states and cannot be deleted.'.format(inter))
+
diff --git a/pycloudsigma-master/src/testing/acceptance/test_accounts.py b/pycloudsigma-master/src/testing/acceptance/test_accounts.py
new file mode 100644
index 0000000..3021aee
--- /dev/null
+++ b/pycloudsigma-master/src/testing/acceptance/test_accounts.py
@@ -0,0 +1,18 @@
+import unittest
+from nose.plugins.attrib import attr
+
+import cloudsigma.resource as cr
+from cloudsigma.conf import config
+
+
@attr('acceptance_test')
class LoginTest(unittest.TestCase):
    def test_profile(self):
        """Test login and getting user profile

        """
        r = cr.Profile()
        profile = r.get()
        self.assertNotEqual(profile, {}, 'Invalid profile returned')
        # The profile email must match the username the client is configured with.
        self.assertEqual(profile['email'], config['username'], 'Profile returned invalid email')
        self.assertIn(profile['state'], ('REGULAR', 'NEW_USER', 'TRIAL'), 'Profile returned invalid state')
diff --git a/pycloudsigma-master/src/testing/acceptance/test_billing.py b/pycloudsigma-master/src/testing/acceptance/test_billing.py
new file mode 100644
index 0000000..feaea16
--- /dev/null
+++ b/pycloudsigma-master/src/testing/acceptance/test_billing.py
@@ -0,0 +1,54 @@
+import unittest
+from nose.plugins.attrib import attr
+
+import cloudsigma.resource as cr
+
+from testing.utils import DumpResponse
+
+
@attr('docs_snippets', 'acceptance_test')
class BillingDocs(unittest.TestCase):
    """Exercise the read-only billing endpoints and dump the responses as
    documentation snippets.

    Cleanup: the unused local bindings of every response (``discount = ...``
    etc.) have been removed - the calls are made for their dumped responses.
    """

    def test_discount(self):
        client = cr.Discount()
        with DumpResponse(clients=[client])('discount_list'):
            client.get()
        with DumpResponse(clients=[client])('discount_schema'):
            client.get_schema()

    def test_pricing(self):
        client = cr.Pricing()
        with DumpResponse(clients=[client])('pricing_list'):
            client.list(query_params={'limit': 5})

        with DumpResponse(clients=[client])('pricing_schema'):
            client.get_schema()

    def test_balance(self):
        client = cr.Balance()
        with DumpResponse(clients=[client])('balance_list'):
            client.get()
        with DumpResponse(clients=[client])('balance_schema'):
            client.get_schema()

    def test_currentusage(self):
        client = cr.CurrentUsage()
        with DumpResponse(clients=[client])('currentusage_list'):
            client.get()
        with DumpResponse(clients=[client])('currentusage_schema'):
            client.get_schema()

    def test_ledger(self):
        client = cr.Ledger()
        with DumpResponse(clients=[client])('ledger_list'):
            client.get()
        with DumpResponse(clients=[client])('ledger_schema'):
            client.get_schema()

    def test_licenses(self):
        client = cr.Licenses()
        with DumpResponse(clients=[client])('licenses_list'):
            client.get()
        with DumpResponse(clients=[client])('licenses_schema'):
            client.get_schema()
diff --git a/pycloudsigma-master/src/testing/acceptance/test_capabilities.py b/pycloudsigma-master/src/testing/acceptance/test_capabilities.py
new file mode 100644
index 0000000..1d6b940
--- /dev/null
+++ b/pycloudsigma-master/src/testing/acceptance/test_capabilities.py
@@ -0,0 +1,20 @@
+from nose.plugins.attrib import attr
+from testing.acceptance.common import StatefulResourceTestBase
+from testing.utils import DumpResponse
+import cloudsigma.resource as cr
+
+
@attr('acceptance_test')
class CapabilitiesTest(StatefulResourceTestBase):
    """Acceptance tests for the read-only capabilities endpoint."""

    def setUp(self):
        super(CapabilitiesTest, self).setUp()
        # NOTE(review): "Capabilites" mirrors the (misspelled) client class
        # exposed by cloudsigma.resource -- do not "fix" the spelling here.
        self.client = cr.Capabilites()
        self.dump_response = DumpResponse(clients=[self.client])

    @attr('docs_snippets')
    def test_capabilities(self):
        # Record the JSON schema first, then a plain listing.
        with self.dump_response('capabilities_schema'):
            self.client.get_schema()
        with self.dump_response('capabilities'):
            self.client.list()
diff --git a/pycloudsigma-master/src/testing/acceptance/test_core.py b/pycloudsigma-master/src/testing/acceptance/test_core.py
new file mode 100644
index 0000000..43b4245
--- /dev/null
+++ b/pycloudsigma-master/src/testing/acceptance/test_core.py
@@ -0,0 +1,446 @@
+import json
+import os
+from nose.plugins.attrib import attr
+from testing.utils import DumpResponse
+
+from . import common
+
+from cloudsigma import resource as cr
+from cloudsigma import generic as gc
+from unittest import SkipTest
+import logging
+
+LOG = logging.getLogger(__name__)
+
@attr('acceptance_test')
class TestCoreFuncs(common.StatefulResourceTestBase):
    # End-to-end acceptance scenarios against a live account: full server
    # lifecycle, guest contextualization over the serial channel, and
    # firewall-policy behaviour.

    def test_servers_operations(self):
        # Full lifecycle: clone a library image, build a two-NIC guest
        # (DHCP + private VLAN), clone and boot both guests, verify they can
        # reach each other over the VLAN via SSH/avahi, then tear down.

        dc = cr.Drive()
        sc = cr.Server()
        vc = cr.VLAN()

        # Pre-seeded image the acceptance account must provide.
        puuid, p_pass = self._get_persistent_image_uuid_and_pass()

        LOG.debug('Get a vlan from the account')
        all_vlans = vc.list()
        if not all_vlans:
            raise SkipTest('There is no vlan in the acceptance test account')
        vlan = all_vlans[0]

        LOG.debug('Clone the persistent image')
        d1 = dc.clone(puuid, {'name': 'test_atom_clone_1'})
        self._wait_for_status(d1['uuid'], status='unmounted', timeout=self.TIMEOUT_DRIVE_CLONING, client=dc)

        # Guest definition: first NIC on public DHCP, second on the private VLAN.
        g_def = {
            "name": "test_server",
            "cpu": 1000,
            "mem": 1024 ** 3,
            'vnc_password': 'testserver',
            'drives': [
                {
                    "device": "virtio",
                    "dev_channel": "0:0",
                    "drive": d1['uuid'],
                    "boot_order": 1
                },
            ],
            "nics": [
                {
                    "ip_v4_conf": {
                        "ip": None,
                        "conf": "dhcp"
                    },
                    "model": "virtio",
                },
                {
                    "model": "virtio",
                    "vlan": vlan['uuid'],
                }
            ],
        }

        LOG.debug('Creating guest with drive')
        g1 = sc.create(g_def)
        self._wait_for_status(d1['uuid'], 'mounted', client=dc)

        LOG.debug('Clone the guest')
        g2 = sc.clone(g1['uuid'])
        self._wait_for_status(g2['uuid'], 'stopped', client=sc)

        LOG.debug('Check if the drive is active ( mounted )')
        d2_uuid = g2['drives'][0]['drive']['uuid']
        self._wait_for_status(d2_uuid, 'mounted', client=dc)

        LOG.debug('Start both guests')
        sc.start(g1['uuid'])
        sc.start(g2['uuid'])

        self._wait_for_status(g1['uuid'], 'running', client=sc)
        self._wait_for_status(g2['uuid'], 'running', client=sc)

        LOG.debug('Refetch guest configurations')
        g1 = sc.get(g1['uuid'])
        g2 = sc.get(g2['uuid'])

        LOG.debug('Get the assigned ips')
        # NOTE(review): the runtime ip_v4 value appears to be an object whose
        # "uuid" field is the address itself -- confirm against the API schema.
        ip1 = g1['nics'][0]['runtime']['ip_v4']["uuid"]
        ip2 = g2['nics'][0]['runtime']['ip_v4']["uuid"]

        self._wait_for_open_socket(ip1, 22, timeout=60, close_on_success=True)
        self._wait_for_open_socket(ip2, 22, timeout=40, close_on_success=True)

        # fabric is imported lazily so the module can load without it.
        from fabric.api import settings as fabric_settings
        from fabric import tasks, api

        fab_kwargs = {
            "warn_only": True,
            "abort_on_prompts": True,
            "use_ssh_config": p_pass is None
        }
        LOG.debug('Using fabric config {}'.format(fab_kwargs))
        if p_pass is not None:
            fab_kwargs['password'] = p_pass
            LOG.debug('Using a password to SSH to the servers ( not using ssh config )')

        with fabric_settings(**fab_kwargs):
            LOG.debug('Changing hostnames and restarting avahi on guest 1')
            set_hostname = 'hostname {} && service avahi-daemon restart'
            tasks.execute(
                api.run,
                set_hostname.format("atom1"),
                hosts=["root@%s" % ip1]
            )

            LOG.debug('Changing hostnames and restarting avahi on guest 2')
            tasks.execute(
                api.run,
                set_hostname.format("atom2"),
                hosts=["root@%s" % ip2]
            )

            LOG.debug('Ping the two hosts via private network')
            ping_res = tasks.execute(
                api.run,
                "ping atom2.local -c 1",
                hosts=["root@%s" % ip1]
            )
            # Python 2: dict.values() returns a list, so [0] is valid here.
            self.assertEqual(ping_res.values()[0].return_code, 0, 'Could not ping host atom2 from atom1')

            LOG.debug('Halt both servers')
            tasks.execute(
                api.run,
                "halt",
                hosts=["root@%s" % ip1, "root@%s" % ip2]
            )

        LOG.debug('Wait for complete shutdown')
        self._wait_for_status(g1['uuid'], 'stopped', client=sc, timeout=40)
        self._wait_for_status(g2['uuid'], 'stopped', client=sc)

        LOG.debug('Deleting both guests')
        sc.delete(g1['uuid'])
        sc.delete(g2['uuid'])

        LOG.debug('Deleting both drives')
        dc.delete(d1['uuid'])
        dc.delete(d2_uuid)

        self._wait_deleted(d1['uuid'], client=dc)
        self._wait_deleted(d2_uuid, client=dc)
+
+ def get_single_ctx_val(self, command, expected_val, fab_kwargs, ip1, fabric_settings, tasks, api):
+ with fabric_settings(**fab_kwargs):
+ # TODO: Remove this retry when proper guest context client is implemented
+ res_string = None
+ for retry in xrange(5):
+ if retry > 0:
+ LOG.warning('Retrying guest context single value execution {}'.format(retry))
+ ctx_val_res = tasks.execute(
+ api.run,
+ command,
+ hosts=["root@%s" % ip1]
+ )
+
+ res_string = ctx_val_res.values()[0]
+ if res_string == expected_val:
+ break
+ return res_string
+
+ def get_full_ctx(self, command, fab_kwargs, ip1, fabric_settings, tasks, api):
+ with fabric_settings(**fab_kwargs):
+ res_string = ''
+ # TODO: Remove this retry when proper guest context client is implemented
+ ctx_res_json = {}
+ for retry in xrange(5):
+ if retry > 0:
+ LOG.warning('Retrying guest context whole definition execution {}'.format(retry))
+ try:
+ ctx_res = tasks.execute(
+ api.run,
+ command,
+ hosts=["root@%s" % ip1]
+ )
+ res_string = ctx_res.values()[0]
+ ctx_res_json = json.loads(res_string)
+ except:
+ continue
+ else:
+ break
+
+ return ctx_res_json, res_string
+
+ def dump_ctx_command(self, command, res_string, op_name, dump_path):
+ with open(os.path.join(dump_path, 'request_' + op_name), 'w') as dump_file:
+ dump_file.write(command)
+ with open(os.path.join(dump_path, 'response_' + op_name), 'w') as dump_file:
+ dump_file.write(res_string)
+
+ def check_key_retrieval(self, g_def, op_name, ctx_path, dump_path, fab_kwargs, ip1, fabric_settings, tasks, api):
+ command = self.command_template.format(ctx_path)
+ expected_val = g_def
+ for path_el in ctx_path.split('/'):
+ if path_el: # non-empty string
+ expected_val = expected_val.get(path_el)
+ res_string = self.get_single_ctx_val(command, expected_val, fab_kwargs, ip1, fabric_settings, tasks, api)
+ self.assertEqual(res_string, expected_val)
+ self.dump_ctx_command(command, res_string, op_name, dump_path)
+
+ def check_all_retrieval(self, g_def, op_name, dump_path, fab_kwargs, ip1, fabric_settings, tasks, api):
+ command = self.command_template.format('')
+ ctx_res_json, res_string = self.get_full_ctx(command, fab_kwargs, ip1, fabric_settings, tasks, api)
+ for k, v in g_def.items():
+ if not isinstance(v, (list, dict)):
+ self.assertEqual(v, ctx_res_json[k])
+ self.dump_ctx_command(command, res_string, op_name, dump_path)
+
    @attr('docs_snippets')
    def test_guest_context(self):
        # Boots a guest and exercises the guest-context serial channel:
        # single-key reads, whole-definition reads, dynamic updates, and the
        # account-wide global context.
        dc = cr.Drive()
        sc = cr.Server()
        gcc = cr.GlobalContext()
        dump_response = DumpResponse(clients=[sc, dc, gcc])
        # ensure empty global context
        gcc.update({})

        puuid, p_pass = self._get_persistent_image_uuid_and_pass()
        LOG.debug('Clone the persistent image')
        d1 = dc.clone(puuid, {'name': 'test_clone_1'})
        from uuid import uuid4
        g_def = {
            "name": "test_server",
            "cpu": 1000,
            "mem": 1024 ** 3,
            # Random throw-away VNC password (max length 18, no dashes).
            'vnc_password': str(uuid4())[:18].replace('-', ''),
            'drives': [
                {
                    "device": "virtio",
                    "dev_channel": "0:0",
                    "drive": d1['uuid'],
                    "boot_order": 1
                },
            ],
            "nics": [
                {
                    "ip_v4_conf": {
                        "ip": None,
                        "conf": "dhcp"
                    },
                    "model": "virtio",
                },
            ],
            "meta": {
                "ssh_public_key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCy4XpmD3kEfRZ+LCwFh3Xmqrkm7rSiDu8v+ZCTOA3vlNjmy/ZOc3vy9Zr+IhWPP4yipiApkGRsBM63tTgnxqUUn/WU7qkbBNktZBcs5p7Mj/zO4ZHkk4VoTczFzHlPGwuak2P4wTftEj7sU8IRutaAbMoKj4AMuFF50j4sIgF7P5b5FtTIM2b5HSW8BlDz10b67+xsj6s3Jv05xxbBs+RWj+v7D5yjMVeeErXoSui8dlHpUu6QOVKn8LLmdpxvehc6ns8yW7cbQvWmLjOICMnm6BXdVtOKWBncDq9FGLmKF3fUeZZPbv79Z7dyZs+xGZGMHbpaNHpuY9QhNS/hQ5D5 dave@hal"
            }
        }

        LOG.debug('Creating guest with drive')

        with dump_response('guest_for_context'):
            g1 = sc.create(g_def)

        self._wait_for_status(d1['uuid'], 'mounted', client=dc)

        sc.start(g1['uuid'])

        self._wait_for_status(g1['uuid'], 'running', client=sc)

        LOG.debug('Refetch guest configurations')

        g1 = sc.get(g1['uuid'])

        LOG.debug('Get the assigned ips')
        ip1 = g1['nics'][0]['runtime']['ip_v4']["uuid"]

        self._wait_for_open_socket(ip1, 22, timeout=60, close_on_success=True)

        # fabric is imported lazily so the module can load without it.
        from fabric.api import settings as fabric_settings
        from fabric import tasks, api

        fab_kwargs = {
            "warn_only": True,
            "abort_on_prompts": True,
            "use_ssh_config": p_pass is None
        }
        LOG.debug('Using fabric config {}'.format(fab_kwargs))
        if p_pass is not None:
            fab_kwargs['password'] = p_pass
            LOG.debug('Using a password to SSH to the servers ( not using ssh config )')

        dump_path = dump_response.response_dump.dump_path

        # Earlier variant of the channel command, kept for reference:
        #command_template = r"read -t 1 -d $'\004' DISCARD < /dev/ttyS1; " \
        #    r'echo -en "<\n{}\n>" > /dev/ttyS1 && read -t 3 READVALUE < /dev/ttyS1 && echo $READVALUE'
        # Talks to the context service through the guest's second serial port:
        # start a background read, write the "<\n{key}\n>" request, then echo
        # whatever came back.
        self.command_template = r'v=$(read -t 13 READVALUE < /dev/ttyS1 && echo $READVALUE & sleep 1; echo -en "<\n{}\n>" > /dev/ttyS1; wait %1); echo $v'

        LOG.debug('Test the guest context')

        LOG.debug('Check single value retrieval')
        self.check_key_retrieval(g_def, 'context_single_value', 'name', dump_path, fab_kwargs, ip1, fabric_settings,
                                 tasks, api)

        ##########################################
        LOG.debug('Check key retrieval')
        self.check_key_retrieval(g_def, 'context_single_value_ssh_key', '/meta/ssh_public_key', dump_path, fab_kwargs, ip1,
                                 fabric_settings, tasks, api)

        ##########################################
        LOG.debug('Check complete context retrieval')
        self.check_all_retrieval(g_def, 'context_all', dump_path, fab_kwargs, ip1, fabric_settings, tasks, api)

        ##########################################
        # Update the definition while the guest is running; the context the
        # guest sees must follow without a reboot.
        LOG.debug('Check context dynamic update')
        g_def['name'] += '_renamed'
        g_def['meta']['another_key'] = 'a value or something'

        upd_res = sc.update(g1['uuid'], g_def)
        self.assertEqual(g_def['name'], upd_res['name'])

        LOG.debug('Check single value retrieval')
        self.check_key_retrieval(g_def, 'context_single_value_dynamic', 'name', dump_path, fab_kwargs, ip1,
                                 fabric_settings, tasks, api)

        ##########################################
        LOG.debug('Check key retrieval')
        self.check_key_retrieval(g_def, 'context_single_value_another_key_dynamic', '/meta/another_key', dump_path,
                                 fab_kwargs, ip1, fabric_settings, tasks, api)

        ##########################################
        LOG.debug('Check complete context retrieval')
        self.check_all_retrieval(g_def, 'context_all_dynamic', dump_path, fab_kwargs, ip1, fabric_settings, tasks, api)

        ##########################################
        # Global (account-wide) context values surface under /global_context.
        with dump_response('update_global_context'):
            gcc.update({'new_global_key': 'new_global_val'})

        LOG.debug('Check global context retrieval')
        command = self.command_template.format('/global_context/new_global_key')
        expected_val = 'new_global_val'
        res_string = self.get_single_ctx_val(command, expected_val, fab_kwargs, ip1, fabric_settings, tasks, api)
        self.assertEqual(res_string, expected_val)
        self.dump_ctx_command(command, res_string, 'global_context_single_value', dump_path)

        self.check_all_retrieval(g_def, 'global_context_all', dump_path, fab_kwargs, ip1, fabric_settings, tasks, api)

        LOG.debug('Stopping guest')
        sc.stop(g1['uuid'])
        self._wait_for_status(g1['uuid'], 'stopped', client=sc, timeout=40)

        LOG.debug('Delete guest')
        sc.delete(g1['uuid'])

        LOG.debug('Delete drive')
        dc.delete(d1['uuid'])
        self._wait_deleted(d1['uuid'], client=dc)
+
+
+
    def test_firewall(self):
        # Verify that firewall policy rule changes take effect on a running
        # guest: block inbound SSH, confirm port 22 closes, then unblock and
        # confirm it reopens.
        dc = cr.Drive()
        sc = cr.Server()
        fwp = cr.FirewallPolicy()

        puuid, p_pass = self._get_persistent_image_uuid_and_pass()

        LOG.debug('Clone the persistent image')
        d1 = dc.clone(puuid, {'name': 'test_atom_clone_1'})
        self._wait_for_status(d1['uuid'], status='unmounted', timeout=self.TIMEOUT_DRIVE_CLONING, client=dc)

        # Start from an empty policy; rules are added after the guest boots.
        fw_policy = fwp.create({})

        g_def = {
            "name": "testFirewallServer",
            "cpu": 1000,
            "mem": 1024 ** 3,
            'vnc_password': 'testserver',
            'drives': [
                {
                    "device": "virtio",
                    "dev_channel": "0:0",
                    "drive": d1['uuid'],
                    "boot_order": 1
                },
            ],
            "nics": [
                {
                    "firewall_policy": fw_policy['uuid'],
                    "ip_v4_conf": {
                        "ip": None,
                        "conf": "dhcp"
                    },
                    "model": "virtio",
                },
            ],
        }

        guest = sc.create(g_def)
        self._wait_for_status(d1['uuid'], 'mounted', client=dc)

        sc.start(guest['uuid'])

        self._wait_for_status(guest['uuid'], 'running', client=sc)
        guest = sc.get(guest['uuid'])
        ip1 = guest['nics'][0]['runtime']['ip_v4']["uuid"]

        self._wait_for_open_socket(ip1, 22, timeout=60, close_on_success=True)

        # Drop inbound SSH; the socket probe below must then fail.
        fw_policy['rules'] = [
            {
                "ip_proto": "tcp",
                "dst_port": 22,
                "direction": "in",
                "action": "drop",
                "comment": "Block SSH traffic"
            }
        ]

        fwp.update(fw_policy['uuid'], fw_policy)

        self._wait_socket_close(ip1, 22)

        # Remove every rule and SSH becomes reachable again.
        fw_policy['rules'] = []

        fwp.update(fw_policy['uuid'], fw_policy)
        self._wait_for_open_socket(ip1, 22)

        sc.stop(guest['uuid'])
        self._wait_for_status(guest['uuid'], 'stopped', client=sc)
        sc.delete(guest['uuid'])

        # NOTE(review): the cloned drive d1 is never deleted here -- possible
        # resource leak in the acceptance account; confirm intent.
        fwp.delete(fw_policy['uuid'])
diff --git a/pycloudsigma-master/src/testing/acceptance/test_drives.py b/pycloudsigma-master/src/testing/acceptance/test_drives.py
new file mode 100644
index 0000000..fa433b7
--- /dev/null
+++ b/pycloudsigma-master/src/testing/acceptance/test_drives.py
@@ -0,0 +1,494 @@
+from multiprocessing import Process, Queue
+import os
+import struct
+import tempfile
+import unittest
+import random
+from nose.plugins.attrib import attr
+
+import cloudsigma.resource as cr
+import cloudsigma.errors as errors
+
+from testing.utils import DumpResponse
+from testing.acceptance.common import StatefulResourceTestBase
+
+from logging import getLogger
+LOG = getLogger(__name__)
+
@attr('acceptance_test')
class DriveBasicTest(StatefulResourceTestBase):
    # CRUD / resize / clone acceptance tests for the drives endpoint.

    def setUp(self):
        super(DriveBasicTest, self).setUp()
        # Shared drive client plus a response recorder for docs snippets.
        self.client = cr.Drive()
        self.dump_response = DumpResponse(clients=[self.client])
+
+ @attr('docs_snippets')
+ def test_drive_cycle(self):
+ drive_def = {
+ 'name': 'test_drive_1',
+ 'size': 1024000000,
+ 'media': 'disk',
+ }
+
+ with self.dump_response('drive_create_minimal'):
+ drive = self.client.create(drive_def)
+ drive_uuid = drive['uuid']
+
+ self.assertEqual(drive['status'], 'creating')
+
+
+ self._wait_for_status(drive_uuid, 'unmounted')
+
+ with self.dump_response('drive_get_unmounted'):
+ drive = self.client.get(drive_uuid)
+
+ with self.dump_response('drive_update_meta'):
+ drive['meta'] = {'meta_key1': 'value', 'meta_key2': 'value\nwith\nnew lines'}
+ updated_drive = self.client.update(drive_uuid, drive)
+
+ self.assertEqual(drive['meta'], updated_drive['meta'])
+
+ with self.dump_response('drive_delete'):
+ self.client.delete(drive_uuid)
+
+ self._wait_deleted(drive_uuid)
+
    @attr('docs_snippets')
    def test_drive_resize(self):
        # Grow a drive twice via PUT on the drive resource: first to an exact
        # GiB multiple, then to an odd size the backend is allowed to round.
        DRIVE_CREATE_SIZE = 2*1024**3
        drive_def = {
            'name': 'test_drive_1',
            'size': DRIVE_CREATE_SIZE,
            'media': 'disk',
        }
        drive = self.client.create(drive_def)
        self.assertEqual(drive['status'], 'creating')
        self._wait_for_status(drive['uuid'], 'unmounted', timeout=self.TIMEOUT_DRIVE_CREATED)

        DRIVE_NEW_SIZE = DRIVE_CREATE_SIZE + 3*1024**3
        with self.dump_response('drive_resize'):
            drive_def['size'] = DRIVE_NEW_SIZE
            resizing_drive = self.client.update(drive['uuid'], drive_def)
        self.assertEqual(resizing_drive['status'], 'resizing')
        self._wait_for_status(resizing_drive['uuid'], 'unmounted')

        resized_drive = self.client.get(drive['uuid'])
        self.assertEqual(int(resized_drive['size']), DRIVE_NEW_SIZE, 'Size mismatch after drive resize')

        # Odd size: +1 GiB +7 MiB +3 KiB on top of the previous size.
        DRIVE_NEW_ODD_SIZE = DRIVE_NEW_SIZE + 1*1024**3 + 7*1024**2 + 3*1024
        drive_def['size'] = DRIVE_NEW_ODD_SIZE
        resizing_drive = self.client.update(drive['uuid'], drive_def)
        self.assertEqual(resizing_drive['status'], 'resizing')
        self._wait_for_status(resizing_drive['uuid'], 'unmounted')

        # The backend may round odd sizes to its allocation granularity;
        # accept anything within 64 KiB of the request.
        ALLOWED_SIZE_ROUNDING = 64*1024
        resized_drive = self.client.get(drive['uuid'])
        self.assertNotEqual(int(resized_drive['size']),
                            DRIVE_NEW_SIZE,
                            'Size of {!r} did not change'.format(drive['uuid'])
                            )

        self.assertLess(abs(DRIVE_NEW_ODD_SIZE-int(resized_drive['size'])), ALLOWED_SIZE_ROUNDING,
                        'New size differs with more than %d bytes, requested size %d bytes, reported size after resize %d bytes' % (
                            ALLOWED_SIZE_ROUNDING,
                            DRIVE_NEW_ODD_SIZE,
                            resized_drive['size'],
                        )
                        )

        self.client.delete(drive['uuid'])
        self._wait_deleted(drive['uuid'])
+
+ @attr('docs_snippets')
+ def test_drive_resize_action(self):
+ DRIVE_CREATE_SIZE = 2 * 1024 ** 3
+ drive_def = {
+ 'name': 'test_drive_1',
+ 'size': DRIVE_CREATE_SIZE,
+ 'media': 'disk',
+ }
+ drive = self.client.create(drive_def)
+ self._wait_for_status(drive['uuid'], 'unmounted', timeout=self.TIMEOUT_DRIVE_CREATED)
+
+ drive['size'] = 2 * drive['size']
+ with self.dump_response('drive_resize_action'):
+ self.client.resize(drive['uuid'], drive)
+
+ self._wait_for_status(drive['uuid'], 'unmounted', timeout=self.TIMEOUT_DRIVE_CREATED)
+
+ resized_drive = self.client.get(drive['uuid'])
+ self.assertEqual(resized_drive['size'], drive['size'])
+
+ self.client.delete(drive['uuid'])
+ self._wait_deleted(drive['uuid'])
+
+ @attr('docs_snippets')
+ def test_drive_listing(self):
+ req = [
+ {
+ 'name': 'test_drive_%i' % i,
+ 'size': '1024000000',
+ 'media': 'disk',
+ } for i in range(5)
+ ]
+
+ with self.dump_response('drive_create_bulk'):
+ drives = self.client.create(req)
+
+ for drive in drives:
+ self._wait_for_status(drive['uuid'], 'unmounted')
+
+ #Get the short list of fields
+ with self.dump_response('drive_list'):
+ self.client.list()
+
+ #Get just a list of uuids
+ with self.dump_response('drive_list_just_uuid_and_status'):
+ just_uuids = self.client.list(query_params={'fields':'uuid,status'})
+
+ for el in just_uuids:
+ self.assertEqual(set(el.keys()), set(['uuid', 'status']))
+
+ #Get detailed information on drives
+ with self.dump_response('drive_list_detail'):
+ self.client.list_detail()
+
+ for drive in drives:
+ self.client.delete(drive['uuid'])
+
+ for drive in drives:
+ self._wait_deleted(drive['uuid'])
+
+ @attr('docs_snippets')
+ def test_drive_edit(self):
+ drive_def = {
+ 'name': 'test_drive_x',
+ 'size': 1024000000,
+ 'media': 'disk',
+ }
+
+ drive = self.client.create(drive_def)
+ self._wait_for_status(drive['uuid'], 'unmounted')
+
+ drive_def['name'] = 'test_drive_y'
+ drive_def['media'] = 'cdrom'
+
+ with self.dump_response('drive_edit'):
+ updated_drive = self.client.update(drive['uuid'], drive_def)
+
+ self.assertDictContainsSubset(drive_def, updated_drive)
+
+ self.client.delete(updated_drive['uuid'])
+ self._wait_deleted(updated_drive['uuid'])
+
+ @attr('docs_snippets')
+ def test_drive_clone(self):
+ drive_def = {
+ 'name': 'test_drive_x',
+ 'size': '1024000000',
+ 'media': 'disk',
+ }
+
+ drive = self.client.create(drive_def)
+ self._wait_for_status(drive['uuid'], 'unmounted', timeout=self.TIMEOUT_DRIVE_CLONING)
+
+ clone_drive_def = {
+ 'name': 'test_drive_y',
+ 'media': 'cdrom',
+ 'affinities': [],
+ }
+
+ with self.dump_response('drive_clone'):
+ cloned_drive = self.client.clone(drive['uuid'], clone_drive_def)
+
+ self._wait_for_status(cloned_drive['uuid'], 'unmounted', timeout=self.TIMEOUT_DRIVE_CLONING)
+
+ self.client.delete(drive['uuid'])
+ self.client.delete(cloned_drive['uuid'])
+
+ self._wait_deleted(cloned_drive['uuid'], timeout=60)
+ self._wait_deleted(drive['uuid'], timeout=60)
+
+ def test_drive_clone_by_name(self):
+ drive_def = {
+ 'name': 'test_drive_x_%s' % random.randint(0, 10000),
+ 'size': '1024000000',
+ 'media': 'disk',
+ }
+
+ drive = self.client.create(drive_def)
+ self._wait_for_status(drive['uuid'], 'unmounted', timeout=self.TIMEOUT_DRIVE_CLONING)
+
+ clone_drive_def = {
+ 'name': 'test_drive_y',
+ 'media': 'cdrom',
+ 'affinities': [],
+ }
+ cloned_drive = self.client.clone_by_name(drive['name'], clone_drive_def)
+
+ self._wait_for_status(cloned_drive['uuid'], 'unmounted', timeout=self.TIMEOUT_DRIVE_CLONING)
+
+ self.client.delete(drive['uuid'])
+ self.client.delete(cloned_drive['uuid'])
+
+ self._wait_deleted(cloned_drive['uuid'], timeout=60)
+ self._wait_deleted(drive['uuid'], timeout=60)
+
+ def test_drive_avoid(self):
+ drive_def = {
+ 'name': 'test_drive_x',
+ 'size': '1024000000',
+ 'media': 'disk',
+ }
+
+ drive = self.client.create(drive_def)
+ self._wait_for_status(drive['uuid'], 'unmounted', timeout=self.TIMEOUT_DRIVE_CLONING)
+
+ clone_drive_def = {
+ 'name': 'test_drive_y',
+ 'media': 'cdrom',
+ 'affinities': [],
+ }
+
+
+ cloned_drive = self.client.clone(drive['uuid'], clone_drive_def, avoid=drive['uuid'])
+
+ another_dirve = self.client.create(drive_def, avoid=drive['uuid'])
+
+ self._wait_for_status(cloned_drive['uuid'], 'unmounted', timeout=self.TIMEOUT_DRIVE_CLONING)
+ self._wait_for_status(another_dirve['uuid'], 'unmounted', timeout=self.TIMEOUT_DRIVE_CLONING)
+
+ self.client.delete(drive['uuid'])
+ self.client.delete(cloned_drive['uuid'])
+ self.client.delete(another_dirve['uuid'])
+
+ self._wait_deleted(cloned_drive['uuid'], timeout=60)
+ self._wait_deleted(drive['uuid'], timeout=60)
+ self._wait_deleted(another_dirve['uuid'], timeout=60)
+
+ @attr('docs_snippets')
+ def test_get_schema(self):
+ with self.dump_response('drive_schema'):
+ self.client.get_schema()
+
@attr('acceptance_test')
class LibraryDriveTest(StatefulResourceTestBase):
    """Acceptance tests for the read-only library-drives endpoint."""

    def _gen_server_definition(self, drives=None, changed_def=None):
        """Build a minimal server definition for attachment tests.

        ``drives`` may mix ready guest-drive dicts and bare drive-UUID
        strings (the latter are wrapped in a virtio attachment template);
        anything else is silently ignored, as before.  ``changed_def``
        overrides top-level keys of the generated definition.

        The defaults used to be mutable literals (``[]`` / ``{}``), which
        Python shares across calls; ``None`` sentinels avoid that pitfall
        while keeping the call signature backward compatible.
        """
        drive_tmp = {
            "device": "virtio",
            "dev_channel": "0:0",
            "drive": None,
            "boot_order": 1
        }

        server_def = {
            'name': 'testServerAcc',
            'cpu': 1000,
            'mem': 512 * 1024 ** 2,
            'vnc_password': 'testserver',
            'drives': [],
        }

        server_def.update(changed_def or {})
        for drive in (drives or []):
            if isinstance(drive, dict):
                server_def['drives'].append(drive)
            elif isinstance(drive, basestring):
                guest_drive = drive_tmp.copy()
                guest_drive['drive'] = drive
                server_def['drives'].append(guest_drive)

        return server_def
+
    def setUp(self):
        super(LibraryDriveTest, self).setUp()
        # Library-drive client plus a response recorder for docs snippets.
        self.client = cr.LibDrive()
        self.dump_response = DumpResponse(clients=[self.client])
+
+ @attr('docs_snippets')
+ def test_get_schema(self):
+ with self.dump_response('libdrive_schema'):
+ self.client.get_schema()
+
+ @attr('docs_snippets')
+ def test_libdrive_listing(self):
+ with self.dump_response('libdrive_list'):
+ libdrives = self.client.list(query_params={'limit': 5})
+
+ # Select the lib drive with most interesting attributes
+ libdrive_uuid = libdrives[0]['uuid'] # by default use the first possible
+ for d in libdrives:
+ if len(d['licenses']) > 0: # pick a drive with licenses
+ libdrive_uuid = d['uuid']
+ break
+
+ with self.dump_response('libdrive_get'):
+ libdrive = self.client.get(libdrive_uuid)
+
+ dc = cr.Drive()
+ with DumpResponse(clients=[dc])('librdrive_get_through_drives'):
+ libdrive_from_drive_url = dc.get(libdrive_uuid)
+
+ self.assertIsNone(libdrive_from_drive_url['owner'])
+ self.assertEqual(libdrive['uuid'], libdrive_from_drive_url['uuid'])
+ self.assertEqual(libdrive['name'], libdrive_from_drive_url['name'])
+
+
+ def test_attaching_cdrom(self):
+
+ server_client = cr.Server()
+
+ found = None
+ for drive in self.client.list():
+ if drive['media'] == 'cdrom':
+ found = drive
+ break
+
+ if found is None:
+ raise unittest.SkipTest('Cannot find a cdrom drive in drives library')
+
+ guest_def = self._gen_server_definition(drives=[found['uuid']])
+ new_guest = server_client.create(guest_def)
+
+ server_client.delete(new_guest['uuid'])
+ self._wait_deleted(new_guest['uuid'], client=server_client)
+
+ def test_attaching_preinstalled(self):
+ server_client = cr.Server()
+
+ found = None
+ for drive in self.client.list():
+ if drive['media'] == 'disk':
+ found = drive
+ break
+
+ if found is None:
+ raise unittest.SkipTest('Cannot find a preinstalled drive in the drives library')
+
+ guest_def = self._gen_server_definition(drives=[found['uuid']])
+
+ with self.assertRaises(errors.PermissionError):
+ server_client.create(guest_def)
+
+
@attr('stress_test')
class DriveStressTest(StatefulResourceTestBase):
    """Volume tests: many drive creations / clones in flight at once."""

    CLONE_COUNT = 20
    DRIVE_COUNT = 100

    def setUp(self):
        super(DriveStressTest, self).setUp()
        self.client = cr.Drive()

    def _get_min_drive_size(self):
        # Smallest size the API accepts: 1 GB (decimal).
        return 1*1000**3

    def test_create_delete(self):
        """Creating MANY small drives via API, see if it works"""
        min_size = self._get_min_drive_size()
        created = [
            self.client.create({
                "name": "test_drive_{}".format(num),
                "size": min_size,
                "media": "disk",
            })
            for num in range(self.DRIVE_COUNT)
        ]

        for drive in created:
            self._wait_for_status(drive['uuid'], status='unmounted', client=self.client, timeout=60)

        for drive in created:
            self.client.delete(drive['uuid'])

        for drive in created:
            self._wait_deleted(drive['uuid'], self.client, timeout=60)

    def test_clone(self):
        """Clone SOME drives via API, see if it works"""
        puuid, ppass = self._get_persistent_image_uuid_and_pass()

        cloned = [
            self.client.clone(puuid, {'name': "test_atom_clone_{}".format(num)})
            for num in range(self.CLONE_COUNT)
        ]

        for drive in cloned:
            self._wait_for_status(drive['uuid'], status='unmounted', client=self.client, timeout=self.TIMEOUT_DRIVE_CLONING)

        for drive in cloned:
            self.client.delete(drive['uuid'])

        for drive in cloned:
            self._wait_deleted(drive['uuid'], self.client, timeout=60)
+
+
class TestUpload(StatefulResourceTestBase):
    # Uploads a ~10 MiB random file through the resumable-upload API in a
    # child process and verifies the resulting drive appears unmounted.

    def setUp(self):
        super(TestUpload, self).setUp()

        self.file_size = 10 * 1024 ** 2 + random.randrange(0, 1024)  # 10.something MiB
        self.file_path = self.generate_file()
        # self.downloaded_path = tempfile.mktemp(prefix='test_download_')
        self.dc = cr.Drive()

    def tearDown(self):
        super(TestUpload, self).tearDown()
        os.remove(self.file_path)
        # os.remove(self.downloaded_path)

    def generate_file(self):
        # Create a temp file of exactly self.file_size random bytes and
        # return its path; tearDown removes it.
        fd, path = tempfile.mkstemp(prefix='drive_upload_test')

        os.fdopen(fd).close()
        with open(path, 'r+b') as f:
            written = 0
            # write 64 bit random values
            data = struct.pack('=Q', random.randrange(0, 2 ** 64)) * 128 * 4
            while written + 1024 * 4 <= self.file_size:
                f.write(data)
                written += 1024 * 4

            # write 8 bit random values until we reach required size
            # NOTE(review): writing chr() into a binary file is Python 2
            # specific; under Python 3 this would need bytes([...]).
            while written < self.file_size:
                f.write(chr(random.randrange(0, 2 ** 8)))
                written += 1

        return path

    def test_resumable_upload(self):
        from cloudsigma.resumable_upload import Upload

        def do_upload(queue):
            # Runs in the child process; reports (uuid, size) via the queue.
            up = Upload(self.file_path, chunk_size=1024**2, drive_name='test_drive_upload')

            up.upload()

            queue.put((up.drive_uuid, up.uploaded_size))

        queue = Queue()
        # NOTE(review): passing a closure as Process target relies on
        # fork-style process start (POSIX); it would not pickle on spawn
        # platforms -- confirm the test environment.
        proc = Process(target=do_upload, args=(queue,))
        proc.start()

        # Give the upload two minutes, then kill it.
        proc.join(2*60)
        if proc.is_alive():
            proc.terminate()
            raise Exception('Upload did not finish in time')

        uuid, uploaded_size = queue.get(block=False)
        LOG.debug('Finished uploading {}'.format(uuid))
        self.assertEqual(uploaded_size, self.file_size)

        drive = self.dc.get(uuid)
        self.assertEqual(drive['status'], 'unmounted')

        self.dc.delete(uuid)
diff --git a/pycloudsigma-master/src/testing/acceptance/test_firewall.py b/pycloudsigma-master/src/testing/acceptance/test_firewall.py
new file mode 100644
index 0000000..70292de
--- /dev/null
+++ b/pycloudsigma-master/src/testing/acceptance/test_firewall.py
@@ -0,0 +1,153 @@
+import unittest
+from nose.plugins.attrib import attr
+from testing.utils import DumpResponse
+import cloudsigma.resource as resource
+
+
@attr('acceptance_test')
class FirewallPolicyTest(unittest.TestCase):
    # CRUD and server-attachment tests for firewall policies.  The account
    # is wiped of policies (and any servers they are attached to) before and
    # after every test.

    def setUp(self):
        unittest.TestCase.setUp(self)
        self.client = resource.FirewallPolicy()
        self.dump_response = DumpResponse(clients=[self.client])
        # Reference policy exercising every rule field the docs describe.
        self.base_policy = {
            "name": "My awesome policy",
            "rules": [
                {
                    "dst_ip": "23",
                    "direction": "out",
                    "action": "drop",
                    "comment": "Drop traffic from the VM to IP address 23.0.0.0/32"
                },
                {
                    "src_ip": "172.66.32.0/24",
                    "ip_proto": "tcp",
                    "dst_port": "22",
                    "direction": "in",
                    "action": "accept",
                    "comment": "Allow SSH traffic to the VM from our office in Dubai"
                },
                {
                    "ip_proto": "tcp",
                    "dst_port": "22",
                    "direction": "in",
                    "action": "drop",
                    "comment": "Drop all other SSH traffic to the VM"
                },
                {
                    "src_ip": "!172.66.32.55",
                    "ip_proto": "udp",
                    "direction": "in",
                    "action": "drop",
                    "comment": "Drop all UDP traffic to the VM, not originating from 172.66.32.55"
                },
                {
                    "ip_proto": "tcp",
                    "dst_port": "!1:1024",
                    "direction": "in",
                    "action": "drop",
                    "comment": "Drop any traffic, to the VM with destination port not between 1-1024"
                }
            ]
        }
        self._clean_policies()

    def tearDown(self):
        self._clean_policies()
+
+ def _clean_policies(self):
+ policies = self.client.list_detail()
+ server_client = resource.Server()
+ deleted_servers = []
+ for policy in policies:
+ for server in policy['servers']:
+ if server['uuid'] not in deleted_servers:
+ deleted_servers.append(server['uuid'])
+ server_client.delete(server['uuid'])
+ self.client.delete(policy['uuid'])
+
+ @attr('docs_snippets')
+ def test_get_schema(self):
+ with self.dump_response('fwpolicy_schema'):
+ self.client.get_schema()
+
    @attr('docs_snippets')
    def test_crud_policy(self):
        # Create (minimal and full), list, get, update and delete a policy,
        # dumping each exchange for the documentation.
        base_policy = self.base_policy.copy()

        with self.dump_response('fwpolicy_create_minimal'):
            min_policy = self.client.create({})

        self.assertDictContainsSubset({}, min_policy)

        with self.dump_response('fwpolicy_create_full'):
            full_policy = self.client.create(base_policy)

        # Test if applied rules look like the ones returned from the API.
        # A dict-subset check will not work, because the API
        # alters/normalizes some of the data.
        for idx, rules in enumerate(base_policy['rules']):
            for key in rules:
                match_a = str(full_policy['rules'][idx][key])
                match_b = rules[key]
                print match_a, match_b
                self.assertTrue(match_a.startswith(match_b))

        with self.dump_response('fwpolicy_list'):
            res = self.client.list()

        with self.dump_response('fwpolicy_list_detail'):
            res = self.client.list_detail()

        # setUp wiped the account, so exactly the two policies created above.
        self.assertEqual(len(res), 2)

        updated_policy = full_policy.copy()
        updated_policy['rules'] = [updated_policy['rules'][0]]

        with self.dump_response('fwpolicy_get'):
            self.client.get(full_policy['uuid'])

        with self.dump_response('fwpolicy_update'):
            up_pol = self.client.update(full_policy['uuid'], updated_policy)

        self.assertEqual(len(up_pol['rules']), 1)

        with self.dump_response('fwpolicy_delete'):
            self.client.delete(full_policy['uuid'])

        self.client.delete(min_policy['uuid'])

        res = self.client.list()
        self.assertEqual(len(res), 0)
+
+ @attr('docs_snippets')
+ def test_server_fw_rules(self):
+ policy = self.client.create(self.base_policy)
+
+ server_def = {
+ 'name': 'FirewalledServer',
+ 'cpu': 1000,
+ 'mem': 512 * 1024 ** 2,
+ 'vnc_password': 'testserver',
+ "nics": [
+ {
+ "firewall_policy": policy['uuid'],
+ "ip_v4_conf": {
+ "ip": None,
+ "conf": "dhcp"
+ },
+ "model": "virtio",
+ }
+ ],
+ }
+ server_client = resource.Server()
+ with DumpResponse(clients=[server_client])("fwpolicy_server_attach"):
+ server = server_client.create(server_def)
+
+ self.assertEqual(server['nics'][0]['firewall_policy']['uuid'], policy['uuid'])
+
+ self.client.delete(policy['uuid'])
+
+ server = server_client.get(server['uuid'])
+ self.assertIsNone(server['nics'][0]['firewall_policy'])
+
+ server_client.delete(server['uuid'])
diff --git a/pycloudsigma-master/src/testing/acceptance/test_networking.py b/pycloudsigma-master/src/testing/acceptance/test_networking.py
new file mode 100644
index 0000000..4dd6780
--- /dev/null
+++ b/pycloudsigma-master/src/testing/acceptance/test_networking.py
@@ -0,0 +1,61 @@
+from nose.plugins.attrib import attr
+from testing.acceptance.common import StatefulResourceTestBase
+
+import cloudsigma.resource as cr
+from testing.utils import DumpResponse
+
+
+@attr('acceptance_test')
+class VLANBasicTest(StatefulResourceTestBase):  # smoke-tests VLAN schema/list/get/update; assumes the account owns at least one VLAN (uses res[0])
+    def setUp(self):
+        super(VLANBasicTest, self).setUp()
+        self.client = cr.VLAN()
+        self.dump_response = DumpResponse(clients=[self.client])  # captures responses as docs snippets
+
+    @attr('docs_snippets')
+    def test_vlan_listing_get_and_update(self):
+        with self.dump_response('vlan_schema'):
+            self.client.get_schema()
+
+        with self.dump_response('vlan_list'):
+            self.client.list()
+
+        with self.dump_response('vlan_list_detail'):
+            res = self.client.list_detail()
+
+        vlan_uuid = res[0]['uuid']
+        with self.dump_response('vlan_get'):
+            self.client.get(vlan_uuid)
+
+        with self.dump_response('vlan_update'):
+            self.client.update(vlan_uuid, {'meta': {'name': 'my vlan', 'custom_field': 'some custom data'}})
+
+        self.client.update(vlan_uuid, {'meta': {}})  # reset meta so the account is left unchanged
+
+
+@attr('acceptance_test')
+class IPBasicTest(StatefulResourceTestBase):  # smoke-tests IP schema/list/get/update; assumes the account owns at least one IP (uses res[0])
+    def setUp(self):
+        super(IPBasicTest, self).setUp()
+        self.client = cr.IP()
+        self.dump_response = DumpResponse(clients=[self.client])  # captures responses as docs snippets
+
+    @attr('docs_snippets')
+    def test_ip_listing_get_and_update(self):
+        with self.dump_response('ip_schema'):
+            self.client.get_schema()
+
+        with self.dump_response('ip_list'):
+            self.client.list()
+
+        with self.dump_response('ip_list_detail'):
+            res = self.client.list_detail()
+
+        ip_uuid = res[0]['uuid']
+        with self.dump_response('ip_get'):
+            self.client.get(ip_uuid)
+
+        with self.dump_response('ip_update'):
+            self.client.update(ip_uuid, {'meta': {'name': 'my ip', 'custom_field': 'some custom data'}})
+
+        self.client.update(ip_uuid, {'meta': {}})  # reset meta so the account is left unchanged
diff --git a/pycloudsigma-master/src/testing/acceptance/test_profile.py b/pycloudsigma-master/src/testing/acceptance/test_profile.py
new file mode 100644
index 0000000..7d08b1e
--- /dev/null
+++ b/pycloudsigma-master/src/testing/acceptance/test_profile.py
@@ -0,0 +1,45 @@
+from nose.plugins.attrib import attr
+from testing.acceptance.common import StatefulResourceTestBase
+from testing.utils import DumpResponse
+import cloudsigma.resource as cr
+import simplejson as json
+
+
+def anonymize_profile(response_body):  # DumpResponse filter: replaces PII in a profile JSON body with fixed placeholders for docs snippets
+    data = json.loads(response_body)
+
+    data['email'] = 'user@example.com'
+    data['meta'] = {}
+    data['first_name'] = 'John'
+    data['last_name'] = 'Doe'
+    data['bank_reference'] = 'jdoe123'
+    data['uuid'] = "6f670b3c-a2e6-433f-aeab-b976b1cdaf03"  # fixed uuid keeps the dumped snippet deterministic
+
+    return json.dumps(data)  # returns a JSON string, same shape as the input body
+
+
+@attr('acceptance_test')
+class ProfileTest(StatefulResourceTestBase):  # exercises the Profile resource get/update cycle; dumps are anonymized via anonymize_profile
+    def setUp(self):
+        super(ProfileTest, self).setUp()
+        self.client = cr.Profile()
+
+    @attr('docs_snippets')
+    def test_profile(self):
+
+        with DumpResponse(name='profile', clients=[self.client], resp_data_filter=anonymize_profile):
+            profile = self.client.get()
+
+        profile['company'] = 'Newly Set Company Name'
+        with DumpResponse(name='profile_update', clients=[self.client],
+                          resp_data_filter=anonymize_profile,
+                          req_data_filter=anonymize_profile):  # request body is also scrubbed of PII before dumping
+            self.client.update(profile)
+
+        profile['company'] = ''
+        self.client.update(profile)  # restore the profile so the account is left unchanged
+
+    @attr('docs_snippets')
+    def test_get_schema(self):
+        with DumpResponse(name='profile_schema', clients=[self.client]):
+            self.client.get_schema()
diff --git a/pycloudsigma-master/src/testing/acceptance/test_servers.py b/pycloudsigma-master/src/testing/acceptance/test_servers.py
new file mode 100644
index 0000000..47c6467
--- /dev/null
+++ b/pycloudsigma-master/src/testing/acceptance/test_servers.py
@@ -0,0 +1,936 @@
+import unittest
+import time
+from nose.plugins.attrib import attr
+import socket
+import urlparse
+from testing.acceptance.common import StatefulResourceTestBase
+
+import cloudsigma.resource as cr
+from testing.utils import DumpResponse
+
+
+@attr('acceptance_test')
+class ServerTestBase(StatefulResourceTestBase):  # shared fixture: a Server client plus a helper to create a minimal stopped server
+    def setUp(self):
+        unittest.TestCase.setUp(self)  # NOTE(review): deliberately skips StatefulResourceTestBase.setUp — confirm that is intended
+        self.client = cr.Server()  # create a resource handle object
+        self.dump_response = DumpResponse(clients=[self.client])
+
+    def _create_a_server(self, server_req=None):  # creates a server (default: minimal 1GHz/512MB definition) and asserts it comes up 'stopped'
+        if server_req is None:
+            server_req = {
+                'name': 'testServerAcc',
+                'cpu': 1000,
+                'mem': 512 * 1024 ** 2,
+                'vnc_password': 'testserver',
+            }
+        server = self.client.create(server_req)
+        self.assertDictContainsSubset(server_req, server, 'Server created with different params')
+        self.assertEqual(server['status'], 'stopped', 'Server created with wrong status')
+        return server
+
+
+class ServerTest(ServerTestBase):
+    @classmethod
+    def tearDownClass(cls):  # class-level cleanup hook; currently only delegates upward
+        # TODO: Clean-up after the tests using the bulk tools
+        super(ServerTest, cls).tearDownClass()
+
+    @attr('docs_snippets')
+    def test_list_limit(self):  # verifies the 'limit' query parameter caps the returned page at 20 of the 50 created servers
+        servers = [self._create_a_server(server_req={
+            'name': 'test server %d' % (i,),
+            'cpu': 1000,
+            'mem': 512 * 1024 ** 2,
+            'vnc_password': 'testserver',
+        }) for i in xrange(50)]
+        with DumpResponse(clients=[self.client])('server_list'):
+            servers_list = self.client.list(query_params={'limit': 20})
+        self.assertEqual(20, len(servers_list))
+        time.sleep(10)  # NOTE(review): fixed sleep before bulk delete — presumably lets creation settle; confirm necessity
+        for server in servers:
+            self.client.delete(server['uuid'])
+
+    @attr('docs_snippets')
+    def test_server_state_cycle(self):
+        """Test simple server create-start-stop-delete cycle
+        """
+        dump_response = DumpResponse(clients=[self.client])
+
+        with dump_response('server_create_minimal'):
+            server = self._create_a_server()
+
+        self._verify_list(server, True)  # server should appear in listings after create
+
+        with dump_response('server_start'):
+            self.client.start(server['uuid'])
+
+        self._wait_for_status(server['uuid'], 'running')
+
+        with dump_response('server_stop'):
+            self.client.stop(server['uuid'])
+
+        self._wait_for_status(server['uuid'], 'stopped')
+
+        with dump_response('server_delete'):
+            self.client.delete(server['uuid'])
+
+        self._verify_list(server, False)  # ... and disappear after delete
+
+    @attr('docs_snippets')
+    def test_create_full_server(self):  # creates a fully-specified server (disk + cdrom + dhcp nic + meta), then tears everything down
+        dv = cr.Drive()
+        dump_response = DumpResponse(clients=[self.client])
+
+        drive_def_1 = {
+            'name': 'test_drive_1',
+            'size': '1024000000',
+            'media': 'disk',
+        }
+
+        drive_def_2 = {
+            'name': 'test_drive_2',
+            'size': '1024000000',
+            'media': 'cdrom',
+        }
+
+        drive1 = dv.create(drive_def_1)
+        drive2 = dv.create(drive_def_2)
+
+        self._wait_for_status(drive1['uuid'], 'unmounted', client=dv)  # drives must finish provisioning before attach
+        self._wait_for_status(drive2['uuid'], 'unmounted', client=dv)
+
+        server_definition = {
+            "requirements": [],
+            "name": "test_acc_full_server",
+            "cpus_instead_of_cores": False,
+            "tags": [],
+            "mem": 256*1024**2,
+            "nics": [
+                {
+                    "ip_v4_conf": {
+                        "conf": "dhcp"
+                    },
+                }
+            ],
+            "enable_numa": False,
+            "cpu": 1000,
+            "drives": [
+                {
+                    "device": "virtio",
+                    "dev_channel": "0:0",
+                    "drive": drive1['uuid'],
+                    "boot_order": 1
+                },
+                {
+                    "device": "ide",
+                    "dev_channel": "0:0",
+                    "drive": drive2['uuid'],
+                },
+            ],
+            "smp": 1,
+            "hv_relaxed": False,
+            "hv_tsc": False,
+            "meta": {
+                "description": "A full server with description"
+            },
+
+            "vnc_password": "tester",
+        }
+
+        with dump_response('server_create_full'):
+            server = self.client.create(server_definition)
+
+        # TODO: Uncomment this when the guest_drive definition order changes reach production
+        #self._verify_list(server, True)
+
+        self.client.delete(server['uuid'])  # plain delete leaves the attached drives in place
+
+        self._verify_list(server, False)
+
+        dv.delete(drive1['uuid'])
+        dv.delete(drive2['uuid'])
+
+        self._wait_deleted(drive1['uuid'], client=dv)
+        self._wait_deleted(drive2['uuid'], client=dv)
+
+    @attr('docs_snippets')
+    def test_recurse_delete_guest_w_disks(self):  # delete_with_disks removes the server plus its 'disk' drives, but leaves cdroms intact
+        dv = cr.Drive()
+        dump_response = DumpResponse(clients=[self.client])
+
+        drive_def_1 = {
+            'name': 'test_drive_1',
+            'size': '1024000000',
+            'media': 'disk',
+        }
+
+        drive_def_2 = {
+            'name': 'test_drive_2',
+            'size': '1024000000',
+            'media': 'cdrom',
+        }
+
+        drive1 = dv.create(drive_def_1)
+        drive2 = dv.create(drive_def_2)
+
+        self._wait_for_status(drive1['uuid'], 'unmounted', client=dv)
+        self._wait_for_status(drive2['uuid'], 'unmounted', client=dv)
+
+        with DumpResponse(clients=[dv], name='server_recurse_del_disks_drives_before'):
+            dv.list_detail()
+
+        server_definition = {
+            "requirements": [],
+            "name": "test_acc_full_server",
+            "cpus_instead_of_cores": False,
+            "tags": [],
+            "mem": 256*1024**2,
+            "nics": [
+                {
+                    "ip_v4_conf": {
+                        "conf": "dhcp"
+                    },
+                }
+            ],
+            "enable_numa": False,
+            "cpu": 1000,
+            "drives": [
+                {
+                    "device": "virtio",
+                    "dev_channel": "0:0",
+                    "drive": drive1['uuid'],
+                    "boot_order": 1
+                },
+                {
+                    "device": "ide",
+                    "dev_channel": "0:0",
+                    "drive": drive2['uuid'],
+                },
+            ],
+            "smp": 1,
+            "hv_relaxed": False,
+            "hv_tsc": False,
+            "meta": {
+                "description": "A full server with description"
+            },
+
+            "vnc_password": "tester",
+        }
+
+        with dump_response('server_recurse_del_disks_create'):
+            server = self.client.create(server_definition)
+
+        # TODO: Uncomment this when the guest_drive definition order changes reach production
+        #self._verify_list(server, True)
+        with dump_response('server_recurse_del_disks_delete'):
+            self.client.delete_with_disks(server['uuid'])  # recursive delete: server + 'disk' media drives only
+
+        self._verify_list(server, False)
+
+        self._wait_deleted(drive1['uuid'], client=dv)  # the disk went with the server
+
+        self.assertEqual(dv.get(drive2['uuid'])['status'], 'unmounted')  # the cdrom survived and is unmounted again
+        with DumpResponse(clients=[dv], name='server_recurse_del_disks_drives_after'):
+            dv.list()
+
+        dv.delete(drive2['uuid'])
+        self._wait_deleted(drive2['uuid'], client=dv)
+
+    def test_recurse_delete_guest_w_cdroms(self):  # delete_with_cdroms removes the server plus its 'cdrom' drives, but leaves disks intact
+        dv = cr.Drive()
+
+        drive_def_1 = {
+            'name': 'test_drive_1',
+            'size': '1024000000',
+            'media': 'disk',
+        }
+
+        drive_def_2 = {
+            'name': 'test_drive_2',
+            'size': '1024000000',
+            'media': 'cdrom',
+        }
+
+        drive1 = dv.create(drive_def_1)
+        drive2 = dv.create(drive_def_2)
+
+        self._wait_for_status(drive1['uuid'], 'unmounted', client=dv)
+        self._wait_for_status(drive2['uuid'], 'unmounted', client=dv)
+
+        server_definition = {
+            "requirements": [],
+            "name": "test_acc_full_server",
+            "cpus_instead_of_cores": False,
+            "tags": [],
+            "mem": 256*1024**2,
+            "nics": [
+                {
+                    "ip_v4_conf": {
+                        "conf": "dhcp"
+                    },
+                }
+            ],
+            "enable_numa": False,
+            "cpu": 1000,
+            "drives": [
+                {
+                    "device": "virtio",
+                    "dev_channel": "0:0",
+                    "drive": drive1['uuid'],
+                    "boot_order": 1
+                },
+                {
+                    "device": "ide",
+                    "dev_channel": "0:0",
+                    "drive": drive2['uuid'],
+                },
+            ],
+            "smp": 1,
+            "hv_relaxed": False,
+            "hv_tsc": False,
+            "meta": {
+                "description": "A full server with description"
+            },
+
+            "vnc_password": "tester",
+        }
+
+
+        server = self.client.create(server_definition)
+
+        # TODO: Uncomment this when the guest_drive definition order changes reach production
+        #self._verify_list(server, True)
+
+        self.client.delete_with_cdroms(server['uuid'])  # recursive delete: server + 'cdrom' media drives only
+
+        self._verify_list(server, False)
+
+        self._wait_deleted(drive2['uuid'], client=dv)  # the cdrom went with the server
+
+        self.assertEqual(dv.get(drive1['uuid'])['status'], 'unmounted')  # the disk survived and is unmounted again
+        dv.delete(drive1['uuid'])
+        self._wait_deleted(drive1['uuid'], client=dv)
+
+    @attr('docs_snippets')
+    def test_recurse_delete_server_w_all_drives(self):  # delete_with_all_drives removes the server and every attached drive (disk and cdrom)
+        dv = cr.Drive()
+        dump_response = DumpResponse(clients=[self.client])
+
+        drive_def_1 = {
+            'name': 'test_drive_1',
+            'size': '1024000000',
+            'media': 'disk',
+        }
+
+        drive_def_2 = {
+            'name': 'test_drive_2',
+            'size': '1024000000',
+            'media': 'cdrom',
+        }
+
+        drive1 = dv.create(drive_def_1)
+        drive2 = dv.create(drive_def_2)
+
+        self._wait_for_status(drive1['uuid'], 'unmounted', client=dv)
+        self._wait_for_status(drive2['uuid'], 'unmounted', client=dv)
+
+        with DumpResponse(clients=[dv], name='server_recurse_del_all_drives_drives_before'):
+            dv.list_detail()
+
+        server_definition = {
+            "requirements": [],
+            "name": "test_acc_full_server",
+            "cpus_instead_of_cores": False,
+            "tags": [],
+            "mem": 256*1024**2,
+            "nics": [
+                {
+                    "ip_v4_conf": {
+                        "conf": "dhcp"
+                    },
+                }
+            ],
+            "enable_numa": False,
+            "cpu": 1000,
+            "drives": [
+                {
+                    "device": "virtio",
+                    "dev_channel": "0:0",
+                    "drive": drive1['uuid'],
+                    "boot_order": 1
+                },
+                {
+                    "device": "ide",
+                    "dev_channel": "0:0",
+                    "drive": drive2['uuid'],
+                },
+            ],
+            "smp": 1,
+            "hv_relaxed": False,
+            "hv_tsc": False,
+            "meta": {
+                "description": "A full server with description"
+            },
+
+            "vnc_password": "tester",
+        }
+
+        with dump_response('server_recurse_del_all_drives_create'):
+            server = self.client.create(server_definition)
+
+        # TODO: Uncomment this when the guest_drive definition order changes reach production
+        #self._verify_list(server, True)
+        with dump_response('server_recurse_del_all_drives_delete'):
+            self.client.delete_with_all_drives(server['uuid'])
+
+        self._verify_list(server, False)
+
+
+
+        self._wait_deleted(drive1['uuid'], client=dv)  # both drives and the server are gone
+        self._wait_deleted(drive2['uuid'], client=dv)
+        self._wait_deleted(server['uuid'], client=self.client)
+
+        with DumpResponse(clients=[dv], name='server_recurse_del_all_drives_drives_after'):
+            dv.list_detail()
+
+
+    @attr('docs_snippets')
+    def test_server_nics(self):  # walks through NIC reconfiguration: private VLAN, dhcp, static IP, reordering, replace-in-place, manual
+        server = self._create_a_server()
+
+        subs_client = cr.Subscriptions()
+
+        vlan_client = cr.VLAN()
+        vlans = vlan_client.list()
+        if not vlans:  # auto-subscribe for a VLAN when the account has none
+            subs_client.create({'resource': 'vlan', 'amount': 1, 'period': '1 month'})
+            vlans = vlan_client.list()
+        vlan_uuid = vlans[0]['uuid']
+
+        ip_client = cr.IP()
+        ips = ip_client.list()
+        free_ips = [ip for ip in ips if ip['server'] is None]  # only IPs not bound to a server are usable for static conf
+        if not free_ips:  # auto-subscribe for an IP when none are free
+            subs_client.create({'resource': 'ip', 'amount': 1, 'period': '1 month'})
+            ips = ip_client.list()
+            free_ips = [ip for ip in ips if ip['server'] is None]
+
+        ip_uuid = free_ips[0]['uuid']
+
+        server['nics'] = [{'vlan': vlan_uuid}]
+
+        with DumpResponse(clients=[self.client], name='server_add_private_nic'):
+            server = self.client.update(server['uuid'], server)
+
+        server['nics'] = [{'ip_v4_conf': {'conf': 'dhcp'}, 'model': 'e1000'}]
+        with DumpResponse(clients=[self.client], name='server_add_dhcp_nic'):
+            server = self.client.update(server['uuid'], server)
+
+        server['nics'] = [{'ip_v4_conf': {'conf': 'dhcp'}, 'model': 'e1000'}, {'vlan': vlan_uuid}]
+        server = self.client.update(server['uuid'], server)
+        with DumpResponse(clients=[self.client], name='server_get_two_nics'):
+            server = self.client.get(server['uuid'])
+
+        server['nics'][0]['ip_v4_conf'] = {'conf': 'static', 'ip': ip_uuid}
+        with DumpResponse(clients=[self.client], name='server_change_nic_to_static'):
+            server = self.client.update(server['uuid'], server)
+
+        server['nics'] = [server['nics'][1], server['nics'][0]]  # swap NIC order while keeping both
+        with DumpResponse(clients=[self.client], name='server_rearrange_nics'):
+            server = self.client.update(server['uuid'], server)
+
+        private_mac = server['nics'][0]['mac']
+        server['nics'] = [{'ip_v4_conf': {'conf': 'dhcp'}, 'mac': private_mac}]  # keep the NIC identity (mac) but change its config; drops the other NIC
+        with DumpResponse(clients=[self.client], name='server_del_and_change_nic'):
+            server = self.client.update(server['uuid'], server)
+
+        server['nics'] = [{'ip_v4_conf': {'conf': 'manual'}}]
+        with DumpResponse(clients=[self.client], name='server_add_manual_nic'):
+            server = self.client.update(server['uuid'], server)
+
+        self.client.delete(server['uuid'])
+
+    @attr('docs_snippets')
+    def test_server_runtime(self):  # boots a server with a dhcp NIC and checks runtime NIC info via both get() and runtime()
+        dv = cr.Drive()
+        drive_def_1 = {
+            'name': 'test_drive_1',
+            'size': '1024000000',
+            'media': 'disk',
+        }
+        drive1 = dv.create(drive_def_1)
+        self._wait_for_status(drive1['uuid'], 'unmounted', client=dv)
+
+        server_def = {
+            'name': 'testServerAcc',
+            'cpu': 1000,
+            'mem': 512 * 1024 ** 2,
+            'vnc_password': 'testserver',
+            'drives': [
+                {
+                    "device": "virtio",
+                    "dev_channel": "0:0",
+                    "drive": drive1['uuid'],
+                    "boot_order": 1
+                },
+
+            ],
+            "nics": [
+                {
+                    "ip_v4_conf": {
+                        "ip": None,
+                        "conf": "dhcp"
+                    },
+                    "model": "virtio",
+                }
+            ],
+        }
+
+        server = self.client.create(server_def)
+
+        self._verify_list(server, True)
+
+        self.client.start(server['uuid'])
+
+        self._wait_for_status(server['uuid'], 'running')
+
+        with DumpResponse(clients=[self.client], name='server_get_running'):
+            server_def = self.client.get(server['uuid'])  # NOTE: rebinds server_def to the live server representation
+
+        self.assertEqual(server_def['runtime']['nics'][0]['interface_type'], 'public')
+        self.assertIsNotNone(server_def['runtime']['nics'][0]['ip_v4'])  # dhcp should have assigned an address
+
+        # check runtime call
+        runtime = self.client.runtime(server['uuid'])
+        self.assertEqual(runtime['nics'][0]['interface_type'], 'public')
+        self.assertIsNotNone(runtime['nics'][0]['ip_v4'])
+
+        self.client.stop(server['uuid'])
+        self._wait_for_status(server['uuid'], 'stopped')
+
+        self.client.delete(server['uuid'])
+        self._verify_list(server, False)
+
+        dv.delete(drive1['uuid'])
+        self._wait_deleted(drive1['uuid'], client=dv)
+
+    def _open_vnc_tunnel(self):  # helper: boots a server, opens its VNC tunnel, returns (server, (host, port)) parsed from vnc_url
+        server = self._create_a_server()
+        self.client.start(server['uuid'])
+        self._wait_for_status(server['uuid'], 'running')
+
+        with self.dump_response('server_open_vnc'):
+            open_vnc_resp = self.client.open_vnc(server['uuid'])
+
+        self.assertDictContainsSubset({'result': 'success', 'uuid': server['uuid']}, open_vnc_resp)
+
+        #Parsing vnc address and port from vnc_url
+        vnc_args = urlparse.urlparse(open_vnc_resp['vnc_url']).netloc.split(":")  # netloc is "host:port"
+        vnc_address = (str(vnc_args[0]), int(vnc_args[1]))
+
+        return server, vnc_address
+
+    def _close_vnc_tunnel(self, server):  # helper: closes the VNC tunnel, then stops and deletes the server
+        with self.dump_response('server_close_vnc'):
+            close_vnc_resp = self.client.close_vnc(server['uuid'])
+
+        self.assertDictContainsSubset({'result': 'success', 'uuid': server['uuid'], 'action': 'close_vnc'},
+                                      close_vnc_resp)
+
+        self.client.stop(server['uuid'])
+        self._wait_for_status(server['uuid'], 'stopped')
+
+        self.client.delete(server['uuid'])
+        self._verify_list(server, False)
+
+    @attr('docs_snippets')
+    def test_vnc_tunnel_open_close(self):  # open/close round-trip only; does not attempt a connection
+        server, _ = self._open_vnc_tunnel()
+        time.sleep(3)  # give the tunnel a moment before closing it again
+        self._close_vnc_tunnel(server)
+
+    def test_vnc_tunnel(self):  # connects to the opened VNC tunnel and verifies the RFB protocol handshake
+        server, vnc_address = self._open_vnc_tunnel()
+
+        vnc_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+        vnc_sock.settimeout(10)
+
+        now = time.time()
+
+        while now + 15 >= time.time():  # retry connecting for up to ~15s while the tunnel comes up
+            try:
+                #Check if we can connect to VNC address
+                vnc_sock.connect(vnc_address)
+            except:  # NOTE(review): bare except retries on any error, not just connection refusal
+                time.sleep(1)
+            else:
+                break
+
+        #Checking if VNC initial handshake is sent
+        vnc_ver = vnc_sock.recv(16)
+        self.assertRegexpMatches(vnc_ver, 'RFB \d+\.\d+\\n')  # RFB greeting, e.g. "RFB 003.008\n"
+        vnc_sock.close()
+
+        self._close_vnc_tunnel(server)
+
+    @attr('docs_snippets')
+    def test_get_schema(self):  # dumps the server resource schema for the docs
+        with DumpResponse(clients=[self.client], name='server_schema'):
+            self.client.get_schema()
+
+    @attr('docs_snippets')
+    def test_server_list(self):  # bulk-creates 5 servers, dumps list/list_detail, then deletes and confirms removal
+
+        server_req = [
+            {
+                'name': 'test_server_%i' % i,
+                'cpu': 1000,
+                'mem': 512 * 1024 ** 2,
+                'vnc_password': 'testserver',
+            } for i in range(5)
+        ]
+
+        with self.dump_response('server_create_bulk'):
+            servers = self.client.create(server_req)  # create() accepts a list for bulk creation
+
+        with self.dump_response('server_list'):
+            self.client.list()
+
+        with self.dump_response('server_list_detail'):
+            self.client.list_detail()
+
+        for server in servers:
+            self.client.delete(server['uuid'])
+
+        remaining_servers = [srv['uuid'] for srv in self.client.list()]
+
+        for server in servers:
+            self.assertNotIn(server['uuid'], remaining_servers)
+
+    @attr('docs_snippets')
+    def test_server_edit(self):  # covers simple field updates, meta add/replace, and drive attachment via update()
+        server_def = {
+            'name': 'test_server_1',
+            'cpu': 1000,
+            'mem': 512 * 1024 ** 2,
+            'vnc_password': 'testserver',
+        }
+
+        server = self._create_a_server(server_req=server_def)
+
+        # Test simple update
+        server_def['name'] = 'test_server_updated'
+        server_def['cpu'] = 2000
+        server_def['vnc_password'] = 'updated_password'
+
+        with self.dump_response('server_edit_minimal'):
+            updated_server = self.client.update(server['uuid'], server_def)
+
+        self.assertDictContainsSubset(server_def, updated_server)
+
+        server_def['meta'] = {'meta_key1': 'value1', 'meta_key2': 'value2'}
+        with self.dump_response('server_add_meta'):
+            self.client.update(server['uuid'], server_def)
+        updated_server['meta'] = {'meta_key2': 'value2', 'meta_key3': 'value3'}  # meta is replaced wholesale: meta_key1 disappears (asserted below)
+        with self.dump_response('server_edit_meta'):
+            updated_server = self.client.update(server['uuid'], updated_server)
+
+        self.assertTrue('meta_key1' not in updated_server['meta'])
+        self.assertTrue(updated_server['meta']['meta_key3'] == 'value3')
+
+        del server_def['meta']
+        self.client.update(server['uuid'], server_def)
+
+        dv = cr.Drive()
+
+        drive_def_1 = {
+            'name': 'test_drive_1',
+            'size': '1024000000',
+            'media': 'disk',
+        }
+
+        drive = dv.create(drive_def_1)
+        self._wait_for_status(drive['uuid'], 'unmounted', client=dv)
+
+        #Test attach drive
+        server_def['drives'] = [
+            {
+                "device": "virtio",
+                "dev_channel": "0:0",
+                "drive": drive['uuid'],
+                "boot_order": 1
+            },
+        ]
+
+        with self.dump_response('server_attach_drive'):
+            updated_server = self.client.update(server['uuid'], server_def)
+
+        self.assertEqual(
+            server_def['drives'][0]['drive'],
+            updated_server['drives'][0]['drive']['uuid'],  # response expands the drive uuid into an object
+            'The updated server and the update definition do not match'
+        )
+
+        self.client.delete(updated_server['uuid'])
+
+        dv.delete(drive['uuid'])
+
+        self._wait_deleted(drive['uuid'], client=dv)
+
+    def test_bulk_start_stop_and_usage(self):  # creates 40 servers, starts them, and checks CPU usage accounting matches the requested total
+
+        # Check if usage is correct
+        usage_client = cr.CurrentUsage()
+        curr_cpu_usage = usage_client.list()['usage']['cpu']['using']  # baseline before this test adds load
+
+        server_req = [{
+            'name': 'test_start_stop_server_%i' % i,
+            'cpu': 500,
+            'mem': 512 * 1024 ** 2,
+            'vnc_password': 'testserver',
+        } for i in range(40)]
+
+        # Creating 40 servers
+        servers = self.client.create(server_req)
+        cpu_usage = sum(g['cpu'] for g in server_req) + curr_cpu_usage  # expected total = baseline + 40 * 500
+
+        # Starting the servers
+        for server in servers:
+            self.client.start(server['uuid'])
+
+        time.sleep(2) # give a bit of time for usage to update
+        self.assertEqual(cpu_usage, usage_client.list()['usage']['cpu']['using'])  # NOTE(review): fixed 2s wait — potentially racy; confirm
+
+        # Wait for status running
+        for server in servers:
+            self._wait_for_status(server['uuid'], 'running')
+
+        # Stop the servers
+        for server in servers:
+            self.client.stop(server['uuid'])
+
+        # Wait for them to stop
+        for server in servers:
+            self._wait_for_status(server['uuid'], 'stopped', timeout=45)
+
+        # Delete them
+        for server in servers:
+            self.client.delete(server['uuid'])
+
+    @attr('docs_snippets')
+    def test_server_clone(self):  # clones a driveless server with a new name and a randomized VNC password
+        server = self._create_a_server()
+
+        with DumpResponse(clients=[self.client], name='server_get_clone_source'):
+            server = self.client.get(server['uuid'])
+
+        with DumpResponse(clients=[self.client], name='server_clone'):
+            clone = self.client.clone(server['uuid'], {'name': 'test cloned server name', 'random_vnc_password': True})
+
+        self.client.delete(server['uuid'])
+        self.client.delete(clone['uuid'])
+
+    def test_server_clone_with_drive(self):  # cloning copies 'disk' drives (new uuid) but shares 'cdrom' drives (same uuid) — see asserts below
+        dv = cr.Drive()
+        drive_def_1 = {
+            'name': 'test_drive_1',
+            'size': '1024000000',
+            'media': 'disk',
+        }
+        drive1 = dv.create(drive_def_1)
+        self._wait_for_status(drive1['uuid'], 'unmounted', client=dv)
+
+        dv = cr.Drive()
+        drive_def_2 = {
+            'name': 'test_drive_2',
+            'size': '1024000000',
+            'media': 'cdrom',
+        }
+        drive2 = dv.create(drive_def_2)
+        self._wait_for_status(drive2['uuid'], 'unmounted', client=dv)
+
+        server_def = {
+            'name': 'testServerAcc',
+            'cpu': 1000,
+            'mem': 512 * 1024 ** 2,
+            'vnc_password': 'testserver',
+            'drives': [
+                {
+                    "device": "virtio",
+                    "dev_channel": "0:0",
+                    "drive": drive1['uuid'],
+                    "boot_order": 1
+                },
+                {
+                    "device": "virtio",
+                    "dev_channel": "0:1",
+                    "drive": drive2['uuid'],
+                    "boot_order": 2
+                },
+
+            ],
+            "nics": [
+                {
+                    "ip_v4_conf": {
+                        "ip": None,
+                        "conf": "dhcp"
+                    },
+                    "model": "virtio",
+                }
+            ],
+        }
+
+        server = self.client.create(server_def)
+
+        clone = self.client.clone(server['uuid'], {'name': 'cloned server name', 'random_vnc_password': True})
+
+        for mount in clone['drives']:
+            drive_uuid = mount['drive']['uuid']
+            self._wait_for_status(drive_uuid, 'mounted', client=dv)
+
+        self.assertNotEqual(clone['drives'][0]['drive']['uuid'], server['drives'][0]['drive']['uuid'])  # disk was copied
+        self.assertEqual(clone['drives'][1]['drive']['uuid'], server['drives'][1]['drive']['uuid'])  # cdrom is shared
+
+        self.client.delete_with_all_drives(server['uuid'])
+        self.client.delete_with_disks(clone['uuid'])  # shared cdrom already removed with the original
+
+        self._wait_deleted(server['drives'][0]['drive']['uuid'], client=dv)
+        self._wait_deleted(server['drives'][1]['drive']['uuid'], client=dv)
+        self._wait_deleted(clone['drives'][0]['drive']['uuid'], client=dv)
+
+    def test_server_clone_with_avoid_drive(self):  # same as test_server_clone_with_drive, but passes an 'avoid' hint for drive placement
+        dv = cr.Drive()
+        drive_def_1 = {
+            'name': 'test_drive_1',
+            'size': '1024000000',
+            'media': 'disk',
+        }
+        drive1 = dv.create(drive_def_1)
+        self._wait_for_status(drive1['uuid'], 'unmounted', client=dv)
+
+        dv = cr.Drive()
+        drive_def_2 = {
+            'name': 'test_drive_2',
+            'size': '1024000000',
+            'media': 'cdrom',
+        }
+        drive2 = dv.create(drive_def_2)
+        self._wait_for_status(drive2['uuid'], 'unmounted', client=dv)
+
+        server_def = {
+            'name': 'testServerAcc',
+            'cpu': 1000,
+            'mem': 512 * 1024 ** 2,
+            'vnc_password': 'testserver',
+            'drives': [
+                {
+                    "device": "virtio",
+                    "dev_channel": "0:0",
+                    "drive": drive1['uuid'],
+                    "boot_order": 1
+                },
+                {
+                    "device": "virtio",
+                    "dev_channel": "0:1",
+                    "drive": drive2['uuid'],
+                    "boot_order": 2
+                },
+
+            ],
+            "nics": [
+                {
+                    "ip_v4_conf": {
+                        "ip": None,
+                        "conf": "dhcp"
+                    },
+                    "model": "virtio",
+                }
+            ],
+        }
+
+        server = self.client.create(server_def)
+
+        clone = self.client.clone(server['uuid'], {'name': 'cloned server name', 'random_vnc_password': True},
+                                  avoid=[server['uuid']])  # ask for clone drives to avoid the source server's placement
+
+        for mount in clone['drives']:
+            drive_uuid = mount['drive']['uuid']
+            self._wait_for_status(drive_uuid, 'mounted', client=dv)
+
+        self.assertNotEqual(clone['drives'][0]['drive']['uuid'], server['drives'][0]['drive']['uuid'])  # disk was copied
+        self.assertEqual(clone['drives'][1]['drive']['uuid'], server['drives'][1]['drive']['uuid'])  # cdrom is shared
+
+        self.client.delete_with_all_drives(server['uuid'])
+        self.client.delete_with_disks(clone['uuid'])
+
+        self._wait_deleted(server['drives'][0]['drive']['uuid'], client=dv)
+        self._wait_deleted(server['drives'][1]['drive']['uuid'], client=dv)
+        self._wait_deleted(clone['drives'][0]['drive']['uuid'], client=dv)
+
+
+@attr('stress_test')
+class ServerStressTest(StatefulResourceTestBase):  # load test: clones SERVER_COUNT drives from a persistent image and boots a server on each
+    SERVER_COUNT = 60  # number of servers/drives created by the stress run
+
+    def setUp(self):
+        super(ServerStressTest, self).setUp()
+        self.server_client = cr.Server()
+        self.drive_client = cr.Drive()
+
+    def test_create_start_test_io(self):
+        """Servers create, start, test drive io and stop"""
+
+        server_req = []
+        puuid, ppass = self._get_persistent_image_uuid_and_pass()  # ppass currently unused here
+
+        cloned = []
+        for num in range(self.SERVER_COUNT):
+            cloned.append(self.drive_client.clone(puuid, {'name': "stress_atom_clone_{}".format(num)}))
+
+        for i, drive in enumerate(cloned):
+            server_req.append({
+                'name': 'stress_drive_iops_%i' % i,
+                'cpu': 500,
+                'mem': 512 * 1024 ** 2,
+                'vnc_password': 'testserver',
+                'drives': [
+                    {
+                        "device": "virtio",
+                        "dev_channel": "0:0",
+                        "drive": drive['uuid'],
+                        "boot_order": 1
+                    },
+                ],
+                'nics': [
+                    {
+                        "ip_v4_conf": {
+                            "ip": None,
+                            "conf": "dhcp"
+                        },
+                        "model": "virtio",
+                    }
+                ],
+
+            })
+
+        servers = self.server_client.create(server_req)
+        sip_map = {}
+
+        for cloning_drive in cloned:
+            self._wait_for_status(cloning_drive['uuid'], status='mounted', client=self.drive_client, timeout=120*1000)  # NOTE(review): other waits pass seconds (e.g. timeout=45); 120*1000 looks like milliseconds — confirm the unit
+
+        for server in servers:
+            self.server_client.start(server['uuid'])
+
+        for server in servers:
+            self._wait_for_status(server['uuid'], status='running', client=self.server_client)
+
+        for server in self.server_client.list_detail():
+            sip_map[server['uuid']] = server['runtime']['nics'][0]['ip_v4']  # map server uuid -> first NIC's runtime IPv4
+
+        for server in servers:
+            self.server_client.stop(server['uuid'])
+
+
diff --git a/pycloudsigma-master/src/testing/acceptance/test_snapshots.py b/pycloudsigma-master/src/testing/acceptance/test_snapshots.py
new file mode 100644
index 0000000..49b60dc
--- /dev/null
+++ b/pycloudsigma-master/src/testing/acceptance/test_snapshots.py
@@ -0,0 +1,180 @@
+from nose.plugins.attrib import attr
+from testing.acceptance.common import StatefulResourceTestBase
+from testing.utils import DumpResponse
+from cloudsigma.errors import ClientError
+import cloudsigma.resource as cr
+
+
+@attr('acceptance_test')
+class SnapshotsTest(StatefulResourceTestBase):
+    def setUp(self):  # fixture: snapshot + drive clients sharing one DumpResponse recorder
+        super(SnapshotsTest, self).setUp()
+        self.snap_client = cr.Snapshot()
+        self.drive_client = cr.Drive()
+        self.dump_response = DumpResponse(clients=[self.snap_client, self.drive_client])
+
+    @attr('docs_snippets')
+    def test_get_snapshot_schema(self):  # dumps the snapshot resource schema for the docs
+        with self.dump_response("snapshot_schema"):
+            self.snap_client.get_schema()
+
+    @attr('docs_snippets')
+    def test_snapshot_cycle(self):  # full lifecycle: create/update/delete snapshots; deleting the drive removes its remaining snapshots
+        drive_def = {
+            'name': 'test_drive_snapshot',
+            'size': 1024000000,
+            'media': 'disk',
+        }
+
+        with self.dump_response('drive_for_snapshots'):
+            d = self.drive_client.create(drive_def)
+        drive_uuid = d['uuid']
+
+        self._wait_for_status(drive_uuid, client=self.drive_client, status='unmounted')
+        self.assertFalse(d['snapshots'])  # a fresh drive has no snapshots
+
+        snap_def = {
+            'drive': drive_uuid,
+            'name': 'first_snapshot',
+            'meta': {'key': 'val'}
+        }
+
+        with self.dump_response('snapshot_create'):
+            snap = self.snap_client.create(snap_def)
+        snap_uuid = snap['uuid']
+
+        self._wait_for_status(snap_uuid, 'available', client=self.snap_client)
+        self.assertEqual(snap['drive']['uuid'], drive_uuid)
+
+        with self.dump_response('drive_with_one_snapshot'):
+            d = self.drive_client.get(drive_uuid)
+
+        self.assertEqual(snap_uuid, d['snapshots'][0]['uuid'])
+
+        another_snap_def = {'drive': drive_uuid}  # name is optional on create
+        with self.dump_response('snapshot_create_another'):
+            another_snap = self.snap_client.create(another_snap_def)
+        another_snap_uuid = another_snap['uuid']
+        self._wait_for_status(another_snap_uuid, 'available', client=self.snap_client)
+        another_snap['name'] = 'another_snap'
+        self.snap_client.update(another_snap_uuid, another_snap)
+
+        another_snap = self.snap_client.get(another_snap_uuid)
+
+        self.assertEqual('another_snap', another_snap['name'])
+
+        with self.dump_response('drive_with_two_snapshots'):
+            d = self.drive_client.get(drive_uuid)
+
+        self.assertItemsEqual([snap_uuid, another_snap_uuid], [s['uuid'] for s in d['snapshots']])
+
+        with self.dump_response('snapshot_delete'):
+            self.snap_client.delete(snap_uuid)
+
+        self._wait_deleted(snap_uuid, client=self.snap_client)
+        with self.assertRaises(ClientError) as cm:
+            self.snap_client.get(snap_uuid)
+        self.assertEqual(cm.exception[0], 404)  # Python 2 idiom: indexes exception args for the HTTP status
+
+        d = self.drive_client.get(drive_uuid)
+        self.assertEqual(another_snap_uuid, d['snapshots'][0]['uuid'])
+
+        self.drive_client.delete(drive_uuid)
+
+        self._wait_deleted(drive_uuid, client=self.drive_client)
+
+        with self.assertRaises(ClientError) as cm:  # drive deletion cascades to its snapshots
+            self.snap_client.get(another_snap_uuid)
+        self.assertEqual(cm.exception[0], 404)
+
+    @attr('docs_snippets')
+    def test_snapshot_listing(self):  # lists snapshots globally and filtered per drive; drive detail must agree with the filtered list
+
+        drive_def = {
+            'name': 'test_drive_snapshot',
+            'size': 1024000000,
+            'media': 'disk',
+        }
+
+        # Create 3 drives
+        drive_uuids = []
+        for _ in xrange(3):
+            d = self.drive_client.create(drive_def)
+            drive_uuids.append(d['uuid'])
+
+        for d_uuid in drive_uuids:
+            self._wait_for_status(d_uuid, 'unmounted', client=self.drive_client)
+
+        self.assertFalse(self.drive_client.get(drive_uuids[0])['snapshots'])
+
+        # Create two snapshots for each drive
+        snap_uuids = []
+        for d_uuid in drive_uuids:
+            snap_uuid1 = self.snap_client.create({'drive': d_uuid})['uuid']
+            snap_uuid2 = self.snap_client.create({'drive': d_uuid})['uuid']
+            snap_uuids.extend([snap_uuid1, snap_uuid2])
+
+        with self.dump_response("snapshot_get"):
+            self.snap_client.get(snap_uuid1)  # snap_uuid1 still holds the last loop iteration's value here
+
+        with self.dump_response('snapshot_list'):
+            self.snap_client.list()
+        with self.dump_response('snapshot_list_detail'):
+            snap_list = self.snap_client.list_detail()
+
+        self.assertLessEqual(6, len(snap_list))  # >= 6: other snapshots may pre-exist in the account
+        self.assertTrue(set(snap_uuids).issubset([s['uuid'] for s in snap_list]))
+
+        with self.dump_response('snapshot_list_for_drive'):
+            drive_snapshots = self.snap_client.list_detail(query_params={'drive': drive_uuids[0]})
+
+        self.assertEqual(len(drive_snapshots), 2)
+        with self.dump_response('snapshots_in_drive_def'):
+            snapshots_from_drive_def = self.drive_client.get(drive_uuids[0])['snapshots']
+
+        self.assertItemsEqual([s['uuid'] for s in drive_snapshots], [s['uuid'] for s in snapshots_from_drive_def])
+
+        for d_uuid in drive_uuids:
+            self.drive_client.delete(d_uuid)
+
+        self._wait_deleted(drive_uuids[0], client=self.drive_client)
+        self.assertFalse(self.snap_client.list_detail(query_params={'drive': drive_uuids[0]}))  # cascade: no snapshots left for the deleted drive
+
+    @attr('docs_snippets')
+    def test_snapshot_clone(self):  # cloning a snapshot yields a drive inheriting name/media/size, unless overridden via the data payload
+        drive_def = {
+            'name': 'test_drive_snapshot',
+            'size': 1024000000,
+            'media': 'disk',
+        }
+
+        with self.dump_response('drive_for_clone_snapshot'):
+            d = self.drive_client.create(drive_def)
+        drive_uuid = d['uuid']
+
+        self._wait_for_status(drive_uuid, client=self.drive_client, status='unmounted')
+        snap = self.snap_client.create({'drive': drive_uuid})
+        snap_uuid = snap['uuid']
+        self._wait_for_status(snap_uuid, client=self.snap_client, status='available')
+
+        with self.dump_response('snapshot_clone'):
+            cloned_drive = self.snap_client.clone(snap_uuid, avoid=drive_uuid)  # NOTE(review): avoid passed as a bare uuid, not a list — confirm accepted form
+
+        self.assertEqual(snap['name'], cloned_drive['name'])  # defaults are inherited from the snapshot/source drive
+        self.assertEqual(d['media'], cloned_drive['media'])
+        self.assertEqual(d['size'], cloned_drive['size'])
+
+        self._wait_for_status(cloned_drive['uuid'], 'unmounted', client=self.drive_client)
+        self.drive_client.delete(cloned_drive['uuid'])
+        self._wait_deleted(cloned_drive['uuid'], client=self.drive_client)
+
+        clone_data = {'media': 'cdrom', 'name': 'test_drive_snapshot_clone_name'}
+
+        cloned_drive = self.snap_client.clone(snap_uuid, data=clone_data, avoid=drive_uuid)
+
+        self.assertEqual(clone_data['name'], cloned_drive['name'])  # explicit overrides win
+        self.assertEqual(clone_data['media'], cloned_drive['media'])
+        self.assertEqual(d['size'], cloned_drive['size'])
+
+        self._wait_for_status(cloned_drive['uuid'], 'unmounted', client=self.drive_client)
+        self.drive_client.delete(cloned_drive['uuid'])
diff --git a/pycloudsigma-master/src/testing/acceptance/test_subscriptions.py b/pycloudsigma-master/src/testing/acceptance/test_subscriptions.py
new file mode 100644
index 0000000..53a467d
--- /dev/null
+++ b/pycloudsigma-master/src/testing/acceptance/test_subscriptions.py
@@ -0,0 +1,28 @@
+import os
+import unittest
+from nose.plugins.attrib import attr
+
+import cloudsigma.resource as cr
+
+from testing.utils import DumpResponse
+
+
+@attr('acceptance_test')
+class BillingBase(unittest.TestCase):
+
+ @attr('docs_snippets')
+ def test_subscription_list(self):
+ client = cr.Subscriptions()
+ with DumpResponse(clients=[client])('subscription_list'):
+ client.get()
+
+ with DumpResponse(clients=[client])('subscription_schema'):
+ client.get_schema()
+
+ def test_subscription_create(self):
+ if os.environ.get('TURLO_MANUAL_TESTS', '0') == '0':
+ raise unittest.SkipTest("Subscriptions cannot be deleted by the user so this cannot be cleaned up. Use TURLO_MANUAL_TESTS=1 environment variable")
+
+ client = cr.Subscriptions()
+ with DumpResponse(clients=[client])('subscription_create'):
+ sub = client.create({"resource": "dssd", "amount": 1000*3 * 10, "period": "1 month"})
diff --git a/pycloudsigma-master/src/testing/acceptance/test_tags.py b/pycloudsigma-master/src/testing/acceptance/test_tags.py
new file mode 100644
index 0000000..9f4e702
--- /dev/null
+++ b/pycloudsigma-master/src/testing/acceptance/test_tags.py
@@ -0,0 +1,67 @@
+from nose.plugins.attrib import attr
+from testing.acceptance.common import StatefulResourceTestBase
+from testing.utils import DumpResponse
+import cloudsigma.resource as cr
+
+
+@attr('acceptance_test')
+class tagsTest(StatefulResourceTestBase):
+    """Acceptance tests for the tags API, dumping docs sample responses."""
+
+    def setUp(self):
+        super(tagsTest, self).setUp()
+        self.client = cr.Tags()
+        self.dump_response = DumpResponse(clients=[self.client])
+
+        # Start from a clean slate: remove every pre-existing tag.
+        tags = self.client.list()
+
+        for tag in tags:
+            self.client.delete(tag['uuid'])
+
+    @attr('docs_snippets')
+    def test_tags(self):
+        """Exercise the full tag lifecycle: create, list, get, update, delete.
+
+        Creates servers, a drive, and picks an IP and a VLAN so tags can be
+        attached to real resources; each API call is dumped as a docs snippet.
+        """
+        with self.dump_response('tags_schema'):
+            self.client.get_schema()
+
+        # Resources the tags will reference.
+        sc = cr.Server()
+        server1 = sc.create({'name': 'test_server1', 'cpu': 1000, 'mem': 512*1024**2, 'vnc_password': 'pass'})
+        server2 = sc.create({'name': 'test_server2', 'cpu': 1000, 'mem': 512*1024**2, 'vnc_password': 'pass'})
+
+        dc = cr.Drive()
+        drive = dc.create({'name': 'test_drive', 'size': 1000**3, 'media': 'disk'})
+
+        # NOTE(review): assumes the account has at least one IP and one VLAN.
+        ip = cr.IP().list()[0]
+        vlan = cr.VLAN().list()[0]
+
+        with self.dump_response('tags_create'):
+            tag1 = self.client.create({'name': 'MyGroupOfThings'})
+
+        with self.dump_response('tags_create_with_resource'):
+            tag2 = self.client.create({'name': 'TagCreatedWithResource',
+                                       'resources': [server1['uuid'], server2['uuid'], drive['uuid'], ip['uuid'],
+                                                     vlan['uuid']]})
+        with self.dump_response('tags_list'):
+            self.client.list()
+
+        with self.dump_response('tags_list_detail'):
+            self.client.list_detail()
+
+        with self.dump_response('tags_get'):
+            self.client.get(tag2['uuid'])
+
+        # Shrink tag2's resource list down to one server and the drive.
+        with self.dump_response('tags_update_resources'):
+            self.client.update(tag2['uuid'], {'name': 'TagCreatedWithResource', 'resources': [server1['uuid'],
+                                                                                              drive['uuid']]})
+
+        # Tags can also be assigned from the resource side (server update).
+        server2['tags'] = [tag1['uuid'], tag2['uuid']]
+        with DumpResponse(clients=[sc], name='tags_update_tag_from_resource'):
+            sc.update(server2['uuid'], server2)
+
+        with self.dump_response('tags_list_resource'):
+            self.client.servers(tag1['uuid'])
+
+        # Clean up resources before deleting the tags themselves.
+        dc.delete(drive['uuid'])
+        sc.delete(server1['uuid'])
+        sc.delete(server2['uuid'])
+
+        with self.dump_response('tags_delete'):
+            self.client.delete(tag1['uuid'])
+        self.client.delete(tag2['uuid'])
diff --git a/pycloudsigma-master/src/testing/acceptance/test_websocket.py b/pycloudsigma-master/src/testing/acceptance/test_websocket.py
new file mode 100644
index 0000000..ee2dd93
--- /dev/null
+++ b/pycloudsigma-master/src/testing/acceptance/test_websocket.py
@@ -0,0 +1,59 @@
+import unittest
+from nose.plugins.attrib import attr
+from cloudsigma import resource
+from cloudsigma import errors
+
+
+@attr('acceptance_test')
+class WebsocketTest(unittest.TestCase):
+    """Acceptance tests for websocket update notifications on resources."""
+
+    def setUp(self):
+        super(WebsocketTest, self).setUp()
+
+    def test_drive(self):
+        """Create and delete a drive, waiting for state changes over websocket."""
+        ws = resource.Websocket()
+        d = resource.Drive().create({"size": 1000 ** 3, "name": "", "media": "disk"})
+        ret = ws.wait_obj_wrapper(ws.wait_obj_uri, (d['resource_uri'], resource.Drive), timeout=30,
+                                  extra_filter=lambda x: x['status'] == 'unmounted')
+        resource.Drive().delete(d['uuid'])
+        # After deletion the wait is expected to end with a 404 client error;
+        # the always-False filter keeps waiting until that error arrives.
+        try:
+            ret = ws.wait_obj_wrapper(ws.wait_obj_uri, (d['resource_uri'], resource.Drive), timeout=30,
+                                      extra_filter=lambda x: False)
+        except errors.ClientError as e:
+            if e.args[0] != 404:
+                raise
+
+    def test_guest(self):
+        """Walk a server through stopped -> running -> stopped -> deleted."""
+        ws = resource.Websocket()
+        g = resource.Server().create({"cpu": 1000, "name": "", "mem": 256 * 1024 ** 2, "vnc_password": "foo"})
+        ret = ws.wait_obj_wrapper(ws.wait_obj_uri, (g['resource_uri'], resource.Server), timeout=30,
+                                  extra_filter=lambda x: x['status'] == 'stopped')
+        resource.Server().start(g['uuid'])
+        ret = ws.wait_obj_wrapper(ws.wait_obj_uri, (g['resource_uri'], resource.Server), timeout=30,
+                                  extra_filter=lambda x: x['status'] == 'running')
+        resource.Server().stop(g['uuid'])
+        ret = ws.wait_obj_wrapper(ws.wait_obj_uri, (g['resource_uri'], resource.Server), timeout=30,
+                                  extra_filter=lambda x: x['status'] == 'stopped')
+        resource.Server().delete(g['uuid'])
+        # Deletion should surface as a 404 from the wait; anything else is a failure.
+        try:
+            g = ws.wait_obj_wrapper(ws.wait_obj_uri, (ret['resource_uri'], resource.Server), timeout=30,
+                                    extra_filter=lambda x: False)
+        except errors.ClientError as e:
+            if e.args[0] != 404:
+                raise
+
+    def test_guest_drive(self):
+        """Attach a drive to a server and wait for the update notification."""
+        ws = resource.Websocket()
+        g = resource.Server().create({"cpu": 1000, "name": "", "mem": 256 * 1024 ** 2, "vnc_password": "foo"})
+        ret = ws.wait_obj_wrapper(ws.wait_obj_uri, (g['resource_uri'], resource.Server), timeout=30,
+                                  extra_filter=lambda x: x['status'] == 'stopped')
+        d = resource.Drive().create({"size": 1000 ** 3, "name": "", "media": "disk"})
+        ret = ws.wait_obj_wrapper(ws.wait_obj_uri, (d['resource_uri'], resource.Drive), timeout=30,
+                                  extra_filter=lambda x: x['status'] == 'unmounted')
+
+        # Attaching the drive triggers a server update event on the websocket.
+        resource.Server().update(g['uuid'], {"cpu": 1000, "name": "", "mem": 256 * 1024 ** 2, "vnc_password": "foo",
+                                             "drives": [
+                                                 {"dev_channel": "0:0", "device": "virtio", "drive": d['uuid']}]})
+        ws.wait_obj_uri(g['resource_uri'], resource.Server)
+        resource.Drive().delete(d['uuid'])
+        resource.Server().delete(g['uuid'])
+
diff --git a/pycloudsigma-master/src/testing/templates/__init__.py b/pycloudsigma-master/src/testing/templates/__init__.py
new file mode 100644
index 0000000..ac6f33b
--- /dev/null
+++ b/pycloudsigma-master/src/testing/templates/__init__.py
@@ -0,0 +1,6 @@
+__author__ = 'islavov'
+import os
+
+def get_template(template_name):
+ with open(os.path.join(os.path.dirname(__file__), template_name), 'r') as tpl:
+ return tpl.read()
diff --git a/pycloudsigma-master/src/testing/templates/request_template b/pycloudsigma-master/src/testing/templates/request_template
new file mode 100644
index 0000000..e312036
--- /dev/null
+++ b/pycloudsigma-master/src/testing/templates/request_template
@@ -0,0 +1,5 @@
+{reqres.method} {path_url} HTTP/1.1
+Content-Type: {content_type}
+Authorization: Basic SWYgeW91IGZvdW5kIHRoaXMsIGhhdmUgYSBjb29raWUsIHlvdSBkZXNlcnZlIGl0IDop
+
+{data}
diff --git a/pycloudsigma-master/src/testing/templates/response_template b/pycloudsigma-master/src/testing/templates/response_template
new file mode 100644
index 0000000..e7a2c3e
--- /dev/null
+++ b/pycloudsigma-master/src/testing/templates/response_template
@@ -0,0 +1,4 @@
+HTTP/1.1 {reqres.status_code} {reqres.reason}
+Content-Type: {content_type}
+
+{data}
diff --git a/pycloudsigma-master/src/testing/utils.py b/pycloudsigma-master/src/testing/utils.py
new file mode 100644
index 0000000..3259426
--- /dev/null
+++ b/pycloudsigma-master/src/testing/utils.py
@@ -0,0 +1,137 @@
+import urllib
+from cloudsigma.conf import config
+import simplejson
+import logging
+import os
+import urlparse
+from testing.templates import get_template
+
+__author__ = 'islavov'
+
+LOG = logging.getLogger(__name__)
+
+
+class ResponseDumper(object):
+
+ def __init__(self, name=None, suffix=None, dump_path=None, req_data_filter=None, resp_data_filter=None):
+ self.name = name
+ self.suffix = suffix
+ self.tmp_name = None
+ self.req_data_filter = req_data_filter
+ self.resp_data_filter = resp_data_filter
+
+ # If dump path not found/derived,
+ if dump_path is None and config.get('dump_path') is not None:
+ self.dump_path = os.path.join(os.path.expanduser(config['dump_path']))
+ else:
+ self.dump_path = dump_path
+
+ def __call__(self, resp, *args, **kwargs):
+
+ if self.dump_path is None:
+ return
+
+ if not os.path.exists(self.dump_path):
+ LOG.debug("Creating samples path - {}".format(self.dump_path))
+ os.makedirs(self.dump_path)
+
+ if not resp.ok:
+ LOG.error('Response not OK for dump - {}'.format(resp.text))
+ return
+
+ fname = self.get_filename(resp)
+
+ with open(os.path.join(self.dump_path, "request_{}".format(fname, )), "w") as fl:
+ LOG.info("Dumping request to {}".format(fl.name))
+ if self.req_data_filter:
+ data = self.resp_data_filter(resp.request.body)
+ else:
+ data = resp.request.body
+ data = data or ''
+ fl.write(self.get_populated_template(
+ "request_template",
+ resp.request,
+ data,
+ path_url=urllib.unquote(resp.request.path_url)))
+
+ with open(os.path.join(self.dump_path, "response_{}".format(fname)), "w") as fl:
+ LOG.info("Dumping response to {}".format(fl.name))
+ if self.resp_data_filter:
+ LOG.info("Filtering response data")
+ data = self.resp_data_filter(resp.content)
+ else:
+ data = resp.content
+ fl.write(self.get_populated_template("response_template", resp, data))
+
+ self.tmp_name = None
+
+ def get_filename(self, resp):
+ url = urlparse.urlparse(resp.request.path_url)
+ path_arr = [segment for segment in url.path.split('/') if segment]
+
+ if self.tmp_name:
+ return self.tmp_name
+ elif self.name:
+ fname = self.name
+ else:
+ fname = "{}_api_{}_{}".format(resp.request.method, path_arr[1], path_arr[2])
+ if len(path_arr) > 3:
+ check = path_arr[3]
+ if check == 'detail':
+ fname += "_list_detail"
+ else:
+ fname += "_detail"
+ if path_arr[4:]:
+ fname += "_" + "_".join(path_arr[4:])
+
+ if url.query:
+ query_tuple = urlparse.parse_qsl(url.query)
+ for key, val in sorted(query_tuple):
+ if key not in ['limit', 'format']:
+ fname += "_{}_{}".format(key, val)
+
+ if self.suffix:
+ fname += "_{}".format(self.suffix)
+
+ return fname
+
+ def get_populated_template(self, template, reqres, data=None, **kwargs):
+ if data is not None:
+ try:
+ data = simplejson.dumps(simplejson.loads(data), sort_keys=True, indent=4)
+ except:
+ data = ""
+ return get_template(template).format(
+ reqres=reqres,
+ content_type=reqres.headers.get('content-type'),
+ data=data,
+ **kwargs
+ )
+
+
+class DumpResponse(object):
+
+ def __init__(self, clients=[], *args, **kwargs):
+ self.clients = clients
+ self.response_dump = ResponseDumper(*args, **kwargs)
+
+ def __call__(self, tmp_name=None):
+ if tmp_name is not None:
+ self.response_dump.tmp_name = tmp_name
+ return self
+
+ def __enter__(self):
+ for client in self.clients:
+ client.attach_response_hook(self.response_dump)
+ return self
+
+ def __exit__(self, exc_type, exc_val, exc_tb):
+ for client in self.clients:
+ client.detach_response_hook()
+ self.response_dump.tmp_name = None
+
+ def set_tmp_name(self, val):
+ """
+ Sets a temporary name for the dump. Dropped after the response is returned.
+ """
+ self.response_dump.tmp_name = val