diff -pruN 0.25.2+20140217+git2a90c1a2eb-1/ChangeLog 0.35-0ubuntu1/ChangeLog
--- 0.25.2+20140217+git2a90c1a2eb-1/ChangeLog	2014-02-16 18:10:17.000000000 +0000
+++ 0.35-0ubuntu1/ChangeLog	2016-03-10 15:59:22.000000000 +0000
@@ -1,3 +1,126 @@
+2016-03-10 Juan J. Martinez <juan@memset.com>
+
+ * 0.35
+ - Fix in large object support for swiftclient > 2.6.0.
+ - Added swiftclient version to the FTP banner.
+
+2015-08-20 Juan J. Martinez <juan@memset.com>
+
+ * 0.34
+ - Fixed a bug introduced in 0.33 that was preventing 0 byte files from
+   being created. Thanks to Vil for the patch.
+
+2015-07-08 Juan J. Martinez <juan@memset.com>
+
+ * 0.33
+ - Fixed a bug in split file support that failed to propagate the
+   "insecure" option for self-signed certificates. Thanks to "triton7"
+   for the patch.
+ - Delayed opening the connection to Swift when a file is uploaded to avoid
+   connection timeouts in certain conditions. Thanks to Vil for the patch.
+
+2015-02-11 Juan J. Martinez <juan@memset.com>
+
+ * 0.32
+ - Fixes to hide-part-dir by John Leach to support any segment format.
+
+2014-12-11 Juan J. Martinez <juan@memset.com>
+
+ * 0.31
+ - New permit-foreign-addresses configuration token to expose that
+   functionality from pyftpdlib. This is useful when the control
+   connection is proxied. Check the example configuration file for
+   more information. Thanks to Koert van der Veer for the patch.
+ - swiftclient requirement updated to be >= 2.1.0.
+ - Tokens won't be reused in large files support to avoid broken
+   uploads because of expired tokens.
+ - Fixed a bug in large files support with UTF-8 encoded filenames.
+ - Added error translation in two methods of the file emulation. Accessing
+   a nonexistent file resulted in an unhandled exception instead of an FTP
+   error being sent to the client. Thanks to John Leach for the report.
+ - A --config command line option has been added to specify an alternative
+   configuration file that will be used instead of the default one in /etc.
+
+2014-10-06 Juan J. Martinez <juan@memset.com>
+
+ * 0.30
+ - Migrated to swiftclient >= 2.0.0 and requests. This means older
+   versions of the library are not supported, so please upgrade.
+ - Added new configuration token "insecure" to allow connections to
+   auth servers with invalid SSL certs (eg, self-signed certificates).
+
+   This is a potentially disruptive release, please test before upgrading
+   your production systems!
+
+2014-09-19 Juan J. Martinez <juan@memset.com>
+
+ * 0.29
+ - Fixed large file support to handle non-ASCII encodings.
+
+   Thanks to Édouard Puginier for the report!
+
+2014-08-12 Juan J. Martinez <juan@memset.com>
+
+ * 0.28.1
+ - Fixed a bug in syslog logging.
+
+2014-08-12 Juan J. Martinez <juan@memset.com>
+
+ * 0.28
+ - Better swift connection management, avoiding CLOSE_WAIT problems
+   with long-lived FTP sessions.
+ - Added new configuration token "rackspace-service-network" to support
+   Rackspace's service network as implemented in swiftclient.
+
+2014-05-21 Juan J. Martinez <juan@memset.com>
+
+ * 0.27
+ - Added new configuration token "passive-ports" to expose pyftpdlib's
+   functionality. Check ftpcloudfs.conf.example for details.
+ - Explicitly close connections instead of relying on the garbage
+   collector. Some compatibility code has been added for those using
+   python-swiftclient < 1.9.0.
+
+2014-05-14 Juan J. Martinez <juan@memset.com>
+
+ * 0.26.2
+ - Changed setup.py to force swiftclient 1.9.0 as version 2.x uses
+   Requests and we're currently incompatible.
+
+2014-04-10 Juan J. Martinez <juan@memset.com>
+
+ * 0.26.1
+ - Fixed a bug in token cache code with auth 2.0 (keystone) when used
+   with memcache. The same username in different tenants could get the
+   same auth token.
+
+   This bug was introduced in 0.24.
+
+   Thanks to Igor Belikov for the report and the patch!
+
+ - Auth 2.0 support in tests has been improved. 
+
+2014-03-20 Juan J. Martinez <juan@memset.com>
+
+ * 0.26
+  This release improves large file support:
+  - Large file rename (manifest only, not the parts).
+  - Delete large files (manifest and parts are removed).
+  - Hide ".part" directory in directory listings.
+  - Keystone auth support in tests.
+
+   Thanks to Sokolov Ilya for his contributions to this release!
+
+2014-02-20 Juan J. Martinez <juan@memset.com>
+
+ * 0.25.3
+  This is a small bug-fix release:
+  - Added "requests" library support with python-swiftclient >= 2.0.2.
+    Thanks to Chmouel Boudjnah for the patch!
+  - Fixed a small issue with directory listings and swift 1.9.1, thanks
+    to Pedro Perez for the patch!
+  - Copyright year bump
+
 2013-12-01 Juan J. Martinez <juan@memset.com>
 
  * 0.25.1, 0.25.2
diff -pruN 0.25.2+20140217+git2a90c1a2eb-1/debian/changelog 0.35-0ubuntu1/debian/changelog
--- 0.25.2+20140217+git2a90c1a2eb-1/debian/changelog	2014-02-20 04:14:07.000000000 +0000
+++ 0.35-0ubuntu1/debian/changelog	2017-02-21 15:29:45.000000000 +0000
@@ -1,3 +1,12 @@
+ftp-cloudfs (0.35-0ubuntu1) zesty; urgency=medium
+
+  * d/gbp.conf: Update gbp configuration file.
+  * d/control: Update Vcs-* links and maintainers.
+  * New upstream release. 
+  * d/watch: Point to pypi to fetch tarball.
+
+ -- Chuck Short <zulcss@ubuntu.com>  Tue, 21 Feb 2017 10:25:42 -0500
+
 ftp-cloudfs (0.25.2+20140217+git2a90c1a2eb-1) unstable; urgency=medium
 
   * Releasing a few fixes, including one for the latest version of swiftclient
diff -pruN 0.25.2+20140217+git2a90c1a2eb-1/debian/control 0.35-0ubuntu1/debian/control
--- 0.25.2+20140217+git2a90c1a2eb-1/debian/control	2014-02-20 04:14:07.000000000 +0000
+++ 0.35-0ubuntu1/debian/control	2017-02-21 15:29:45.000000000 +0000
@@ -1,7 +1,8 @@
 Source: ftp-cloudfs
 Section: net
 Priority: extra
-Maintainer: PKG OpenStack <openstack-devel@lists.alioth.debian.org>
+Maintainer: Ubuntu Developers <ubuntu-devel-discuss@lists.ubuntu.com>
+XSBC-Original-Maintainer: PKG OpenStack <openstack-devel@lists.alioth.debian.org>
 Uploaders: Loic Dachary (OuoU) <loic@debian.org>,
            Julien Danjou <acid@debian.org>,
            Thomas Goirand <zigo@debian.org>,
@@ -12,8 +13,8 @@ Build-Depends: debhelper (>= 9),
                python-all (>= 2.6.6-3~),
                python-setuptools
 Standards-Version: 3.9.5
-Vcs-Browser: http://anonscm.debian.org/gitweb/?p=openstack/ftp-cloudfs.git
-Vcs-Git: git://anonscm.debian.org/openstack/ftp-cloudfs.git
+Vcs-Browser: https://git.launchpad.net/~ubuntu-server-dev/ubuntu/+source/ftp-cloudfs
+Vcs-Git: git://git.launchpad.net/~ubuntu-server-dev/ubuntu/+source/ftp-cloudfs
 Homepage: https://github.com/cloudfs/ftp-cloudfs
 
 Package: ftp-cloudfs
diff -pruN 0.25.2+20140217+git2a90c1a2eb-1/debian/gbp.conf 0.35-0ubuntu1/debian/gbp.conf
--- 0.25.2+20140217+git2a90c1a2eb-1/debian/gbp.conf	2014-02-20 04:14:07.000000000 +0000
+++ 0.35-0ubuntu1/debian/gbp.conf	2017-02-21 15:29:45.000000000 +0000
@@ -1,8 +1,7 @@
 [DEFAULT]
-upstream-branch = master
-debian-branch = debian/unstable
+debian-branch = master
 upstream-tag = %(version)s
-compression = xz
+pristine-tar = True
 
-[git-buildpackage]
-export-dir = ../build-area/
+[buildpackage]
+export-dir = ../build-area
diff -pruN 0.25.2+20140217+git2a90c1a2eb-1/debian/watch 0.35-0ubuntu1/debian/watch
--- 0.25.2+20140217+git2a90c1a2eb-1/debian/watch	2014-02-20 04:14:07.000000000 +0000
+++ 0.35-0ubuntu1/debian/watch	2017-02-21 15:29:45.000000000 +0000
@@ -1,2 +1,3 @@
 version=3
-https://github.com/cloudfs/ftp-cloudfs/tags .*/(\d[\d\.]+)\.tar\.gz
+opts=uversionmangle=s/(rc|a|b|c)/~$1/ \
+https://pypi.debian.net/ftp-cloudfs/ftp-cloudfs-(.+)\.(?:zip|tgz|tbz|txz|(?:tar\.(?:gz|bz2|xz)))
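
For reference, the uversionmangle rule above rewrites upstream pre-release suffixes (rc/a/b/c) into tilde versions so Debian's version ordering places them before the final release. A minimal sketch of the same substitution in Python, using a made-up version string:

    import re

    # Hypothetical upstream version, for illustration only.
    upstream_version = "0.35rc1"

    # Equivalent of uversionmangle=s/(rc|a|b|c)/~$1/ from debian/watch.
    mangled = re.sub(r"(rc|a|b|c)", r"~\1", upstream_version, count=1)
    print(mangled)  # 0.35~rc1, which sorts before 0.35 in Debian version ordering
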
diff -pruN 0.25.2+20140217+git2a90c1a2eb-1/ftpcloudfs/chunkobject.py 0.35-0ubuntu1/ftpcloudfs/chunkobject.py
--- 0.25.2+20140217+git2a90c1a2eb-1/ftpcloudfs/chunkobject.py	2014-02-16 18:10:17.000000000 +0000
+++ 0.35-0ubuntu1/ftpcloudfs/chunkobject.py	2015-08-20 10:00:00.000000000 +0000
@@ -1,6 +1,7 @@
 
 import logging
 from urllib import quote
+from httplib import HTTPException
 from socket import timeout
 from ssl import SSLError
 from swiftclient.client import ClientException, http_connection
@@ -9,46 +10,71 @@ from ftpcloudfs.utils import smart_str
 
 class ChunkObject(object):
 
-    def __init__(self, conn, container, name, content_type=None):
-        # FIXME
-        # self._name_check()
-
-        parsed, self.chunkable_http = http_connection(conn.url)
-
-        logging.debug("ChunkObject: new connection open (%r, %r)" % (parsed, self.chunkable_http))
-
-        path = '%s/%s/%s' % (parsed.path.rstrip('/'),
-                             quote(smart_str(container)),
-                             quote(smart_str(name)),
-                             )
-        headers = { 'X-Auth-Token': conn.token,
-                    'Content-Type': content_type or 'application/octet-stream',
-                    'Transfer-Encoding': 'chunked',
-                    # User-Agent ?
-                    }
+    def __init__(self, conn, container, name, content_type=None, reuse_token = True):
+        self.raw_conn = None
+
+        if reuse_token:
+            self.url = conn.url
+            token = conn.token
+        else:
+            self.url, token = conn.get_auth()
+        self.parsed, self.conn = http_connection(self.url)
+
+        self.path = '%s/%s/%s' % (self.parsed.path.rstrip('/'),
+                                  quote(smart_str(container)),
+                                  quote(smart_str(name)),
+                                  )
+        self.headers = { 'X-Auth-Token': token,
+                         'Content-Type': content_type or 'application/octet-stream',
+                         'Transfer-Encoding': 'chunked',
+                         'Connection': 'close',
+                         # User-Agent ?
+                         }
         if conn.real_ip:
-            headers['X-Forwarded-For'] = conn.real_ip
-        self.chunkable_http.putrequest('PUT', path)
-        for key, value in headers.iteritems():
-            self.chunkable_http.putheader(key, value)
-        self.chunkable_http.endheaders()
-        logging.debug("ChunkedObject: path=%r, headers=%r" % (path, headers))
+            self.headers['X-Forwarded-For'] = conn.real_ip
+
+        logging.debug("ChunkedObject: path=%r, headers=%r" % (self.path, self.headers))
+
+        self.already_sent = 0
+
+    def _open_connection(self):
+        logging.debug("ChunkObject: new connection open (%r, %r)" % (self.parsed, self.conn))
+
+        # we can't use the generator interface offered by requests to do a
+        # chunked transfer encoded PUT, so we do this to get control over the
+        # "real" http connection and do the HTTP request ourselves
+        self.raw_conn = self.conn.request_session.get_adapter(self.url).get_connection(self.url)._get_conn()
+
+        self.raw_conn.putrequest('PUT', self.path, skip_accept_encoding=True)
+        for key, value in self.headers.iteritems():
+            self.raw_conn.putheader(key, value)
+        self.raw_conn.endheaders()
 
     def send_chunk(self, chunk):
+        if self.raw_conn is None:
+            self._open_connection()
+
         logging.debug("ChunkObject: sending %s bytes" % len(chunk))
         try:
-            self.chunkable_http.send("%X\r\n" % len(chunk))
-            self.chunkable_http.send(chunk)
-            self.chunkable_http.send("\r\n")
-        except (timeout, SSLError), err:
+            self.raw_conn.send("%X\r\n" % len(chunk))
+            self.raw_conn.send(chunk)
+            self.raw_conn.send("\r\n")
+        except (timeout, SSLError, HTTPException), err:
             raise ClientException(err.message)
+        else:
+            self.already_sent += len(chunk)
+            logging.debug("ChunkObject: already sent %s bytes" % self.already_sent)
 
     def finish_chunk(self):
+        if self.raw_conn is None:
+            self._open_connection()
+
         logging.debug("ChunkObject: finish_chunk")
         try:
-            self.chunkable_http.send("0\r\n\r\n")
-            response = self.chunkable_http.getresponse()
-        except (timeout, SSLError), err:
+            self.raw_conn.send("0\r\n\r\n")
+            response = self.raw_conn.getresponse()
+        except (timeout, SSLError, HTTPException), err:
+            self.raw_conn.close()
             raise ClientException(err.message)
 
         try:
@@ -57,6 +83,10 @@ class ChunkObject(object):
             # this is not relevant, keep going
             pass
 
+        # we always close the connection
+        self.raw_conn.close()
+        self.conn.request_session.close()
+
         if response.status // 100 != 2:
             raise ClientException(response.reason,
                                   http_status=response.status,
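
For context on the ChunkObject changes above: send_chunk() and finish_chunk() implement HTTP/1.1 chunked transfer encoding by hand on the raw connection, framing each chunk as a hexadecimal size line, the payload and a CRLF, and terminating the body with a zero-sized chunk. A standalone sketch of that framing (not the ChunkObject API itself):

    def frame_chunk(chunk):
        # One chunked-encoding frame: hex size, CRLF, payload, CRLF.
        return "%X\r\n%s\r\n" % (len(chunk), chunk)

    def last_chunk():
        # A zero-sized chunk terminates the chunked body.
        return "0\r\n\r\n"

    # Two data chunks followed by the terminator, as sent over the wire:
    body = frame_chunk("hello ") + frame_chunk("world") + last_chunk()
    # "6\r\nhello \r\n5\r\nworld\r\n0\r\n\r\n"
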
diff -pruN 0.25.2+20140217+git2a90c1a2eb-1/ftpcloudfs/constants.py 0.35-0ubuntu1/ftpcloudfs/constants.py
--- 0.25.2+20140217+git2a90c1a2eb-1/ftpcloudfs/constants.py	2014-02-16 18:10:17.000000000 +0000
+++ 0.35-0ubuntu1/ftpcloudfs/constants.py	2016-03-10 15:59:32.000000000 +0000
@@ -1,6 +1,6 @@
-version = '0.25.2'
+version = '0.35'
 
-default_banner = "ftp-cloudfs %v using pyftpdlib %f ready."
+default_banner = "ftp-cloudfs %v using pyftpdlib %f (swiftclient %s) ready."
 default_config_file = '/etc/ftpcloudfs.conf'
 default_address = '127.0.0.1'
 default_port = 2021
diff -pruN 0.25.2+20140217+git2a90c1a2eb-1/ftpcloudfs/fs.py 0.35-0ubuntu1/ftpcloudfs/fs.py
--- 0.25.2+20140217+git2a90c1a2eb-1/ftpcloudfs/fs.py	2014-02-16 18:10:17.000000000 +0000
+++ 0.35-0ubuntu1/ftpcloudfs/fs.py	2016-03-10 11:28:08.000000000 +0000
@@ -12,12 +12,13 @@ import time
 import mimetypes
 import stat
 import logging
+from urllib import unquote
 from errno import EPERM, ENOENT, EACCES, EIO, ENOTDIR, ENOTEMPTY
-from swiftclient.client import Connection, ClientException
+from swiftclient.client import Connection, ClientException, quote
 from chunkobject import ChunkObject
 from errors import IOSError
 import posixpath
-from utils import smart_str
+from utils import smart_str, smart_unicode
 from functools import wraps
 import memcache
 import multiprocessing
@@ -35,9 +36,6 @@ __all__ = ['ObjectStorageFS']
 class ProxyConnection(Connection):
     """
     Add X-Forwarded-For header to all requests.
-
-    Optionally if `range_from` is available it will be used to add a Range header
-    starting from it.
     """
 
     # max time to cache auth tokens (seconds), based on swift defaults
@@ -46,38 +44,43 @@ class ProxyConnection(Connection):
     def __init__(self, memcache, *args, **kwargs):
         self.memcache = memcache
         self.real_ip = None
-        self.range_from = None
         self.ignore_auth_cache = False
+        self.tenant_name = None
+        if kwargs.get('auth_version') == "2.0":
+            self.tenant_name = kwargs['tenant_name']
         super(ProxyConnection, self).__init__(*args, **kwargs)
 
     def http_connection(self):
         def request_wrapper(fn):
             @wraps(fn)
-            def request_x_forwarded_for(method, url, body=None, headers=None):
+            def request_header_injection(method, url, data=None, headers=None):
                 if headers is None:
                     headers = {}
                 if self.real_ip:
                     headers['X-Forwarded-For'] = self.real_ip
-                if self.range_from:
-                    headers['Range'] = "bytes=%s-" % self.range_from
-                    # only for one request
-                    self.range_from = None
-
-                if 'body' in fn.func_code.co_varnames:
-                    fn(method, url, body=body, headers=headers)
-                else:  # swiftclient 2.0, ported to Requests
-                    fn(method, url, data=body, headers=headers)
-            return request_x_forwarded_for
+                fn(method, url, data=data, headers=headers)
+            return request_header_injection
 
         parsed, conn = super(ProxyConnection, self).http_connection()
         conn.request = request_wrapper(conn.request)
 
         return parsed, conn
 
+    def close(self):
+        """Our own close that actually closes the connection"""
+        if self.http_conn and type(self.http_conn) is tuple and len(self.http_conn) > 1:
+            conn = self.http_conn[1]
+            if hasattr(conn, "request_session"):
+                conn.request_session.close()
+                self.http_conn = None
+            else:
+                super(ProxyConnection, self).close()
+
     def get_auth(self):
         """Perform the authentication using a token cache if memcache is available"""
         if self.memcache:
-            key = "tk%s" % md5("%s%s%s" % (self.authurl, self.user, self.key)).hexdigest()
+            tenant_name = self.tenant_name or "-"
+            key = "tk%s" % md5("%s%s%s%s" % (self.authurl, self.user, tenant_name, self.key)).hexdigest()
             cache = self.memcache.get(key)
             if not cache or self.ignore_auth_cache:
                 logging.debug("token cache miss, key=%s" % key)
@@ -119,6 +122,19 @@ def translate_objectstorage_error(fn):
             raise IOSError(err, msg)
     return wrapper
 
+def close_when_done(fn):
+    """
+    Decorator to close swift connection when the ftp command is done.
+    """
+    @wraps(fn)
+    def wrapper(obj, *args, **kwargs):
+        try:
+            return fn(obj, *args, **kwargs)
+        finally:
+            if obj.conn:
+                obj.close()
+    return wrapper
+
 def parse_fspath(path):
     """
     Returns a (container, path) tuple.
@@ -139,8 +155,8 @@ class ObjectStorageFD(object):
 
     split_size = 0
 
-    def __init__(self, cffs, container, obj, mode):
-        self.cffs = cffs
+    def __init__(self, connection, container, obj, mode):
+        self.conn = connection
         self.container = container
         self.name = obj
         self.mode = mode
@@ -171,16 +187,11 @@ class ObjectStorageFD(object):
 
     @property
     def part_base_name(self):
-        return u"%s.part" % self.name
+        return "%s.part" % self.name
 
     @property
     def part_name(self):
-        return u"%s/%.6d" % (self.part_base_name, self.part)
-
-    @property
-    def conn(self):
-        """Connection to the storage."""
-        return self.cffs.conn
+        return "%s/%.6d" % (self.part_base_name, self.part)
 
     def _start_copy_task(self):
         """
@@ -192,8 +203,9 @@ class ObjectStorageFD(object):
         """
         def copy_task(conn, container, name, part_name, part_base_name):
             # open a new connection
-            conn = ProxyConnection(None, preauthurl=conn.url, preauthtoken=conn.token)
-            headers = { 'x-copy-from': "/%s/%s" % (container, name) }
+            url, token = conn.get_auth()
+            conn = ProxyConnection(None, preauthurl=url, preauthtoken=token, insecure=conn.insecure)
+            headers = { 'x-copy-from': quote("/%s/%s" % (container, name)) }
             logging.debug("copying first part %r/%r, %r" % (container, part_name, headers))
             try:
                 conn.put_object(container, part_name, headers=headers, contents=None)
@@ -201,7 +213,7 @@ class ObjectStorageFD(object):
                 logging.error("Failed to copy %s: %s" % (name, ex.http_reason))
                 sys.exit(1)
             # setup the manifest
-            headers = { 'x-object-manifest': "%s/%s" % (container, part_base_name) }
+            headers = { 'x-object-manifest': quote("%s/%s" % (container, part_base_name)) }
             logging.debug("creating manifest %r/%r, %r" % (container, name, headers))
             try:
                 conn.put_object(container, name, headers=headers, contents=None)
@@ -209,6 +221,7 @@ class ObjectStorageFD(object):
                 logging.error("Failed to store the manifest %s: %s" % (name, ex.http_reason))
                 sys.exit(1)
             logging.debug("copy task done")
+            conn.close()
         self.pending_copy_task = multiprocessing.Process(target=copy_task,
                                                          args=(self.conn,
                                                                self.container,
@@ -237,7 +250,7 @@ class ObjectStorageFD(object):
                     current_size = len(data)-offs
                 self.part_size += current_size
                 if not self.obj:
-                    self.obj = ChunkObject(self.conn, self.container, self.part_name, content_type=self.content_type)
+                    self.obj = ChunkObject(self.conn, self.container, self.part_name, content_type=self.content_type, reuse_token=False)
                 self.obj.send_chunk(data[offs:offs+current_size])
                 offs += current_size
                 if self.part_size == self.split_size:
@@ -256,17 +269,20 @@ class ObjectStorageFD(object):
     @translate_objectstorage_error
     def close(self):
         """Close the object and finish the data transfer."""
-        if 'r' in self.mode:
-            return
-        if self.pending_copy_task:
-            logging.debug("waiting for a pending copy task...")
-            self.pending_copy_task.join()
-            logging.debug("wait is over")
-            if self.pending_copy_task.exitcode != 0:
-                raise IOSError(EIO, 'Failed to store the file')
-        if self.obj is not None:
-            self.obj.finish_chunk()
+        if 'r' not in self.mode:
+            if self.pending_copy_task:
+                logging.debug("waiting for a pending copy task...")
+                self.pending_copy_task.join()
+                logging.debug("wait is over")
+                if self.pending_copy_task.exitcode != 0:
+                    raise IOSError(EIO, 'Failed to store the file')
+            if self.obj is not None:
+                self.obj.finish_chunk()
+        self.obj = None
+        self.closed = True
+        self.conn.close()
 
+    @translate_objectstorage_error
     def read(self, size=65536):
         """
         Read data from the object.
@@ -276,16 +292,13 @@ class ObjectStorageFD(object):
         NB: It uses the size passed into the first call for all subsequent calls.
         """
         if self.obj is None:
+            headers = { }
             if self.total_size > 0:
-                self.conn.range_from = self.total_size
-                # we need to open a new connection to inject the `Range` header
-                if self.conn.http_conn:
-                    self.conn.http_conn[1].close()
-                    self.conn.http_conn = None
-            _, self.obj = self.conn.get_object(self.container, self.name, resp_chunk_size=size)
+                headers["Range"] = "bytes=%d-" % self.total_size
+            _, self.obj = self.conn.get_object(self.container, self.name, resp_chunk_size=size, headers=headers)
 
         logging.debug("read size=%r, total_size=%r (range_from: %s)" % (size,
-                self.total_size, self.conn.range_from))
+                self.total_size, self.total_size))
 
         try:
             buff = self.obj.next()
@@ -295,6 +308,7 @@ class ObjectStorageFD(object):
         else:
             return buff
 
+    @translate_objectstorage_error
     def seek(self, offset, whence=None):
         """
         Seek in the object.
@@ -378,9 +392,10 @@ class ListDirCache(object):
 
     def key(self, index):
         """Returns a key for a user distributed cache."""
-        logging.debug("cache key for %r" % [self.cffs.authurl, self.cffs.username, index])
+        tenant_name = self.cffs.tenant_name or "-"
+        logging.debug("cache key for %r" % [self.cffs.authurl, self.cffs.username, tenant_name, index])
         if not hasattr(self, "_key_base"):
-            self._key_base = md5("%s%s" % (self.cffs.authurl, self.cffs.username)).hexdigest()
+            self._key_base = md5("%s%s%s" % (self.cffs.authurl, self.cffs.username, tenant_name)).hexdigest()
         return "%s-%s" % (self._key_base, md5(smart_str(index)).hexdigest())
 
     def flush(self, path=None):
@@ -451,17 +466,34 @@ class ListDirCache(object):
             objects.extend(newobjects)
         logging.debug("total number of objects %s:" % len(objects))
 
+        if self.cffs.hide_part_dir:
+            manifests = {}
+
         for obj in objects:
             # {u'bytes': 4820,  u'content_type': '...',  u'hash': u'...',  u'last_modified': u'2008-11-05T00:56:00.406565',  u'name': u'new_object'},
             if 'subdir' in obj:
                 # {u'subdir': 'dirname'}
                 obj['name'] = obj['subdir'].rstrip("/")
+
+                # If a manifest and its segment directory have the
+                # same name then we have to choose which we want to
+                # show, we can't show both. So we choose to keep the
+                # manifest if hide_part_dir is enabled.
+                #
+                # We can do this here because swift returns objects in
+                # alphabetical order so the manifest will come before
+                # its segments.
+                if self.cffs.hide_part_dir and obj['name'] in manifests:
+                    logging.debug("Not adding subdir %s which would overwrite manifest" % obj['name'])
+                    continue
             elif obj.get('bytes') == 0 and obj.get('hash') and obj.get('content_type') != 'application/directory':
                 # if it's a 0 byte file, has a hash and is not a directory, we make an extra call
                 # to check if it's a manifest file and retrieve the real size / hash
                 manifest_obj = self.conn.head_object(container, obj['name'])
                 logging.debug("possible manifest file: %r" % manifest_obj)
                 if 'x-object-manifest' in manifest_obj:
+                    if self.cffs.hide_part_dir:
+                        manifests[obj['name']] = smart_unicode(unquote(manifest_obj['x-object-manifest']), "utf-8")
                     logging.debug("manifest found: %s" % manifest_obj['x-object-manifest'])
                     obj['hash'] = manifest_obj['etag']
                     obj['bytes'] = int(manifest_obj['content-length'])
@@ -470,6 +502,18 @@ class ListDirCache(object):
             name = posixpath.basename(obj['name']).encode("utf-8")
             cache[name] = self._make_stat(**obj)
 
+        if self.cffs.hide_part_dir:
+            for manifest in manifests:
+                manifest_container, manifest_obj = parse_fspath('/' + manifests[manifest])
+                if manifest_container == container:
+                    for cache_obj in cache.copy():
+                        # hide any manifest segments, but not the manifest itself, if it
+                        # happens to share a prefix with its segments.
+                        if unicode(unquote(cache_obj), "utf-8") != manifest and \
+                           unicode(unquote(os.path.join(path, cache_obj)), "utf-8").startswith(manifest_obj):
+                            logging.debug("hiding manifest %r segment %r" % (manifest, cache_obj))
+                            del cache[cache_obj]
+
     def listdir_root(self, cache):
         """Fills cache with the list of containers"""
         logging.debug("listdir root")
@@ -592,7 +636,7 @@ class ObjectStorageFS(object):
     memcache_hosts = None
 
     @translate_objectstorage_error
-    def __init__(self, username, api_key, authurl, keystone=None):
+    def __init__(self, username, api_key, authurl, keystone=None, hide_part_dir=False, snet=False, insecure=False):
         """
         Create the Object Storage connection.
 
@@ -600,10 +644,16 @@ class ObjectStorageFS(object):
         api_key
         authurl
         keystone - optional for auth 2.0 (keystone)
+        hide_part_dir - optional, hide the multipart .part directory in listings
+        snet - optional, use Rackspace's service network
+        insecure - optional, allow using servers without checking their SSL certs
         """
         self.conn = None
         self.authurl = authurl
         self.keystone = keystone
+        self.hide_part_dir = hide_part_dir
+        self.snet = snet
+        self.insecure = insecure
         # A cache to hold the information from the last listdir
         self._listdir_cache = ListDirCache(self)
         self._cwd = '/'
@@ -616,13 +666,12 @@ class ObjectStorageFS(object):
         if not username or not api_key:
             raise ClientException("username/password required", http_status=401)
 
-        kwargs = dict(authurl=self.authurl, auth_version="1.0")
+        kwargs = dict(authurl=self.authurl, auth_version="1.0", snet=self.snet)
+        tenant_name = None
 
         if self.keystone:
             if self.keystone['tenant_separator'] in username:
                 tenant_name, username = username.split(self.keystone['tenant_separator'], 1)
-            else:
-                tenant_name = None
 
             logging.debug("keystone authurl=%r username=%r tenant_name=%r conf=%r" % (self.authurl, username, tenant_name, self.keystone))
 
@@ -637,17 +686,21 @@ class ObjectStorageFS(object):
         self.conn = ProxyConnection(self._listdir_cache.memcache,
                                     user=username,
                                     key=api_key,
+                                    insecure=self.insecure,
                                     **kwargs
                                     )
         # force authentication
         self.conn.url, self.conn.token = self.conn.get_auth()
-        self.conn.http_conn = None
+        self.conn.close()
         # now we are authenticated and we have an username
         self.username = username
+        self.tenant_name = tenant_name
 
     def close(self):
-        """Dummy function which does nothing - no need to close"""
-        pass
+        """Explicitly close the connection, although it may not be required"""
+        logging.debug("called fs.close()")
+        if self.conn:
+            self.conn.close()
 
     def isabs(self, path):
         """Test whether a path is absolute"""
@@ -673,6 +726,7 @@ class ObjectStorageFS(object):
         logging.debug(e)
         raise IOSError(EPERM, 'Operation not permitted: %s' % e)
 
+    @close_when_done
     @translate_objectstorage_error
     def open(self, path, mode):
         """Open path with mode, raise IOError on error"""
@@ -680,7 +734,7 @@ class ObjectStorageFS(object):
         logging.debug("open %r mode %r" % (path, mode))
         self._listdir_cache.flush(posixpath.dirname(path))
         container, obj = parse_fspath(path)
-        return ObjectStorageFD(self, container, obj, mode)
+        return ObjectStorageFD(self.conn, container, obj, mode)
 
     def chdir(self, path):
         """Change current directory, raise OSError on error"""
@@ -711,6 +765,7 @@ class ObjectStorageFS(object):
             raise
         return True
 
+    @close_when_done
     @translate_objectstorage_error
     def mkdir(self, path):
         """
@@ -731,6 +786,7 @@ class ObjectStorageFS(object):
             logging.debug("Making container %r" % (container,))
             self.conn.put_container(container)
 
+    @close_when_done
     @translate_objectstorage_error
     def listdir(self, path):
         """
@@ -743,6 +799,7 @@ class ObjectStorageFS(object):
         list_dir = map(lambda x: unicode(x, 'utf-8'), self._listdir_cache.listdir(path))
         return list_dir
 
+    @close_when_done
     @translate_objectstorage_error
     def listdir_with_stat(self, path):
         """
@@ -754,6 +811,7 @@ class ObjectStorageFS(object):
         logging.debug("listdir_with_stat %r" % path)
         return [(unicode(name, 'utf-8)'), stat) for name, stat in self._listdir_cache.listdir_with_stat(path)]
 
+    @close_when_done
     @translate_objectstorage_error
     def rmdir(self, path):
         """
@@ -782,6 +840,7 @@ class ObjectStorageFS(object):
             logging.debug("Removing container %r" % (container,))
             self.conn.delete_container(container)
 
+    @close_when_done
     @translate_objectstorage_error
     def remove(self, path):
         """
@@ -791,6 +850,7 @@ class ObjectStorageFS(object):
         """
         path = self.abspath(path)
         logging.debug("remove %r" % path)
+        logging.info("remove %r" % path)
         container, name = parse_fspath(path)
 
         if not name:
@@ -799,10 +859,19 @@ class ObjectStorageFS(object):
         if self.isdir(path):
             raise IOSError(EACCES, "Can't remove a directory (use rmdir instead)")
 
+        meta = self.conn.head_object(container, name)
+        if 'x-object-manifest' in meta:
+            self._remove_path_folder_files(u'/' + smart_unicode(unquote(meta['x-object-manifest']), "utf-8"))
         self.conn.delete_object(container, name)
         self._listdir_cache.flush(posixpath.dirname(path))
         return not name
 
+    def _remove_path_folder_files(self, path):
+        logging.info("Removing manifest file's parts from: %s" % path)
+        files = self.listdir(path)
+        for file in files:
+            self.remove(path + '/' + file)
+
     @translate_objectstorage_error
     def _rename_container(self, src_container_name, dst_container_name):
         """Rename src_container_name into dst_container_name"""
@@ -812,6 +881,7 @@ class ObjectStorageFS(object):
         self.conn.put_container(dst_container_name)
         self._listdir_cache.flush("/")
 
+    @close_when_done
     @translate_objectstorage_error
     def rename(self, src, dst):
         """
@@ -856,12 +926,17 @@ class ObjectStorageFS(object):
         if not self.isdir(posixpath.split(dst)[0]):
             raise IOSError(ENOENT, "Can't copy %r to %r, destination directory doesn't exist" % (src, dst))
 
-        # check src/dst containers
-        self._container_exists(src_container_name)
+        # check dst container
         self._container_exists(dst_container_name)
 
         # Do the rename of the file/dir
-        headers = { 'x-copy-from': "/%s/%s" % (src_container_name, src_path) }
+        meta = self.conn.head_object(src_container_name, src_path)
+        if 'x-object-manifest' in meta:
+            # a manifest file
+            headers = { 'x-object-manifest': quote(meta['x-object-manifest']) }
+        else:
+            # regular file
+            headers = { 'x-copy-from': quote("/%s/%s" % (src_container_name, src_path)) }
         self.conn.put_object(dst_container_name, dst_path, headers=headers, contents=None)
         # Delete src
         self.conn.delete_object(src_container_name, src_path)
@@ -942,6 +1017,7 @@ class ObjectStorageFS(object):
         except EnvironmentError:
             return False
 
+    @close_when_done
     @translate_objectstorage_error
     def stat(self, path):
         """
@@ -993,6 +1069,7 @@ class ObjectStorageFS(object):
         logging.debug(e)
         raise IOSError(EPERM, 'Operation not permitted: %s' % e)
 
+    @close_when_done
     @translate_objectstorage_error
     def md5(self, path):
         """
diff -pruN 0.25.2+20140217+git2a90c1a2eb-1/ftpcloudfs/.gitignore 0.35-0ubuntu1/ftpcloudfs/.gitignore
--- 0.25.2+20140217+git2a90c1a2eb-1/ftpcloudfs/.gitignore	2014-02-16 18:10:17.000000000 +0000
+++ 0.35-0ubuntu1/ftpcloudfs/.gitignore	1970-01-01 00:00:00.000000000 +0000
@@ -1,4 +0,0 @@
-*~
-\#*
-.#*
-*.pyc
diff -pruN 0.25.2+20140217+git2a90c1a2eb-1/ftpcloudfs/main.py 0.35-0ubuntu1/ftpcloudfs/main.py
--- 0.25.2+20140217+git2a90c1a2eb-1/ftpcloudfs/main.py	2014-02-16 18:10:17.000000000 +0000
+++ 0.35-0ubuntu1/ftpcloudfs/main.py	2016-03-10 10:53:46.000000000 +0000
@@ -11,6 +11,7 @@ from logging.handlers import SysLogHandl
 
 from optparse import OptionParser
 import pyftpdlib.servers
+import swiftclient
 
 from server import ObjectStorageFtpFS
 from fs import ObjectStorageFD
@@ -60,13 +61,13 @@ class Main(object):
                 # fall back to UDP
                 handler = SysLogHandler(facility=SysLogHandler.LOG_DAEMON)
             finally:
-                prefix = "%s[%s]: " % (__package__, self.pid)
+                prefix = "%s[%%(process)d]: " % __package__
                 formatter = logging.Formatter(prefix + "%(message)s")
                 handler.setFormatter(formatter)
                 logger.addHandler(handler)
                 logger.setLevel(self.options.log_level)
         else:
-            log_format = '%(asctime)-15s - %(levelname)s - %(message)s'
+            log_format = '[%(process)d] %(asctime)-15s - %(levelname)s - %(message)s'
             logging.basicConfig(filename=self.options.log_file,
                                 format=log_format,
                                 level=self.options.log_level)
@@ -77,15 +78,36 @@ class Main(object):
         if self.config.get("ftpcloudfs", "service-net") is not None:
             logging.warning("service-net configuration token has been deprecated and has no effect (see ChangeLog)")
 
-    def parse_configuration(self, config_file=default_config_file):
+    def parse_configuration(self):
         """Parse the configuration file"""
+
+        # look for an alternative configuration file
+        alt_config_file = False
+        parser = OptionParser() # only for error reporting
+        config_file = default_config_file
+        for arg in sys.argv:
+            if arg == '--config':
+                try:
+                    alt_config_file = sys.argv[sys.argv.index(arg) + 1]
+                    config_file = alt_config_file
+                except IndexError:
+                    # the parser will report the error later on
+                    pass
+            elif arg.startswith('--config='):
+                _, alt_config_file = arg.split('=', 1)
+                if not alt_config_file:
+                    parser.error("--config option requires an argument")
+                config_file = alt_config_file
+
         config = RawConfigParser({'banner': default_banner,
                                   'port': default_port,
                                   'bind-address': default_address,
                                   'workers': None,
                                   'memcache': None,
                                   'max-cons-per-ip': '0',
+                                  'permit-foreign-addresses': 'no',
                                   'auth-url': None,
+                                  'insecure': False,
                                   'service-net': None,
                                   'verbose': 'no',
                                   'syslog': 'no',
@@ -94,15 +116,22 @@ class Main(object):
                                   'uid': None,
                                   'gid': None,
                                   'masquerade-firewall': None,
+                                  'passive-ports': None,
                                   'split-large-files': '0',
+                                  'hide-part-dir': 'no',
                                   # keystone auth 2.0 support
                                   'keystone-auth': False,
                                   'keystone-region-name': None,
                                   'keystone-tenant-separator': default_ks_tenant_separator,
                                   'keystone-service-type': default_ks_service_type,
                                   'keystone-endpoint-type': default_ks_endpoint_type,
+                                  'rackspace-service-net' : 'no',
                                  })
-        config.read(default_config_file)
+
+        if not config.read(config_file) and alt_config_file:
+            # the default conf file is optional
+            parser.error("failed to read %s" % config_file)
+
         if not config.has_section('ftpcloudfs'):
             config.add_section('ftpcloudfs')
 
@@ -110,6 +139,7 @@ class Main(object):
 
     def parse_arguments(self):
         """Parse command line options"""
+
         parser = OptionParser(version="%prog " + version)
         parser.add_option('-p', '--port',
                           type="int",
@@ -131,6 +161,12 @@ class Main(object):
                           default=self.config.get('ftpcloudfs', 'auth-url'),
                           help="Authentication URL (required)")
 
+        parser.add_option('--insecure',
+                          action="store_true",
+                          dest="insecure",
+                          default=self.config.get('ftpcloudfs', 'insecure'),
+                          help="Allow access to servers without checking SSL certs")
+
         memcache = self.config.get('ftpcloudfs', 'memcache')
         if memcache:
             memcache = [x.strip() for x in memcache.split(',')]
@@ -215,6 +251,12 @@ class Main(object):
                           default=self.config.get('ftpcloudfs', 'keystone-endpoint-type'),
                           help="Endpoint type to be used in auth 2.0 (default: %s)" % default_ks_endpoint_type)
 
+        parser.add_option('--config',
+                          type="str",
+                          dest="config",
+                          default=default_config_file,
+                          help="Use an alternative configuration file (default: %s)" % default_config_file)
+
         (options, _) = parser.parse_args()
 
         if options.keystone:
@@ -234,11 +276,15 @@ class Main(object):
         """Run the main ftp server loop."""
         banner = self.config.get('ftpcloudfs', 'banner').replace('%v', version)
         banner = banner.replace('%f', pyftpdlib.__ver__)
+        banner = banner.replace('%s', swiftclient.version.version_string)
 
         MyFTPHandler.banner = banner
         ObjectStorageFtpFS.authurl = self.options.authurl
+        ObjectStorageFtpFS.insecure = self.options.insecure
         ObjectStorageFtpFS.keystone = self.options.keystone
         ObjectStorageFtpFS.memcache_hosts = self.options.memcache
+        ObjectStorageFtpFS.hide_part_dir = self.config.getboolean('ftpcloudfs', 'hide-part-dir')
+        ObjectStorageFtpFS.snet = self.config.getboolean('ftpcloudfs', 'rackspace-service-net')
 
         try:
             # store bytes
@@ -253,6 +299,19 @@ class Main(object):
             except socket.gaierror, (_, errmsg):
                 sys.exit('Masquerade address error: %s' % errmsg)
 
+        passive_ports = self.config.get('ftpcloudfs', 'passive-ports')
+        if passive_ports:
+            try:
+                passive_ports = [p.strip() for p in passive_ports.split(":", 2)]
+                if len(passive_ports) != 2 or passive_ports[0] >= passive_ports[1]:
+                    raise ValueError()
+                passive_ports = map(int, passive_ports)
+                MyFTPHandler.passive_ports = range(passive_ports[0], passive_ports[1]+1)
+            except (ValueError, TypeError):
+                sys.exit('Passive ports error: int:int expected')
+
+        MyFTPHandler.permit_foreign_addresses = self.config.getboolean('ftpcloudfs', 'permit-foreign-addresses')
+
         try:
             max_cons_per_ip = int(self.config.get('ftpcloudfs', 'max-cons-per-ip'))
         except ValueError, errmsg:
@@ -326,4 +385,3 @@ class Main(object):
 
             self.setup_log()
             ftpd.serve_forever()
-
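
To illustrate the banner handling added above: the %v, %f and %s placeholders are replaced with the ftp-cloudfs, pyftpdlib and swiftclient versions before the banner is handed to the FTP handler. A standalone sketch with placeholder version numbers:

    def expand_banner(template, ftpcloudfs_ver, pyftpdlib_ver, swiftclient_ver):
        # Mirrors the %v / %f / %s substitutions done in main.py.
        banner = template.replace('%v', ftpcloudfs_ver)
        banner = banner.replace('%f', pyftpdlib_ver)
        return banner.replace('%s', swiftclient_ver)

    # Illustrative version numbers only.
    print(expand_banner("ftp-cloudfs %v using pyftpdlib %f (swiftclient %s) ready.",
                        "0.35", "1.5.0", "3.0.0"))
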
diff -pruN 0.25.2+20140217+git2a90c1a2eb-1/ftpcloudfs/server.py 0.35-0ubuntu1/ftpcloudfs/server.py
--- 0.25.2+20140217+git2a90c1a2eb-1/ftpcloudfs/server.py	2014-02-16 18:10:17.000000000 +0000
+++ 0.35-0ubuntu1/ftpcloudfs/server.py	2014-11-06 15:08:00.000000000 +0000
@@ -14,14 +14,20 @@ class ObjectStorageFtpFS(ObjectStorageFS
     """Object Storage File system emulation for a FTP server."""
     servicenet = False
     authurl = None
+    insecure = False
     keystone = None
+    hide_part_dir = None
+    snet = False
 
-    def __init__(self, username, api_key, authurl=None, keystone=None):
+    def __init__(self, username, api_key, authurl=None, keystone=None, hide_part_dir=None):
         ObjectStorageFS.__init__(self,
                                  username,
                                  api_key,
                                  authurl=authurl or self.authurl,
                                  keystone=keystone or self.keystone,
+                                 hide_part_dir=hide_part_dir or self.hide_part_dir,
+                                 snet = self.snet,
+                                 insecure = self.insecure,
                                  )
 
     def init_abstracted_fs(self, root, cmd_channel):
diff -pruN 0.25.2+20140217+git2a90c1a2eb-1/ftpcloudfs/utils.py 0.35-0ubuntu1/ftpcloudfs/utils.py
--- 0.25.2+20140217+git2a90c1a2eb-1/ftpcloudfs/utils.py	2014-02-16 18:10:17.000000000 +0000
+++ 0.35-0ubuntu1/ftpcloudfs/utils.py	2016-03-10 11:29:15.000000000 +0000
@@ -28,6 +28,13 @@ class PidFile(object):
             self.pidfile.close()
             os.remove(self.path)
 
+# compatibility layer for swiftclient < 2.7.0
+def smart_unicode(s, encoding='utf-8'):
+    if isinstance(s, unicode):
+        return s
+    else:
+        return unicode(s, encoding)
+
 #from django.utils
 def smart_str(s, encoding='utf-8', strings_only=False, errors='strict'):
     if strings_only and isinstance(s, (types.NoneType, int)):
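
A short usage sketch for the smart_unicode compatibility helper added above (Python 2 semantics, example strings only):

    from ftpcloudfs.utils import smart_unicode

    name_bytes = 'caf\xc3\xa9'      # UTF-8 encoded byte string
    name_text = u'caf\xe9'          # already unicode

    assert smart_unicode(name_bytes) == u'caf\xe9'   # decoded with the default utf-8
    assert smart_unicode(name_text) is name_text     # unicode objects are returned as-is
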
diff -pruN 0.25.2+20140217+git2a90c1a2eb-1/ftpcloudfs.conf.example 0.35-0ubuntu1/ftpcloudfs.conf.example
--- 0.25.2+20140217+git2a90c1a2eb-1/ftpcloudfs.conf.example	2014-02-16 18:10:17.000000000 +0000
+++ 0.35-0ubuntu1/ftpcloudfs.conf.example	2016-03-10 10:51:14.000000000 +0000
@@ -1,10 +1,13 @@
 # ftpcloudfs example configuration file
 #
+# Defaults are shown in the comments.
+# Configuration tokens don't require quotes.
+#
 
 [ftpcloudfs]
 
-# FTP banner (%v version, %f ftp handler version)
-# banner = ftp-cloudfs %v using pyftpdlib %f ready.
+# FTP banner (%v version, %f ftp handler version, %s swiftclient version)
+# banner = ftp-cloudfs %v using pyftpdlib %f (swiftclient %s) ready.
 
 # Port to bind.
 # port = 2021
@@ -15,6 +18,9 @@
 # Authentication URL (required)
 # auth-url = (empty)
 
+# Allow access to servers without checking SSL certs
+# insecure = no
+
 # DEPRECATED: Number of workers to use (no effect)
 # workers = (empty)
 
@@ -26,10 +32,18 @@
 # default is 0 (no limit)
 # max-cons-per-ip = 0
 
+# Allow data connections from a different IP than the control connection.
+# Useful in situations where the control connection is proxied. Enables
+# site-to-site transfers, but also introduces a security risk.
+# permit-foreign-addresses = no
+
 # Large file support.
 # Specify a size in MB to split large files.
 # split-large-files = (empty)
 
+# Hide the .part directory created by large file support
+# hide-part-dir = no
+
 # Be verbose on logging.
 # verbose = no
 
@@ -51,6 +65,11 @@
 # Masquerade IP address in case your server is behind a firewall or NATed.
 # masquerade-firewall = (empty)
 
+# Passive ports to be used for data transfers. Expected to be a port range
+# (endpoints included) in integer:integer format (eg. 60000:65535).
+# By default the operating system will assign a port.
+# passive-ports = (empty)
+
 # Auth 2.0 (Keystone), requires keystoneclient
 # keystone-auth = no
 
@@ -58,12 +77,15 @@
 # keystone-region-name = (empty)
 
 # Tenant separator to be used with Auth 2.0 (eg. TENANT.USERNAME)
-# keystone-tenant-separator = '.'
+# keystone-tenant-separator = .
 
 # Service type to be used with Auth 2.0.
-# keystone-service-type = 'object-store'
+# keystone-service-type = object-store
 
 # Endpoint type to be used with Auth 2.0.
-# keystone-endpoint-type = 'publicURL'
+# keystone-endpoint-type = publicURL
+
+# Use Rackspace's ServiceNet internal network.
+# rackspace-service-net = no
 
 # EOF
diff -pruN 0.25.2+20140217+git2a90c1a2eb-1/ftp_cloudfs.egg-info/dependency_links.txt 0.35-0ubuntu1/ftp_cloudfs.egg-info/dependency_links.txt
--- 0.25.2+20140217+git2a90c1a2eb-1/ftp_cloudfs.egg-info/dependency_links.txt	1970-01-01 00:00:00.000000000 +0000
+++ 0.35-0ubuntu1/ftp_cloudfs.egg-info/dependency_links.txt	2016-03-11 08:47:00.000000000 +0000
@@ -0,0 +1 @@
+
diff -pruN 0.25.2+20140217+git2a90c1a2eb-1/ftp_cloudfs.egg-info/not-zip-safe 0.35-0ubuntu1/ftp_cloudfs.egg-info/not-zip-safe
--- 0.25.2+20140217+git2a90c1a2eb-1/ftp_cloudfs.egg-info/not-zip-safe	1970-01-01 00:00:00.000000000 +0000
+++ 0.35-0ubuntu1/ftp_cloudfs.egg-info/not-zip-safe	2013-09-30 13:39:17.000000000 +0000
@@ -0,0 +1 @@
+
diff -pruN 0.25.2+20140217+git2a90c1a2eb-1/ftp_cloudfs.egg-info/PKG-INFO 0.35-0ubuntu1/ftp_cloudfs.egg-info/PKG-INFO
--- 0.25.2+20140217+git2a90c1a2eb-1/ftp_cloudfs.egg-info/PKG-INFO	1970-01-01 00:00:00.000000000 +0000
+++ 0.35-0ubuntu1/ftp_cloudfs.egg-info/PKG-INFO	2016-03-11 08:47:00.000000000 +0000
@@ -0,0 +1,247 @@
+Metadata-Version: 1.1
+Name: ftp-cloudfs
+Version: 0.35
+Summary: FTP interface to OpenStack Object Storage (Swift)
+Home-page: https://pypi.python.org/pypi/ftp-cloudfs/
+Author: Chmouel Boudjnah
+Author-email: chmouel@chmouel.com
+License: MIT
+Download-URL: http://pypi.python.org/packages/source/f/ftp-cloudfs/ftp-cloudfs-0.35.tar.gz
+Description: =================================================
+        FTP Interface to OpenStack Object Storage (Swift)
+        =================================================
+        
+        :Homepage:  https://pypi.python.org/pypi/ftp-cloudfs/
+        :Credits:   Copyright 2009--2016 Chmouel Boudjnah <chmouel@chmouel.com>
+        :Licence:   MIT
+        
+        
+        DESCRIPTION
+        ===========
+        
+        ftp-cloudfs is an FTP server acting as a proxy to `OpenStack Object Storage (Swift)`_.
+        It allows you to connect with any FTP client to upload/download files or create containers.
+        
+        By default the server binds to port 2021, which allows it to be run as a
+        non-root/administrator user.
+        
+        .. _OpenStack Object Storage (Swift): http://launchpad.net/swift
+        
+        It supports pseudo-hierarchical folders/directories as described in the `OpenStack Object Storage API`_.
+        
+        .. _OpenStack Object Storage API: http://docs.openstack.org/openstack-object-storage/developer/content/
+        
+        
+        REQUIREMENTS
+        ============
+        
+        - Python 2 >= 2.6
+        - python-swiftclient >= 2.1.0 - https://github.com/openstack/python-swiftclient/
+        - pyftpdlib >= 1.3.0 - http://code.google.com/p/pyftpdlib/
+        - python-daemon >= 1.5.5 - http://pypi.python.org/pypi/python-daemon/
+        - python-memcache >= 1.45 - http://www.tummy.com/Community/software/python-memcached/
+        
+        IMPORTANT: pyftpdlib 1.2.0 has a couple of known issues (memory leak, file descriptor leak) and it shouldn't
+        be used in production systems.
+        
+        python-swiftclient 2.x uses Requests and it is currently incompatible with ftp-cloudfs < 0.30.
+        
+        
+        Operating Systems
+        =================
+        
+        ftp-cloudfs is developed and tested on Ubuntu and Debian Linux distributions, but it should work on any
+        Unix-like system (including Mac OS X) as long as you install the requirements listed above.
+        
+        
+        INSTALL
+        =======
+        
+        Use the standard setup.py directives, i.e.::
+        
+          python setup.py install
+        
+        Or if you have `pip`_ installed you can just run::
+        
+          pip install ftp-cloudfs
+        
+        which will install ftp-cloudfs with all the required dependencies.
+        
+        We also provide a `requirements.txt` file in case you want to install all the dependencies using `pip`
+        without installing ftp-cloudfs::
+        
+          pip install -r requirements.txt
+        
+        ftp-cloudfs has been `included in Debian Jessie`_.
+        
+        .. _`pip`: https://pip.pypa.io/
+        .. _included in Debian Jessie: http://packages.debian.org/jessie/ftp-cloudfs
+        
+        
+        USAGE
+        ======
+        
+        The install should have created a /usr/bin/ftpcloudfs (or whatever
+        prefix is defined by your Python distribution or command line arguments)
+        which can be used like this:
+        
+        Usage: ftpcloudfs [options]
+        
+        Options:
+          --version             show program's version number and exit
+          -h, --help            show this help message and exit
+          -p PORT, --port=PORT  Port to bind the server (default: 2021)
+          -b BIND_ADDRESS, --bind-address=BIND_ADDRESS
+                                Address to bind (default: 127.0.0.1)
+          -a AUTHURL, --auth-url=AUTHURL
+                                Authentication URL (required)
+          --insecure            Allow access to servers without checking SSL certs
+          --memcache=MEMCACHE   Memcache server(s) to be used for cache (ip:port)
+          -v, --verbose         Be verbose on logging
+          -f, --foreground      Do not attempt to daemonize but run in foreground
+          -l LOG_FILE, --log-file=LOG_FILE
+                                Log File: Default stdout when in foreground
+          --syslog              Enable logging to the system logger (daemon facility)
+          --pid-file=PID_FILE   Pid file location when in daemon mode
+          --uid=UID             UID to drop the privilege to when in daemon mode
+          --gid=GID             GID to drop the privilege to when in daemon mode
+          --keystone-auth       Use auth 2.0 (Keystone, requires keystoneclient)
+          --keystone-region-name=REGION_NAME
+                                Region name to be used in auth 2.0
+          --keystone-tenant-separator=TENANT_SEPARATOR
+                                Character used to separate tenant_name/username in
+                                auth 2.0 (default: TENANT.USERNAME)
+          --keystone-service-type=SERVICE_TYPE
+                                Service type to be used in auth 2.0 (default: object-
+                                store)
+          --keystone-endpoint-type=ENDPOINT_TYPE
+                                Endpoint type to be used in auth 2.0 (default:
+                                publicURL)
+        
+        The defaults can be changed using a configuration file (by default in
+        /etc/ftpcloudfs.conf). Check the example file included in the package.
+        
+        
+        CACHE MANAGEMENT
+        ================
+        
+        `OpenStack Object Storage (Swift)`_ is an object store, not a real file system.
+        This proxy simulates enough file system functionality to be used over FTP, but it
+        has a performance impact.
+        
+        To improve the performance a cache is used. It can be local or external (with
+        Memcache). By default a local cache is used, unless one or more Memcache servers
+        are configured.
+        
+        If you're using just one client the local cache may be fine, but if you're using
+        several connections, configuring an external cache is highly recommended.
+        
+        If an external cache is available it will be used to cache authentication tokens too,
+        so any Memcache server must be secured to prevent unauthorized access: it could be
+        possible to associate a token with a specific user (not trivial) or even use the
+        cache key (MD5 hash) to brute-force the user password.
+        
+        
+        AUTH 2.0
+        ========
+        
+        By default ftp-cloudfs uses Swift auth 1.0, which is compatible with `OpenStack Object Storage`
+        using the `swauth`_ auth middleware and with Swift implementations such as `Rackspace Cloud Files`_ or
+        `Memset's Memstore Cloud Storage`_.
+        
+        Optionally `OpenStack Identity Service 2.0`_ can be used. Currently python-keystoneclient (0.3.2+
+        recommended) is required to use auth 2.0, and it can be enabled with the ``keystone-auth`` option.
+        
+        You can provide a tenant name in the FTP login user with TENANT.USERNAME (using a dot as
+        separator). Please check the example configuration file for further details.
+        
+        .. _swauth: https://github.com/gholt/swauth
+        .. _OpenStack Identity Service 2.0: http://docs.openstack.org/api/openstack-identity-service/2.0/content/index.html
+        .. _RackSpace Cloud Files: http://www.rackspace.com/cloud/cloud_hosting_products/files/
+        .. _Memset's Memstore Cloud Storage: https://www.memset.com/cloud/storage/
+        
+        
+        LARGE FILE SUPPORT
+        ==================
+        
+        The object storage has a limit on the size of a single uploaded object (by default this is 5GB).
+        Files larger than that can be split into parts and merged back on the fly using a manifest file.
+        
+        ftp-cloudfs supports this transparently with the *split-large-files* configuration token; set it
+        to the number of megabytes to use for each part (disabled by default).
+        
+        When a *FILE* is larger than the specified number of MB, a *FILE.part* directory will be created
+        and the file will be split automatically into *n* parts. The original file name will be used to
+        store the manifest. When the original file is downloaded, the parts will be served as if it were a single file.
+        
+        The *FILE.part* directory can be removed from directory listings using the *hide-part-dir* configuration
+        token. Please be aware that the directory will still be visible when accessing the storage using the
+        Swift API.
+        
+        
+        SUPPORT
+        =======
+        
+        The project's issue tracker is at:
+        
+        https://github.com/cloudfs/ftp-cloudfs/issues
+        
+        There you can file bug reports, ask for help or contribute patches. There's additional information at:
+        
+        https://github.com/cloudfs/ftp-cloudfs/wiki
+        
+        LICENSE
+        =======
+        
+        Unless otherwise noted, all files are released under the `MIT`_ license;
+        exceptions contain their own licensing information.
+        
+        .. _`MIT`: http://en.wikipedia.org/wiki/MIT_License
+        
+          Copyright (C) 2009-2016 Chmouel Boudjnah <chmouel@chmouel.com>
+        
+          Permission is hereby granted, free of charge, to any person obtaining a copy
+          of this software and associated documentation files (the "Software"), to deal
+          in the Software without restriction, including without limitation the rights
+          to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+          copies of the Software, and to permit persons to whom the Software is
+          furnished to do so, subject to the following conditions:
+        
+          The above copyright notice and this permission notice shall be included in
+          all copies or substantial portions of the Software.
+        
+          THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+          IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+          FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+          AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+          LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+          OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+          THE SOFTWARE.
+        
+        
+        Authors
+        =======
+        
+        - Chmouel Boudjnah <chmouel@chmouel.com>
+        - Nick Craig-Wood <nick@craig-wood.com>
+        - Juan J. Martinez <jjm@usebox.net>
+        
+        
+        Contributors
+        ============
+        
+        - Christophe Le Guern <c35sys@gmail.com>
+        - Konstantin vz'One Enchant <sirkonst@gmail.com>
+        - Maxim Mitroshin <mitroshin@selectel.org>
+        - Sokolov Ilya <falconmain@gmail.com>
+        - John Leach <john@johnleach.co.uk>
+        - Vil Surkin <vills@webzilla.com>
+        
+        
+Platform: UNKNOWN
+Classifier: Development Status :: 4 - Beta
+Classifier: Environment :: Console
+Classifier: Programming Language :: Python
+Classifier: Operating System :: OS Independent
+Classifier: Environment :: No Input/Output (Daemon)
+Classifier: License :: OSI Approved :: MIT License
diff -pruN 0.25.2+20140217+git2a90c1a2eb-1/ftp_cloudfs.egg-info/requires.txt 0.35-0ubuntu1/ftp_cloudfs.egg-info/requires.txt
--- 0.25.2+20140217+git2a90c1a2eb-1/ftp_cloudfs.egg-info/requires.txt	1970-01-01 00:00:00.000000000 +0000
+++ 0.35-0ubuntu1/ftp_cloudfs.egg-info/requires.txt	2016-03-11 08:47:00.000000000 +0000
@@ -0,0 +1,4 @@
+pyftpdlib>=1.3.0
+python-swiftclient>=2.1.0
+python-daemon>=1.5.5
+python-memcached
\ No newline at end of file
diff -pruN 0.25.2+20140217+git2a90c1a2eb-1/ftp_cloudfs.egg-info/SOURCES.txt 0.35-0ubuntu1/ftp_cloudfs.egg-info/SOURCES.txt
--- 0.25.2+20140217+git2a90c1a2eb-1/ftp_cloudfs.egg-info/SOURCES.txt	1970-01-01 00:00:00.000000000 +0000
+++ 0.35-0ubuntu1/ftp_cloudfs.egg-info/SOURCES.txt	2016-03-11 08:47:01.000000000 +0000
@@ -0,0 +1,21 @@
+ChangeLog
+MANIFEST.in
+README.rst
+ftpcloudfs.conf.example
+setup.py
+bin/ftpcloudfs
+ftp_cloudfs.egg-info/PKG-INFO
+ftp_cloudfs.egg-info/SOURCES.txt
+ftp_cloudfs.egg-info/dependency_links.txt
+ftp_cloudfs.egg-info/not-zip-safe
+ftp_cloudfs.egg-info/requires.txt
+ftp_cloudfs.egg-info/top_level.txt
+ftpcloudfs/__init__.py
+ftpcloudfs/chunkobject.py
+ftpcloudfs/constants.py
+ftpcloudfs/errors.py
+ftpcloudfs/fs.py
+ftpcloudfs/main.py
+ftpcloudfs/monkeypatching.py
+ftpcloudfs/server.py
+ftpcloudfs/utils.py
\ No newline at end of file
diff -pruN 0.25.2+20140217+git2a90c1a2eb-1/ftp_cloudfs.egg-info/top_level.txt 0.35-0ubuntu1/ftp_cloudfs.egg-info/top_level.txt
--- 0.25.2+20140217+git2a90c1a2eb-1/ftp_cloudfs.egg-info/top_level.txt	1970-01-01 00:00:00.000000000 +0000
+++ 0.35-0ubuntu1/ftp_cloudfs.egg-info/top_level.txt	2016-03-11 08:47:00.000000000 +0000
@@ -0,0 +1 @@
+ftpcloudfs
diff -pruN 0.25.2+20140217+git2a90c1a2eb-1/MANIFEST.in 0.35-0ubuntu1/MANIFEST.in
--- 0.25.2+20140217+git2a90c1a2eb-1/MANIFEST.in	1970-01-01 00:00:00.000000000 +0000
+++ 0.35-0ubuntu1/MANIFEST.in	2014-04-10 14:41:09.000000000 +0000
@@ -0,0 +1,2 @@
+include README.rst ftpcloudfs.conf.example ChangeLog
+
diff -pruN 0.25.2+20140217+git2a90c1a2eb-1/PKG-INFO 0.35-0ubuntu1/PKG-INFO
--- 0.25.2+20140217+git2a90c1a2eb-1/PKG-INFO	1970-01-01 00:00:00.000000000 +0000
+++ 0.35-0ubuntu1/PKG-INFO	2016-03-11 08:47:01.000000000 +0000
@@ -0,0 +1,247 @@
+Metadata-Version: 1.1
+Name: ftp-cloudfs
+Version: 0.35
+Summary: FTP interface to OpenStack Object Storage (Swift)
+Home-page: https://pypi.python.org/pypi/ftp-cloudfs/
+Author: Chmouel Boudjnah
+Author-email: chmouel@chmouel.com
+License: MIT
+Download-URL: http://pypi.python.org/packages/source/f/ftp-cloudfs/ftp-cloudfs-0.35.tar.gz
+Description: =================================================
+        FTP Interface to OpenStack Object Storage (Swift)
+        =================================================
+        
+        :Homepage:  https://pypi.python.org/pypi/ftp-cloudfs/
+        :Credits:   Copyright 2009--2016 Chmouel Boudjnah <chmouel@chmouel.com>
+        :Licence:   MIT
+        
+        
+        DESCRIPTION
+        ===========
+        
+        ftp-cloudfs is an FTP server acting as a proxy to `OpenStack Object Storage (Swift)`_.
+        It allows you to connect with any FTP client to upload/download files or create containers.
+        
+        By default the server binds to port 2021, which allows it to be run as a
+        non-root/administrator user.
+        
+        .. _OpenStack Object Storage (Swift): http://launchpad.net/swift
+        
+        It supports pseudo-hierarchical folders/directories as described in the `OpenStack Object Storage API`_.
+        
+        .. _OpenStack Object Storage API: http://docs.openstack.org/openstack-object-storage/developer/content/
+        
+        
+        REQUIREMENTS
+        ============
+        
+        - Python 2 >= 2.6
+        - python-swiftclient >= 2.1.0 - https://github.com/openstack/python-swiftclient/
+        - pyftpdlib >= 1.3.0 - http://code.google.com/p/pyftpdlib/
+        - python-daemon >= 1.5.5 - http://pypi.python.org/pypi/python-daemon/
+        - python-memcache >= 1.45 - http://www.tummy.com/Community/software/python-memcached/
+        
+        IMPORTANT: pyftpdlib 1.2.0 has a couple of known issues (memory leak, file descriptor leak) and it shouldn't
+        be used in production systems.
+        
+        python-swiftclient 2.x uses Requests and it is currently incompatible with ftp-cloudfs < 0.30.
+        
+        
+        Operating Systems
+        =================
+        
+        ftp-cloudfs is developed and tested on the Ubuntu and Debian Linux distributions, but it should work on any
+        Unix-like system (including Mac OS X) as long as you install the requirements listed above.
+        
+        
+        INSTALL
+        =======
+        
+        Use the standard setup.py directives, i.e.::
+        
+          python setup.py install
+        
+        Or if you have `pip`_ installed you can just run::
+        
+          pip install ftp-cloudfs
+        
+        which will install ftp-cloudfs with all the required dependencies.
+        
+        We also provide a `requirements.txt` file in case you want to install all the dependencies using `pip`
+        without installing ftp-cloudfs::
+        
+          pip install -r requirements.txt
+        
+        ftp-cloudfs has been `included in Debian Jessie`_.
+        
+        .. _`pip`: https://pip.pypa.io/
+        .. _included in Debian Jessie: http://packages.debian.org/jessie/ftp-cloudfs
+        
+        
+        USAGE
+        ======
+        
+        The install should have created /usr/bin/ftpcloudfs (or whatever prefix is
+        defined by your Python distribution or command-line arguments), which can be
+        used like this:
+        
+        Usage: ftpcloudfs [options]
+        
+        Options:
+          --version             show program's version number and exit
+          -h, --help            show this help message and exit
+          -p PORT, --port=PORT  Port to bind the server (default: 2021)
+          -b BIND_ADDRESS, --bind-address=BIND_ADDRESS
+                                Address to bind (default: 127.0.0.1)
+          -a AUTHURL, --auth-url=AUTHURL
+                                Authentication URL (required)
+          --insecure            Allow to access servers without checking SSL certs
+          --memcache=MEMCACHE   Memcache server(s) to be used for cache (ip:port)
+          -v, --verbose         Be verbose on logging
+          -f, --foreground      Do not attempt to daemonize but run in foreground
+          -l LOG_FILE, --log-file=LOG_FILE
+                                Log File: Default stdout when in foreground
+          --syslog              Enable logging to the system logger (daemon facility)
+          --pid-file=PID_FILE   Pid file location when in daemon mode
+          --uid=UID             UID to drop the privilege to when in daemon mode
+          --gid=GID             GID to drop the privilege to when in daemon mode
+          --keystone-auth       Use auth 2.0 (Keystone, requires keystoneclient)
+          --keystone-region-name=REGION_NAME
+                                Region name to be used in auth 2.0
+          --keystone-tenant-separator=TENANT_SEPARATOR
+                                Character used to separate tenant_name/username in
+                                auth 2.0 (default: TENANT.USERNAME)
+          --keystone-service-type=SERVICE_TYPE
+                                Service type to be used in auth 2.0 (default: object-
+                                store)
+          --keystone-endpoint-type=ENDPOINT_TYPE
+                                Endpoint type to be used in auth 2.0 (default:
+                                publicURL)
+        
+        The defaults can be changed using a configuration file (by default in
+        /etc/ftpcloudfs.conf). Check the example file included in the package.
+        
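+        For example, a minimal invocation running the server in the foreground against a
+        Keystone-enabled cluster (the auth URL below is just a placeholder)::
+        
+          ftpcloudfs --foreground --keystone-auth \
+                     --auth-url https://keystone.example.com:5000/v2.0
+        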
+        
+        CACHE MANAGEMENT
+        ================
+        
+        `OpenStack Object Storage (Swift)`_ is an object store, not a real file system.
+        This proxy simulates enough file system functionality to be used over FTP, but it
+        has a performance impact.
+        
+        To improve performance, a cache is used. It can be local or external (with
+        Memcache). By default a local cache is used, unless one or more Memcache servers
+        are configured.
+        
+        If you're using just one client the local cache may be fine, but if you're using
+        several connections, configuring an external cache is highly recommended.
+        
+        If an external cache is available it will be used to cache authentication tokens
+        too, so any Memcache server must be secured to prevent unauthorized access: it
+        could be possible to associate a token with a specific user (not trivial), or even
+        to use the cache key (an MD5 hash) to brute-force the user's password.
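+        
+        As a sketch, assuming the configuration token mirrors the ``--memcache`` command-line
+        option and lives in the main section of the configuration file (check
+        ftpcloudfs.conf.example for the authoritative names), an external cache could be
+        configured with something like::
+        
+          [ftpcloudfs]
+          memcache = 127.0.0.1:11211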
+        
+        
+        AUTH 2.0
+        ========
+        
+        By default ftp-cloudfs uses Swift auth 1.0, which is compatible with `OpenStack Object Storage`
+        using the `swauth`_ auth middleware, and with Swift implementations such as `Rackspace Cloud Files`_
+        or `Memset's Memstore Cloud Storage`_.
+        
+        Optionally `OpenStack Identity Service 2.0`_ can be used. Currently python-keystoneclient (0.3.2+
+        recommended) is required to use auth 2.0, and it can be enabled with the ``keystone-auth`` option.
+        
+        You can provide a tenant name in the FTP login user with TENANT.USERNAME (using a dot as
+        separator). Please check the example configuration file for further details.
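+        
+        As an illustration, here is how a tenant-qualified login could look from Python's
+        standard library (host, tenant and credentials below are just placeholders)::
+        
+          import ftplib
+        
+          ftp = ftplib.FTP()
+          ftp.connect("ftp.example.com", 2021)        # default ftp-cloudfs port
+          ftp.login("tenant.username", "password")    # tenant and user joined by the separator
+          print(ftp.nlst())                           # list the containers
+          ftp.quit()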
+        
+        .. _swauth: https://github.com/gholt/swauth
+        .. _OpenStack Identity Service 2.0: http://docs.openstack.org/api/openstack-identity-service/2.0/content/index.html
+        .. _Rackspace Cloud Files: http://www.rackspace.com/cloud/cloud_hosting_products/files/
+        .. _Memset's Memstore Cloud Storage: https://www.memset.com/cloud/storage/
+        
+        
+        LARGE FILE SUPPORT
+        ==================
+        
+        The object store has a limit on the size of a single uploaded object (by default 5GB).
+        Files larger than that can be split into parts and merged back on the fly using a manifest file.
+        
+        ftp-cloudfs supports this transparently via the *split-large-files* configuration token; set
+        it to the number of megabytes to use for each part (disabled by default).
+        
+        When a *FILE* is larger than the specified number of MB, a *FILE.part* directory will be created
+        and the file will be split automatically into *n* parts. The original file name will be used to
+        store the manifest. When the original file is downloaded, the parts will be served as if it were a single file.
+        
+        The *FILE.part* directory can be removed from directory listings using the *hide-part-dir* configuration
+        token. Please be aware that the directory will still be visible when accessing the storage using the
+        Swift API.
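+        
+        As a sketch of the related configuration (the exact value syntax may differ; check
+        ftpcloudfs.conf.example), splitting uploads into 256 MB parts while hiding the part
+        directories could look like::
+        
+          split-large-files = 256
+          hide-part-dir = yes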
+        
+        
+        SUPPORT
+        =======
+        
+        The project's issue tracker is at:
+        
+        https://github.com/cloudfs/ftp-cloudfs/issues
+        
+        There you can file bug reports, ask for help or contribute patches. There's additional information at:
+        
+        https://github.com/cloudfs/ftp-cloudfs/wiki
+        
+        LICENSE
+        =======
+        
+        Unless otherwise noted, all files are released under the `MIT`_ license;
+        exceptions contain their own licensing information.
+        
+        .. _`MIT`: http://en.wikipedia.org/wiki/MIT_License
+        
+          Copyright (C) 2009-2016 Chmouel Boudjnah <chmouel@chmouel.com>
+        
+          Permission is hereby granted, free of charge, to any person obtaining a copy
+          of this software and associated documentation files (the "Software"), to deal
+          in the Software without restriction, including without limitation the rights
+          to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+          copies of the Software, and to permit persons to whom the Software is
+          furnished to do so, subject to the following conditions:
+        
+          The above copyright notice and this permission notice shall be included in
+          all copies or substantial portions of the Software.
+        
+          THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+          IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+          FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+          AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+          LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+          OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+          THE SOFTWARE.
+        
+        
+        Authors
+        =======
+        
+        - Chmouel Boudjnah <chmouel@chmouel.com>
+        - Nick Craig-Wood <nick@craig-wood.com>
+        - Juan J. Martinez <jjm@usebox.net>
+        
+        
+        Contributors
+        ============
+        
+        - Christophe Le Guern <c35sys@gmail.com>
+        - Konstantin vz'One Enchant <sirkonst@gmail.com>
+        - Maxim Mitroshin <mitroshin@selectel.org>
+        - Sokolov Ilya <falconmain@gmail.com>
+        - John Leach <john@johnleach.co.uk>
+        - Vil Surkin <vills@webzilla.com>
+        
+        
+Platform: UNKNOWN
+Classifier: Development Status :: 4 - Beta
+Classifier: Environment :: Console
+Classifier: Programming Language :: Python
+Classifier: Operating System :: OS Independent
+Classifier: Environment :: No Input/Output (Daemon)
+Classifier: License :: OSI Approved :: MIT License
diff -pruN 0.25.2+20140217+git2a90c1a2eb-1/README.rst 0.35-0ubuntu1/README.rst
--- 0.25.2+20140217+git2a90c1a2eb-1/README.rst	2014-02-16 18:10:17.000000000 +0000
+++ 0.35-0ubuntu1/README.rst	2016-03-11 08:45:48.000000000 +0000
@@ -3,7 +3,7 @@ FTP Interface to OpenStack Object Storag
 =================================================
 
 :Homepage:  https://pypi.python.org/pypi/ftp-cloudfs/
-:Credits:   Copyright 2009--2013 Chmouel Boudjnah <chmouel@chmouel.com>
+:Credits:   Copyright 2009--2016 Chmouel Boudjnah <chmouel@chmouel.com>
 :Licence:   MIT
 
 
@@ -27,7 +27,7 @@ REQUIREMENTS
 ============
 
 - Python 2 >= 2.6
-- python-swiftclient >= 1.6.0 - https://github.com/openstack/python-swiftclient/
+- python-swiftclient >= 2.1.0 - https://github.com/openstack/python-swiftclient/
 - pyftpdlib >= 1.3.0 - http://code.google.com/p/pyftpdlib/
 - python-daemon >= 1.5.5 - http://pypi.python.org/pypi/python-daemon/
 - python-memcache >= 1.45 - http://www.tummy.com/Community/software/python-memcached/
@@ -35,11 +35,13 @@ REQUIREMENTS
 IMPORTANT: pyftpdlib 1.2.0 has a couple of known issues (memory leak, file descriptor leak) and it shouldn't
 be used in production systems.
 
+python-swiftclient 2.x uses Requests and it is currently incompatible with ftp-cloudfs < 0.30.
+
 
 Operating Systems
 =================
 
-fpt-cloudfs is developed and tested in Ubuntu and Debian Linux distributions but it should work on any
+ftp-cloudfs is developed and tested in Ubuntu and Debian Linux distributions but it should work on any
 Unix-like (including Mac OS X) as long as you install the requirements listed above.
 
 
@@ -54,11 +56,16 @@ Or if you have `pip`_ installed you can
 
   pip install ftp-cloudfs
 
-which will install ftp-cloudfs with all the dependencies needed.
+which will install ftp-cloudfs with all the required dependencies.
+
+We also provide a `requirements.txt` file in case you want to install all the dependencies using `pip`
+without installing ftp-cloudfs::
+
+  pip install -r requirements.txt
 
 ftp-cloudfs has been `included in Debian Jessie`_.
 
-.. _`pip`: http://pip.openplans.org/
+.. _`pip`: https://pip.pypa.io/
 .. _included in Debian Jessie: http://packages.debian.org/jessie/ftp-cloudfs
 
 
@@ -79,6 +86,7 @@ Options:
                         Address to bind (default: 127.0.0.1)
   -a AUTHURL, --auth-url=AUTHURL
                         Authentication URL (required)
+  --insecure            Allow to access servers without checking SSL certs
   --memcache=MEMCACHE   Memcache server(s) to be used for cache (ip:port)
   -v, --verbose         Be verbose on logging
   -f, --foreground      Do not attempt to daemonize but run in foreground
@@ -108,7 +116,7 @@ The defaults can be changed using a conf
 CACHE MANAGEMENT
 ================
 
-`OpenStack Object Storage (Swift)`_ is an object storage and not a real file system. 
+`OpenStack Object Storage (Swift)`_ is an object storage and not a real file system.
 This proxy simulates enough file system functionality to be used over FTP, but it
 has a performance impact.
 
@@ -157,6 +165,10 @@ When a *FILE* is larger than the specifi
 *n* parts will be created splitting the file automatically. The original file name will be used to
 store the manifest. If the original file is downloaded, the parts will be served as it was a single file.
 
+The *FILE.part* directory can be removed from directory listings using the *hide-part-dir* configuration
+token. Please be aware that the directory will still be visible when accessing the storage using
+swift API.
+
 
 SUPPORT
 =======
@@ -177,7 +189,7 @@ exceptions contain licensing information
 
 .. _`MIT`: http://en.wikipedia.org/wiki/MIT_License
 
-  Copyright (C) 2009-2013 Chmouel Boudjnah <chmouel@chmouel.com>
+  Copyright (C) 2009-2016 Chmouel Boudjnah <chmouel@chmouel.com>
 
   Permission is hereby granted, free of charge, to any person obtaining a copy
   of this software and associated documentation files (the "Software"), to deal
@@ -212,4 +224,7 @@ Contributors
 - Christophe Le Guern <c35sys@gmail.com>
 - Konstantin vz'One Enchant <sirkonst@gmail.com>
 - Maxim Mitroshin <mitroshin@selectel.org>
+- Sokolov Ilya <falconmain@gmail.com>
+- John Leach <john@johnleach.co.uk>
+- Vil Surkin <vills@webzilla.com>
 
diff -pruN 0.25.2+20140217+git2a90c1a2eb-1/setup.cfg 0.35-0ubuntu1/setup.cfg
--- 0.25.2+20140217+git2a90c1a2eb-1/setup.cfg	1970-01-01 00:00:00.000000000 +0000
+++ 0.35-0ubuntu1/setup.cfg	2016-03-11 08:47:01.000000000 +0000
@@ -0,0 +1,5 @@
+[egg_info]
+tag_build = 
+tag_date = 0
+tag_svn_revision = 0
+
diff -pruN 0.25.2+20140217+git2a90c1a2eb-1/setup.py 0.35-0ubuntu1/setup.py
--- 0.25.2+20140217+git2a90c1a2eb-1/setup.py	2014-02-16 18:10:17.000000000 +0000
+++ 0.35-0ubuntu1/setup.py	2014-12-09 11:40:14.000000000 +0000
@@ -21,7 +21,7 @@ setup(name='ftp-cloudfs',
       license='MIT',
       include_package_data=True,
       zip_safe=False,
-      install_requires=['pyftpdlib>=1.3.0', 'python-swiftclient>=1.6.0', 'python-daemon>=1.5.5', 'python-memcached'],
+      install_requires=['pyftpdlib>=1.3.0', 'python-swiftclient>=2.1.0', 'python-daemon>=1.5.5', 'python-memcached'],
       scripts=['bin/ftpcloudfs'],
       packages = find_packages(exclude=['tests',]),
       tests_require = ["nose"],
diff -pruN 0.25.2+20140217+git2a90c1a2eb-1/tests/README 0.35-0ubuntu1/tests/README
--- 0.25.2+20140217+git2a90c1a2eb-1/tests/README	2014-02-16 18:10:17.000000000 +0000
+++ 0.35-0ubuntu1/tests/README	1970-01-01 00:00:00.000000000 +0000
@@ -1,10 +0,0 @@
-To run the tests you'll need access to an OpenStack Object Storage server.
-
-Set these environment variables before running the tests
-
-  export OS_API_USER='user'
-  export OS_API_KEY='key'
-  export OS_AUTH_URL='https://url.of.auth.server/v1.0'
-
-Once your test env is correct, all tests must pass!
-
diff -pruN 0.25.2+20140217+git2a90c1a2eb-1/tests/test_fs.py 0.35-0ubuntu1/tests/test_fs.py
--- 0.25.2+20140217+git2a90c1a2eb-1/tests/test_fs.py	2014-02-16 18:10:17.000000000 +0000
+++ 0.35-0ubuntu1/tests/test_fs.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,638 +0,0 @@
-#!/usr/bin/python
-import unittest
-import os
-import sys
-from datetime import datetime
-from swiftclient import client
-from ftpcloudfs.fs import ObjectStorageFS, ListDirCache
-from ftpcloudfs.errors import IOSError
-
-#import logging
-#logging.getLogger().setLevel(logging.DEBUG)
-#logging.basicConfig(level=logging.DEBUG)
-
-class ObjectStorageFSTest(unittest.TestCase):
-    '''ObjectStorageFS Tests'''
-
-    def setUp(self):
-        if not hasattr(self, 'username'):
-            cls = self.__class__
-            if not all(['OS_API_KEY' in os.environ,
-                        'OS_API_USER' in os.environ,
-                        'OS_AUTH_URL' in os.environ,
-                        ]):
-                print "env OS_API_USER/OS_API_KEY/OS_AUTH_URL not found."
-                sys.exit(1)
-            cls.username = os.environ['OS_API_USER']
-            cls.api_key = os.environ['OS_API_KEY']
-            cls.auth_url = os.environ.get('OS_AUTH_URL')
-            cls.cnx = ObjectStorageFS(self.username, self.api_key, self.auth_url)
-            cls.conn = client.Connection(user=self.username, key=self.api_key, authurl=self.auth_url)
-        self.container = "ftpcloudfs_testing"
-        self.cnx.mkdir("/%s" % self.container)
-        self.cnx.chdir("/%s" % self.container)
-
-    def create_file(self, path, contents):
-        '''Create path with contents'''
-        fd = self.cnx.open(path, "wb")
-        fd.write(contents)
-        fd.close()
-
-    def read_file(self, path):
-        fd = self.cnx.open(path, "rb")
-        contents = ''
-        while True:
-            chunk = fd.read()
-            if not chunk:
-                break
-            contents += chunk
-        fd.close()
-        return contents
-
-    def test_mkdir_chdir_rmdir(self):
-        ''' mkdir/chdir/rmdir directory '''
-        directory = "/foobarrandom"
-        self.cnx.mkdir(directory)
-        self.cnx.chdir(directory)
-        self.assertEqual(self.cnx.getcwd(), directory)
-        self.assertEqual(self.cnx.listdir(directory), [])
-        self.cnx.rmdir(directory)
-
-    def test_mkdir_chdir_mkdir_rmdir_subdir(self):
-        ''' mkdir/chdir/rmdir sub directory '''
-        directory = "/foobarrandom"
-        self.cnx.mkdir(directory)
-        self.cnx.chdir(directory)
-        subdirectory = "potato"
-        subdirpath = directory + "/" + subdirectory
-        self.cnx.mkdir(subdirectory)
-        # Can't delete a directory with stuff in
-        self.assertRaises(EnvironmentError, self.cnx.rmdir, directory)
-        self.cnx.chdir(subdirectory)
-        self.cnx.chdir("..")
-        self.assertEqual(self.cnx.getcwd(), directory)
-        self.cnx.rmdir(subdirectory)
-        self.cnx.chdir("..")
-        self.cnx.rmdir(directory)
-
-    def test_write_open_delete(self):
-        ''' write/open/delete file '''
-        content_string = "Hello Moto"
-        self.create_file("testfile.txt", content_string)
-        self.assertEquals(self.cnx.getsize("testfile.txt"), len(content_string))
-        contents = self.read_file("testfile.txt")
-        self.assertEqual(contents, content_string)
-        self.cnx.remove("testfile.txt")
-
-    def test_write_open_delete_subdir(self):
-        ''' write/open/delete file in a subdirectory'''
-        self.cnx.mkdir("potato")
-        self.cnx.chdir("potato")
-        content_string = "Hello Moto"
-        self.create_file("testfile.txt", content_string)
-        self.assertEquals(self.cnx.getsize("testfile.txt"), len(content_string))
-        content = self.read_file("/%s/potato/testfile.txt" % self.container)
-        self.assertEqual(content, content_string)
-        self.cnx.remove("testfile.txt")
-        self.cnx.chdir("..")
-        self.cnx.rmdir("potato")
-
-    def test_write_to_slash(self):
-        ''' write to slash should not be permitted '''
-        self.cnx.chdir("/")
-        content_string = "Hello Moto"
-        self.assertRaises(EnvironmentError, self.create_file, "testfile.txt", content_string)
-
-    def test_chdir_to_a_file(self):
-        ''' chdir to a file '''
-        self.create_file("testfile.txt", "Hello Moto")
-        self.assertRaises(EnvironmentError, self.cnx.chdir, "/%s/testfile.txt" % self.container)
-        self.cnx.remove("testfile.txt")
-
-    def test_chdir_to_slash(self):
-        ''' chdir to slash '''
-        self.cnx.chdir("/")
-
-    def test_chdir_to_nonexistent_container(self):
-        ''' chdir to non existent container'''
-        self.assertRaises(EnvironmentError, self.cnx.chdir, "/i_dont_exist")
-
-    def test_chdir_to_nonexistent_directory(self):
-        ''' chdir to nonexistent directory'''
-        self.assertRaises(EnvironmentError, self.cnx.chdir, "i_dont_exist")
-        self.assertRaises(EnvironmentError, self.cnx.chdir, "/%s/i_dont_exist" % self.container)
-
-    def test_listdir_root(self):
-        ''' list root directory '''
-        self.cnx.chdir("/")
-        dt = abs(datetime.utcfromtimestamp(self.cnx.getmtime("/")) - datetime.utcnow())
-        self.assertTrue(dt.seconds < 60)
-        ls = self.cnx.listdir(".")
-        self.assertTrue(self.container in ls)
-        dt = abs(datetime.utcfromtimestamp(self.cnx.getmtime(self.container)) - datetime.utcnow())
-        self.assertTrue(dt.seconds < 60)
-        self.assertTrue('potato' not in ls)
-        self.cnx.mkdir("potato")
-        ls = self.cnx.listdir(".")
-        self.assertTrue(self.container in ls)
-        self.assertTrue('potato' in ls)
-        self.cnx.rmdir("potato")
-
-    def test_listdir(self):
-        ''' list directory '''
-        content_string = "Hello Moto"
-        self.create_file("testfile.txt", content_string)
-        dt = abs(datetime.utcfromtimestamp(self.cnx.getmtime("testfile.txt")) - datetime.utcnow())
-        self.assertTrue(dt.seconds < 60)
-        self.assertEqual(self.cnx.listdir("."), ["testfile.txt"])
-        self.cnx.remove("testfile.txt")
-
-    def test_listdir_subdir(self):
-        ''' list a sub directory'''
-        content_string = "Hello Moto"
-        self.create_file("1.txt", content_string)
-        self.create_file("2.txt", content_string)
-        self.cnx.mkdir("potato")
-        self.create_file("potato/3.txt", content_string)
-        self.create_file("potato/4.txt", content_string)
-        self.assertEqual(self.cnx.listdir("."), ["1.txt", "2.txt", "potato"])
-        self.cnx.chdir("potato")
-        self.assertEqual(self.cnx.listdir("."), ["3.txt", "4.txt"])
-        self.cnx.remove("3.txt")
-        self.cnx.remove("4.txt")
-        self.assertEqual(self.cnx.listdir("."), [])
-        self.cnx.chdir("..")
-        self.cnx.remove("1.txt")
-        self.cnx.remove("2.txt")
-        self.assertEqual(self.cnx.listdir("."), ["potato"])
-        dt = abs(datetime.utcfromtimestamp(self.cnx.getmtime("potato")) - datetime.utcnow())
-        self.assertTrue(dt.seconds < 60)
-        self.cnx.rmdir("potato")
-        self.assertEqual(self.cnx.listdir("."), [])
-
-    def test_rename_file(self):
-        '''rename a file'''
-        content_string = "Hello Moto" * 100
-        self.create_file("testfile.txt", content_string)
-        self.assertEquals(self.cnx.getsize("testfile.txt"), len(content_string))
-        self.assertRaises(EnvironmentError, self.cnx.getsize, "testfile2.txt")
-        self.cnx.rename("testfile.txt", "testfile2.txt")
-        self.assertEquals(self.cnx.getsize("testfile2.txt"), len(content_string))
-        self.assertRaises(EnvironmentError, self.cnx.getsize, "testfile.txt")
-        self.cnx.remove("testfile2.txt")
-
-    def test_rename_file_into_subdir1(self):
-        '''rename a file into a subdirectory 1'''
-        content_string = "Hello Moto"
-        self.create_file("testfile.txt", content_string)
-        self.cnx.mkdir("potato")
-        self.assertEquals(self.cnx.getsize("testfile.txt"), len(content_string))
-        self.assertRaises(EnvironmentError, self.cnx.getsize, "potato/testfile3.txt")
-        self.cnx.rename("testfile.txt", "potato/testfile3.txt")
-        self.assertEquals(self.cnx.getsize("potato/testfile3.txt"), len(content_string))
-        self.assertRaises(EnvironmentError, self.cnx.getsize, "testfile.txt")
-        self.cnx.remove("potato/testfile3.txt")
-        self.cnx.rmdir("potato")
-
-    def test_rename_file_into_subdir2(self):
-        '''rename a file into a subdirectory without specifying dest leaf'''
-        content_string = "Hello Moto"
-        self.create_file("testfile.txt", content_string)
-        self.cnx.mkdir("potato")
-        self.assertEquals(self.cnx.getsize("testfile.txt"), len(content_string))
-        self.assertRaises(EnvironmentError, self.cnx.getsize, "potato/testfile.txt")
-        self.cnx.rename("testfile.txt", "potato")
-        self.assertEquals(self.cnx.getsize("potato/testfile.txt"), len(content_string))
-        self.assertRaises(EnvironmentError, self.cnx.getsize, "testfile.txt")
-        self.cnx.remove("potato/testfile.txt")
-        self.cnx.rmdir("potato")
-
-    def test_rename_file_into_root(self):
-        '''rename a file into the root (not permitted)'''
-        content_string = "Hello Moto"
-        self.create_file("testfile.txt", content_string)
-        self.assertRaises(EnvironmentError, self.cnx.rename, "testfile.txt", "/testfile.txt")
-        self.cnx.remove("testfile.txt")
-
-    def test_rename_directory_into_file(self):
-        '''rename a directory into a file - shouldn't work'''
-        content_string = "Hello Moto"
-        self.create_file("testfile.txt", content_string)
-        self.assertRaises(EnvironmentError, self.cnx.rename, "/%s" % self.container, "testfile.txt")
-        self.cnx.remove("testfile.txt")
-
-    def test_rename_directory_into_directory(self):
-        '''rename a directory into a directory'''
-        self.cnx.mkdir("potato")
-        self.assertEquals(self.cnx.listdir("potato"), [])
-        self.cnx.rename("potato", "potato2")
-        self.assertEquals(self.cnx.listdir("potato2"), [])
-        self.cnx.rmdir("potato2")
-
-    def test_rename_directory_into_existing_directory(self):
-        '''rename a directory into an existing directory'''
-        self.cnx.mkdir("potato")
-        self.cnx.mkdir("potato2")
-        self.assertEquals(self.cnx.listdir("potato"), [])
-        self.assertEquals(self.cnx.listdir("potato2"), [])
-        self.cnx.rename("potato", "potato2")
-        self.assertEquals(self.cnx.listdir("potato2"), ["potato"])
-        self.assertEquals(self.cnx.listdir("potato2/potato"), [])
-        self.cnx.rmdir("potato2/potato")
-        self.cnx.rmdir("potato2")
-
-    def test_rename_directory_into_self(self):
-        '''rename a directory into itself'''
-        self.cnx.mkdir("potato")
-        self.assertEquals(self.cnx.listdir("potato"), [])
-        self.cnx.rename("potato", "/%s" % self.container)
-        self.assertEquals(self.cnx.listdir("potato"), [])
-        self.cnx.rename("potato", "/%s/potato" % self.container)
-        self.assertEquals(self.cnx.listdir("potato"), [])
-        self.cnx.rename("potato", "potato")
-        self.assertEquals(self.cnx.listdir("potato"), [])
-        self.cnx.rename("/%s/potato" % self.container, ".")
-        self.assertEquals(self.cnx.listdir("potato"), [])
-        self.cnx.rmdir("potato")
-
-    def test_rename_full_directory(self):
-        '''rename a non-empty directory - shouldn't work'''
-        self.cnx.mkdir("potato")
-        self.create_file("potato/something.txt", "p")
-        try:
-            self.assertEquals(self.cnx.listdir("potato"), ["something.txt"])
-            self.assertRaises(EnvironmentError, self.cnx.rename, "potato", "potato2")
-        finally:
-            self.cnx.remove("potato/something.txt")
-            self.cnx.rmdir("potato")
-
-    def test_rename_container(self):
-        '''rename an empty container'''
-        self.cnx.mkdir("/potato")
-        self.assertEquals(self.cnx.listdir("/potato"), [])
-        self.assertRaises(EnvironmentError, self.cnx.listdir, "/potato2")
-        self.cnx.rename("/potato", "/potato2")
-        self.assertRaises(EnvironmentError, self.cnx.listdir, "/potato")
-        self.assertEquals(self.cnx.listdir("/potato2"), [])
-        self.cnx.rmdir("/potato2")
-
-    def test_rename_full_container(self):
-        '''rename a full container'''
-        self.cnx.mkdir("/potato")
-        self.create_file("/potato/test.txt", "onion")
-        self.assertEquals(self.cnx.listdir("/potato"), ["test.txt"])
-        self.assertRaises(EnvironmentError, self.cnx.rename, "/potato", "/potato2")
-        self.cnx.remove("/potato/test.txt")
-        self.cnx.rmdir("/potato")
-
-    def test_unicode_file(self):
-        '''Test unicode file creation'''
-        # File names use a utf-8 interface
-        file_name = u"Smiley\u263a.txt".encode("utf-8")
-        self.create_file(file_name, "Hello Moto")
-        self.assertEqual(self.cnx.listdir("."), [unicode(file_name, "utf-8")])
-        self.cnx.remove(file_name)
-
-    def test_unicode_directory(self):
-        '''Test unicode directory creation'''
-        # File names use a utf-8 interface
-        dir_name = u"Smiley\u263aDir".encode("utf-8")
-        self.cnx.mkdir(dir_name)
-        self.assertEqual(self.cnx.listdir("."), [unicode(dir_name, "utf-8")])
-        self.cnx.rmdir(dir_name)
-
-    def test_mkdir_container_unicode(self):
-        ''' mkdir/chdir/rmdir unicode container '''
-        directory = u"/Smiley\u263aContainer".encode("utf-8")
-        self.cnx.mkdir(directory)
-        self.cnx.chdir(directory)
-        self.cnx.rmdir(directory)
-
-    def test_fakedir(self):
-        '''Make some fake directories and test'''
-
-        objs  = [ "test1.txt", "potato/test2.txt", "potato/sausage/test3.txt", "potato/sausage/test4.txt", ]
-        for obj in objs:
-            self.conn.put_object(self.container, obj, content_type="text/plain", contents="Hello Moto")
-
-        self.assertEqual(self.cnx.listdir("."), ["potato", "test1.txt"])
-        self.assertEqual(self.cnx.listdir("potato"), ["sausage","test2.txt"])
-        self.assertEqual(self.cnx.listdir("potato/sausage"), ["test3.txt", "test4.txt"])
-
-        self.cnx.chdir("potato")
-
-        self.assertEqual(self.cnx.listdir("."), ["sausage","test2.txt"])
-        self.assertEqual(self.cnx.listdir("sausage"), ["test3.txt", "test4.txt"])
-
-        self.cnx.chdir("sausage")
-
-        self.assertEqual(self.cnx.listdir("."), ["test3.txt", "test4.txt"])
-
-        self.cnx.chdir("../..")
-
-        for obj in objs:
-            self.conn.delete_object(self.container, obj)
-
-        self.assertEqual(self.cnx.listdir("."), [])
-
-    def test_md5(self):
-        self.conn.put_object(self.container, "test1.txt", content_type="text/plain", contents="Hello Moto")
-        self.assertEquals(self.cnx.md5("test1.txt"),"0d933ae488fd55cc6bdeafffbaabf0c4")
-        self.cnx.remove("test1.txt")
-        self.assertRaises(EnvironmentError, self.cnx.md5, "/")
-        self.assertRaises(EnvironmentError, self.cnx.md5, "/%s" % self.container)
-        self.cnx.mkdir("/%s/sausage" % self.container)
-        self.assertRaises(EnvironmentError, self.cnx.md5, "/%s/sausage" % self.container)
-        self.cnx.rmdir("/%s/sausage" % self.container)
-
-    def test_listdir_manifest(self):
-        ''' list directory including a manifest file '''
-        content_string = "0" * 1024
-        for i in range(1, 5):
-            self.create_file("testfile.part/%d" % i, content_string)
-        self.conn.put_object(self.container, "testfile", contents=None, headers={ "x-object-manifest": '%s/testfile.part' % self.container })
-        self.assertEqual(self.cnx.listdir("."), ["testfile", "testfile.part"])
-        self.assertEqual(self.cnx.getsize("testfile"), 4096)
-        self.cnx.remove("testfile")
-        for i in range(1, 5):
-            self.cnx.remove("testfile.part/%d" % i)
-
-    def test_seek_set_resume(self):
-        ''' seek/resume functionality (seek_set) '''
-        content_string = "This is a chunk of data"*1024
-        self.create_file("testfile.txt", content_string)
-        self.assertEquals(self.cnx.getsize("testfile.txt"), len(content_string))
-
-        fd = self.cnx.open("testfile.txt", "rb")
-        contents = fd.read(1024)
-        fd.close()
-
-        fd = self.cnx.open("testfile.txt", "rb")
-        fd.seek(1024)
-        contents += fd.read(512)
-        fd.close()
-
-        fd = self.cnx.open("testfile.txt", "rb")
-        fd.seek(1024+512)
-        contents += fd.read()
-        fd.close()
-
-        self.assertEqual(contents, content_string)
-        self.cnx.remove("testfile.txt")
-
-    def test_seek_end_resume(self):
-        ''' seek/resume functionality (seek_end) '''
-        content_string = "This is another chunk of data"*1024
-        self.create_file("testfile.txt", content_string)
-        self.assertEquals(self.cnx.getsize("testfile.txt"), len(content_string))
-
-        fd = self.cnx.open("testfile.txt", "rb")
-        contents = fd.read(len(content_string)-1024)
-        fd.close()
-
-        fd = self.cnx.open("testfile.txt", "rb")
-        fd.seek(1024, 2)
-        contents += fd.read()
-        fd.close()
-
-        self.assertEqual(contents, content_string)
-        self.cnx.remove("testfile.txt")
-
-    def test_seek_cur_resume(self):
-        ''' seek/resume functionality (seek_cur) '''
-        content_string = "This is another chunk of data"*1024
-        self.create_file("testfile.txt", content_string)
-        self.assertEquals(self.cnx.getsize("testfile.txt"), len(content_string))
-
-        fd = self.cnx.open("testfile.txt", "rb")
-        contents = fd.read(len(content_string)-1024)
-        fd.close()
-
-        fd = self.cnx.open("testfile.txt", "rb")
-        fd.seek(1024)
-        fd.read(512)
-        fd.seek(len(content_string)-1024-512-1024, 1)
-        contents += fd.read()
-        fd.close()
-
-        self.assertEqual(contents, content_string)
-        self.cnx.remove("testfile.txt")
-
-    def test_seek_invalid_offset(self):
-        ''' seek functionality, invalid offset  '''
-        content_string = "0"*1024
-        self.create_file("testfile.txt", content_string)
-        self.assertEquals(self.cnx.getsize("testfile.txt"), len(content_string))
-
-        fd = self.cnx.open("testfile.txt", "rb")
-        self.assertRaises(IOSError, fd.seek, 1025)
-        fd.close()
-
-        fd = self.cnx.open("testfile.txt", "rb")
-        self.assertRaises(IOSError, fd.seek, -1)
-        fd.close()
-
-        fd = self.cnx.open("testfile.txt", "rb")
-        self.assertRaises(IOSError, fd.seek, -1, 2)
-        fd.close()
-
-        fd = self.cnx.open("testfile.txt", "rb")
-        self.assertRaises(IOSError, fd.seek, 1025, 2)
-        fd.close()
-
-        fd = self.cnx.open("testfile.txt", "rb")
-        fd.read(512)
-        self.assertRaises(IOSError, fd.seek, 513, 1)
-        self.assertRaises(IOSError, fd.seek, -513, 1)
-        fd.close()
-
-        self.cnx.remove("testfile.txt")
-
-    def test_large_file_support(self):
-        ''' auto-split of large files '''
-        size = 1024**2
-        part_size = 64*1024
-        fd = self.cnx.open("bigfile.txt", "wb")
-        fd.split_size = part_size
-        content = ''
-        for part in xrange(size/4096):
-            content += chr(part)*4096
-            fd.write(chr(part)*4096)
-        fd.close()
-        self.assertEqual(self.cnx.listdir("."), ["bigfile.txt", "bigfile.txt.part"])
-        self.assertEqual(self.cnx.getsize("bigfile.txt"), size)
-        self.assertEqual(len(self.cnx.listdir("bigfile.txt.part/")), size/part_size)
-        self.assertEqual(self.cnx.getsize("bigfile.txt.part/000000"), part_size)
-        stored_content = self.read_file("/%s/bigfile.txt" % self.container)
-        self.assertEqual(stored_content, content)
-        self.cnx.remove("bigfile.txt")
-        for i in range(size/part_size):
-            self.cnx.remove("bigfile.txt.part/%.6d" % i)
-
-    def test_large_file_support_big_chunk(self):
-        ''' auto-split of large files, writing a single big chunk '''
-        size = 1024**2
-        part_size = 64*1024
-        fd = self.cnx.open("bigfile.txt", "wb")
-        fd.split_size = part_size
-        fd.write('0'*size)
-        fd.close()
-        self.assertEqual(self.cnx.listdir("."), ["bigfile.txt", "bigfile.txt.part"])
-        self.assertEqual(self.cnx.getsize("bigfile.txt"), size)
-        self.assertEqual(len(self.cnx.listdir("bigfile.txt.part/")), size/part_size)
-        self.assertEqual(self.cnx.getsize("bigfile.txt.part/000000"), part_size)
-        self.cnx.remove("bigfile.txt")
-        for i in xrange(size/part_size):
-            self.cnx.remove("bigfile.txt.part/%.6d" % i)
-
-    def test_large_file_support_content(self):
-        ''' auto-split of large files, remainder in the last part '''
-        size = 1024**2
-        part_size = 64*1000 # size % part_size != 0
-        content = ''
-        fd = self.cnx.open("bigfile.txt", "wb")
-        fd.split_size = part_size
-        for part in xrange(size/4096):
-            content += chr(part)*4096
-            fd.write(chr(part)*4096)
-        fd.close()
-        stored_content = self.read_file("/%s/bigfile.txt" % self.container)
-        self.assertEqual(len(stored_content), len(content))
-        self.assertEqual(stored_content, content)
-        self.cnx.remove("bigfile.txt")
-        for i in xrange(1+(size/part_size)):
-            self.cnx.remove("bigfile.txt.part/%.6d" % i)
-
-    def tearDown(self):
-        # Delete everything from the container using the API
-        _, fails = self.conn.get_container(self.container)
-        for obj in fails:
-            self.conn.delete_object(self.container, obj["name"])
-        self.cnx.rmdir("/%s" % self.container)
-        self.assertEquals(fails, [], "The test failed to clean up after itself leaving these objects: %r" % fails)
-
-
-class MockupConnection(object):
-    '''Mockup object to simulate a CF connection.'''
-    def __init__(self, num_objects, objects):
-        self.num_objects = num_objects
-        self.objects = objects
-
-    @staticmethod
-    def gen_object(name):
-        return dict(bytes=1024, content_type='text/plain',
-                    hash='c644eacf6e9c21c7d2cca3ce8bb0ec13',
-                    last_modified='2012-06-20T00:00:00.000000',
-                    name=name)
-
-    @staticmethod
-    def gen_subdir(name):
-        return dict(subdir=name)
-
-    def list_containers_info(self):
-        return [dict(count=self.num_objects, bytes=1024*self.num_objects, name='container'),]
-
-    def get_account(self):
-        return {}, [{ "name": "container", "count": self.num_objects, "bytes": self.num_objects*1024 },]
-
-    def get_container(self, container, prefix=None, delimiter=None, marker=None, limit=10000):
-        if container != 'container':
-            raise client.ClientException("Not found", http_status=404)
-
-        # test provided objects
-        if self.objects:
-            index = 0
-            if marker:
-                while True:
-                    name = self.objects[index].get('name', self.objects[index].get('subdir'))
-                    if marker == name.rstrip("/"):
-                        index += 1
-                        break
-                    index += 1
-                    if index == self.num_objects:
-                        # marker not found, so it's ignored
-                        index = 0
-                        break
-            return {}, self.objects[index:index+10000]
-
-        # generated
-        start = 0
-        if marker:
-            while start <= self.num_objects:
-                if marker == 'object%s.txt' % start:
-                    break
-                start += 1
-
-        end = self.num_objects-start
-        if end == 0:
-            # marker not found, so it's ignored (behaviour in OpenStack
-            # Object Storage)
-            start = 0
-            end = self.num_objects
-        if end > limit:
-            end = limit
-
-        return {}, [self.gen_object('object%s.txt' % i) for i in xrange(start, start+end)]
-
-class MockupOSFS(object):
-    '''Mockup object to simulate a CFFS.'''
-    memcache_hosts = None
-    auth_url = 'https://auth.service.fake/v1'
-    username = 'user'
-
-    def __init__(self, num_objects, objects=None):
-        if objects and len(objects) != num_objects:
-            raise ValueError("objects provided but num_objects doesn't match")
-
-        self.num_objects = num_objects
-        self.objects = objects
-        self.conn = MockupConnection(num_objects, objects)
-
-    def _container_exists(self, container):
-        if container != 'container':
-            raise client.ClientException("Not found", http_status=404)
-
-class ListDirTest(unittest.TestCase):
-    '''
-    ObjectStorageFS cache Tests.
-
-    These tests use the Mockup* objects because some of the tests would require
-    creating/deleting too many objects to run the test over the real storage.
-    '''
-
-    def test_listdir(self):
-        """Test listdir, less than 10000 (limit) objects"""
-        lc = ListDirCache(MockupOSFS(100))
-
-        ld = lc.listdir('/')
-        self.assertEqual(len(ld), 1)
-        self.assertEqual(ld, ['container',])
-
-        ld = lc.listdir('/container')
-        self.assertEqual(len(ld), 100)
-        self.assertEqual(sorted(ld), sorted(['object%s.txt' % i for i in xrange(100)]))
-
-    def test_listdir_marker(self):
-        """Test listdir, more than 10000 (limit) objects"""
-        lc = ListDirCache(MockupOSFS(10100))
-
-        ld = lc.listdir('/container')
-        self.assertEqual(len(ld), 10100)
-        self.assertEqual(sorted(ld), sorted(['object%s.txt' % i for i in xrange(10100)]))
-
-    def test_listdir_marker_is_subdir(self):
-        """Test listdir, more than 10000 (limit) objects, marker will be a subdir"""
-
-        objects = [MockupConnection.gen_object("object%s.txt" % i) for i in xrange(9999)] + \
-                  [MockupConnection.gen_subdir("00dir_name/")] + \
-                  [MockupConnection.gen_object("object%s.txt" % i) for i in xrange(9999, 10099)]
-
-        lc = ListDirCache(MockupOSFS(10100, objects))
-
-        ld = sorted(lc.listdir('/container'))
-        self.assertEqual(len(ld), 10100)
-        self.assertEqual(ld[0], '00dir_name')
-        self.assertEqual(ld[1:], sorted(['object%s.txt' % i for i in xrange(10099)]))
-
-if __name__ == '__main__':
-    unittest.main()
diff -pruN 0.25.2+20140217+git2a90c1a2eb-1/tests/test_ftpcloudfs.py 0.35-0ubuntu1/tests/test_ftpcloudfs.py
--- 0.25.2+20140217+git2a90c1a2eb-1/tests/test_ftpcloudfs.py	2014-02-16 18:10:17.000000000 +0000
+++ 0.35-0ubuntu1/tests/test_ftpcloudfs.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,368 +0,0 @@
-#!/usr/bin/python
-import unittest
-import os
-import sys
-import ftplib
-import StringIO
-from datetime import datetime
-from time import sleep
-from swiftclient import client
-
-from ftpcloudfs.constants import default_address, default_port
-from ftpcloudfs.fs import ListDirCache
-
-#import logging
-#logging.getLogger().setLevel(logging.DEBUG)
-#logging.basicConfig(level=logging.DEBUG)
-
-class FtpObjectStorageFSTest(unittest.TestCase):
-    ''' FTP Cloud FS main test '''
-
-    def setUp(self):
-        if not all(['OS_API_KEY' in os.environ,
-                    'OS_API_USER' in os.environ,
-                    'OS_AUTH_URL' in os.environ,
-                    ]):
-            print "env OS_API_USER/OS_API_KEY/OS_AUTH_URL not found."
-            sys.exit(1)
-
-        self.username = os.environ['OS_API_USER']
-        self.api_key = os.environ['OS_API_KEY']
-        self.auth_url = os.environ.get('OS_AUTH_URL')
-        self.cnx = ftplib.FTP()
-        self.cnx.host = default_address
-        self.cnx.port = default_port
-        self.cnx.connect()
-        self.cnx.login(self.username, self.api_key)
-        self.container = "ftpcloudfs_testing"
-        self.cnx.mkd("/%s" % self.container)
-        self.cnx.cwd("/%s" % self.container)
-        self.conn = client.Connection(user=self.username, key=self.api_key, authurl=self.auth_url)
-
-    def create_file(self, path, contents):
-        '''Create path with contents'''
-        self.cnx.storbinary("STOR %s" % path, StringIO.StringIO(contents))
-
-    def test_mkdir_chdir_rmdir(self):
-        ''' mkdir/chdir/rmdir directory '''
-        directory = "/foobarrandom"
-        self.assertEqual(self.cnx.mkd(directory), directory)
-        self.assertEqual(self.cnx.cwd(directory),
-                         '250 "%s" is the current directory.' % (directory))
-        self.assertEqual(self.cnx.rmd(directory), "250 Directory removed.")
-
-    def test_mkdir_chdir_mkdir_rmdir_subdir(self):
-        ''' mkdir/chdir/rmdir sub directory '''
-        directory = "/foobarrandom"
-        self.assertEqual(self.cnx.mkd(directory), directory)
-        self.assertEqual(self.cnx.cwd(directory),
-                         '250 "%s" is the current directory.' % (directory))
-        subdirectory = "potato"
-        subdirpath = directory + "/" + subdirectory
-        self.assertEqual(self.cnx.mkd(subdirectory), subdirpath)
-        # Can't delete a directory with stuff in
-        self.assertRaises(ftplib.error_perm, self.cnx.rmd, directory)
-        self.assertEqual(self.cnx.cwd(subdirectory),
-                         '250 "%s" is the current directory.' % (subdirpath))
-        self.assertEqual(self.cnx.cwd(".."),
-                         '250 "%s" is the current directory.' % (directory))
-        self.assertEqual(self.cnx.rmd(subdirectory), "250 Directory removed.")
-        self.assertEqual(self.cnx.cwd(".."),
-                         '250 "/" is the current directory.')
-        self.assertEqual(self.cnx.rmd(directory), "250 Directory removed.")
-
-    def test_write_open_delete(self):
-        ''' write/open/delete file '''
-        content_string = "Hello Moto"
-        self.create_file("testfile.txt", content_string)
-        self.assertEquals(self.cnx.size("testfile.txt"), len(content_string))
-        store = StringIO.StringIO()
-        self.cnx.retrbinary("RETR testfile.txt", store.write)
-        self.assertEqual(store.getvalue(), content_string)
-        self.assertEqual(self.cnx.delete("testfile.txt"), "250 File removed.")
-        store.close()
-
-    def test_write_open_delete_subdir(self):
-        ''' write/open/delete file in a subdirectory'''
-        self.cnx.mkd("potato")
-        self.cnx.cwd("potato")
-        content_string = "Hello Moto"
-        self.create_file("testfile.txt", content_string)
-        self.assertEquals(self.cnx.size("testfile.txt"), len(content_string))
-        store = StringIO.StringIO()
-        self.cnx.retrbinary("RETR /%s/potato/testfile.txt" % self.container, store.write)
-        self.assertEqual(store.getvalue(), content_string)
-        self.assertEqual(self.cnx.delete("testfile.txt"), "250 File removed.")
-        self.cnx.cwd("..")
-        self.cnx.rmd("potato")
-        store.close()
-
-    def test_write_to_slash(self):
-        ''' write to slash should not be permitted '''
-        self.cnx.cwd("/")
-        content_string = "Hello Moto"
-        self.assertRaises(ftplib.error_perm, self.create_file, "testfile.txt", content_string)
-
-    def test_chdir_to_a_file(self):
-        ''' chdir to a file '''
-        self.create_file("testfile.txt", "Hello Moto")
-        self.assertRaises(ftplib.error_perm, self.cnx.cwd, "/%s/testfile.txt" % self.container)
-        self.cnx.delete("testfile.txt")
-
-    def test_chdir_to_slash(self):
-        ''' chdir to slash '''
-        self.cnx.cwd("/")
-
-    def test_chdir_to_nonexistent_container(self):
-        ''' chdir to non existent container'''
-        self.assertRaises(ftplib.error_perm, self.cnx.cwd, "/i_dont_exist")
-
-    def test_chdir_to_nonexistent_directory(self):
-        ''' chdir to nonexistent directory'''
-        self.assertRaises(ftplib.error_perm, self.cnx.cwd, "i_dont_exist")
-        self.assertRaises(ftplib.error_perm, self.cnx.cwd, "/%s/i_dont_exist" % self.container)
-
-    def test_listdir_root(self):
-        ''' list root directory '''
-        self.cnx.cwd("/")
-        ls = self.cnx.nlst()
-        self.assertTrue(self.container in ls)
-        self.assertTrue('potato' not in ls)
-        self.cnx.mkd("potato")
-        ls = self.cnx.nlst()
-        self.assertTrue(self.container in ls)
-        self.assertTrue('potato' in ls)
-        self.cnx.rmd("potato")
-
-    def test_listdir(self):
-        ''' list directory '''
-        content_string = "Hello Moto"
-        self.create_file("testfile.txt", content_string)
-        self.assertEqual(self.cnx.nlst(), ["testfile.txt"])
-        lines = []
-        self.assertEquals(self.cnx.retrlines('LIST', callback=lines.append), '226 Transfer complete.')
-        self.assertEquals(len(lines), 1)
-        line = lines[0]
-        expected = "-rw-r--r--   1 "+self.username+" "+self.username+"       10 "+ datetime.utcnow().strftime("%b %d %H:")
-        self.assertTrue(line.startswith(expected), "line %r != expected %r" % (line, expected))
-        self.assertTrue(line.endswith(" testfile.txt"))
-        self.cnx.delete("testfile.txt")
-
-    def test_listdir_subdir(self):
-        ''' list a subdirectory '''
-        content_string = "Hello Moto"
-        self.create_file("1.txt", content_string)
-        self.create_file("2.txt", content_string)
-        self.cnx.mkd("potato")
-        self.create_file("potato/3.txt", content_string)
-        self.create_file("potato/4.txt", content_string)
-        self.assertEqual(self.cnx.nlst(), ["1.txt", "2.txt", "potato"])
-        self.cnx.cwd("potato")
-        self.assertEqual(self.cnx.nlst(), ["3.txt", "4.txt"])
-        self.cnx.delete("3.txt")
-        self.cnx.delete("4.txt")
-        self.assertEqual(self.cnx.nlst(), [])
-        self.cnx.cwd("..")
-        self.cnx.delete("1.txt")
-        self.cnx.delete("2.txt")
-        self.assertEqual(self.cnx.nlst(), ["potato"])
-        lines = []
-        self.assertEquals(self.cnx.retrlines('LIST', callback=lines.append), '226 Transfer complete.')
-        self.assertEquals(len(lines), 1)
-        line = lines[0]
-        expected = "drwxr-xr-x   1 "+self.username+" "+self.username+"        0 "+ datetime.utcnow().strftime("%b %d %H:")
-        self.assertTrue(line.startswith(expected), "line %r != expected %r" % (line, expected))
-        self.assertTrue(line.endswith(" potato"))
-        self.cnx.rmd("potato")
-        self.assertEqual(self.cnx.nlst(), [])
-
-    def test_rename_file(self):
-        '''rename a file'''
-        content_string = "Hello Moto" * 100
-        self.create_file("testfile.txt", content_string)
-        self.assertEquals(self.cnx.size("testfile.txt"), len(content_string))
-        self.assertRaises(ftplib.error_perm, self.cnx.size, "testfile2.txt")
-        self.cnx.rename("testfile.txt", "testfile2.txt")
-        self.assertEquals(self.cnx.size("testfile2.txt"), len(content_string))
-        self.assertRaises(ftplib.error_perm, self.cnx.size, "testfile.txt")
-        self.cnx.delete("testfile2.txt")
-
-    def test_rename_file_into_subdir1(self):
-        '''rename a file into a subdirectory, specifying the destination leaf'''
-        content_string = "Hello Moto"
-        self.create_file("testfile.txt", content_string)
-        self.cnx.mkd("potato")
-        self.assertEquals(self.cnx.size("testfile.txt"), len(content_string))
-        self.assertRaises(ftplib.error_perm, self.cnx.size, "potato/testfile3.txt")
-        self.cnx.rename("testfile.txt", "potato/testfile3.txt")
-        self.assertEquals(self.cnx.size("potato/testfile3.txt"), len(content_string))
-        self.assertRaises(ftplib.error_perm, self.cnx.size, "testfile.txt")
-        self.cnx.delete("potato/testfile3.txt")
-        self.cnx.rmd("potato")
-
-    def test_rename_file_into_subdir2(self):
-        '''rename a file into a subdirectory without specifying the destination leaf'''
-        content_string = "Hello Moto"
-        self.create_file("testfile.txt", content_string)
-        self.cnx.mkd("potato")
-        self.assertEquals(self.cnx.size("testfile.txt"), len(content_string))
-        self.assertRaises(ftplib.error_perm, self.cnx.size, "potato/testfile.txt")
-        self.cnx.rename("testfile.txt", "potato")
-        self.assertEquals(self.cnx.size("potato/testfile.txt"), len(content_string))
-        self.assertRaises(ftplib.error_perm, self.cnx.size, "testfile.txt")
-        self.cnx.delete("potato/testfile.txt")
-        self.cnx.rmd("potato")
-
-    def test_rename_file_into_root(self):
-        '''rename a file into the root - shouldn't work'''
-        content_string = "Hello Moto"
-        self.create_file("testfile.txt", content_string)
-        self.assertRaises(ftplib.error_perm, self.cnx.rename, "testfile.txt", "/testfile.txt")
-        self.cnx.delete("testfile.txt")
-
-    def test_rename_directory_into_file(self):
-        '''rename a directory into a file - shouldn't work'''
-        content_string = "Hello Moto"
-        self.create_file("testfile.txt", content_string)
-        self.assertRaises(ftplib.error_perm, self.cnx.rename, "/%s" % self.container, "testfile.txt")
-        self.cnx.delete("testfile.txt")
-
-    def test_rename_directory_into_directory(self):
-        '''rename a directory into a directory'''
-        self.cnx.mkd("potato")
-        self.assertEquals(self.cnx.nlst("potato"), [])
-        self.cnx.rename("potato", "potato2")
-        self.assertEquals(self.cnx.nlst("potato2"), [])
-        self.cnx.rmd("potato2")
-
-    def test_rename_directory_into_existing_directory(self):
-        '''rename a directory into an existing directory'''
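-        # renaming onto an existing directory moves the source inside it,
-        # keeping its own name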
-        self.cnx.mkd("potato")
-        self.cnx.mkd("potato2")
-        self.assertEquals(self.cnx.nlst("potato"), [])
-        self.assertEquals(self.cnx.nlst("potato2"), [])
-        self.cnx.rename("potato", "potato2")
-        self.assertEquals(self.cnx.nlst("potato2"), ["potato"])
-        self.assertEquals(self.cnx.nlst("potato2/potato"), [])
-        self.cnx.rmd("potato2/potato")
-        self.cnx.rmd("potato2")
-
-    def test_rename_directory_into_self(self):
-        '''rename a directory into itself'''
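-        # renaming a directory onto itself, in any of these forms, should
-        # succeed and leave the listing unchanged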
-        self.cnx.mkd("potato")
-        self.assertEquals(self.cnx.nlst("potato"), [])
-        self.cnx.rename("potato", "/%s" % self.container)
-        self.assertEquals(self.cnx.nlst("potato"), [])
-        self.cnx.rename("potato", "/%s/potato" % self.container)
-        self.assertEquals(self.cnx.nlst("potato"), [])
-        self.cnx.rename("potato", "potato")
-        self.assertEquals(self.cnx.nlst("potato"), [])
-        self.cnx.rename("/%s/potato" % self.container, ".")
-        self.assertEquals(self.cnx.nlst("potato"), [])
-        self.cnx.rmd("potato")
-
-    def test_rename_full_directory(self):
-        '''rename a non-empty directory - shouldn't work'''
-        self.cnx.mkd("potato")
-        self.create_file("potato/something.txt", "p")
-        try:
-            self.assertEquals(self.cnx.nlst("potato"), ["something.txt"])
-            self.assertRaises(ftplib.error_perm, self.cnx.rename, "potato", "potato2")
-        finally:
-            self.cnx.delete("potato/something.txt")
-            self.cnx.rmd("potato")
-
-    def test_rename_container(self):
-        '''rename an empty container'''
-        self.cnx.mkd("/potato")
-        self.assertEquals(self.cnx.nlst("/potato"), [])
-        self.assertRaises(ftplib.error_perm, self.cnx.nlst, "/potato2")
-        self.cnx.rename("/potato", "/potato2")
-        self.assertRaises(ftplib.error_perm, self.cnx.nlst, "/potato")
-        self.assertEquals(self.cnx.nlst("/potato2"), [])
-        self.cnx.rmd("/potato2")
-
-    def test_rename_full_container(self):
-        '''rename a non-empty container - shouldn't work'''
-        self.cnx.mkd("/potato")
-        self.create_file("/potato/test.txt", "onion")
-        self.assertEquals(self.cnx.nlst("/potato"), ["test.txt"])
-        self.assertRaises(ftplib.error_perm, self.cnx.rename, "/potato", "/potato2")
-        self.cnx.delete("/potato/test.txt")
-        self.cnx.rmd("/potato")
-
-    def test_unicode_file(self):
-        '''Test unicode file creation'''
-        # File names use a utf-8 interface
-        file_name = u"Smiley\u263a.txt".encode("utf-8")
-        self.create_file(file_name, "Hello Moto")
-        self.assertEqual(self.cnx.nlst(), [file_name])
-        self.cnx.delete(file_name)
-
-    def test_unicode_directory(self):
-        '''Test unicode directory creation'''
-        # File names use a utf-8 interface
-        dir_name = u"Smiley\u263aDir".encode("utf-8")
-        self.cnx.mkd(dir_name)
-        self.assertEqual(self.cnx.nlst(), [dir_name])
-        self.cnx.rmd(dir_name)
-
-    def test_mkdir_container_unicode(self):
-        ''' mkdir/chdir/rmdir a container with a unicode name '''
-        directory = u"/Smiley\u263aContainer".encode("utf-8")
-        self.assertEqual(self.cnx.mkd(directory), directory)
-        self.assertEqual(self.cnx.cwd(directory),
-                         '250 "%s" is the current directory.' % (directory))
-        self.assertEqual(self.cnx.rmd(directory), "250 Directory removed.")
-
-    def test_fakedir(self):
-        '''Make some fake directories and test'''
-
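-        # upload objects directly via the Swift API so the intermediate
-        # directories exist only implicitly in the object paths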
-        objs = ["test1.txt", "potato/test2.txt", "potato/sausage/test3.txt",
-                "potato/sausage/test4.txt",]
-        for obj in objs:
-            self.conn.put_object(self.container, obj, content_type="text/plain", contents="Hello Moto")
-
-        self.assertEqual(self.cnx.nlst(), ["potato", "test1.txt"])
-        self.assertEqual(self.cnx.nlst("potato"), ["sausage","test2.txt"])
-        self.assertEqual(self.cnx.nlst("potato/sausage"), ["test3.txt", "test4.txt"])
-
-        self.cnx.cwd("potato")
-
-        self.assertEqual(self.cnx.nlst(), ["sausage","test2.txt"])
-        self.assertEqual(self.cnx.nlst("sausage"), ["test3.txt", "test4.txt"])
-
-        self.cnx.cwd("sausage")
-
-        self.assertEqual(self.cnx.nlst(), ["test3.txt", "test4.txt"])
-
-        self.cnx.cwd("../..")
-
-        for obj in objs:
-            self.conn.delete_object(self.container, obj)
-
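-        # directory listings are cached; wait for the cache entry to expire
-        # before checking that the listing is empty again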
-        sleep(ListDirCache.MAX_CACHE_TIME)
-
-        self.assertEqual(self.cnx.nlst(), [])
-
-    def test_md5(self):
-        ''' MD5 extension'''
-        self.create_file("testfile.txt", "Hello Moto")
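-        # the MD5 command should return the checksum of a file, and be
-        # rejected for containers and the root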
-        response = self.cnx.sendcmd("MD5 /%s/testfile.txt" % self.container)
-        self.cnx.delete("testfile.txt")
-        self.assertEqual(response, '251 "/%s/testfile.txt" 0D933AE488FD55CC6BDEAFFFBAABF0C4' % self.container)
-        self.assertRaises(ftplib.error_perm, self.cnx.sendcmd, "MD5 /%s" % self.container)
-        self.assertRaises(ftplib.error_perm, self.cnx.sendcmd, "MD5 /")
-
-    def tearDown(self):
-        # Delete everything from the container using the API
-        self.cnx.close()
-        _, fails = self.conn.get_container(self.container)
-        for obj in fails:
-            if "name" in obj:
-                self.conn.delete_object(self.container, obj["name"])
-        self.conn.delete_container(self.container)
-        self.assertEquals(fails, [], "The test failed to clean up after itself leaving these objects: %r" % fails)
-
-if __name__ == '__main__':
-    unittest.main()
