Black reformat, target-version py39
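
A reformat like this can be reproduced with "black --target-version py39 gdrivefs", or through
Black's Python API. The sketch below is illustrative only, assuming a recent Black release is
installed, the default line length (88), and that gdrivefs/ is the package root; it is not
necessarily the exact invocation used for this change.

    from pathlib import Path

    import black

    # Target only the py39 grammar, per the title of this change; all other
    # Mode options (line length, string normalization) are left at defaults.
    mode = black.Mode(target_versions={black.TargetVersion.PY39})

    for path in Path("gdrivefs").rglob("*.py"):
        source = path.read_text()
        formatted = black.format_str(source, mode=mode)
        if formatted != source:
            path.write_text(formatted)
            print("reformatted", path)
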
M gdrivefs/__init__.py +1 -1
@@ 1,1 1,1 @@ 
-__version__ = '0.14.9'
+__version__ = "0.14.9"

M gdrivefs/account_info.py +7 -6
@@ 8,10 8,12 @@ import logging
 class AccountInfo(LiveReaderBase):
     """Encapsulates our account info."""
 
-    __map = {'root_id': 'rootFolderId',
-             'largest_change_id': ('largestChangeId', int),
-             'quota_bytes_total': ('quotaBytesTotal', int),
-             'quota_bytes_used': ('quotaBytesUsed', int)}
+    __map = {
+        "root_id": "rootFolderId",
+        "largest_change_id": ("largestChangeId", int),
+        "quota_bytes_total": ("quotaBytesTotal", int),
+        "quota_bytes_used": ("quotaBytesUsed", int),
+    }
 
     def get_data(self):
         gd = get_gdrive()

@@ 20,7 22,7 @@ class AccountInfo(LiveReaderBase):
     def __getattr__(self, key):
         target = AccountInfo.__map[key]
         _type = None
-        
+
         if target.__class__ == tuple:
             (target, _type) = target
 

@@ 33,4 35,3 @@ class AccountInfo(LiveReaderBase):
     @property
     def keys(self):
         return list(AccountInfo.__map.keys())
-

M gdrivefs/auto_auth.py +21 -24
@@ 48,13 48,11 @@ class _WebserverMonitor:
         self.__server_state_e.clear()
 
     def stop(self):
-        assert \
-            self.__server_state_e is not None, \
-            "Thread doesn't appear to have ever been started."
+        assert (
+            self.__server_state_e is not None
+        ), "Thread doesn't appear to have ever been started."
 
-        assert \
-            self.__t.is_alive() is True, \
-            "Thread doesn't appear to be running."
+        assert self.__t.is_alive() is True, "Thread doesn't appear to be running."
 
         self.__server_state_e.clear()
         self.__s.shutdown()

@@ 95,39 93,39 @@ class _WebserverMonitor:
 
                 # It's not an authorization response. Bail with the same error
                 # the library would normally send for unhandled requests.
-                if 'code' not in arguments:
+                if "code" not in arguments:
                     self.send_error(
-                        501,
-                        "Unsupported method ({}): {}".format(
-                        self.command, hr.path))
+                        501, "Unsupported method ({}): {}".format(self.command, hr.path)
+                    )
 
                     return
 
-                authcode = arguments['code'][0]
+                authcode = arguments["code"][0]
                 _LOGGER.debug("Received authcode [{}]".format(authcode))
 
                 monitor._authcode = authcode
 
                 monitor._request_state_e.set()
 
-                self.send_response(200, message='OK')
+                self.send_response(200, message="OK")
 
-                self.send_header("Content-type", 'text/html')
+                self.send_header("Content-type", "text/html")
                 self.end_headers()
 
-                self.wfile.write(b"""\
+                self.wfile.write(
+                    b"""\
 <html>
 <head></head>
 <body>
 GDFS authorization recorded.
 </body>
 </html>
-""")
+"""
+                )
 
             def log_message(self, format, *args):
                 pass
 
-
         class Server(socketserver.TCPServer):
             def server_activate(self, *args, **kwargs):
                 r = socketserver.TCPServer.server_activate(self, *args, **kwargs)

@@ 139,7 137,7 @@ GDFS authorization recorded.
 
         # Our little webserver. (0) for the port will automatically assign it
         # to some unused port.
-        binding = ('localhost', 0)
+        binding = ("localhost", 0)
         self.__s = Server(binding, Handler)
 
         _LOGGER.debug("Created server.")

@@ 157,9 155,9 @@ GDFS authorization recorded.
 
     @property
     def port(self):
-        assert \
-            self._port is not None, \
-            "Thread hasn't been started or a port hasn't been assigned."
+        assert (
+            self._port is not None
+        ), "Thread hasn't been started or a port hasn't been assigned."
 
         return self._port
 

@@ 181,7 179,7 @@ class AutoAuth:
     def get_and_write_creds(self):
         _LOGGER.info("Requesting authorization.")
 
-        creds_filepath = gdrivefs.conf.Conf.get('auth_cache_filepath')
+        creds_filepath = gdrivefs.conf.Conf.get("auth_cache_filepath")
         wm = _WebserverMonitor(creds_filepath)
 
         # Start the webserver.

@@ 189,9 187,8 @@ class AutoAuth:
 
         # Open a browser window to request authorization.
 
-        redirect_uri = 'http://localhost:{}'.format(wm.port)
-        oa = gdrivefs.oauth_authorize.OauthAuthorize(
-                redirect_uri=redirect_uri)
+        redirect_uri = "http://localhost:{}".format(wm.port)
+        oa = gdrivefs.oauth_authorize.OauthAuthorize(redirect_uri=redirect_uri)
 
         url = oa.step1_get_auth_url()
         _LOGGER.debug("Opening browser: [{}]".format(url))

M gdrivefs/buffer_segments.py +105 -75
@@ 6,7 6,7 @@ import pprint
 
 
 class BufferSegments:
-    """Describe a series of strings that, when concatenated, represent the 
+    """Describe a series of strings that, when concatenated, represent the
     whole file. This is used to try and contain the amount of the data that has
     the be copied as updates are applied to the file.
     """

@@ 14,17 14,18 @@ class BufferSegments:
     __locker = Lock()
 
     def __init__(self, data, block_size):
-        # An array of 2-tuples: (offset, string). We should allow data to be 
-        # empty. Thus, we should allow a segment to be empty (useful, in 
+        # An array of 2-tuples: (offset, string). We should allow data to be
+        # empty. Thus, we should allow a segment to be empty (useful, in
         # general).
         self.__segments = [(0, data)]
 
         self.__block_size = block_size
 
     def __repr__(self):
-        return ("<BSEGS  SEGS= (%(segs)d) BLKSIZE= (%(block_size)d)>" % 
-                { 'segs': len(self.__segments), 
-                  'block_size': self.__block_size })
+        return "<BSEGS  SEGS= (%(segs)d) BLKSIZE= (%(block_size)d)>" % {
+            "segs": len(self.__segments),
+            "block_size": self.__block_size,
+        }
 
     def dump(self):
         pprint(self.__segments)

@@ 36,9 37,9 @@ class BufferSegments:
         while seg_index < len(self.__segments):
             seg_offset = self.__segments[seg_index][0]
 
-            # If the current segment starts after the point of insertion.        
+            # If the current segment starts after the point of insertion.
             if seg_offset > offset:
-                return (seg_index - 1)
+                return seg_index - 1
 
             # If the insertion point is at the beginning of this segment.
             elif seg_offset == offset:

@@ 46,18 47,18 @@ class BufferSegments:
 
             seg_index += 1
 
-        # If we get here, we never ran into a segment with an offset greater 
+        # If we get here, we never ran into a segment with an offset greater
         # that the insertion offset.
-        return (seg_index - 1)
+        return seg_index - 1
 
     def __split(self, seg_index, offset):
-        """Split the given segment at the given offset. Offset is relative to 
-        the particular segment (an offset of '0' refers to the beginning of the 
-        segment). At finish, seg_index will represent the segment containing 
-        the first half of the original data (and segment with index 
+        """Split the given segment at the given offset. Offset is relative to
+        the particular segment (an offset of '0' refers to the beginning of the
+        segment). At finish, seg_index will represent the segment containing
+        the first half of the original data (and segment with index
         (seg_index + 1) will contain the second).
         """
-    
+
         (seg_offset, seg_data) = self.__segments[seg_index]
 
         first_half = seg_data[0:offset]

@@ 65,19 66,20 @@ class BufferSegments:
         self.__segments.insert(seg_index, firsthalf_segment)
 
         second_half = seg_data[offset:]
-        if second_half == '':
-            raise IndexError("Can not use offset (%d) to split segment (%d) "
-                             "of length (%d)." % 
-                             (offset, seg_index, len(seg_data)))
-        
+        if second_half == "":
+            raise IndexError(
+                "Can not use offset (%d) to split segment (%d) "
+                "of length (%d)." % (offset, seg_index, len(seg_data))
+            )
+
         secondhalf_segment = (seg_offset + offset, second_half)
         self.__segments[seg_index + 1] = secondhalf_segment
 
         return (firsthalf_segment, secondhalf_segment)
 
     def apply_update(self, offset, data):
-        """Find the correct place to insert the data, splitting existing data 
-        segments into managable fragments ("segments"), overwriting a number of 
+        """Find the correct place to insert the data, splitting existing data
+        segments into managable fragments ("segments"), overwriting a number of
         bytes equal in size to the incoming data. If the incoming data will
         overflow the end of the list, grow the list.
         """

@@ 85,99 87,119 @@ class BufferSegments:
         with self.__locker:
             data_len = len(data)
 
-            if len(self.__segments) == 1 and self.__segments[0][1] == '':
+            if len(self.__segments) == 1 and self.__segments[0][1] == "":
                 self.__segments = []
                 simple_append = True
             else:
-                simple_append = (offset >= self.length)
+                simple_append = offset >= self.length
 
-            _logger.debug("Applying update of (%d) bytes at offset (%d). "
-                          "Current segment count is (%d). Total length is "
-                          "(%d). APPEND= [%s]",
-                          data_len, offset, len(self.__segments), self.length, 
-                          simple_append)
+            _logger.debug(
+                "Applying update of (%d) bytes at offset (%d). "
+                "Current segment count is (%d). Total length is "
+                "(%d). APPEND= [%s]",
+                data_len,
+                offset,
+                len(self.__segments),
+                self.length,
+                simple_append,
+            )
 
             if not simple_append:
                 seg_index = self.__find_segment(offset)
 
-                # Split the existing segment(s) rather than doing any 
-                # concatenation. Theoretically, the effort of writing to an 
+                # Split the existing segment(s) rather than doing any
+                # concatenation. Theoretically, the effort of writing to an
                 # existing file should shrink over time.
 
                 (seg_offset, seg_data) = self.__segments[seg_index]
                 # seg_len = len(seg_data)
-                
-                # If our data is to be written into the middle of the segment, 
-                # split the segment such that the unnecessary prefixing bytes are 
+
+                # If our data is to be written into the middle of the segment,
+                # split the segment such that the unnecessary prefixing bytes are
                 # moved to a new segment preceding the current.
                 if seg_offset < offset:
                     prefix_len = offset - seg_offset
-                    _logger.debug("Splitting-of PREFIX of segment (%d). Prefix "
-                                  "length is (%d). Segment offset is (%d) and "
-                                  "length is (%d).",
-                                  seg_index, prefix_len, seg_offset, 
-                                  len(seg_data))
+                    _logger.debug(
+                        "Splitting-of PREFIX of segment (%d). Prefix "
+                        "length is (%d). Segment offset is (%d) and "
+                        "length is (%d).",
+                        seg_index,
+                        prefix_len,
+                        seg_offset,
+                        len(seg_data),
+                    )
 
-                    (_, (seg_offset, seg_data)) = self.__split(seg_index, 
-                                                               prefix_len)
+                    (_, (seg_offset, seg_data)) = self.__split(seg_index, prefix_len)
 
                     # seg_len = prefix_len
                     seg_index += 1
 
-                # Now, apply the update. Collect the number of segments that will 
-                # be affected, and reduce to two (at most): the data that we're 
-                # applying, and the second part of the last affected one (if 
-                # applicable). If the incoming data exceeds the length of the 
+                # Now, apply the update. Collect the number of segments that will
+                # be affected, and reduce to two (at most): the data that we're
+                # applying, and the second part of the last affected one (if
+                # applicable). If the incoming data exceeds the length of the
                 # existing data, it is a trivial consideration.
 
                 stop_offset = offset + data_len
                 seg_stop = seg_index
                 while 1:
-                    # Since the insertion offset must be within the given data 
-                    # (otherwise it'd be an append, above), it looks like we're 
+                    # Since the insertion offset must be within the given data
+                    # (otherwise it'd be an append, above), it looks like we're
                     # inserting into the last segment.
                     if seg_stop >= len(self.__segments):
                         break
-                
+
                     # If our offset is within the current set of data, this is not
                     # going to be an append operation.
                     if self.__segments[seg_stop][0] >= stop_offset:
                         break
-                    
+
                     seg_stop += 1
 
                 seg_stop -= 1
 
-# TODO: Make sure that updates applied at the front of a segment are correct.
+                # TODO: Make sure that updates applied at the front of a segment are correct.
 
-                _logger.debug("Replacement interval is [%d, %d]. Current "
-                              "segments= (%d)",
-                              seg_index, seg_stop, len(self.__segments))
+                _logger.debug(
+                    "Replacement interval is [%d, %d]. Current " "segments= (%d)",
+                    seg_index,
+                    seg_stop,
+                    len(self.__segments),
+                )
 
                 # How much of the last segment that we touch will be affected?
-                (lastseg_offset, lastseg_data) = self.__segments[seg_stop] 
+                (lastseg_offset, lastseg_data) = self.__segments[seg_stop]
 
                 lastseg_len = len(lastseg_data)
                 affected_len = (offset + data_len) - lastseg_offset
                 if affected_len > 0 and affected_len < lastseg_len:
-                    _logger.debug("Splitting-of suffix of segment (%d). "
-                                  "Suffix length is (%d). Segment offset "
-                                  "is (%d) and length is (%d).",
-                                  seg_stop, lastseg_len - affected_len, 
-                                  lastseg_offset, lastseg_len)
+                    _logger.debug(
+                        "Splitting-of suffix of segment (%d). "
+                        "Suffix length is (%d). Segment offset "
+                        "is (%d) and length is (%d).",
+                        seg_stop,
+                        lastseg_len - affected_len,
+                        lastseg_offset,
+                        lastseg_len,
+                    )
 
                     self.__split(seg_stop, affected_len)
 
-                # We now have a distinct range of segments to replace with the new 
+                # We now have a distinct range of segments to replace with the new
                 # data. We are implicitly accounting for the situation in which our
                 # data is longer than the remaining number of bytes in the file.
 
-                _logger.debug("Replacing segment(s) (%d)->(%d) with new "
-                              "segment having offset (%d) and length "
-                              "(%d).", 
-                              seg_index, seg_stop + 1, seg_offset, len(data))
+                _logger.debug(
+                    "Replacing segment(s) (%d)->(%d) with new "
+                    "segment having offset (%d) and length "
+                    "(%d).",
+                    seg_index,
+                    seg_stop + 1,
+                    seg_offset,
+                    len(data),
+                )
 
-                self.__segments[seg_index:seg_stop + 1] = [(seg_offset, data)]
+                self.__segments[seg_index : seg_stop + 1] = [(seg_offset, data)]
             else:
                 self.__segments.append((offset, data))
 

@@ 187,8 209,12 @@ class BufferSegments:
         """
 
         with self.__locker:
-            _logger.debug("Reading at offset (%d) for length [%s]. Total "
-                          "length is [%s].", offset, length, self.length)
+            _logger.debug(
+                "Reading at offset (%d) for length [%s]. Total " "length is [%s].",
+                offset,
+                length,
+                self.length,
+            )
 
             if length is None:
                 length = self.length

@@ 198,7 224,7 @@ class BufferSegments:
 
             boundary_offset = offset + length
 
-            # The WHILE condition should only catch if the given length exceeds 
+            # The WHILE condition should only catch if the given length exceeds
             # the actual length. Else, the BREAK should always be sufficient.
             last_segindex = None
             (seg_offset, seg_data, seg_len) = (None, None, None)

@@ 211,14 237,19 @@ class BufferSegments:
                 grab_at = current_offset - seg_offset
                 remaining_bytes = boundary_offset - current_offset
 
-                # Determine how many bytes we're looking for, and how many we 
+                # Determine how many bytes we're looking for, and how many we
                 # can get from this segment.
 
-                grab_len = min(remaining_bytes,                         # Number of remaining, requested bytes.
-                               seg_len - (current_offset - seg_offset), # Number of available bytes in segment.
-                               self.__block_size)                       # Maximum block size.
+                grab_len = min(
+                    remaining_bytes,  # Number of remaining, requested bytes.
+                    seg_len
+                    - (
+                        current_offset - seg_offset
+                    ),  # Number of available bytes in segment.
+                    self.__block_size,
+                )  # Maximum block size.
 
-                grabbed = seg_data[grab_at:grab_at + grab_len]
+                grabbed = seg_data[grab_at : grab_at + grab_len]
                 current_offset += grab_len
                 yield grabbed
 

@@ 226,7 257,7 @@ class BufferSegments:
                 if current_offset >= boundary_offset:
                     break
 
-                # Are we going to have to read from the next segment, next 
+                # Are we going to have to read from the next segment, next
                 # time?
                 if current_offset >= (seg_offset + seg_len):
                     current_segindex += 1

@@ 238,4 269,3 @@ class BufferSegments:
 
         last_segment = self.__segments[-1]
         return last_segment[0] + len(last_segment[1])
-

M gdrivefs/cache_agent.py +86 -70
@@ 13,21 13,23 @@ import time
 class CacheAgent:
     """A particular namespace within the cache."""
 
-    registry        = None
-    resource_name   = None
-    max_age         = None
+    registry = None
+    resource_name = None
+    max_age = None
 
-    fault_handler       = None
-    cleanup_pretrigger  = None
+    fault_handler = None
+    cleanup_pretrigger = None
 
-    report              = None
-    report_source_name  = None
+    report = None
+    report_source_name = None
 
-    def __init__(self, resource_name, max_age, fault_handler=None, 
-                 cleanup_pretrigger=None):
-        _logger.debug("CacheAgent(%s,%s,%s,%s)" % (resource_name, max_age, 
-                                                   type(fault_handler), 
-                                                   cleanup_pretrigger))
+    def __init__(
+        self, resource_name, max_age, fault_handler=None, cleanup_pretrigger=None
+    ):
+        _logger.debug(
+            "CacheAgent(%s,%s,%s,%s)"
+            % (resource_name, max_age, type(fault_handler), cleanup_pretrigger)
+        )
 
         self.registry = CacheRegistry.get_instance(resource_name)
         self.resource_name = resource_name

@@ 36,8 38,8 @@ class CacheAgent:
         self.fault_handler = fault_handler
         self.cleanup_pretrigger = cleanup_pretrigger
 
-#        self.report = Report.get_instance()
-#        self.report_source_name = ("cache-%s" % (self.resource_name))
+        #        self.report = Report.get_instance()
+        #        self.report_source_name = ("cache-%s" % (self.resource_name))
 
         self.__t = None
         self.__t_quit_ev = threading.Event()

@@ 47,70 49,78 @@ class CacheAgent:
     def __del__(self):
         self.__stop_cleanup()
 
-# TODO(dustin): Currently disabled. The system doesn't rely on it, and it's 
-#               just another thread that unnecessarily runs, and trips up our 
-#               ability to test individual components in simple isolation. It
-#               needs to be refactored.
-#
-#               We'd like to either refactor into a multiprocessing worker, or
-#               just send to statsd (which would be kindof cool).
-#        self.__post_status()
+    # TODO(dustin): Currently disabled. The system doesn't rely on it, and it's
+    #               just another thread that unnecessarily runs, and trips up our
+    #               ability to test individual components in simple isolation. It
+    #               needs to be refactored.
+    #
+    #               We'd like to either refactor into a multiprocessing worker, or
+    #               just send to statsd (which would be kindof cool).
+    #        self.__post_status()
 
-#    def __del__(self):
-#
-#        if self.report.is_source(self.report_source_name):
-#            self.report.remove_all_values(self.report_source_name)
-#        pass
+    #    def __del__(self):
+    #
+    #        if self.report.is_source(self.report_source_name):
+    #            self.report.remove_all_values(self.report_source_name)
+    #        pass
 
-#    def __post_status(self):
-#        """Send the current status to our reporting tool."""
-#
-#        num_values = self.registry.count(self.resource_name)
-#
-#        self.report.set_values(self.report_source_name, 'count', 
-#                               num_values)
-#
-#        status_post_interval_s = Conf.get('cache_status_post_frequency_s')
-#        status_timer = Timer(status_post_interval_s, self.__post_status)
-#
-#        Timers.get_instance().register_timer('status', status_timer)
+    #    def __post_status(self):
+    #        """Send the current status to our reporting tool."""
+    #
+    #        num_values = self.registry.count(self.resource_name)
+    #
+    #        self.report.set_values(self.report_source_name, 'count',
+    #                               num_values)
+    #
+    #        status_post_interval_s = Conf.get('cache_status_post_frequency_s')
+    #        status_timer = Timer(status_post_interval_s, self.__post_status)
+    #
+    #        Timers.get_instance().register_timer('status', status_timer)
 
     def __cleanup(self):
-        """Scan the current cache and determine items old-enough to be 
+        """Scan the current cache and determine items old-enough to be
         removed.
         """
 
-        cleanup_interval_s = Conf.get('cache_cleanup_check_frequency_s')
+        cleanup_interval_s = Conf.get("cache_cleanup_check_frequency_s")
 
         _logger.info("Cache-cleanup thread running: %s", self)
 
-        while self.__t_quit_ev.is_set() is False and \
-                  gdrivefs.state.GLOBAL_EXIT_EVENT.is_set() is False:
-            _logger.debug("Doing clean-up for cache resource with name [%s]." % 
-                          (self.resource_name))
+        while (
+            self.__t_quit_ev.is_set() is False
+            and gdrivefs.state.GLOBAL_EXIT_EVENT.is_set() is False
+        ):
+            _logger.debug(
+                "Doing clean-up for cache resource with name [%s]."
+                % (self.resource_name)
+            )
 
             cache_dict = self.registry.list_raw(self.resource_name)
 
             # total_keys = [ (key, value_tuple[1]) for key, value_tuple \
             #                    in cache_dict.iteritems() ]
 
-            cleanup_keys = [ key for key, value_tuple \
-                                in list(cache_dict.items()) \
-                                if (datetime.datetime.now() - value_tuple[1]).seconds > \
-                                        self.max_age ]
+            cleanup_keys = [
+                key
+                for key, value_tuple in list(cache_dict.items())
+                if (datetime.datetime.now() - value_tuple[1]).seconds > self.max_age
+            ]
 
-            _logger.debug("Found (%d) entries to clean-up from entry-cache." % 
-                          (len(cleanup_keys)))
+            _logger.debug(
+                "Found (%d) entries to clean-up from entry-cache." % (len(cleanup_keys))
+            )
 
             if cleanup_keys:
                 for key in cleanup_keys:
-                    _logger.debug("Cache entry [%s] under resource-name [%s] "
-                                  "will be cleaned-up." % 
-                                  (key, self.resource_name))
+                    _logger.debug(
+                        "Cache entry [%s] under resource-name [%s] "
+                        "will be cleaned-up." % (key, self.resource_name)
+                    )
 
                     if self.exists(key, no_fault_check=True) == False:
-                        _logger.debug("Entry with ID [%s] has already been "
-                                      "cleaned-up." % (key))
+                        _logger.debug(
+                            "Entry with ID [%s] has already been " "cleaned-up." % (key)
+                        )
                     else:
                         self.remove(key)
             else:

@@ 139,11 149,11 @@ class CacheAgent:
     def remove(self, key):
         _logger.debug("CacheAgent.remove(%s)" % (key))
 
-        return self.registry.remove(self.resource_name, 
-                                    key, 
-                                    cleanup_pretrigger=self.cleanup_pretrigger)
+        return self.registry.remove(
+            self.resource_name, key, cleanup_pretrigger=self.cleanup_pretrigger
+        )
 
-    def get(self, key, handle_fault = None):
+    def get(self, key, handle_fault=None):
 
         if handle_fault == None:
             handle_fault = True

@@ 151,13 161,16 @@ class CacheAgent:
         _logger.debug("CacheAgent.get(%s)" % (key))
 
         try:
-            result = self.registry.get(self.resource_name, 
-                                       key, 
-                                       max_age=self.max_age, 
-                                       cleanup_pretrigger=self.cleanup_pretrigger)
+            result = self.registry.get(
+                self.resource_name,
+                key,
+                max_age=self.max_age,
+                cleanup_pretrigger=self.cleanup_pretrigger,
+            )
         except CacheFault:
-            _logger.debug("There was a cache-miss while requesting item with "
-                          "ID (key).")
+            _logger.debug(
+                "There was a cache-miss while requesting item with " "ID (key)."
+            )
 
             if self.fault_handler == None or not handle_fault:
                 raise

@@ 171,10 184,13 @@ class CacheAgent:
     def exists(self, key, no_fault_check=False):
         _logger.debug("CacheAgent.exists(%s)" % (key))
 
-        return self.registry.exists(self.resource_name, key, 
-                                    max_age=self.max_age,
-                                    cleanup_pretrigger=self.cleanup_pretrigger,
-                                    no_fault_check=no_fault_check)
+        return self.registry.exists(
+            self.resource_name,
+            key,
+            max_age=self.max_age,
+            cleanup_pretrigger=self.cleanup_pretrigger,
+            no_fault_check=no_fault_check,
+        )
 
     def __getitem__(self, key):
         return self.get(key)

M gdrivefs/cache_registry.py +47 -38
@@ 16,26 16,25 @@ class CacheRegistry:
     __rlock = RLock()
 
     def __init__(self):
-        self.__cache = { }
+        self.__cache = {}
 
     @staticmethod
     def get_instance(resource_name):
-    
+
         with CacheRegistry.__rlock:
             try:
-                CacheRegistry.__instance;
+                CacheRegistry.__instance
             except:
                 CacheRegistry.__instance = CacheRegistry()
 
             if resource_name not in CacheRegistry.__instance.__cache:
-                CacheRegistry.__instance.__cache[resource_name] = { }
+                CacheRegistry.__instance.__cache[resource_name] = {}
 
         return CacheRegistry.__instance
 
     def set(self, resource_name, key, value):
 
-        _logger.debug("CacheRegistry.set(%s,%s,%s)" % 
-                      (resource_name, key, value))
+        _logger.debug("CacheRegistry.set(%s,%s,%s)" % (resource_name, key, value))
 
         with CacheRegistry.__rlock:
             try:

@@ 49,28 48,28 @@ class CacheRegistry:
 
     def remove(self, resource_name, key, cleanup_pretrigger=None):
 
-        _logger.debug("CacheRegistry.remove(%s,%s,%s)" % 
-                      (resource_name, key, type(cleanup_pretrigger)))
+        _logger.debug(
+            "CacheRegistry.remove(%s,%s,%s)"
+            % (resource_name, key, type(cleanup_pretrigger))
+        )
 
         with CacheRegistry.__rlock:
             old_tuple = self.__cache[resource_name][key]
 
             self.__cleanup_entry(
-                resource_name, 
-                key, 
-                True, 
-                cleanup_pretrigger=cleanup_pretrigger)
+                resource_name, key, True, cleanup_pretrigger=cleanup_pretrigger
+            )
 
         return old_tuple[0]
 
     def get(self, resource_name, key, max_age, cleanup_pretrigger=None):
-        
-        trigger_given_phrase = ('None' 
-                                if cleanup_pretrigger == None 
-                                else '<given>')
+
+        trigger_given_phrase = "None" if cleanup_pretrigger == None else "<given>"
 
-        _logger.debug("CacheRegistry.get(%s,%s,%s,%s)" % 
-                      (resource_name, key, max_age, trigger_given_phrase))
+        _logger.debug(
+            "CacheRegistry.get(%s,%s,%s,%s)"
+            % (resource_name, key, max_age, trigger_given_phrase)
+        )
 
         with CacheRegistry.__rlock:
             try:

@@ 78,37 77,44 @@ class CacheRegistry:
             except:
                 raise CacheFault("NonExist")
 
-            if max_age != None and \
-               (datetime.now() - timestamp).seconds > max_age:
-                self.__cleanup_entry(resource_name, key, False, 
-                                     cleanup_pretrigger=cleanup_pretrigger)
+            if max_age != None and (datetime.now() - timestamp).seconds > max_age:
+                self.__cleanup_entry(
+                    resource_name, key, False, cleanup_pretrigger=cleanup_pretrigger
+                )
                 raise CacheFault("Stale")
 
         return value
 
     def list_raw(self, resource_name):
-        
+
         _logger.debug("CacheRegistry.list(%s)" % (resource_name))
 
         with CacheRegistry.__rlock:
             return self.__cache[resource_name]
 
-    def exists(self, resource_name, key, max_age, cleanup_pretrigger=None, 
-               no_fault_check=False):
+    def exists(
+        self, resource_name, key, max_age, cleanup_pretrigger=None, no_fault_check=False
+    ):
 
-        _logger.debug("CacheRegistry.exists(%s,%s,%s,%s)" % 
-                      (resource_name, key, max_age, cleanup_pretrigger))
-        
+        _logger.debug(
+            "CacheRegistry.exists(%s,%s,%s,%s)"
+            % (resource_name, key, max_age, cleanup_pretrigger)
+        )
+
         with CacheRegistry.__rlock:
             try:
                 (value, timestamp) = self.__cache[resource_name][key]
             except:
                 return False
 
-            if max_age is not None and not no_fault_check and \
-                    (datetime.now() - timestamp).seconds > max_age:
-                self.__cleanup_entry(resource_name, key, False, 
-                                     cleanup_pretrigger=cleanup_pretrigger)
+            if (
+                max_age is not None
+                and not no_fault_check
+                and (datetime.now() - timestamp).seconds > max_age
+            ):
+                self.__cleanup_entry(
+                    resource_name, key, False, cleanup_pretrigger=cleanup_pretrigger
+                )
                 return False
 
         return True

@@ 117,15 123,18 @@ class CacheRegistry:
 
         return len(self.__cache[resource_name])
 
-    def __cleanup_entry(self, resource_name, key, force, 
-                        cleanup_pretrigger=None):
+    def __cleanup_entry(self, resource_name, key, force, cleanup_pretrigger=None):
 
-        _logger.debug("Doing clean-up for resource_name [%s] and key "
-                      "[%s]." % (resource_name, key))
+        _logger.debug(
+            "Doing clean-up for resource_name [%s] and key "
+            "[%s]." % (resource_name, key)
+        )
 
         if cleanup_pretrigger is not None:
-            _logger.debug("Running pre-cleanup trigger for resource_name "
-                          "[%s] and key [%s]." % (resource_name, key))
+            _logger.debug(
+                "Running pre-cleanup trigger for resource_name "
+                "[%s] and key [%s]." % (resource_name, key)
+            )
 
             cleanup_pretrigger(resource_name, key, force)
 

M gdrivefs/cacheclient_base.py +17 -19
@@ 5,19 5,15 @@ import logging
 
 
 class CacheClientBase:
-    """Meant to be inherited by a class. Is used to configure a particular 
+    """Meant to be inherited by a class. Is used to configure a particular
     namespace within the cache.
     """
 
-
-
-# TODO(dustin): This is a terrible object, and needs to be refactored. It 
-#               doesn't provide any way to cleanup itself or CacheAgent, or any 
-#               way to invoke a singleton of CacheAgent whose thread we can 
-#               easier start or stop. Since this larger *wraps* CacheAgent, we 
-#               might just dispose of it.
-
-
+    # TODO(dustin): This is a terrible object, and needs to be refactored. It
+    #               doesn't provide any way to cleanup itself or CacheAgent, or any
+    #               way to invoke a singleton of CacheAgent whose thread we can
+    #               easier start or stop. Since this larger *wraps* CacheAgent, we
+    #               might just dispose of it.
 
     @property
     def cache(self):

@@ 26,16 22,19 @@ class CacheClientBase:
         except:
             pass
 
-        self.__cache = CacheAgent(self.child_type, self.max_age, 
-                                 fault_handler=self.fault_handler, 
-                                 cleanup_pretrigger=self.cleanup_pretrigger)
+        self.__cache = CacheAgent(
+            self.child_type,
+            self.max_age,
+            fault_handler=self.fault_handler,
+            cleanup_pretrigger=self.cleanup_pretrigger,
+        )
 
         return self.__cache
 
     def __init__(self):
         child_type = self.__class__.__bases__[0].__name__
         max_age = self.get_max_cache_age_seconds()
-        
+
         _logger.debug("CacheClientBase(%s,%s)" % (child_type, max_age))
 
         self.child_type = child_type

@@ 56,8 55,9 @@ class CacheClientBase:
         pass
 
     def get_max_cache_age_seconds(self):
-        raise NotImplementedError("get_max_cache_age() must be implemented in "
-                                  "the CacheClientBase child.")
+        raise NotImplementedError(
+            "get_max_cache_age() must be implemented in " "the CacheClientBase child."
+        )
 
     @classmethod
     def get_instance(cls):

@@ 70,12 70,10 @@ class CacheClientBase:
         try:
             CacheClientBase.__instances
         except:
-            CacheClientBase.__instances = { }
+            CacheClientBase.__instances = {}
 
         try:
             return CacheClientBase.__instances[class_name]
         except:
             CacheClientBase.__instances[class_name] = cls()
             return CacheClientBase.__instances[class_name]
-
-

M gdrivefs/change.py +54 -35
@@ 14,8 14,7 @@ import time
 class _ChangeManager:
     def __init__(self):
         self.at_change_id = AccountInfo.get_instance().largest_change_id
-        _logger.debug("Latest change-ID at startup is (%d)." % 
-                      (self.at_change_id))
+        _logger.debug("Latest change-ID at startup is (%d)." % (self.at_change_id))
 
         self.__t = None
         self.__t_quit_ev = threading.Event()

@@ 33,30 32,35 @@ class _ChangeManager:
     def __check_changes(self):
         _logger.info("Change-processing thread running.")
 
-        interval_s = Conf.get('change_check_frequency_s')
+        interval_s = Conf.get("change_check_frequency_s")
         cm = get_change_manager()
 
-        while self.__t_quit_ev.is_set() is False and \
-                gdrivefs.state.GLOBAL_EXIT_EVENT.is_set() is False:
+        while (
+            self.__t_quit_ev.is_set() is False
+            and gdrivefs.state.GLOBAL_EXIT_EVENT.is_set() is False
+        ):
             _logger.debug("Checking for changes.")
 
             try:
                 is_done = cm.process_updates()
             except:
-                _logger.exception("Squelching an exception that occurred "
-                                  "while reading/processing changes.")
+                _logger.exception(
+                    "Squelching an exception that occurred "
+                    "while reading/processing changes."
+                )
 
                 # Force another check, soon.
                 is_done = False
 
-            # If there are still more changes, take them as quickly as 
+            # If there are still more changes, take them as quickly as
             # possible.
             if is_done is True:
                 _logger.debug("No more changes. Waiting.")
                 time.sleep(interval_s)
             else:
-                _logger.debug("There are more changes to be applied. Cycling "
-                              "immediately.")
+                _logger.debug(
+                    "There are more changes to be applied. Cycling " "immediately."
+                )
 
         _logger.info("Change-processing thread terminating.")
 

@@ 76,64 80,76 @@ class _ChangeManager:
         """Process any changes to our files. Return True if everything is up to
         date or False if we need to be run again.
         """
-# TODO(dustin): Reimplement using the "watch" interface. We'll have to find 
-#               more documentation:
-#
-#               https://developers.google.com/drive/v2/reference/changes/watch
-#
-        start_at_id = (self.at_change_id + 1)
+        # TODO(dustin): Reimplement using the "watch" interface. We'll have to find
+        #               more documentation:
+        #
+        #               https://developers.google.com/drive/v2/reference/changes/watch
+        #
+        start_at_id = self.at_change_id + 1
 
         gd = get_gdrive()
         result = gd.list_changes(start_change_id=start_at_id)
 
         (largest_change_id, next_page_token, changes) = result
 
-        _logger.debug("The latest reported change-ID is (%d) and we're "
-                      "currently at change-ID (%d).",
-                      largest_change_id, self.at_change_id)
+        _logger.debug(
+            "The latest reported change-ID is (%d) and we're "
+            "currently at change-ID (%d).",
+            largest_change_id,
+            self.at_change_id,
+        )
 
         _logger.info("(%d) changes will now be applied." % (len(changes)))
 
         for change_id, change_tuple in changes:
-            # Apply the changes. We expect to be running them from oldest to 
+            # Apply the changes. We expect to be running them from oldest to
             # newest.
 
-            _logger.info("========== Change with ID (%d) will now be applied. ==========" %
-                            (change_id))
+            _logger.info(
+                "========== Change with ID (%d) will now be applied. =========="
+                % (change_id)
+            )
 
             try:
                 self.__apply_change(change_id, change_tuple)
             except:
-                _logger.exception("There was a problem while processing change"
-                                  " with ID (%d). No more changes will be "
-                                  "applied." % (change_id))
+                _logger.exception(
+                    "There was a problem while processing change"
+                    " with ID (%d). No more changes will be "
+                    "applied." % (change_id)
+                )
                 return False
 
             self.at_change_id = change_id
 
-        return (next_page_token is None)
+        return next_page_token is None
 
     def __apply_change(self, change_id, change_tuple):
-        """Apply changes to our filesystem reported by GD. All we do is remove 
-        the current record components, if it's valid, and then reload it with 
+        """Apply changes to our filesystem reported by GD. All we do is remove
+        the current record components, if it's valid, and then reload it with
         what we were given. Note that since we don't necessarily know
         about the entries that have been changed, this also allows us to slowly
-        increase our knowledge of the filesystem (of, obviously, only those 
+        increase our knowledge of the filesystem (of, obviously, only those
         things that change).
         """
 
         (entry_id, was_deleted, entry) = change_tuple
-        
+
         is_visible = entry.is_visible if entry else None
 
-        _logger.info("Applying change with change-ID (%d), entry-ID [%s], "
-                     "and is-visible of [%s]",
-                     change_id, entry_id, is_visible)
+        _logger.info(
+            "Applying change with change-ID (%d), entry-ID [%s], "
+            "and is-visible of [%s]",
+            change_id,
+            entry_id,
+            is_visible,
+        )
 
         # First, remove any current knowledge from the system.
 
-        _logger.debug("Removing all trace of entry with ID [%s] "
-                      "(apply_change).", entry_id)
+        _logger.debug(
+            "Removing all trace of entry with ID [%s] " "(apply_change).", entry_id
+        )
 
         PathRelations.get_instance().remove_entry_all(entry_id)
 

@@ 145,7 161,10 @@ class _ChangeManager:
             path_relations = PathRelations.get_instance()
             path_relations.register_entry(entry)
 
+
 _instance = None
+
+
 def get_change_manager():
     global _instance
 

M gdrivefs/chunked_download.py +53 -42
@@ 4,6 4,7 @@ import gdrivefs.config
 import logging
 import random
 import time
+
 try:
     from oauth2client import util
 except ImportError:

@@ 17,7 18,7 @@ DEFAULT_CHUNK_SIZE = 1024 * 512
 
 
 class ChunkedDownload:
-    """"Download an entry, chunk by chunk. This code is mostly identical to
+    """ "Download an entry, chunk by chunk. This code is mostly identical to
     MediaIoBaseDownload, which couldn't be used because we have a specific URL
     that needs to be downloaded (not a request object, which doesn't apply here).
     """

@@ 67,20 68,25 @@ class ChunkedDownload:
         """
 
         headers = {
-            'range': 'bytes=%d-%d' % (
-                self._progress, self._progress + self._chunksize)
-            }
+            "range": "bytes=%d-%d" % (self._progress, self._progress + self._chunksize)
+        }
 
         for retry_num in range(num_retries + 1):
-            _logger.debug("Attempting to read chunk. ATTEMPT=(%d)/(%d)", 
-                          retry_num + 1, num_retries + 1)
+            _logger.debug(
+                "Attempting to read chunk. ATTEMPT=(%d)/(%d)",
+                retry_num + 1,
+                num_retries + 1,
+            )
 
             resp, content = self._http.request(self._uri, headers=headers)
             if retry_num > 0:
-                self._sleep(self._rand() * 2**retry_num)
-                _logger.warning("Retry #%d for media download: GET %s, "
-                                "following status: %d", 
-                                retry_num, self._uri, resp.status)
+                self._sleep(self._rand() * 2 ** retry_num)
+                _logger.warning(
+                    "Retry #%d for media download: GET %s, " "following status: %d",
+                    retry_num,
+                    self._uri,
+                    resp.status,
+                )
 
             if resp.status < 500:
                 break

@@ 89,8 95,8 @@ class ChunkedDownload:
 
         if resp.status in [200, 206]:
             try:
-                if resp['content-location'] != self._uri:
-                    self._uri = resp['content-location']
+                if resp["content-location"] != self._uri:
+                    self._uri = resp["content-location"]
             except KeyError:
                 pass
 

@@ 98,55 104,60 @@ class ChunkedDownload:
             self._progress += received_size_b
             self._fd.write(content)
 
-            # This seems to be the most correct method to get the filesize, but 
+            # This seems to be the most correct method to get the filesize, but
             # we've seen it not exist.
-            if 'content-range' in resp:
+            if "content-range" in resp:
                 if self._total_size is None:
-                    content_range = resp['content-range']
-                    length = content_range.rsplit('/', 1)[1]
+                    content_range = resp["content-range"]
+                    length = content_range.rsplit("/", 1)[1]
                     length = int(length)
 
                     self._total_size = length
 
-                    _logger.debug("Received download size (content-range): "
-                                  "(%d)", self._total_size)
+                    _logger.debug(
+                        "Received download size (content-range): " "(%d)",
+                        self._total_size,
+                    )
 
             # There's a chance that "content-range" will be omitted for zero-
-            # length files (or maybe files that are complete within the first 
+            # length files (or maybe files that are complete within the first
             # chunk).
 
             else:
-# TODO(dustin): Is this a valid assumption, or should it be an error?
-                _logger.warning("No 'content-range' found in response. "
-                                "Assuming that we've received all data.")
+                # TODO(dustin): Is this a valid assumption, or should it be an error?
+                _logger.warning(
+                    "No 'content-range' found in response. "
+                    "Assuming that we've received all data."
+                )
 
                 self._total_size = received_size_b
 
-# TODO(dustin): We were using this for a while, but it appears to be no larger 
-#               then a single chunk.
-#
-#            # This method doesn't seem documented, but we've seen cases where 
-#            # this is available, but "content-range" isn't.
-#            if 'content-length' in resp:
-#                self._total_size = int(resp['content-length'])
-#
-#                _logger.debug("Received download size (content-length): "
-#                              "(%d)", self._total_size)
+            # TODO(dustin): We were using this for a while, but it appears to be no larger
+            #               then a single chunk.
+            #
+            #            # This method doesn't seem documented, but we've seen cases where
+            #            # this is available, but "content-range" isn't.
+            #            if 'content-length' in resp:
+            #                self._total_size = int(resp['content-length'])
+            #
+            #                _logger.debug("Received download size (content-length): "
+            #                              "(%d)", self._total_size)
 
+            assert self._total_size is not None, "File-size was not provided."
 
-            assert self._total_size is not None, \
-                   "File-size was not provided."
-
-            _logger.debug("Checking if done. PROGRESS=(%d) TOTAL-SIZE=(%d)", 
-                          self._progress, self._total_size)
+            _logger.debug(
+                "Checking if done. PROGRESS=(%d) TOTAL-SIZE=(%d)",
+                self._progress,
+                self._total_size,
+            )
 
             if self._progress == self._total_size:
                 self._done = True
 
-            return (apiclient.http.MediaDownloadProgress(
-                        self._progress, 
-                        self._total_size), \
-                    self._done, \
-                    self._total_size)
+            return (
+                apiclient.http.MediaDownloadProgress(self._progress, self._total_size),
+                self._done,
+                self._total_size,
+            )
         else:
             raise apiclient.errors.HttpError(resp, content, uri=self._uri)

M gdrivefs/conf.py +29 -27
@@ 11,36 11,38 @@ class Conf:
     """Manages options."""
 
     api_credentials = {
-        "web": { "client_id": "1004122597540-ne2btnejcbr319ukdh9soke1rrldl27f.apps.googleusercontent.com",
-                 "client_secret": "TwkunuaxFi9IMs218VkJEkCX",
-                 "redirect_uris": [],
-                 "auth_uri": "https://accounts.google.com/o/oauth2/auth",
-                 "token_uri": "https://accounts.google.com/o/oauth2/token"
-               }}
+        "web": {
+            "client_id": "1004122597540-ne2btnejcbr319ukdh9soke1rrldl27f.apps.googleusercontent.com",
+            "client_secret": "TwkunuaxFi9IMs218VkJEkCX",
+            "redirect_uris": [],
+            "auth_uri": "https://accounts.google.com/o/oauth2/auth",
+            "token_uri": "https://accounts.google.com/o/oauth2/token",
+        }
+    }
 
-    auth_cache_filepath                 = None
-#    gd_to_normal_mapping_filepath       = '/etc/gdfs/mime_mapping.json'
-    extension_mapping_filepath          = '/etc/gdfs/extension_mapping.json'
-    query_decay_intermed_prefix_length  = 7
-    file_jobthread_max_idle_time        = 60
-    file_chunk_size_kb                  = 1024
-    file_download_temp_max_age_s        = 86400
-    change_check_frequency_s            = 3
-    hidden_flags_list_local             = ['trashed', 'restricted']
-    hidden_flags_list_remote            = ['trashed']
-    cache_cleanup_check_frequency_s     = 60
-    cache_entries_max_age               = 8 * 60 * 60
-    cache_status_post_frequency_s       = 10
+    auth_cache_filepath = None
+    #    gd_to_normal_mapping_filepath       = '/etc/gdfs/mime_mapping.json'
+    extension_mapping_filepath = "/etc/gdfs/extension_mapping.json"
+    query_decay_intermed_prefix_length = 7
+    file_jobthread_max_idle_time = 60
+    file_chunk_size_kb = 1024
+    file_download_temp_max_age_s = 86400
+    change_check_frequency_s = 3
+    hidden_flags_list_local = ["trashed", "restricted"]
+    hidden_flags_list_remote = ["trashed"]
+    cache_cleanup_check_frequency_s = 60
+    cache_entries_max_age = 8 * 60 * 60
+    cache_status_post_frequency_s = 10
 
-# Deimplementing report functionality.
-#    report_emit_frequency_s             = 60
+    # Deimplementing report functionality.
+    #    report_emit_frequency_s             = 60
 
-    google_discovery_service_url        = DISCOVERY_URI
-    default_buffer_read_blocksize       = 65536
-    directory_mimetype                  = 'application/vnd.google-apps.folder'
-    default_perm_folder                 = '777'
-    default_perm_file_editable          = '666'
-    default_perm_file_noneditable       = '444'
+    google_discovery_service_url = DISCOVERY_URI
+    default_buffer_read_blocksize = 65536
+    directory_mimetype = "application/vnd.google-apps.folder"
+    default_perm_folder = "777"
+    default_perm_file_editable = "666"
+    default_perm_file_noneditable = "444"
 
     # How many extra entries to retrieve when an entry is accessed that is not
     # currently cached.

M gdrivefs/config/__init__.py +5 -5
@@ 1,7 1,7 @@ 
 import os
 
-IS_DEBUG = bool(int(os.environ.get('GD_DEBUG', '0')))
-NO_THREADS = bool(int(os.environ.get('GD_NOTHREADS', '0')))
-DO_LOG_FUSE_MESSAGES = bool(int(os.environ.get('GD_DO_LOG_FUSE_MESSAGES', '0')))
-DEFAULT_CREDENTIALS_FILEPATH = os.path.expandvars('$HOME/.gdfs/creds')
-DEFAULT_RETRIES = int(os.environ.get('GD_RETRIES', '3'))
+IS_DEBUG = bool(int(os.environ.get("GD_DEBUG", "0")))
+NO_THREADS = bool(int(os.environ.get("GD_NOTHREADS", "0")))
+DO_LOG_FUSE_MESSAGES = bool(int(os.environ.get("GD_DO_LOG_FUSE_MESSAGES", "0")))
+DEFAULT_CREDENTIALS_FILEPATH = os.path.expandvars("$HOME/.gdfs/creds")
+DEFAULT_RETRIES = int(os.environ.get("GD_RETRIES", "3"))

M gdrivefs/config/changes.py +1 -1
@@ 1,3 1,3 @@ 
 import os
 
-MONITOR_CHANGES = bool(int(os.environ.get('GD_MONITOR_CHANGES', '1')))
+MONITOR_CHANGES = bool(int(os.environ.get("GD_MONITOR_CHANGES", "1")))

M gdrivefs/config/log.py +9 -4
@@ 5,6 5,7 @@ import gdrivefs.config
 
 logger = logging.getLogger()
 
+
 def configure(is_debug=gdrivefs.config.IS_DEBUG):
     if is_debug:
         logger.setLevel(logging.DEBUG)

@@ 14,20 15,24 @@ def configure(is_debug=gdrivefs.config.I
     def _configure_syslog():
         facility = logging.handlers.SysLogHandler.LOG_LOCAL0
         sh = logging.handlers.SysLogHandler(facility=facility)
-        formatter = logging.Formatter('GD: %(name)-12s %(levelname)-7s %(message)s')
+        formatter = logging.Formatter("GD: %(name)-12s %(levelname)-7s %(message)s")
         sh.setFormatter(formatter)
         logger.addHandler(sh)
 
     def _configure_file():
-        filepath = os.environ.get('GD_LOG_FILEPATH', '/tmp/gdrivefs.log')
+        filepath = os.environ.get("GD_LOG_FILEPATH", "/tmp/gdrivefs.log")
         fh = logging.FileHandler(filepath)
-        formatter = logging.Formatter('%(asctime)s [%(name)s %(levelname)s] %(message)s')
+        formatter = logging.Formatter(
+            "%(asctime)s [%(name)s %(levelname)s] %(message)s"
+        )
         fh.setFormatter(formatter)
         logger.addHandler(fh)
 
     def _configure_console():
         sh = logging.StreamHandler()
-        formatter = logging.Formatter('%(asctime)s [%(name)s %(levelname)s] %(message)s')
+        formatter = logging.Formatter(
+            "%(asctime)s [%(name)s %(levelname)s] %(message)s"
+        )
         sh.setFormatter(formatter)
         logger.addHandler(sh)
 

          
M gdrivefs/constants.py +1 -2
@@ 1,2 1,1 @@ 
-OCTET_STREAM_MIMETYPE = 'application/octet-stream'
-
+OCTET_STREAM_MIMETYPE = "application/octet-stream"

          
M gdrivefs/displaced_file.py +27 -24
@@ 13,8 13,9 @@ class DisplacedFile:
     file_size = 1000
 
     def __init__(self, normalized_entry):
-        assert issubclass(normalized_entry.__class__, NormalEntry) is True, \
-               "DisplacedFile can not wrap a non-NormalEntry object."
+        assert (
+            issubclass(normalized_entry.__class__, NormalEntry) is True
+        ), "DisplacedFile can not wrap a non-NormalEntry object."
 
         self.__normalized_entry = normalized_entry
         self.__filepath = tempfile.NamedTemporaryFile(delete=False).name

          
@@ 23,48 24,50 @@ class DisplacedFile:
         os.unlink(self.__filepath)
 
     def deposit_file(self, mime_type):
-        """Write the file to a temporary path, and present a stub (JSON) to the 
-        user. This is the only way of getting files that don't have a 
+        """Write the file to a temporary path, and present a stub (JSON) to the
+        user. This is the only way of getting files that don't have a
         well-defined filesize without providing a type, ahead of time.
         """
 
         gd = get_gdrive()
 
         result = gd.download_to_local(
-                    self.__filepath, 
-                    self.__normalized_entry,
-                    mime_type)
+            self.__filepath, self.__normalized_entry, mime_type
+        )
 
         (length, cache_fault) = result
 
-        _logger.debug("Displaced entry [%s] deposited to [%s] with length "
-                      "(%d).", self.__normalized_entry, self.__filepath, length)
+        _logger.debug(
+            "Displaced entry [%s] deposited to [%s] with length " "(%d).",
+            self.__normalized_entry,
+            self.__filepath,
+            length,
+        )
 
         return self.get_stub(mime_type, length, self.__filepath)
 
     def get_stub(self, mime_type, file_size=0, file_path=None):
         """Return the content for an info ("stub") file."""
 
-        if file_size == 0 and \
-           self.__normalized_entry.requires_displaceable is False:
+        if file_size == 0 and self.__normalized_entry.requires_displaceable is False:
             file_size = self.__normalized_entry.file_size
 
         stub_data = {
-                'EntryId':              self.__normalized_entry.id,
-                'OriginalMimeType':     self.__normalized_entry.mime_type,
-                'ExportTypes':          self.__normalized_entry.download_types,
-                'Title':                self.__normalized_entry.title,
-                'Labels':               self.__normalized_entry.labels,
-                'FinalMimeType':        mime_type,
-                'Length':               file_size,
-                'RequiresMimeType':     self.__normalized_entry.requires_mimetype,
-                'ImageMediaMetadata':   self.__normalized_entry.image_media_metadata
-            }
+            "EntryId": self.__normalized_entry.id,
+            "OriginalMimeType": self.__normalized_entry.mime_type,
+            "ExportTypes": self.__normalized_entry.download_types,
+            "Title": self.__normalized_entry.title,
+            "Labels": self.__normalized_entry.labels,
+            "FinalMimeType": mime_type,
+            "Length": file_size,
+            "RequiresMimeType": self.__normalized_entry.requires_mimetype,
+            "ImageMediaMetadata": self.__normalized_entry.image_media_metadata,
+        }
 
         if file_path:
-            stub_data['FilePath'] = file_path
+            stub_data["FilePath"] = file_path
 
             result = json.dumps(stub_data)
-            padding = (' ' * (self.file_size - len(result) - 1))
+            padding = " " * (self.file_size - len(result) - 1)
 
-            return ("%s%s\n" % (result, padding))
+            return "%s%s\n" % (result, padding)
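
The stub written for displaced entries is padded with spaces so that it always serializes to exactly DisplacedFile.file_size bytes (1000, per the class attribute above), trailing newline included. A self-contained sketch of that padding arithmetic, with illustrative names:

    import json

    STUB_SIZE = 1000  # mirrors DisplacedFile.file_size

    def pad_stub(stub_data):
        # Serialize the stub, then pad with spaces so the result plus the
        # trailing newline is always exactly STUB_SIZE characters.
        body = json.dumps(stub_data)
        return "%s%s\n" % (body, " " * (STUB_SIZE - len(body) - 1))

    assert len(pad_stub({"EntryId": "abc123"})) == STUB_SIZE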

          
M gdrivefs/drive.py +371 -298
@@ 24,58 24,68 @@ import time
 
 httplib2shim.patch()
 
-_CONF_SERVICE_NAME = 'drive'
-_CONF_SERVICE_VERSION = 'v2'
+_CONF_SERVICE_NAME = "drive"
+_CONF_SERVICE_VERSION = "v2"
 
 _MAX_EMPTY_CHUNKS = 3
 _DEFAULT_UPLOAD_CHUNK_SIZE_B = 1024 * 1024
 
-logging.getLogger('apiclient.discovery').setLevel(logging.WARNING)
+logging.getLogger("apiclient.discovery").setLevel(logging.WARNING)
 
 _logger = logging.getLogger(__name__)
 
+
 def _marshall(f):
-    """A method wrapper that will reauth and/or reattempt where reasonable.
-    """
+    """A method wrapper that will reauth and/or reattempt where reasonable."""
 
     auto_refresh = True
 
     @functools.wraps(f)
     def wrapper(*args, **kwargs):
-        # Now, try to invoke the mechanism. If we succeed, return 
-        # immediately. If we get an authorization-fault (a resolvable 
-        # authorization problem), fall through and attempt to fix it. Allow 
+        # Now, try to invoke the mechanism. If we succeed, return
+        # immediately. If we get an authorization-fault (a resolvable
+        # authorization problem), fall through and attempt to fix it. Allow
         # any other error to bubble up.
-        
+
         for n in range(0, 5):
             try:
                 return f(*args, **kwargs)
             except (ssl.SSLError, http.client.BadStatusLine) as e:
                 # These happen sporadically. Use backoff.
-                _logger.exception("There was a transient connection "
-                                  "error (%s). Trying again [%s]: %s",
-                                  e.__class__.__name__, str(e), n)
+                _logger.exception(
+                    "There was a transient connection "
+                    "error (%s). Trying again [%s]: %s",
+                    e.__class__.__name__,
+                    str(e),
+                    n,
+                )
 
                 time.sleep((2 ** n) + random.randint(0, 1000) / 1000)
             except apiclient.errors.HttpError as e:
-                if e.content == '':
+                if e.content == "":
                     raise
 
                 try:
                     error = json.loads(e.content)
                 except ValueError:
-                    _logger.error("Non-JSON error while doing chunked "
-                                  "download: [%s]", e.content) 
+                    _logger.error(
+                        "Non-JSON error while doing chunked " "download: [%s]",
+                        e.content,
+                    )
                     raise e
 
-                if error.get('code') == 403 and \
-                   error.get('errors')[0].get('reason') \
-                        in ['rateLimitExceeded', 'userRateLimitExceeded']:
+                if error.get("code") == 403 and error.get("errors")[0].get(
+                    "reason"
+                ) in ["rateLimitExceeded", "userRateLimitExceeded"]:
                     # Apply exponential backoff.
-                    _logger.exception("There was a transient HTTP "
-                                      "error (%s). Trying again (%d): "
-                                      "%s",
-                                      e.__class__.__name__, str(e), n)
+                    _logger.exception(
+                        "There was a transient HTTP "
+                        "error (%s). Trying again (%d): "
+                        "%s",
+                        e.__class__.__name__,
+                        str(e),
+                        n,
+                    )
 
                     time.sleep((2 ** n) + random.randint(0, 1000) / 1000)
                 else:

          
@@ 89,16 99,20 @@ def _marshall(f):
 
                 # We had a resolvable authorization problem.
 
-                _logger.info("There was an authorization fault under "
-                             "action [%s]. Attempting refresh.", n)
-                
+                _logger.info(
+                    "There was an authorization fault under "
+                    "action [%s]. Attempting refresh.",
+                    n,
+                )
+
                 authorize = gdrivefs.oauth_authorize.get_auth()
                 authorize.check_credential_state()
 
                 # Re-attempt the action.
 
-                _logger.info("Refresh seemed successful. Reattempting "
-                             "action [%s].", n)
+                _logger.info(
+                    "Refresh seemed successful. Reattempting " "action [%s].", n
+                )
 
     return wrapper
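
Between retries, the wrapper above sleeps for an exponentially growing interval plus up to one second of random jitter. A minimal standalone sketch of the same delay schedule; the helper name and the placeholder call are illustrative:

    import random
    import time

    def backoff_delay_s(attempt):
        # 2**attempt seconds plus up to one second of jitter, matching the
        # sleep used in the transient-error branches above.
        return (2 ** attempt) + random.randint(0, 1000) / 1000

    def do_drive_call():
        return None  # placeholder for the wrapped Drive API call

    for attempt in range(5):
        try:
            do_drive_call()
            break
        except (ConnectionError, TimeoutError):
            time.sleep(backoff_delay_s(attempt))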
 

          
@@ 117,7 131,7 @@ class GdriveAuth:
         if self.__http is None:
             self.__check_authorization()
             _logger.debug("Getting authorized HTTP tunnel.")
-                
+
             http = httplib2shim.Http()
             self.__credentials.authorize(http)
 

          
@@ 130,33 144,35 @@ class GdriveAuth:
     def get_client(self):
         if self.__client is None:
             authed_http = self.get_authed_http()
-        
+
             # Build a client from the passed discovery document path
-            
-            discoveryUrl = \
-                gdrivefs.conf.Conf.get('google_discovery_service_url')
-# TODO: We should cache this, since we have, so often, had a problem 
-#       retrieving it. If there's no other way, grab it directly, and then pass
-#       via a file:// URI.
-        
+
+            discoveryUrl = gdrivefs.conf.Conf.get("google_discovery_service_url")
+            # TODO: We should cache this, since we have, so often, had a problem
+            #       retrieving it. If there's no other way, grab it directly, and then pass
+            #       via a file:// URI.
+
             try:
-                client = \
-                    apiclient.discovery.build(
-                        _CONF_SERVICE_NAME, 
-                        _CONF_SERVICE_VERSION, 
-                        http=authed_http, 
-                        discoveryServiceUrl=discoveryUrl)
+                client = apiclient.discovery.build(
+                    _CONF_SERVICE_NAME,
+                    _CONF_SERVICE_VERSION,
+                    http=authed_http,
+                    discoveryServiceUrl=discoveryUrl,
+                )
             except apiclient.errors.HttpError as e:
                 # We've seen situations where the discovery URL's server is down,
                 # with an alternate one to be used.
                 #
-                # An error here shouldn't leave GDFS in an unstable state (the 
-                # current command should just fail). Hoepfully, the failure is 
+                # An error here shouldn't leave GDFS in an unstable state (the
+                # current command should just fail). Hoepfully, the failure is
                 # momentary, and the next command succeeds.
 
-                _logger.exception("There was an HTTP response-code of (%d) while "
-                                  "building the client with discovery URL [%s].",
-                                  e.resp.status, discoveryUrl)
+                _logger.exception(
+                    "There was an HTTP response-code of (%d) while "
+                    "building the client with discovery URL [%s].",
+                    e.resp.status,
+                    discoveryUrl,
+                )
                 raise
 
             self.__client = client

          
@@ 166,7 182,7 @@ class GdriveAuth:
 
 class _GdriveManager:
     """Handles all basic communication with Google Drive. All methods should
-    try to invoke only one call, or make sure they handle authentication 
+    try to invoke only one call, or make sure they handle authentication
     refreshing when necessary.
     """
 

          
@@ 174,10 190,12 @@ class _GdriveManager:
         self.__auth = GdriveAuth()
 
     def __assert_response_kind(self, response, expected_kind):
-        actual_kind = response['kind']
+        actual_kind = response["kind"]
         if actual_kind != str(expected_kind):
-            raise ValueError("Received response of type [%s] instead of "
-                             "[%s]." % (actual_kind, expected_kind))
+            raise ValueError(
+                "Received response of type [%s] instead of "
+                "[%s]." % (actual_kind, expected_kind)
+            )
 
     @_marshall
     def get_about_info(self):

          
@@ 185,59 203,59 @@ class _GdriveManager:
 
         client = self.__auth.get_client()
         response = client.about().get().execute()
-        self.__assert_response_kind(response, 'drive#about')
+        self.__assert_response_kind(response, "drive#about")
 
         return response
 
     @_marshall
     def list_changes(self, start_change_id=None, page_token=None):
-        """Get a list of the most recent changes from GD, with the earliest 
-        changes first. This only returns one page at a time. start_change_id 
-        doesn't have to be valid.. It's just the lower limit to what you want 
+        """Get a list of the most recent changes from GD, with the earliest
+        changes first. This only returns one page at a time. start_change_id
+        doesn't have to be valid.. It's just the lower limit to what you want
         back. Change-IDs are integers, but are not necessarily sequential.
         """
 
         client = self.__auth.get_client()
 
-        response = client.changes().list(
-                    pageToken=page_token, 
-                    startChangeId=start_change_id).execute()
+        response = (
+            client.changes()
+            .list(pageToken=page_token, startChangeId=start_change_id)
+            .execute()
+        )
 
-        self.__assert_response_kind(response, 'drive#changeList')
+        self.__assert_response_kind(response, "drive#changeList")
 
-        items = response['items']
+        items = response["items"]
 
         if items:
             _logger.debug("We received (%d) changes to apply.", len(items))
 
-        largest_change_id = int(response['largestChangeId'])
-        next_page_token = response.get('nextPageToken')
+        largest_change_id = int(response["largestChangeId"])
+        next_page_token = response.get("nextPageToken")
 
         changes = []
         # last_change_id = None
         for item in items:
-            change_id = int(item['id'])
-            entry_id = item['fileId']
+            change_id = int(item["id"])
+            entry_id = item["fileId"]
 
-            if item['deleted']:
+            if item["deleted"]:
                 was_deleted = True
                 entry = None
 
                 _logger.debug("CHANGE: [%s] (DELETED)", entry_id)
             else:
                 was_deleted = False
-                entry = item['file']
+                entry = item["file"]
 
-                _logger.debug("CHANGE: [%s] [%s] (UPDATED)", 
-                              entry_id, entry['title'])
+                _logger.debug("CHANGE: [%s] [%s] (UPDATED)", entry_id, entry["title"])
 
             if was_deleted:
                 normalized_entry = None
             else:
-                normalized_entry = \
-                    gdrivefs.normal_entry.NormalEntry(
-                        'list_changes', 
-                        entry)
+                normalized_entry = gdrivefs.normal_entry.NormalEntry(
+                    "list_changes", entry
+                )
 
             changes.append((change_id, (entry_id, was_deleted, normalized_entry)))
             # last_change_id = change_id

          
@@ 246,7 264,7 @@ class _GdriveManager:
 
     @_marshall
     def get_parents_containing_id(self, child_id, max_results=None):
-        
+
         _logger.info("Getting client for parent-listing.")
 
         client = self.__auth.get_client()

          
@@ 254,51 272,60 @@ class _GdriveManager:
         _logger.info("Listing entries over child with ID [%s].", child_id)
 
         response = client.parents().list(fileId=child_id).execute()
-        self.__assert_response_kind(response, 'drive#parentList')
+        self.__assert_response_kind(response, "drive#parentList")
 
-        return [ entry['id'] for entry in response['items'] ]
+        return [entry["id"] for entry in response["items"]]
 
     @_marshall
-    def get_children_under_parent_id(self,
-                                     parent_id,
-                                     query_contains_string=None,
-                                     query_is_string=None,
-                                     max_results=None):
+    def get_children_under_parent_id(
+        self,
+        parent_id,
+        query_contains_string=None,
+        query_is_string=None,
+        max_results=None,
+    ):
 
         _logger.info("Getting client for child-listing.")
 
         client = self.__auth.get_client()
 
-        assert \
-            (query_contains_string is not None and \
-             query_is_string is not None) is False, \
-            "The query_contains_string and query_is_string parameters are "\
+        assert (
+            query_contains_string is not None and query_is_string is not None
+        ) is False, (
+            "The query_contains_string and query_is_string parameters are "
             "mutually exclusive."
+        )
 
         if query_is_string:
-            query = ("title='%s'" % 
-                     (gdrivefs.fsutility.escape_filename_for_query(query_is_string)))
+            query = "title='%s'" % (
+                gdrivefs.fsutility.escape_filename_for_query(query_is_string)
+            )
         elif query_contains_string:
-            query = ("title contains '%s'" % 
-                     (gdrivefs.fsutility.escape_filename_for_query(query_contains_string)))
+            query = "title contains '%s'" % (
+                gdrivefs.fsutility.escape_filename_for_query(query_contains_string)
+            )
         else:
             query = None
 
-        _logger.info("Listing entries under parent with ID [%s].  QUERY= "
-                     "[%s]", parent_id, query)
+        _logger.info(
+            "Listing entries under parent with ID [%s].  QUERY= " "[%s]",
+            parent_id,
+            query,
+        )
 
-        response = client.children().list(
-                    q=query, 
-                    folderId=parent_id,
-                    maxResults=max_results).execute()
+        response = (
+            client.children()
+            .list(q=query, folderId=parent_id, maxResults=max_results)
+            .execute()
+        )
 
-        self.__assert_response_kind(response, 'drive#childList')
+        self.__assert_response_kind(response, "drive#childList")
 
-        return [ entry['id'] for entry in response['items'] ]
+        return [entry["id"] for entry in response["items"]]
 
     @_marshall
     def get_entries(self, entry_ids):
-        retrieved = { }
+        retrieved = {}
         for entry_id in entry_ids:
             retrieved[entry_id] = self.get_entry(entry_id)
 

          
@@ 311,26 338,21 @@ class _GdriveManager:
         client = self.__auth.get_client()
 
         response = client.files().get(fileId=entry_id).execute()
-        self.__assert_response_kind(response, 'drive#file')
+        self.__assert_response_kind(response, "drive#file")
 
-        return \
-            gdrivefs.normal_entry.NormalEntry('direct_read', response)
+        return gdrivefs.normal_entry.NormalEntry("direct_read", response)
 
     @_marshall
-    def list_files(self, query_contains_string=None, query_is_string=None, 
-                   parent_id=None):
-        
-        _logger.info("Listing all files. CONTAINS=[%s] IS=[%s] "
-                     "PARENT_ID=[%s]",
-                     query_contains_string 
-                        if query_contains_string is not None 
-                        else '<none>', 
-                     query_is_string 
-                        if query_is_string is not None 
-                        else '<none>', 
-                     parent_id 
-                        if parent_id is not None 
-                        else '<none>')
+    def list_files(
+        self, query_contains_string=None, query_is_string=None, parent_id=None
+    ):
+
+        _logger.info(
+            "Listing all files. CONTAINS=[%s] IS=[%s] " "PARENT_ID=[%s]",
+            query_contains_string if query_contains_string is not None else "<none>",
+            query_is_string if query_is_string is not None else "<none>",
+            parent_id if parent_id is not None else "<none>",
+        )
 
         client = self.__auth.get_client()
 

          
@@ 340,96 362,120 @@ class _GdriveManager:
             query_components.append("'%s' in parents" % (parent_id))
 
         if query_is_string:
-            query_components.append("title='%s'" % 
-                                    (gdrivefs.fsutility.escape_filename_for_query(query_is_string)))
+            query_components.append(
+                "title='%s'"
+                % (gdrivefs.fsutility.escape_filename_for_query(query_is_string))
+            )
         elif query_contains_string:
-            query_components.append("title contains '%s'" % 
-                                    (gdrivefs.fsutility.escape_filename_for_query(query_contains_string)))
+            query_components.append(
+                "title contains '%s'"
+                % (gdrivefs.fsutility.escape_filename_for_query(query_contains_string))
+            )
 
         # Make sure that we don't get any entries that we would have to ignore.
 
-        hidden_flags = gdrivefs.conf.Conf.get('hidden_flags_list_remote')
+        hidden_flags = gdrivefs.conf.Conf.get("hidden_flags_list_remote")
         if hidden_flags:
             for hidden_flag in hidden_flags:
                 query_components.append("%s = false" % (hidden_flag))
 
-        query = ' and '.join(query_components) if query_components else None
+        query = " and ".join(query_components) if query_components else None
 
         page_token = None
         page_num = 0
         entries = []
         while 1:
-            _logger.debug("Doing request for listing of files with page-"
-                          "token [%s] and page-number (%d): %s",
-                          page_token, page_num, query)
+            _logger.debug(
+                "Doing request for listing of files with page-"
+                "token [%s] and page-number (%d): %s",
+                page_token,
+                page_num,
+                query,
+            )
 
-            result = client.files().list(q=query, pageToken=page_token).\
-                        execute()
-
-            self.__assert_response_kind(result, 'drive#fileList')
+            result = client.files().list(q=query, pageToken=page_token).execute()
 
-            _logger.debug("(%d) entries were presented for page-number "
-                          "(%d).", len(result['items']), page_num)
+            self.__assert_response_kind(result, "drive#fileList")
 
-            for entry_raw in result['items']:
-                entry = \
-                    gdrivefs.normal_entry.NormalEntry(
-                        'list_files', 
-                        entry_raw)
+            _logger.debug(
+                "(%d) entries were presented for page-number " "(%d).",
+                len(result["items"]),
+                page_num,
+            )
+
+            for entry_raw in result["items"]:
+                entry = gdrivefs.normal_entry.NormalEntry("list_files", entry_raw)
 
                 entries.append(entry)
 
-            if 'nextPageToken' not in result:
+            if "nextPageToken" not in result:
                 _logger.debug("No more pages in file listing.")
                 break
 
-            _logger.debug("Next page-token in file-listing is [%s].", 
-                          result['nextPageToken'])
+            _logger.debug(
+                "Next page-token in file-listing is [%s].", result["nextPageToken"]
+            )
 
-            page_token = result['nextPageToken']
+            page_token = result["nextPageToken"]
             page_num += 1
 
         return entries
 
     @_marshall
-    def download_to_local(self, output_file_path, normalized_entry, 
-                          mime_type=None, allow_cache=True):
-        """Download the given file. If we've cached a previous download and the 
-        mtime hasn't changed, re-use. The third item returned reflects whether 
+    def download_to_local(
+        self, output_file_path, normalized_entry, mime_type=None, allow_cache=True
+    ):
+        """Download the given file. If we've cached a previous download and the
+        mtime hasn't changed, re-use. The third item returned reflects whether
         the data has changed since any prior attempts.
         """
 
-        _logger.info("Downloading entry with ID [%s] and mime-type [%s] to "
-                     "[%s].", normalized_entry.id, mime_type, output_file_path)
+        _logger.info(
+            "Downloading entry with ID [%s] and mime-type [%s] to " "[%s].",
+            normalized_entry.id,
+            mime_type,
+            output_file_path,
+        )
 
         if mime_type is None:
             if normalized_entry.mime_type in normalized_entry.download_links:
                 mime_type = normalized_entry.mime_type
 
-                _logger.debug("Electing file mime-type for download: [%s]", 
-                              normalized_entry.mime_type)
-            elif gdrivefs.constants.OCTET_STREAM_MIMETYPE \
-                    in normalized_entry.download_links:
+                _logger.debug(
+                    "Electing file mime-type for download: [%s]",
+                    normalized_entry.mime_type,
+                )
+            elif (
+                gdrivefs.constants.OCTET_STREAM_MIMETYPE
+                in normalized_entry.download_links
+            ):
                 mime_type = gdrivefs.constants.OCTET_STREAM_MIMETYPE
 
                 _logger.debug("Electing octet-stream for download.")
             else:
-                raise ValueError("Could not determine what to fallback to for "
-                                 "the mimetype: {}".format(
-                                 normalized_entry.mime_type))
+                raise ValueError(
+                    "Could not determine what to fallback to for "
+                    "the mimetype: {}".format(normalized_entry.mime_type)
+                )
 
-        if mime_type != normalized_entry.mime_type and \
-                mime_type not in normalized_entry.download_links:
-            message = ("Entry with ID [%s] can not be exported to type [%s]. "
-                       "The available types are: %s" % 
-                       (normalized_entry.id, mime_type, 
-                        ', '.join(list(normalized_entry.download_links.keys()))))
+        if (
+            mime_type != normalized_entry.mime_type
+            and mime_type not in normalized_entry.download_links
+        ):
+            message = (
+                "Entry with ID [%s] can not be exported to type [%s]. "
+                "The available types are: %s"
+                % (
+                    normalized_entry.id,
+                    mime_type,
+                    ", ".join(list(normalized_entry.download_links.keys())),
+                )
+            )
 
             _logger.warning(message)
             raise gdrivefs.errors.ExportFormatError(message)
 
-        gd_mtime_epoch = time.mktime(
-                            normalized_entry.modified_date.timetuple())
+        gd_mtime_epoch = time.mktime(normalized_entry.modified_date.timetuple())
 
         _logger.info("File will be downloaded to [%s].", output_file_path)
 

          
@@ 444,8 490,9 @@ class _GdriveManager:
         if use_cache:
             # Use the cache. It's fine.
 
-            _logger.info("File retrieved from the previously downloaded, "
-                         "still-current file.")
+            _logger.info(
+                "File retrieved from the previously downloaded, " "still-current file."
+            )
 
             return (stat_info.st_size, False)
 

          
@@ 455,37 502,40 @@ class _GdriveManager:
 
         url = normalized_entry.download_links[mime_type]
 
-        with open(output_file_path, 'wb') as f:
-            downloader = gdrivefs.chunked_download.ChunkedDownload(
-                            f, 
-                            authed_http, 
-                            url)
+        with open(output_file_path, "wb") as f:
+            downloader = gdrivefs.chunked_download.ChunkedDownload(f, authed_http, url)
 
             progresses = []
 
             while 1:
                 status, done, total_size = downloader.next_chunk()
-                assert status.total_size is not None, \
-                       "total_size is None"
+                assert status.total_size is not None, "total_size is None"
 
-                _logger.debug("Read chunk: STATUS=[%s] DONE=[%s] "
-                              "TOTAL_SIZE=[%s]", status, done, total_size)
+                _logger.debug(
+                    "Read chunk: STATUS=[%s] DONE=[%s] " "TOTAL_SIZE=[%s]",
+                    status,
+                    done,
+                    total_size,
+                )
 
                 if status.total_size > 0:
                     percent = status.progress()
                 else:
                     percent = 100.0
 
-                _logger.debug("Chunk: PROGRESS=[%s] TOTAL-SIZE=[%s] "
-                              "RESUMABLE-PROGRESS=[%s]",
-                              percent, status.total_size, 
-                              status.resumable_progress)
+                _logger.debug(
+                    "Chunk: PROGRESS=[%s] TOTAL-SIZE=[%s] " "RESUMABLE-PROGRESS=[%s]",
+                    percent,
+                    status.total_size,
+                    status.resumable_progress,
+                )
 
-# TODO(dustin): This just places an arbitrary limit on the number of empty 
-#               chunks we can receive. Can we drop this to 1?
+                # TODO(dustin): This just places an arbitrary limit on the number of empty
+                #               chunks we can receive. Can we drop this to 1?
                 if len(progresses) >= _MAX_EMPTY_CHUNKS:
-                    assert percent > progresses[0], \
-                           "Too many empty chunks have been received."
+                    assert (
+                        percent > progresses[0]
+                    ), "Too many empty chunks have been received."
 
                 progresses.append(percent)
 

          
@@ 505,35 555,35 @@ class _GdriveManager:
     @_marshall
     def create_directory(self, filename, parents, **kwargs):
 
-        mimetype_directory = gdrivefs.conf.Conf.get('directory_mimetype')
+        mimetype_directory = gdrivefs.conf.Conf.get("directory_mimetype")
         return self.__insert_entry(
-                False,
-                filename, 
-                parents,
-                mimetype_directory, 
-                **kwargs)
+            False, filename, parents, mimetype_directory, **kwargs
+        )
 
     @_marshall
-    def create_file(self, filename, parents, mime_type, data_filepath=None, 
-                    **kwargs):
-# TODO: It doesn't seem as if the created file is being registered.
-        # Even though we're supposed to provide an extension, we can get away 
-        # without having one. We don't want to impose this when acting like a 
+    def create_file(self, filename, parents, mime_type, data_filepath=None, **kwargs):
+        # TODO: It doesn't seem as if the created file is being registered.
+        # Even though we're supposed to provide an extension, we can get away
+        # without having one. We don't want to impose this when acting like a
         # normal FS.
 
         return self.__insert_entry(
-                True,
-                filename,
-                parents,
-                mime_type,
-                data_filepath=data_filepath,
-                **kwargs)
+            True, filename, parents, mime_type, data_filepath=data_filepath, **kwargs
+        )
 
     @_marshall
-    def __insert_entry(self, is_file, filename, parents, mime_type, 
-                       data_filepath=None, modified_datetime=None, 
-                       accessed_datetime=None, is_hidden=False, 
-                       description=None):
+    def __insert_entry(
+        self,
+        is_file,
+        filename,
+        parents,
+        mime_type,
+        data_filepath=None,
+        modified_datetime=None,
+        accessed_datetime=None,
+        is_hidden=False,
+        description=None,
+    ):
 
         if parents is None:
             parents = []

          
@@ 541,72 591,73 @@ class _GdriveManager:
         now_phrase = gdrivefs.time_support.get_flat_normal_fs_time_from_dt()
 
         if modified_datetime is None:
-            modified_datetime = now_phrase 
-    
+            modified_datetime = now_phrase
+
         if accessed_datetime is None:
-            accessed_datetime = now_phrase 
+            accessed_datetime = now_phrase
 
-        _logger.info("Creating entry with filename [%s] under parent(s) "
-                     "[%s] with mime-type [%s]. MTIME=[%s] ATIME=[%s] "
-                     "DATA_FILEPATH=[%s]",
-                     filename, ', '.join(parents), mime_type, 
-                     modified_datetime, accessed_datetime, data_filepath)
+        _logger.info(
+            "Creating entry with filename [%s] under parent(s) "
+            "[%s] with mime-type [%s]. MTIME=[%s] ATIME=[%s] "
+            "DATA_FILEPATH=[%s]",
+            filename,
+            ", ".join(parents),
+            mime_type,
+            modified_datetime,
+            accessed_datetime,
+            data_filepath,
+        )
 
         client = self.__auth.get_client()
 
         ## Create request-body.
 
-        body = { 
-                'title': filename, 
-                'parents': [dict(id=parent) for parent in parents], 
-                'labels': { "hidden": is_hidden }, 
-                'mimeType': mime_type,
-            }
+        body = {
+            "title": filename,
+            "parents": [dict(id=parent) for parent in parents],
+            "labels": {"hidden": is_hidden},
+            "mimeType": mime_type,
+        }
 
         if description is not None:
-            body['description'] = description
+            body["description"] = description
 
         if modified_datetime is not None:
-            body['modifiedDate'] = modified_datetime
+            body["modifiedDate"] = modified_datetime
 
         if accessed_datetime is not None:
-            body['lastViewedByMeDate'] = accessed_datetime
+            body["lastViewedByMeDate"] = accessed_datetime
 
         ## Create request-arguments.
 
         args = {
-            'body': body,
+            "body": body,
         }
 
         if data_filepath:
-            args.update({
-                'media_body': 
-                    apiclient.http.MediaFileUpload(
-                        data_filepath, 
-                        mimetype=mime_type, 
+            args.update(
+                {
+                    "media_body": apiclient.http.MediaFileUpload(
+                        data_filepath,
+                        mimetype=mime_type,
                         resumable=True,
-                        chunksize=_DEFAULT_UPLOAD_CHUNK_SIZE_B),
-# TODO(dustin): Documented, but does not exist.
-#                'uploadType': 'resumable',
-            })
+                        chunksize=_DEFAULT_UPLOAD_CHUNK_SIZE_B,
+                    ),
+                    # TODO(dustin): Documented, but does not exist.
+                    #                'uploadType': 'resumable',
+                }
+            )
 
         if gdrivefs.config.IS_DEBUG is True:
-            _logger.debug("Doing file-insert with:\n%s", 
-                          pprint.pformat(args))
+            _logger.debug("Doing file-insert with:\n%s", pprint.pformat(args))
 
         request = client.files().insert(**args)
 
-        response = self.__finish_upload(
-                    filename,
-                    request,
-                    data_filepath is not None)
+        response = self.__finish_upload(filename, request, data_filepath is not None)
 
-        self.__assert_response_kind(response, 'drive#file')
+        self.__assert_response_kind(response, "drive#file")
 
-        normalized_entry = \
-            gdrivefs.normal_entry.NormalEntry(
-                'insert_entry', 
-                response)
+        normalized_entry = gdrivefs.normal_entry.NormalEntry("insert_entry", response)
 
         _logger.info("New entry created with ID [%s].", normalized_entry.id)
 

          
@@ 619,120 670,130 @@ class _GdriveManager:
 
         client = self.__auth.get_client()
 
-        file_ = \
-            apiclient.http.MediaFileUpload(
-                '/dev/null',
-                mimetype=normalized_entry.mime_type)
+        file_ = apiclient.http.MediaFileUpload(
+            "/dev/null", mimetype=normalized_entry.mime_type
+        )
 
-        args = { 
-            'fileId': normalized_entry.id, 
-# TODO(dustin): Can we omit 'body'?
-            'body': {}, 
-            'media_body': file_,
+        args = {
+            "fileId": normalized_entry.id,
+            # TODO(dustin): Can we omit 'body'?
+            "body": {},
+            "media_body": file_,
         }
 
         response = client.files().update(**args).execute()
-        self.__assert_response_kind(response, 'drive#file')
+        self.__assert_response_kind(response, "drive#file")
 
         _logger.debug("Truncate complete: [%s]", normalized_entry.id)
 
         return response
 
     @_marshall
-    def update_entry(self, normalized_entry, filename=None, data_filepath=None, 
-                     mime_type=None, parents=None, modified_datetime=None, 
-                     accessed_datetime=None, is_hidden=False, 
-                     description=None):
+    def update_entry(
+        self,
+        normalized_entry,
+        filename=None,
+        data_filepath=None,
+        mime_type=None,
+        parents=None,
+        modified_datetime=None,
+        accessed_datetime=None,
+        is_hidden=False,
+        description=None,
+    ):
 
         _logger.info("Updating entry [%s].", normalized_entry)
 
         client = self.__auth.get_client()
 
         # Build request-body.
-        
+
         body = {}
 
         if mime_type is None:
             mime_type = normalized_entry.mime_type
 
-        body['mimeType'] = mime_type 
+        body["mimeType"] = mime_type
 
         if filename is not None:
-            body['title'] = filename
-        
+            body["title"] = filename
+
         if parents is not None:
-            body['parents'] = parents
+            body["parents"] = parents
 
         if is_hidden is not None:
-            body['labels'] = { "hidden": is_hidden }
+            body["labels"] = {"hidden": is_hidden}
 
         if description is not None:
-            body['description'] = description
+            body["description"] = description
 
         set_mtime = True
         if modified_datetime is not None:
-            body['modifiedDate'] = modified_datetime
+            body["modifiedDate"] = modified_datetime
         else:
-            body['modifiedDate'] = \
-                gdrivefs.time_support.get_flat_normal_fs_time_from_dt()
+            body[
+                "modifiedDate"
+            ] = gdrivefs.time_support.get_flat_normal_fs_time_from_dt()
 
         if accessed_datetime is not None:
             set_atime = True
-            body['lastViewedByMeDate'] = accessed_datetime
+            body["lastViewedByMeDate"] = accessed_datetime
         else:
             set_atime = False
 
         # Build request-arguments.
 
-        args = { 
-            'fileId': normalized_entry.id, 
-            'body': body, 
-            'setModifiedDate': set_mtime, 
-            'updateViewedDate': set_atime,
+        args = {
+            "fileId": normalized_entry.id,
+            "body": body,
+            "setModifiedDate": set_mtime,
+            "updateViewedDate": set_atime,
         }
 
         if data_filepath is not None:
-            _logger.debug("We'll be sending a file in the update: [%s] [%s]", 
-                          normalized_entry.id, data_filepath)
+            _logger.debug(
+                "We'll be sending a file in the update: [%s] [%s]",
+                normalized_entry.id,
+                data_filepath,
+            )
 
             # We can only upload large files using resumable-uploads.
-            args.update({
-                'media_body': 
-                    apiclient.http.MediaFileUpload(
-                        data_filepath, 
-                        mimetype=mime_type, 
+            args.update(
+                {
+                    "media_body": apiclient.http.MediaFileUpload(
+                        data_filepath,
+                        mimetype=mime_type,
                         resumable=True,
-                        chunksize=_DEFAULT_UPLOAD_CHUNK_SIZE_B),
-# TODO(dustin): Documented, but does not exist.
-#                'uploadType': 'resumable',
-            })
+                        chunksize=_DEFAULT_UPLOAD_CHUNK_SIZE_B,
+                    ),
+                    # TODO(dustin): Documented, but does not exist.
+                    #                'uploadType': 'resumable',
+                }
+            )
 
         _logger.debug("Sending entry update: [%s]", normalized_entry.id)
 
         request = client.files().update(**args)
 
         result = self.__finish_upload(
-                    normalized_entry.title,
-                    request,
-                    data_filepath is not None)
+            normalized_entry.title, request, data_filepath is not None
+        )
 
-        normalized_entry = \
-            gdrivefs.normal_entry.NormalEntry('update_entry', result)
+        normalized_entry = gdrivefs.normal_entry.NormalEntry("update_entry", result)
 
         _logger.debug("Entry updated: [%s]", normalized_entry)
 
         return normalized_entry
 
     def __finish_upload(self, filename, request, has_file):
-        """Finish a resumable-upload is a file was given, or just execute the 
+        """Finish a resumable-upload is a file was given, or just execute the
         request if not.
         """
 
         if has_file is False:
             return request.execute()
 
-        _logger.debug("We need to finish updating the entry's data: [%s]", 
-                      filename)
+        _logger.debug("We need to finish updating the entry's data: [%s]", filename)
 
         result = None
         while result is None:

          
@@ 742,8 803,9 @@ class _GdriveManager:
                 if status.total_size == 0:
                     _logger.debug("Uploaded (zero-length): [%s]", filename)
                 else:
-                    _logger.debug("Uploaded [%s]: %.2f%%", 
-                                  filename, status.progress() * 100)
+                    _logger.debug(
+                        "Uploaded [%s]: %.2f%%", filename, status.progress() * 100
+                    )
 
         return result
 

          
@@ 753,11 815,16 @@ class _GdriveManager:
         result = gdrivefs.fsutility.split_path_nolookups(new_filename)
         (path, filename_stripped, mime_type, is_hidden) = result
 
-        _logger.debug("Renaming entry [%s] to [%s]. IS_HIDDEN=[%s]",
-                      normalized_entry, filename_stripped, is_hidden)
+        _logger.debug(
+            "Renaming entry [%s] to [%s]. IS_HIDDEN=[%s]",
+            normalized_entry,
+            filename_stripped,
+            is_hidden,
+        )
 
-        return self.update_entry(normalized_entry, filename=filename_stripped, 
-                                 is_hidden=is_hidden)
+        return self.update_entry(
+            normalized_entry, filename=filename_stripped, is_hidden=is_hidden
+        )
 
     @_marshall
     def remove_entry(self, normalized_entry):

          
@@ 766,24 833,30 @@ class _GdriveManager:
 
         client = self.__auth.get_client()
 
-        args = { 'fileId': normalized_entry.id }
+        args = {"fileId": normalized_entry.id}
 
         try:
             result = client.files().delete(**args).execute()
         except Exception as e:
-            if e.__class__.__name__ == 'HttpError' and \
-               str(e).find('File not found') != -1:
+            if (
+                e.__class__.__name__ == "HttpError"
+                and str(e).find("File not found") != -1
+            ):
                 raise NameError(normalized_entry.id)
 
-            _logger.exception("Could not send delete for entry with ID [%s].",
-                              normalized_entry.id)
+            _logger.exception(
+                "Could not send delete for entry with ID [%s].", normalized_entry.id
+            )
             raise
 
         _logger.info("Entry deleted successfully.")
 
+
 _THREAD_STORAGE = None
+
+
 def get_gdrive():
-    """Return an instance of _GdriveManager unique to each thread (we can't 
+    """Return an instance of _GdriveManager unique to each thread (we can't
     reuse sockets between threads).
     """
 

          
M gdrivefs/errors.py +8 -1
@@ 4,34 4,41 @@ class GdFsError(Exception):
 
 class AuthorizationError(GdFsError):
     """All authorization-related errors inherit from this."""
+
     pass
 
 
 class AuthorizationFailureError(AuthorizationError):
     """There was a general authorization failure."""
+
     pass
-        
+
 
 class AuthorizationFaultError(AuthorizationError):
     """Our authorization is not available or has expired."""
+
     pass
 
 
 class MustIgnoreFileError(GdFsError):
     """An error requiring us to ignore the file."""
+
     pass
 
 
 class FilenameQuantityError(MustIgnoreFileError):
     """Too many filenames share the same name in a single directory."""
+
     pass
 
 
 class ExportFormatError(GdFsError):
     """A format was not available for export."""
+
     pass
 
 
 class GdNotFoundError(GdFsError):
     """A file/path was not found."""
+
     pass
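
All of these exceptions ultimately derive from GdFsError, so callers can catch the whole family through the base classes. A short illustrative check of the hierarchy as defined above:

    import gdrivefs.errors

    # Specific errors remain catchable via their bases.
    assert issubclass(
        gdrivefs.errors.AuthorizationFaultError, gdrivefs.errors.GdFsError
    )
    assert issubclass(
        gdrivefs.errors.FilenameQuantityError, gdrivefs.errors.MustIgnoreFileError
    )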

          
M gdrivefs/fsutility.py +56 -43
@@ 7,41 7,42 @@ import re
 
 _logger = logging.getLogger(__name__)
 
-def dec_hint(argument_names=[], excluded=[], prefix='', otherdata_cb=None):
-    """A decorator for the calling of functions to be emphasized in the 
+
+def dec_hint(argument_names=[], excluded=[], prefix="", otherdata_cb=None):
+    """A decorator for the calling of functions to be emphasized in the
     logging. Displays prefix and suffix information in the logs.
     """
 
     # We use a serial-number so that we can eyeball corresponding pairs of
     # beginning and ending statements in the logs.
-    sn = getattr(dec_hint, 'sn', 0) + 1
+    sn = getattr(dec_hint, "sn", 0) + 1
     dec_hint.sn = sn
 
-    prefix = ("%s: " % (prefix)) if prefix else ''
+    prefix = ("%s: " % (prefix)) if prefix else ""
 
     def real_decorator(f):
         def wrapper(*args, **kwargs):
-        
+
             try:
                 pid = fuse_get_context()[2]
             except:
                 # Just in case.
                 pid = 0
-        
+
             if not prefix:
-                _logger.debug("-----------------------------------------------"
-                              "---")
+                _logger.debug("-----------------------------------------------" "---")
 
-            _logger.debug("%s>>>>>>>>>> %s(%d) >>>>>>>>>> (%d)",
-                          prefix, f.__name__, sn, pid)
-        
+            _logger.debug(
+                "%s>>>>>>>>>> %s(%d) >>>>>>>>>> (%d)", prefix, f.__name__, sn, pid
+            )
+
             if args or kwargs:
                 condensed = {}
                 for i in range(len(args)):
                     # Skip the 'self' argument.
                     if i == 0:
                         continue
-                
+
                     if i - 1 >= len(argument_names):
                         break
 

          
@@ 50,67 51,78 @@ def dec_hint(argument_names=[], excluded
                 for k, v in list(kwargs.items()):
                     condensed[k] = v
 
-                values_nice = [("%s= [%s]" % (k, v)) for k, v \
-                                                     in list(condensed.items()) \
-                                                     if k not in excluded]
-                
+                values_nice = [
+                    ("%s= [%s]" % (k, v))
+                    for k, v in list(condensed.items())
+                    if k not in excluded
+                ]
+
                 if otherdata_cb:
                     data = otherdata_cb(*args, **kwargs)
                     for k, v in list(data.items()):
                         values_nice[k] = v
-                
+
                 if values_nice:
-                    values_string = '  '.join(values_nice)
+                    values_string = "  ".join(values_nice)
                     _logger.debug("DATA: %s", values_string)
 
-            suffix = ''
+            suffix = ""
 
             try:
                 result = f(*args, **kwargs)
             except FuseOSError as e:
                 if e.errno not in (errno.ENOENT,):
-                    _logger.error("FUSE error [%s] (%s) will be forwarded "
-                                  "back to GDFS from [%s]: %s", 
-                                  e.__class__.__name__, e.errno, f.__name__, 
-                                  str(e))
+                    _logger.error(
+                        "FUSE error [%s] (%s) will be forwarded "
+                        "back to GDFS from [%s]: %s",
+                        e.__class__.__name__,
+                        e.errno,
+                        f.__name__,
+                        str(e),
+                    )
                 raise
             except Exception as e:
                 _logger.exception("There was an exception in [%s]", f.__name__)
-                suffix = (' (E(%s): "%s")' % (e.__class__.__name__, str(e)))
+                suffix = ' (E(%s): "%s")' % (e.__class__.__name__, str(e))
                 raise
             finally:
-                _logger.debug("%s<<<<<<<<<< %s(%d) (%d)%s", 
-                              prefix, f.__name__, sn, pid, suffix)
-            
+                _logger.debug(
+                    "%s<<<<<<<<<< %s(%d) (%d)%s", prefix, f.__name__, sn, pid, suffix
+                )
+
             return result
+
         return wrapper
+
     return real_decorator
 
+
 def strip_export_type(path):
 
     matched = re.search(
-                r'#([a-zA-Z0-9\-]+\\+[a-zA-Z0-9\-]+)?$'.encode('utf-8'),
-                path.encode('utf-8'))
+        r"#([a-zA-Z0-9\-]+\\+[a-zA-Z0-9\-]+)?$".encode("utf-8"), path.encode("utf-8")
+    )
 
     mime_type = None
 
     if matched:
         fragment = matched.group(0)
         mime_type = matched.group(1)
-        
+
         if mime_type is not None:
-            mime_type = mime_type.replace('+', '/')
+            mime_type = mime_type.replace("+", "/")
 
-        path = path[:-len(fragment)]
+        path = path[: -len(fragment)]
 
     return (path, mime_type)
 
+
 def split_path(filepath_original, pathresolver_cb):
     """Completely process and distill the requested file-path. The filename can"
-    be padded to adjust what's being requested. This will remove all such 
+    be padded to adjust what's being requested. This will remove all such
     information, and return the actual file-path along with the extra meta-
     information. pathresolver_cb should expect a single parameter of a path,
-    and return a NormalEntry object. This can be used for both directories and 
+    and return a NormalEntry object. This can be used for both directories and
     files.
     """
 

          
@@ 124,10 136,9 @@ def split_path(filepath_original, pathre
 
     try:
         path_resolution = pathresolver_cb(path)
-# TODO(dustin): We need to specify the exception for when a file doesn't exist.
+    # TODO(dustin): We need to specify the exception for when a file doesn't exist.
     except:
-        _logger.exception("Exception while getting entry from path [%s].", 
-                          path)
+        _logger.exception("Exception while getting entry from path [%s].", path)
 
         raise GdNotFoundError()
 

          
@@ 136,12 147,13 @@ def split_path(filepath_original, pathre
 
     (parent_entry, parent_clause) = path_resolution
 
-    is_hidden = (filename[0] == '.') if filename else False
+    is_hidden = (filename[0] == ".") if filename else False
 
     return (parent_clause, path, filename, mime_type, is_hidden)
 
+
 def split_path_nolookups(filepath_original):
-    """This allows us to get the is-hidden flag, mimetype info, path, and 
+    """This allows us to get the is-hidden flag, mimetype info, path, and
     filename, without doing the [time consuming] lookup if unnecessary.
     """
 

          
@@ 152,15 164,16 @@ def split_path_nolookups(filepath_origin
     (path, filename) = split(filepath)
 
     # We don't remove the period, if we will mark it as hidden, as appropriate.
-    is_hidden = (filename[0] == '.') if filename else False
+    is_hidden = (filename[0] == ".") if filename else False
 
     return (path, filename, mime_type, is_hidden)
 
+
 def build_filepath(path, filename):
-    separator = '/' if path != '/' else ''
+    separator = "/" if path != "/" else ""
 
-    return ('%s%s%s' % (path, separator, filename))
+    return "%s%s%s" % (path, separator, filename)
+
 
 def escape_filename_for_query(filename):
     return filename.replace("\\", "\\\\").replace("'", "\\'")
-

          
M gdrivefs/gdfuse.py +241 -212
@@ 26,28 26,30 @@ import stat
 
 # TODO: make sure strip_extension and split_path are used when each are relevant
 # TODO: make sure create path reserves a file-handle, uploads the data, and then registers the open-file with the file-handle.
-# TODO: Make sure that we rely purely on the FH, whenever it is given, 
+# TODO: Make sure that we rely purely on the FH, whenever it is given,
 #       whereever it appears. This will be to accomodate system calls that can work either via file-path or file-handle.
 
+
 def set_datetime_tz(datetime_obj, tz):
     return datetime_obj.replace(tzinfo=tz)
 
+
 def get_entry_or_raise(raw_path, allow_normal_for_missing=False):
     try:
         result = split_path(raw_path, path_resolver)
         (parent_clause, path, filename, mime_type, is_hidden) = result
     except GdNotFoundError:
-        _logger.exception("Could not retrieve clause for non-existent "
-                          "file-path [%s] (parent does not exist)." % 
-                          (raw_path))
+        _logger.exception(
+            "Could not retrieve clause for non-existent "
+            "file-path [%s] (parent does not exist)." % (raw_path)
+        )
 
         if allow_normal_for_missing is True:
             raise
         else:
             raise FuseOSError(ENOENT)
     except:
-        _logger.exception("Could not process file-path [%s]." % 
-                          (raw_path))
+        _logger.exception("Could not process file-path [%s]." % (raw_path))
         raise FuseOSError(EIO)
 
     filepath = build_filepath(path, filename)

          
@@ 56,17 58,17 @@ def get_entry_or_raise(raw_path, allow_n
     try:
         entry_clause = path_relations.get_clause_from_path(filepath)
     except GdNotFoundError:
-        _logger.exception("Could not retrieve clause for non-existent "
-                          "file-path [%s] (parent exists)." % 
-                          (filepath))
+        _logger.exception(
+            "Could not retrieve clause for non-existent "
+            "file-path [%s] (parent exists)." % (filepath)
+        )
 
         if allow_normal_for_missing is True:
             raise
         else:
             raise FuseOSError(ENOENT)
     except:
-        _logger.exception("Could not retrieve clause for path [%s]. " %
-                          (filepath))
+        _logger.exception("Could not retrieve clause for path [%s]. " % (filepath))
         raise FuseOSError(EIO)
 
     if not entry_clause:

          
@@ 102,62 104,64 @@ class _GdfsMixin:
         block_size_b = gdrivefs.config.fs.CALCULATION_BLOCK_SIZE
 
         if entry.is_directory:
-            effective_permission = int(Conf.get('default_perm_folder'), 
-                                       8)
+            effective_permission = int(Conf.get("default_perm_folder"), 8)
         elif entry.editable:
-            effective_permission = int(Conf.get('default_perm_file_editable'), 
-                                       8)
+            effective_permission = int(Conf.get("default_perm_file_editable"), 8)
         else:
-            effective_permission = int(Conf.get(
-                                            'default_perm_file_noneditable'), 
-                                       8)
+            effective_permission = int(Conf.get("default_perm_file_noneditable"), 8)
 
-        stat_result = { "st_mtime": entry.modified_date_epoch, # modified time.
-                        "st_ctime": entry.modified_date_epoch, # changed time.
-                        "st_atime": time(),
-                        "st_uid":   uid,
-                        "st_gid":   gid }
-        
+        stat_result = {
+            "st_mtime": entry.modified_date_epoch,  # modified time.
+            "st_ctime": entry.modified_date_epoch,  # changed time.
+            "st_atime": time(),
+            "st_uid": uid,
+            "st_gid": gid,
+        }
+
         if entry.is_directory:
-            # Per http://sourceforge.net/apps/mediawiki/fuse/index.php?title=SimpleFilesystemHowto, 
+            # Per http://sourceforge.net/apps/mediawiki/fuse/index.php?title=SimpleFilesystemHowto,
             # default size should be 4K.
-# TODO(dustin): Should we just make this (0), since that's what it is?
+            # TODO(dustin): Should we just make this (0), since that's what it is?
             stat_result["st_size"] = 1024 * 4
-            stat_result["st_mode"] = (stat.S_IFDIR | effective_permission)
+            stat_result["st_mode"] = stat.S_IFDIR | effective_permission
             stat_result["st_nlink"] = 2
         else:
-            stat_result["st_size"] = DisplacedFile.file_size \
-                                        if entry.requires_mimetype \
-                                        else entry.file_size
+            stat_result["st_size"] = (
+                DisplacedFile.file_size if entry.requires_mimetype else entry.file_size
+            )
 
-            stat_result["st_mode"] = (stat.S_IFREG | effective_permission)
+            stat_result["st_mode"] = stat.S_IFREG | effective_permission
             stat_result["st_nlink"] = 1
 
-        stat_result["st_blocks"] = \
-            int(math.ceil(float(stat_result["st_size"]) / block_size_b))
-  
+        stat_result["st_blocks"] = int(
+            math.ceil(float(stat_result["st_size"]) / block_size_b)
+        )
+
         return stat_result
 
-    @dec_hint(['raw_path', 'fh'])
+    @dec_hint(["raw_path", "fh"])
     def getattr(self, raw_path, fh=None):
         """Return a stat() structure."""
-# TODO: Implement handle.
+        # TODO: Implement handle.
 
         (entry, path, filename) = get_entry_or_raise(raw_path)
         return self.__build_stat_from_entry(entry)
 
-    @dec_hint(['path', 'offset'])
+    @dec_hint(["path", "offset"])
     def readdir(self, path, offset):
         """A generator returning one base filename at a time."""
 
         # We expect "offset" to always be (0).
         if offset != 0:
-            _logger.warning("readdir() has been invoked for path [%s] and "
-                            "non-zero offset (%d). This is not allowed.",
-                            path, offset)
+            _logger.warning(
+                "readdir() has been invoked for path [%s] and "
+                "non-zero offset (%d). This is not allowed.",
+                path,
+                offset,
+            )
 
-# TODO: Once we start working on the cache, make sure we don't make this call, 
-#       constantly.
+        # TODO: Once we start working on the cache, make sure we don't make this call,
+        #       constantly.
 
         path_relations = PathRelations.get_instance()
 

          
@@ 167,37 171,38 @@ class _GdfsMixin:
             _logger.exception("Could not process [%s] (readdir).")
             raise FuseOSError(ENOENT)
         except:
-            _logger.exception("Could not get clause from path [%s] "
-                              "(readdir)." % (path))
+            _logger.exception(
+                "Could not get clause from path [%s] " "(readdir)." % (path)
+            )
             raise FuseOSError(EIO)
 
         if not entry_clause:
             raise FuseOSError(ENOENT)
 
         try:
-            entry_tuples = path_relations.get_children_entries_from_entry_id \
-                            (entry_clause[CLAUSE_ID])
+            entry_tuples = path_relations.get_children_entries_from_entry_id(
+                entry_clause[CLAUSE_ID]
+            )
         except:
-            _logger.exception("Could not render list of filenames under path "
-                              "[%s].", path)
+            _logger.exception(
+                "Could not render list of filenames under path " "[%s].", path
+            )
 
             raise FuseOSError(EIO)
 
-        yield utility.translate_filename_charset('.')
-        yield utility.translate_filename_charset('..')
+        yield utility.translate_filename_charset(".")
+        yield utility.translate_filename_charset("..")
 
         for (filename, entry) in entry_tuples:
 
-            # Decorate any file that -requires- a mime-type (all files can 
+            # Decorate any file that -requires- a mime-type (all files can
             # merely accept a mime-type)
             if entry.requires_mimetype:
-                filename += utility.translate_filename_charset('#')
-        
-            yield (filename,
-                   self.__build_stat_from_entry(entry),
-                   0)
+                filename += utility.translate_filename_charset("#")
 
-    @dec_hint(['raw_path', 'length', 'offset', 'fh'])
+            yield (filename, self.__build_stat_from_entry(entry), 0)
+
+    @dec_hint(["raw_path", "length", "offset", "fh"])
     def read(self, raw_path, length, offset, fh):
 
         om = gdrivefs.opened_file.get_om()

          
@@ 205,8 210,9 @@ class _GdfsMixin:
         try:
             opened_file = om.get_by_fh(fh)
         except:
-            _logger.exception("Could not retrieve OpenedFile for handle with"
-                              "ID (%d) (read).", fh)
+            _logger.exception(
+                "Could not retrieve OpenedFile for handle with" "ID (%d) (read).", fh
+            )
 
             raise FuseOSError(EIO)
 

          
@@ 216,11 222,11 @@ class _GdfsMixin:
             _logger.exception("Could not read data.")
             raise FuseOSError(EIO)
 
-    @dec_hint(['filepath', 'mode'])
+    @dec_hint(["filepath", "mode"])
     def mkdir(self, filepath, mode):
         """Create the given directory."""
 
-# TODO: Implement the "mode".
+        # TODO: Implement the "mode".
 
         try:
             result = split_path(filepath, path_resolver)

          
@@ 236,20 242,23 @@ class _GdfsMixin:
         gd = get_gdrive()
 
         try:
-            entry = gd.create_directory(
-                        filename, 
-                        [parent_id], 
-                        is_hidden=is_hidden)
+            entry = gd.create_directory(filename, [parent_id], is_hidden=is_hidden)
         except:
-            _logger.exception("Could not create directory with name [%s] "
-                              "and parent with ID [%s].",
-                              filename, parent_clause[0].id)
+            _logger.exception(
+                "Could not create directory with name [%s] " "and parent with ID [%s].",
+                filename,
+                parent_clause[0].id,
+            )
             raise FuseOSError(EIO)
 
-        _logger.info("Directory [%s] created as ID [%s] under parent with "
-                     "ID [%s].", filepath, entry.id, parent_id)
+        _logger.info(
+            "Directory [%s] created as ID [%s] under parent with " "ID [%s].",
+            filepath,
+            entry.id,
+            parent_id,
+        )
 
-        #parent_clause[4] = False
+        # parent_clause[4] = False
 
         path_relations = PathRelations.get_instance()
 

          
@@ 259,15 268,15 @@ class _GdfsMixin:
             _logger.exception("Could not register new directory in cache.")
             raise FuseOSError(EIO)
 
-# TODO: Find a way to implement or enforce 'mode'.
+    # TODO: Find a way to implement or enforce 'mode'.
     def __create(self, filepath, mode=None):
         """Create a new file.
-                
-        We don't implement "mode" (permissions) because the model doesn't agree 
+
+        We don't implement "mode" (permissions) because the model doesn't agree
         with GD.
         """
 
-# TODO: Fail if it already exists.
+        # TODO: Fail if it already exists.
 
         try:
             result = split_path(filepath, path_resolver)

          
@@ 276,13 285,12 @@ class _GdfsMixin:
             _logger.exception("Could not process [%s] (i-create).", filepath)
             raise FuseOSError(ENOENT)
         except:
-            _logger.exception("Could not split path [%s] (i-create).",
-                              filepath)
+            _logger.exception("Could not split path [%s] (i-create).", filepath)
             raise FuseOSError(EIO)
 
         if mime_type is None:
             _, ext = os.path.splitext(filename)
-            if ext != '':
+            if ext != "":
                 ext = ext[1:]
 
             mime_type = utility.get_first_mime_type_by_extension(ext)

          
@@ 293,14 301,14 @@ class _GdfsMixin:
 
         try:
             entry = gd.create_file(
-                        filename, 
-                        [parent_clause[3]], 
-                        mime_type,
-                        is_hidden=is_hidden)
+                filename, [parent_clause[3]], mime_type, is_hidden=is_hidden
+            )
         except:
-            _logger.exception("Could not create empty file [%s] under "
-                              "parent with ID [%s].",
-                              filename, parent_clause[3])
+            _logger.exception(
+                "Could not create empty file [%s] under " "parent with ID [%s].",
+                filename,
+                parent_clause[3],
+            )
 
             raise FuseOSError(EIO)
 

          
@@ 316,7 324,7 @@ class _GdfsMixin:
 
         return (entry, path, filename, mime_type)
 
-    @dec_hint(['filepath', 'mode'])
+    @dec_hint(["filepath", "mode"])
     def create(self, raw_filepath, mode):
         """Create a new file. This always precedes a write."""
 

          
@@ 325,8 333,9 @@ class _GdfsMixin:
         try:
             fh = om.get_new_handle()
         except:
-            _logger.exception("Could not acquire file-handle for create of "
-                              "[%s].", raw_filepath)
+            _logger.exception(
+                "Could not acquire file-handle for create of " "[%s].", raw_filepath
+            )
 
             raise FuseOSError(EIO)
 

          
@@ 334,27 343,29 @@ class _GdfsMixin:
 
         try:
             opened_file = gdrivefs.opened_file.OpenedFile(
-                            entry.id, 
-                            path, 
-                            filename, 
-                            not entry.is_visible, 
-                            mime_type)
+                entry.id, path, filename, not entry.is_visible, mime_type
+            )
         except:
-            _logger.exception("Could not create OpenedFile object for "
-                              "created file.")
+            _logger.exception("Could not create OpenedFile object for " "created file.")
 
             raise FuseOSError(EIO)
 
-        _logger.debug("Registering OpenedFile object with handle (%d), "
-                      "path [%s], and ID [%s].", fh, raw_filepath, entry.id)
+        _logger.debug(
+            "Registering OpenedFile object with handle (%d), "
+            "path [%s], and ID [%s].",
+            fh,
+            raw_filepath,
+            entry.id,
+        )
 
         om = gdrivefs.opened_file.get_om()
 
         try:
             om.add(opened_file, fh=fh)
         except:
-            _logger.exception("Could not register OpenedFile for created "
-                              "file: [%s]", opened_file)
+            _logger.exception(
+                "Could not register OpenedFile for created " "file: [%s]", opened_file
+            )
 
             raise FuseOSError(EIO)
 

          
@@ 362,20 373,22 @@ class _GdfsMixin:
 
         return fh
 
-    @dec_hint(['filepath', 'flags'])
+    @dec_hint(["filepath", "flags"])
     def open(self, filepath, flags):
-# TODO: Fail if does not exist and the mode/flags is read only.
+        # TODO: Fail if does not exist and the mode/flags is read only.
 
         try:
-            opened_file = gdrivefs.opened_file.\
-                            create_for_existing_filepath(filepath)
+            opened_file = gdrivefs.opened_file.create_for_existing_filepath(filepath)
         except GdNotFoundError:
-            _logger.exception("Could not create handle for requested [%s] "
-                              "(open)." % (filepath))
+            _logger.exception(
+                "Could not create handle for requested [%s] " "(open)." % (filepath)
+            )
             raise FuseOSError(ENOENT)
         except:
-            _logger.exception("Could not create OpenedFile object for "
-                                 "opened filepath [%s].", filepath)
+            _logger.exception(
+                "Could not create OpenedFile object for " "opened filepath [%s].",
+                filepath,
+            )
             raise FuseOSError(EIO)
 
         om = gdrivefs.opened_file.get_om()

          
@@ 383,8 396,7 @@ class _GdfsMixin:
         try:
             fh = om.add(opened_file)
         except:
-            _logger.exception("Could not register OpenedFile for opened "
-                              "file.")
+            _logger.exception("Could not register OpenedFile for opened " "file.")
 
             raise FuseOSError(EIO)
 

          
@@ 392,7 404,7 @@ class _GdfsMixin:
 
         return fh
 
-    @dec_hint(['filepath', 'fh'])
+    @dec_hint(["filepath", "fh"])
     def release(self, filepath, fh):
         """Close a file."""
 

          
@@ 401,12 413,13 @@ class _GdfsMixin:
         try:
             om.remove_by_fh(fh)
         except:
-            _logger.exception("Could not remove OpenedFile for handle with "
-                              "ID (%d) (release).", fh)
+            _logger.exception(
+                "Could not remove OpenedFile for handle with " "ID (%d) (release).", fh
+            )
 
             raise FuseOSError(EIO)
 
-    @dec_hint(['filepath', 'data', 'offset', 'fh'], ['data'])
+    @dec_hint(["filepath", "data", "offset", "fh"], ["data"])
     def write(self, filepath, data, offset, fh):
         om = gdrivefs.opened_file.get_om()
 

          
@@ 424,9 437,9 @@ class _GdfsMixin:
 
         return len(data)
 
-    @dec_hint(['filepath', 'fh'])
+    @dec_hint(["filepath", "fh"])
     def flush(self, filepath, fh):
-        
+
         om = gdrivefs.opened_file.get_om()
 
         try:

          
@@ 441,7 454,7 @@ class _GdfsMixin:
             _logger.exception("Could not flush local updates.")
             raise FuseOSError(EIO)
 
-    @dec_hint(['filepath'])
+    @dec_hint(["filepath"])
     def rmdir(self, filepath):
         """Remove a directory."""
 

          
@@ 453,8 466,9 @@ class _GdfsMixin:
             _logger.exception("Could not process [%s] (rmdir).", filepath)
             raise FuseOSError(ENOENT)
         except:
-            _logger.exception("Could not get clause from file-path [%s] "
-                              "(rmdir).", filepath)
+            _logger.exception(
+                "Could not get clause from file-path [%s] " "(rmdir).", filepath
+            )
             raise FuseOSError(EIO)
 
         if not entry_clause:

          
@@ 467,8 481,9 @@ class _GdfsMixin:
         # Check if not a directory.
 
         if not normalized_entry.is_directory:
-            _logger.error("Can not rmdir() non-directory [%s] with ID [%s].", 
-                          filepath, entry_id)
+            _logger.error(
+                "Can not rmdir() non-directory [%s] with ID [%s].", filepath, entry_id
+            )
 
             raise FuseOSError(ENOTDIR)
 

          
@@ 477,12 492,12 @@ class _GdfsMixin:
         gd = get_gdrive()
 
         try:
-            found = gd.get_children_under_parent_id(
-                        entry_id,
-                        max_results=1)
+            found = gd.get_children_under_parent_id(entry_id, max_results=1)
         except:
-            _logger.exception("Could not determine if directory to be removed "
-                              "has children.", entry_id)
+            _logger.exception(
+                "Could not determine if directory to be removed " "has children.",
+                entry_id,
+            )
 
             raise FuseOSError(EIO)
 

          
@@ 494,39 509,41 @@ class _GdfsMixin:
         except (NameError):
             raise FuseOSError(ENOENT)
         except:
-            _logger.exception("Could not remove directory [%s] with ID [%s].",
-                              filepath, entry_id)
+            _logger.exception(
+                "Could not remove directory [%s] with ID [%s].", filepath, entry_id
+            )
 
             raise FuseOSError(EIO)
-# TODO: Remove from cache.
+
+    # TODO: Remove from cache.
 
     # Not supported. Google Drive doesn't fit within this model.
-    @dec_hint(['filepath', 'mode'])
+    @dec_hint(["filepath", "mode"])
     def chmod(self, filepath, mode):
         # Return successfully, or rsync might have a problem.
-#        raise FuseOSError(EPERM) # Operation not permitted.
+        #        raise FuseOSError(EPERM) # Operation not permitted.
         pass
 
     # Not supported. Google Drive doesn't fit within this model.
-    @dec_hint(['filepath', 'uid', 'gid'])
+    @dec_hint(["filepath", "uid", "gid"])
     def chown(self, filepath, uid, gid):
         # Return successfully, or rsync might have a problem.
-#        raise FuseOSError(EPERM) # Operation not permitted.
+        #        raise FuseOSError(EPERM) # Operation not permitted.
         pass
 
     # Not supported.
-    @dec_hint(['target', 'source'])
+    @dec_hint(["target", "source"])
     def symlink(self, target, source):
 
         raise FuseOSError(EPERM)
 
     # Not supported.
-    @dec_hint(['filepath'])
+    @dec_hint(["filepath"])
     def readlink(self, filepath):
 
         raise FuseOSError(EPERM)
 
-    @dec_hint(['filepath'])
+    @dec_hint(["filepath"])
     def statfs(self, filepath):
         """Return filesystem status info (for df).
 

          
@@ 549,31 566,24 @@ class _GdfsMixin:
 
         return {
             # Optimal transfer block size.
-            'f_bsize': block_size_b,
-
+            "f_bsize": block_size_b,
             # Total data blocks in file system.
-            'f_blocks': total,
-
+            "f_blocks": total,
             # Fragment size.
-            'f_frsize': block_size_b,
-
+            "f_frsize": block_size_b,
             # Free blocks in filesystem.
-            'f_bfree': free,
-
+            "f_bfree": free,
             # Free blocks avail to non-superuser.
-            'f_bavail': free
-
+            "f_bavail": free
             # Total file nodes in filesystem.
-#            'f_files': 0,
-
+            #            'f_files': 0,
             # Free file nodes in filesystem.
-#            'f_ffree': 0,
-
+            #            'f_ffree': 0,
             # Free inodes for unprivileged users.
-#            'f_favail': 0
+            #            'f_favail': 0
         }
 
-    @dec_hint(['filepath_old', 'filepath_new'])
+    @dec_hint(["filepath_old", "filepath_new"])
     def rename(self, filepath_old, filepath_new):
         # Make sure the old filepath exists.
         (entry, path, filename_old) = get_entry_or_raise(filepath_old)

          
@@ 607,7 617,7 @@ class _GdfsMixin:
             _logger.exception("Could not register renamed entry: %s", entry)
             raise FuseOSError(EIO)
 
-    @dec_hint(['filepath', 'length', 'fh'])
+    @dec_hint(["filepath", "length", "fh"])
     def truncate(self, filepath, length, fh=None):
         if fh is not None:
             om = gdrivefs.opened_file.get_om()

          
@@ 615,8 625,11 @@ class _GdfsMixin:
             try:
                 opened_file = om.get_by_fh(fh)
             except:
-                _logger.exception("Could not retrieve OpenedFile for handle "
-                                  "with ID (%d) (truncate).", fh)
+                _logger.exception(
+                    "Could not retrieve OpenedFile for handle "
+                    "with ID (%d) (truncate).",
+                    fh,
+                )
 
                 raise FuseOSError(EIO)
 

          
@@ 640,17 653,17 @@ class _GdfsMixin:
             _logger.exception("Could not truncate entry [%s].", entry)
             raise FuseOSError(EIO)
 
-# TODO(dustin): It would be a lot quicker if we truncate our temporary file 
-#               here, and make sure its mtime matches.
+    # TODO(dustin): It would be a lot quicker if we truncate our temporary file
+    #               here, and make sure its mtime matches.
 
-        # We don't need to update our internal representation of the file (just 
-        # our file-handle and its related buffering).
+    # We don't need to update our internal representation of the file (just
+    # our file-handle and its related buffering).
 
-    @dec_hint(['file_path'])
+    @dec_hint(["file_path"])
     def unlink(self, file_path):
         """Remove a file."""
-# TODO: Change to simply move to "trash". Have a FUSE option to elect this
-# behavior.
+        # TODO: Change to simply move to "trash". Have a FUSE option to elect this
+        # behavior.
         path_relations = PathRelations.get_instance()
 
         try:

          
@@ 659,14 672,14 @@ class _GdfsMixin:
             _logger.exception("Could not process [%s] (unlink).", file_path)
             raise FuseOSError(ENOENT)
         except:
-            _logger.exception("Could not get clause from file-path [%s] "
-                              "(unlink).", file_path)
+            _logger.exception(
+                "Could not get clause from file-path [%s] " "(unlink).", file_path
+            )
 
             raise FuseOSError(EIO)
 
         if not entry_clause:
-            _logger.error("Path [%s] does not exist for unlink().",
-                          file_path)
+            _logger.error("Path [%s] does not exist for unlink().", file_path)
 
             raise FuseOSError(ENOENT)
 

          
@@ 676,12 689,15 @@ class _GdfsMixin:
         # Check if a directory.
 
         if normalized_entry.is_directory:
-            _logger.error("Can not unlink() directory [%s] with ID [%s]. "
-                          "Must be file.", file_path, entry_id)
+            _logger.error(
+                "Can not unlink() directory [%s] with ID [%s]. " "Must be file.",
+                file_path,
+                entry_id,
+            )
 
             raise FuseOSError(EISDIR)
 
-        # Remove online. Complements local removal (if not found locally, a 
+        # Remove online. Complements local removal (if not found locally, a
         # follow-up request checks online).
 
         gd = get_gdrive()

          
@@ 691,8 707,9 @@ class _GdfsMixin:
         except NameError:
             raise FuseOSError(ENOENT)
         except:
-            _logger.exception("Could not remove file [%s] with ID [%s].",
-                              file_path, entry_id)
+            _logger.exception(
+                "Could not remove file [%s] with ID [%s].", file_path, entry_id
+            )
 
             raise FuseOSError(EIO)
 

          
@@ 706,12 723,15 @@ class _GdfsMixin:
         try:
             opened_file = om.remove_by_filepath(file_path)
         except:
-            _logger.exception("There was an error while removing all "
-                                 "opened-file instances for file [%s] "
-                                 "(remove).", file_path)
+            _logger.exception(
+                "There was an error while removing all "
+                "opened-file instances for file [%s] "
+                "(remove).",
+                file_path,
+            )
             raise FuseOSError(EIO)
 
-    @dec_hint(['raw_path', 'times'])
+    @dec_hint(["raw_path", "times"])
     def utimens(self, raw_path, times=None):
         """Set the file times."""
 

          
@@ 730,18 750,16 @@ class _GdfsMixin:
 
         try:
             entry = gd.update_entry(
-                        entry, 
-                        modified_datetime=mtime_phrase,
-                        accessed_datetime=atime_phrase)
+                entry, modified_datetime=mtime_phrase, accessed_datetime=atime_phrase
+            )
         except:
-            _logger.exception("Could not update entry [%s] for times.",
-                              entry)
+            _logger.exception("Could not update entry [%s] for times.", entry)
 
             raise FuseOSError(EIO)
 
         return 0
 
-    @dec_hint(['path'])
+    @dec_hint(["path"])
     def init(self, path):
         """Called on filesystem mount. Path is always /."""
 

          
@@ 751,7 769,7 @@ class _GdfsMixin:
         else:
             _logger.warning("We were told not to monitor changes.")
 
-    @dec_hint(['path'])
+    @dec_hint(["path"])
     def destroy(self, path):
         """Called on filesystem destruction. Path is always /."""
 

          
@@ 759,41 777,47 @@ class _GdfsMixin:
             _logger.info("Stopping change-monitor.")
             get_change_manager().mount_destroy()
 
-    @dec_hint(['path'])
+    @dec_hint(["path"])
     def listxattr(self, raw_path):
         (entry, path, filename) = get_entry_or_raise(raw_path)
 
         return list(entry.xattr_data.keys())
 
-    @dec_hint(['path', 'name', 'position'])
+    @dec_hint(["path", "name", "position"])
     def getxattr(self, raw_path, name, position=0):
         (entry, path, filename) = get_entry_or_raise(raw_path)
 
         try:
             return entry.xattr_data[name] + "\n"
         except:
-            return ''
+            return ""
+
 
 if gdrivefs.config.DO_LOG_FUSE_MESSAGES is True:
+
     class GDriveFS(_GdfsMixin, LoggingMixIn, Operations):
         pass
+
+
 else:
+
     class GDriveFS(_GdfsMixin, Operations):
         pass
 
-def mount(auth_storage_filepath, mountpoint, debug=None, nothreads=None, 
-          option_string=None):
+
+def mount(
+    auth_storage_filepath, mountpoint, debug=None, nothreads=None, option_string=None
+):
 
     if os.path.exists(auth_storage_filepath) is False:
-        raise ValueError("Credential path is not valid: [%s]" %
-                         (auth_storage_filepath,))
+        raise ValueError(
+            "Credential path is not valid: [%s]" % (auth_storage_filepath,)
+        )
 
     fuse_opts = {}
-    
+
     if option_string:
-        for opt_parts in [opt.split('=', 1) \
-                          for opt \
-                          in option_string.split(',') ]:
+        for opt_parts in [opt.split("=", 1) for opt in option_string.split(",")]:
             k = opt_parts[0]
 
             # We need to present a bool type for on/off flags. Since all we

          
@@ 803,14 827,14 @@ def mount(auth_storage_filepath, mountpo
                 v = opt_parts[1]
                 v_lower = v.lower()
 
-                if v_lower == 'true':
+                if v_lower == "true":
                     v = True
-                elif v_lower == 'false':
+                elif v_lower == "false":
                     v = False
             else:
                 v = True
 
-            # We have a list of provided options. See which match against our 
+            # We have a list of provided options. See which match against our
             # application options.
 
             _logger.debug("Setting option [%s] to [%s].", k, v)

          
@@ 818,10 842,11 @@ def mount(auth_storage_filepath, mountpo
             try:
                 Conf.set(k, v)
             except KeyError as e:
-                _logger.debug("Forwarding option [%s] with value [%s] to "
-                              "FUSE.", k, v)
+                _logger.debug(
+                    "Forwarding option [%s] with value [%s] to " "FUSE.", k, v
+                )
 
-                if k not in ('user', '_netdev'):
+                if k not in ("user", "_netdev"):
                     fuse_opts[k] = v
 
     if gdrivefs.config.IS_DEBUG is True:

          
@@ 829,12 854,14 @@ def mount(auth_storage_filepath, mountpo
         if nothreads:
             _logger.debug("FUSE is running in single thread mode.")
 
-    _logger.debug("PERMS: F=%s E=%s NE=%s",
-                  Conf.get('default_perm_folder'), 
-                  Conf.get('default_perm_file_editable'), 
-                  Conf.get('default_perm_file_noneditable'))
+    _logger.debug(
+        "PERMS: F=%s E=%s NE=%s",
+        Conf.get("default_perm_folder"),
+        Conf.get("default_perm_file_editable"),
+        Conf.get("default_perm_file_noneditable"),
+    )
 
-    # Assume that any option that wasn't an application option is a FUSE 
+    # Assume that any option that wasn't an application option is a FUSE
     # option. The Python-FUSE interface that we're using is beautiful/elegant,
     # but there's no help support. The user is just going to have to know the
     # options.

          
@@ 842,21 869,23 @@ def mount(auth_storage_filepath, mountpo
     set_auth_cache_filepath(auth_storage_filepath)
 
     # How we'll appear in diskfree, mtab, etc..
-    name = ("gdfs(%s)" % (auth_storage_filepath,))
+    name = "gdfs(%s)" % (auth_storage_filepath,)
 
     # Make sure we can connect.
     gdrivefs.account_info.AccountInfo().get_data()
 
     fuse = FUSE(
-            GDriveFS(), 
-            mountpoint, 
-            debug=debug, 
-            foreground=debug, 
-            nothreads=nothreads, 
-            fsname=name, 
-            **fuse_opts)
+        GDriveFS(),
+        mountpoint,
+        debug=debug,
+        foreground=debug,
+        nothreads=nothreads,
+        fsname=name,
+        **fuse_opts,
+    )
+
 
 def set_auth_cache_filepath(auth_storage_filepath):
     auth_storage_filepath = os.path.abspath(auth_storage_filepath)
 
-    Conf.set('auth_cache_filepath', auth_storage_filepath)
+    Conf.set("auth_cache_filepath", auth_storage_filepath)

          
M gdrivefs/livereader_base.py +3 -2
@@ 21,8 21,9 @@ class LiveReaderBase:
         return self.__data[key]
 
     def get_data(self):
-        raise NotImplementedError("get_data() method must be implemented in "
-                                  "the LiveReaderBase child.")
+        raise NotImplementedError(
+            "get_data() method must be implemented in " "the LiveReaderBase child."
+        )
 
     @classmethod
     def get_instance(cls):

          
M gdrivefs/normal_entry.py +105 -107
@@ 14,19 14,19 @@ import time
 
 
 class NormalEntry:
-    __directory_mimetype = Conf.get('directory_mimetype')
+    __directory_mimetype = Conf.get("directory_mimetype")
 
     __properties_extra = [
-        'is_directory',
-        'is_visible',
-        'parents',
-        'download_types',
-        'modified_date',
-        'modified_date_epoch',
-        'mtime_byme_date',
-        'mtime_byme_date_epoch',
-        'atime_byme_date',
-        'atime_byme_date_epoch',
+        "is_directory",
+        "is_visible",
+        "parents",
+        "download_types",
+        "modified_date",
+        "modified_date_epoch",
+        "mtime_byme_date",
+        "mtime_byme_date_epoch",
+        "atime_byme_date",
+        "atime_byme_date_epoch",
     ]
 
     def __init__(self, gd_resource_type, raw_data):

          
@@ 42,89 42,81 @@ class NormalEntry:
         # can get a file-size up-front, or we have to decide on a specific
         # mime-type in order to do so.
 
-        requires_mimetype = 'fileSize' not in self.__raw_data and \
-                            raw_data['mimeType'] != self.__directory_mimetype
-
-        self.__info['requires_mimetype'] = \
-            requires_mimetype
+        requires_mimetype = (
+            "fileSize" not in self.__raw_data
+            and raw_data["mimeType"] != self.__directory_mimetype
+        )
 
-        self.__info['title'] = \
-            raw_data['title']
-
-        self.__info['mime_type'] = \
-            raw_data['mimeType']
+        self.__info["requires_mimetype"] = requires_mimetype
 
-        self.__info['labels'] = \
-            raw_data['labels']
+        self.__info["title"] = raw_data["title"]
 
-        self.__info['id'] = \
-            raw_data['id']
+        self.__info["mime_type"] = raw_data["mimeType"]
 
-        self.__info['last_modifying_user_name'] = \
-            raw_data.get('lastModifyingUserName')
+        self.__info["labels"] = raw_data["labels"]
 
-        self.__info['writers_can_share'] = \
-            raw_data['writersCanShare']
+        self.__info["id"] = raw_data["id"]
+
+        self.__info["last_modifying_user_name"] = raw_data.get("lastModifyingUserName")
 
-        self.__info['owner_names'] = \
-            raw_data['ownerNames']
+        self.__info["writers_can_share"] = raw_data["writersCanShare"]
 
-        self.__info['editable'] = \
-            raw_data['editable']
+        self.__info["owner_names"] = raw_data["ownerNames"]
 
-        self.__info['user_permission'] = \
-            raw_data['userPermission']
+        self.__info["editable"] = raw_data["editable"]
 
-        self.__info['link'] = \
-            raw_data.get('embedLink')
+        self.__info["user_permission"] = raw_data["userPermission"]
+
+        self.__info["link"] = raw_data.get("embedLink")
 
-        self.__info['file_size'] = \
-            int(raw_data.get('fileSize', 0))
+        self.__info["file_size"] = int(raw_data.get("fileSize", 0))
 
-        self.__info['file_extension'] = \
-            raw_data.get('fileExtension')
+        self.__info["file_extension"] = raw_data.get("fileExtension")
 
-        self.__info['md5_checksum'] = \
-            raw_data.get('md5Checksum')
+        self.__info["md5_checksum"] = raw_data.get("md5Checksum")
 
-        self.__info['image_media_metadata'] = \
-            raw_data.get('imageMediaMetadata')
+        self.__info["image_media_metadata"] = raw_data.get("imageMediaMetadata")
 
-        self.__info['download_links'] = \
-            raw_data.get('exportLinks', {})
+        self.__info["download_links"] = raw_data.get("exportLinks", {})
 
         try:
-            self.__info['download_links'][self.__info['mime_type']] = \
-                raw_data['downloadUrl']
+            self.__info["download_links"][self.__info["mime_type"]] = raw_data[
+                "downloadUrl"
+            ]
         except KeyError:
             pass
 
         self.__update_display_name()
 
-        for parent in raw_data['parents']:
-            self.__parents.append(parent['id'])
+        for parent in raw_data["parents"]:
+            self.__parents.append(parent["id"])
 
     def __getattr__(self, key):
         return self.__info[key]
 
     def __str__(self):
-        return ("<NORMAL ID= [%s] MIME= [%s] NAME= [%s] URIS= (%d)>" %
-                (self.id, self.mime_type, self.title,
-                 len(self.download_links)))
+        return "<NORMAL ID= [%s] MIME= [%s] NAME= [%s] URIS= (%d)>" % (
+            self.id,
+            self.mime_type,
+            self.title,
+            len(self.download_links),
+        )
 
     def __repr__(self):
         return str(self)
 
     def __update_display_name(self):
         # This is encoded for displaying locally.
-        self.__info['title_fs'] = utility.translate_filename_charset(self.__info['title'])
+        self.__info["title_fs"] = utility.translate_filename_charset(
+            self.__info["title"]
+        )
 
     def temp_rename(self, new_filename):
         """Set the name to something else, here, while we, most likely, wait
         for the change at the server to propogate.
         """
 
-        self.__info['title'] = new_filename
+        self.__info["title"] = new_filename
         self.__update_display_name()
 
     def normalize_download_mimetype(self, specific_mimetype=None):

          
@@ 140,14 132,17 @@ class NormalEntry:
 
         if specific_mimetype is not None:
             if specific_mimetype not in self.__cache_mimetypes[0]:
-                _logger.debug("Normalizing mime-type [%s] for download.  "
-                              "Options: %s",
-                              specific_mimetype, self.download_types)
+                _logger.debug(
+                    "Normalizing mime-type [%s] for download.  " "Options: %s",
+                    specific_mimetype,
+                    self.download_types,
+                )
 
                 if specific_mimetype not in self.download_links:
-                    raise ExportFormatError("Mime-type [%s] is not available for "
-                                            "download. Options: %s" %
-                                            (self.download_types))
+                    raise ExportFormatError(
+                        "Mime-type [%s] is not available for "
+                        "download. Options: %s" % (self.download_types)
+                    )
 
                 self.__cache_mimetypes[0].append(specific_mimetype)
 

          
@@ 157,8 152,10 @@ class NormalEntry:
             # Try to derive a mimetype from the filename, and see if it matches
             # against available export types.
             (mimetype_candidate, _) = guess_type(self.title_fs, True)
-            if mimetype_candidate is not None and \
-               mimetype_candidate in self.download_links:
+            if (
+                mimetype_candidate is not None
+                and mimetype_candidate in self.download_links
+            ):
                 mime_type = mimetype_candidate
 
             # If there's only one download link, resort to using it (perhaps it was

          
@@ 167,9 164,10 @@ class NormalEntry:
                 mime_type = list(self.download_links.keys())[0]
 
             else:
-                raise ExportFormatError("A correct mime-type needs to be "
-                                        "specified. Options: %s" %
-                                        (self.download_types))
+                raise ExportFormatError(
+                    "A correct mime-type needs to be "
+                    "specified. Options: %s" % (self.download_types)
+                )
 
             self.__cache_mimetypes[1] = mime_type
 

          
@@ 177,17 175,17 @@ class NormalEntry:
 
     def __convert(self, data):
         if isinstance(data, dict):
-            list_ = [("K(%s)=V(%s)" % (self.__convert(key),
-                                  self.__convert(value))) \
-                     for key, value \
-                     in list(data.items())]
+            list_ = [
+                ("K(%s)=V(%s)" % (self.__convert(key), self.__convert(value)))
+                for key, value in list(data.items())
+            ]
 
-            final = '; '.join(list_)
+            final = "; ".join(list_)
             return final
         elif isinstance(data, list):
-            final = ', '.join([('LI(%s)' % (self.__convert(element))) \
-                               for element \
-                               in data])
+            final = ", ".join(
+                [("LI(%s)" % (self.__convert(element))) for element in data]
+            )
             return final
         elif isinstance(data, str):
             return utility.translate_filename_charset(data)

          
@@ 200,22 198,16 @@ class NormalEntry:
 
     def get_data(self):
         original = {
-            key.encode('utf8'): value
-            for key, value
-            in list(self.__raw_data.items())
+            key.encode("utf8"): value for key, value in list(self.__raw_data.items())
         }
 
         # distilled = self.__info
 
-        extra = {
-            key: getattr(self, key)
-            for key
-            in self.__properties_extra
-        }
+        extra = {key: getattr(self, key) for key in self.__properties_extra}
 
         data_dict = {
-            'original': original,
-            'extra': extra,
+            "original": original,
+            "extra": extra,
         }
 
         return data_dict

          
@@ 228,7 220,7 @@ class NormalEntry:
             attrs = {}
             for a_type, a_dict in list(data_dict.items()):
                 for key, value in list(a_dict.items()):
-                    fqkey = ('user.%s.%s' % (a_type, key))
+                    fqkey = "user.%s.%s" % (a_type, key)
                     attrs[fqkey] = self.__convert(value)
 
             self.__cache_data = attrs

          
@@ 238,14 230,15 @@ class NormalEntry:
     @property
     def is_directory(self):
         """Return True if we represent a directory."""
-        return (self.__info['mime_type'] == self.__directory_mimetype)
+        return self.__info["mime_type"] == self.__directory_mimetype
 
     @property
     def is_visible(self):
-        if [ flag
-             for flag, value
-             in list(self.labels.items())
-             if flag in Conf.get('hidden_flags_list_local') and value ]:
+        if [
+            flag
+            for flag, value in list(self.labels.items())
+            if flag in Conf.get("hidden_flags_list_local") and value
+        ]:
             return False
         else:
             return True

          
@@ 260,11 253,12 @@ class NormalEntry:
 
     @property
     def modified_date(self):
-        if 'modified_date' not in self.__cache_dict:
-            self.__cache_dict['modified_date'] = \
-                dateutil.parser.parse(self.__raw_data['modifiedDate'])
+        if "modified_date" not in self.__cache_dict:
+            self.__cache_dict["modified_date"] = dateutil.parser.parse(
+                self.__raw_data["modifiedDate"]
+            )
 
-        return self.__cache_dict['modified_date']
+        return self.__cache_dict["modified_date"]
 
     @property
     def modified_date_epoch(self):

          
@@ 274,11 268,12 @@ class NormalEntry:
 
     @property
     def mtime_byme_date(self):
-        if 'modified_byme_date' not in self.__cache_dict:
-            self.__cache_dict['modified_byme_date'] = \
-                dateutil.parser.parse(self.__raw_data['modifiedByMeDate'])
+        if "modified_byme_date" not in self.__cache_dict:
+            self.__cache_dict["modified_byme_date"] = dateutil.parser.parse(
+                self.__raw_data["modifiedByMeDate"]
+            )
 
-        return self.__cache_dict['modified_byme_date']
+        return self.__cache_dict["modified_byme_date"]
 
     @property
     def mtime_byme_date_epoch(self):

          
@@ 286,16 281,19 @@ class NormalEntry:
 
     @property
     def atime_byme_date(self):
-        if 'viewed_byme_date' not in self.__cache_dict:
-            self.__cache_dict['viewed_byme_date'] = \
-                dateutil.parser.parse(self.__raw_data['lastViewedByMeDate']) \
-                if 'lastViewedByMeDate' in self.__raw_data \
+        if "viewed_byme_date" not in self.__cache_dict:
+            self.__cache_dict["viewed_byme_date"] = (
+                dateutil.parser.parse(self.__raw_data["lastViewedByMeDate"])
+                if "lastViewedByMeDate" in self.__raw_data
                 else None
+            )
 
-        return self.__cache_dict['viewed_byme_date']
+        return self.__cache_dict["viewed_byme_date"]
 
     @property
     def atime_byme_date_epoch(self):
-        return mktime(self.atime_byme_date.timetuple()) - time.timezone \
-                if self.atime_byme_date \
-                else None
+        return (
+            mktime(self.atime_byme_date.timetuple()) - time.timezone
+            if self.atime_byme_date
+            else None
+        )

          
M gdrivefs/oauth_authorize.py +25 -28
@@ 18,33 18,27 @@ httplib2shim.patch()
 class OauthAuthorize:
     """Manages authorization process."""
 
-    def __init__(
-            self, redirect_uri=oauth2client.client.OOB_CALLBACK_URN):
-        creds_filepath  = gdrivefs.conf.Conf.get('auth_cache_filepath')
+    def __init__(self, redirect_uri=oauth2client.client.OOB_CALLBACK_URN):
+        creds_filepath = gdrivefs.conf.Conf.get("auth_cache_filepath")
 
-        assert \
-            creds_filepath is not None, \
-            "Credentials file-path not set."
+        assert creds_filepath is not None, "Credentials file-path not set."
 
         creds_path = os.path.dirname(creds_filepath)
-        if creds_path != '' and \
-           os.path.exists(creds_path) is False:
+        if creds_path != "" and os.path.exists(creds_path) is False:
             os.makedirs(creds_path)
 
         self.__creds_filepath = creds_filepath
         self.__credentials = None
 
-        api_credentials = gdrivefs.conf.Conf.get('api_credentials')
+        api_credentials = gdrivefs.conf.Conf.get("api_credentials")
 
-        with tempfile.NamedTemporaryFile(mode='w+') as f:
+        with tempfile.NamedTemporaryFile(mode="w+") as f:
             json.dump(api_credentials, f)
             f.flush()
 
-            self.flow = \
-                oauth2client.client.flow_from_clientsecrets(
-                    f.name,
-                    scope=self.__get_scopes(),
-                    redirect_uri=redirect_uri)
+            self.flow = oauth2client.client.flow_from_clientsecrets(
+                f.name, scope=self.__get_scopes(), redirect_uri=redirect_uri
+            )
 
     def __get_scopes(self):
         scopes = [

          
@@ 52,7 46,7 @@ class OauthAuthorize:
             "https://www.googleapis.com/auth/drive.file",
         ]
 
-        return ' '.join(scopes)
+        return " ".join(scopes)
 
     def step1_get_auth_url(self):
         return self.flow.step1_get_authorize_url()

          
@@ 72,7 66,9 @@ class OauthAuthorize:
         try:
             self.__credentials.refresh(http)
         except:
-            raise gdrivefs.errors.AuthorizationFailureError("Could not refresh credentials.")
+            raise gdrivefs.errors.AuthorizationFailureError(
+                "Could not refresh credentials."
+            )
 
         self.__update_cache(self.__credentials)
 

          
@@ 85,10 81,9 @@ class OauthAuthorize:
             raise ValueError("Credentials file-path is not set.")
 
         if self.__credentials is None:
-            _LOGGER.debug("Checking for cached credentials: %s",
-                          self.__creds_filepath)
+            _LOGGER.debug("Checking for cached credentials: %s", self.__creds_filepath)
 
-            with open(self.__creds_filepath, 'rb') as cache:
+            with open(self.__creds_filepath, "rb") as cache:
                 credentials_serialized = cache.read()
 
             # If we're here, we have serialized credentials information.

          
@@ 104,11 99,11 @@ class OauthAuthorize:
 
             # Credentials restored. Check expiration date.
 
-            expiry_phrase = self.__credentials.token_expiry.strftime(
-                                '%Y%m%d-%H%M%S')
+            expiry_phrase = self.__credentials.token_expiry.strftime("%Y%m%d-%H%M%S")
 
-            _LOGGER.debug("Cached credentials found with expire-date [%s].",
-                          expiry_phrase)
+            _LOGGER.debug(
+                "Cached credentials found with expire-date [%s].", expiry_phrase
+            )
 
             self.check_credential_state()
 

          
@@ 118,9 113,8 @@ class OauthAuthorize:
         """Do all of the regular checks necessary to keep our access going,
         such as refreshing when we expire.
         """
-        if(datetime.datetime.today() >= self.__credentials.token_expiry):
-            _LOGGER.debug("Credentials have expired. Attempting to refresh "
-                          "them.")
+        if datetime.datetime.today() >= self.__credentials.token_expiry:
+            _LOGGER.debug("Credentials have expired. Attempting to refresh " "them.")
 
             self.__refresh_credentials()
             return self.__credentials

          
@@ 138,7 132,7 @@ class OauthAuthorize:
 
         # Write cache file.
 
-        with open(self.__creds_filepath, 'wb') as cache:
+        with open(self.__creds_filepath, "wb") as cache:
             cache.write(credentials_serialized)
 
     def step2_doexchange(self, auth_code):

          
@@ 153,9 147,12 @@ class OauthAuthorize:
         self.__update_cache(credentials)
         self.__credentials = credentials
 
+
 # A singleton, for general use.
 
 oauth = None
+
+
 def get_auth():
     global oauth
     if oauth is None:

          
M gdrivefs/opened_file.py +180 -132
@@ 3,8 3,13 @@ from gdrivefs.displaced_file import Disp
 from gdrivefs.drive import get_gdrive
 from gdrivefs.errors import ExportFormatError, GdNotFoundError
 from gdrivefs.fsutility import dec_hint, split_path, build_filepath
-from gdrivefs.volume import PathRelations, EntryCache, path_resolver, \
-                            CLAUSE_ID, CLAUSE_ENTRY
+from gdrivefs.volume import (
+    PathRelations,
+    EntryCache,
+    path_resolver,
+    CLAUSE_ID,
+    CLAUSE_ENTRY,
+)
 import fuse
 import logging
 import os

          
@@ 43,7 48,7 @@ class _OpenedManager:
         return resource.getrlimit(resource.RLIMIT_NOFILE)[0]
 
     def get_new_handle(self):
-        """Get a handle for a file that's about to be opened. Note that the 
+        """Get a handle for a file that's about to be opened. Note that the
         handles start at (1), so there are a lot of "+ 1" occurrences below.
         """
 

          
@@ 64,11 69,10 @@ class _OpenedManager:
                     cls.__fh_counter = 1
 
                 if cls.__fh_counter not in self.__opened:
-                    _LOGGER.debug("Assigning file-handle (%d).",
-                                  cls.__fh_counter)
+                    _LOGGER.debug("Assigning file-handle (%d).", cls.__fh_counter)
 
                     return cls.__fh_counter
-                
+
         message = "Could not allocate new file handle. Safety breach."
         _LOGGER.error(message)
         raise Exception(message)

          
@@ 78,16 82,19 @@ class _OpenedManager:
 
         cls = self.__class__
 
-        assert issubclass(opened_file.__class__, OpenedFile) is True, \
-               "Can only register an OpenedFile as an opened-file."
+        assert (
+            issubclass(opened_file.__class__, OpenedFile) is True
+        ), "Can only register an OpenedFile as an opened-file."
 
         with cls.__opened_lock:
             if not fh:
                 fh = self.get_new_handle()
 
             elif fh in self.__opened:
-                message = ("Opened-file with file-handle (%d) has already been"
-                           " registered." % (opened_file.fh))
+                message = (
+                    "Opened-file with file-handle (%d) has already been"
+                    " registered." % (opened_file.fh)
+                )
 
                 _LOGGER.error(message)
                 raise Exception(message)

          
@@ 112,14 119,15 @@ class _OpenedManager:
 
             file_path = self.__opened[fh].file_path
             del self.__opened[fh]
-            
+
             try:
                 self.__opened_byfile[file_path].remove(fh)
             except ValueError:
-                raise ValueError("Could not remove handle (%d) from list of "
-                                 "open-handles for file-path [%s]: %s" % 
-                                 (fh, file_path, 
-                                  self.__opened_byfile[file_path]))
+                raise ValueError(
+                    "Could not remove handle (%d) from list of "
+                    "open-handles for file-path [%s]: %s"
+                    % (fh, file_path, self.__opened_byfile[file_path])
+                )
 
             if not self.__opened_byfile[file_path]:
                 del self.__opened_byfile[file_path]

          
@@ 128,8 136,7 @@ class _OpenedManager:
 
         cls = self.__class__
 
-        _LOGGER.debug("Removing all open handles for file-path [%s].",
-                      file_path)
+        _LOGGER.debug("Removing all open handles for file-path [%s].", file_path)
 
         count = 0
 

          
@@ 141,8 148,7 @@ class _OpenedManager:
             except KeyError:
                 pass
 
-        _LOGGER.debug("(%d) file-handles removed for file-path [%s].",
-                      count, file_path)
+        _LOGGER.debug("(%d) file-handles removed for file-path [%s].", count, file_path)
 
     def get_by_fh(self, fh):
         """Retrieve an opened-file, by the handle."""

          
@@ 151,8 157,10 @@ class _OpenedManager:
 
         with cls.__opened_lock:
             if fh not in self.__opened:
-                message = ("Opened-file with file-handle (%d) is not "
-                          "registered (get_by_fh)." % (fh))
+                message = (
+                    "Opened-file with file-handle (%d) is not "
+                    "registered (get_by_fh)." % (fh)
+                )
 
                 _LOGGER.error(message)
                 raise Exception(message)

          
@@ 167,6 175,7 @@ class _OpenedManager:
     def temp_path(self):
         return self.__temp_path
 
+
 _OPENED_ENTRIES_LOCK = threading.Lock()
 _OPENED_ENTRIES = set()
 

          
@@ 175,56 184,62 @@ class OpenedFile:
     """This class describes a single open file, and manages changes."""
 
     def __init__(self, entry_id, path, filename, is_hidden, mime_type):
-# TODO(dustin): Until we can gracely orchestrate concurrent handles on the same 
-#               entry, we can't allow it. This is referenced, just below.
+        # TODO(dustin): Until we can gracely orchestrate concurrent handles on the same
+        #               entry, we can't allow it. This is referenced, just below.
         with _OPENED_ENTRIES_LOCK:
-            assert entry_id not in _OPENED_ENTRIES, \
-                   "Access to the same file from multiple file-handles is "\
-                   "not currently supported."
+            assert entry_id not in _OPENED_ENTRIES, (
+                "Access to the same file from multiple file-handles is "
+                "not currently supported."
+            )
 
             _OPENED_ENTRIES.add(entry_id)
 
-        _LOGGER.info("Opened-file object created for entry-ID [%s] and path "
-                     "(%s).", entry_id, path)
+        _LOGGER.info(
+            "Opened-file object created for entry-ID [%s] and path " "(%s).",
+            entry_id,
+            path,
+        )
 
         self.__entry_id = entry_id
         self.__path = path
         self.__filename = filename
         self.__is_hidden = is_hidden
-        
+
         self.__mime_type = mime_type
         self.__cache = EntryCache.get_instance().cache
 
         self.__is_loaded = False
         self.__is_dirty = False
 
-        # Use the monotonically incremented `opened_count` to produce a unique 
+        # Use the monotonically incremented `opened_count` to produce a unique
         # temporary filepath.
 
         om = get_om()
-        self.__temp_filepath = \
-            os.path.join(om.temp_path, str(om.opened_count))
+        self.__temp_filepath = os.path.join(om.temp_path, str(om.opened_count))
 
         self.__fh = None
 
-        # We need to load this up-front. Since we can't do partial updates, we 
-        # have to keep one whole, local copy, apply updates to it, and then 
+        # We need to load this up-front. Since we can't do partial updates, we
+        # have to keep one whole, local copy, apply updates to it, and then
         # post it on flush.
-# TODO(dustin): Until we finish working on the download-agent so that we can 
-#               have a way to orchestrate concurrent handles on the same file, 
-#               we'll just have to accept the fact that concurrent access will 
-#               require multiple downloads of the same file to multiple 
-#               temporary files (one for each).
+        # TODO(dustin): Until we finish working on the download-agent so that we can
+        #               have a way to orchestrate concurrent handles on the same file,
+        #               we'll just have to accept the fact that concurrent access will
+        #               require multiple downloads of the same file to multiple
+        #               temporary files (one for each).
         self.__load_base_from_remote()
 
     def __del__(self):
-        """This handle is being closed. Notice that we don't flush here because 
+        """This handle is being closed. Notice that we don't flush here because
         we expect that the VFS will.
         """
 
         if self.__fh is not None:
-            _LOGGER.debug("Removing temporary file [%s] ([%s]).", 
-                          self.__temp_filepath, self.file_path)
+            _LOGGER.debug(
+                "Removing temporary file [%s] ([%s]).",
+                self.__temp_filepath,
+                self.file_path,
+            )
 
             self.__fh.close()
             os.unlink(self.__temp_filepath)

          
@@ 233,27 248,29 @@ class OpenedFile:
             _OPENED_ENTRIES.remove(self.__entry_id)
 
     def __repr__(self):
-        replacements = { 
-            'entry_id': self.__entry_id, 
-            'filename': self.__filename, 
-            'mime_type': self.__mime_type, 
-            'is_loaded': self.__is_loaded, 
-            'is_dirty': self.__is_dirty
+        replacements = {
+            "entry_id": self.__entry_id,
+            "filename": self.__filename,
+            "mime_type": self.__mime_type,
+            "is_loaded": self.__is_loaded,
+            "is_dirty": self.__is_dirty,
         }
 
-        return ("<OF [%(entry_id)s] F=[%(filename)s] MIME=[%(mime_type)s] "
-                "LOADED=[%(is_loaded)s] DIRTY= [%(is_dirty)s]>" % replacements)
+        return (
+            "<OF [%(entry_id)s] F=[%(filename)s] MIME=[%(mime_type)s] "
+            "LOADED=[%(is_loaded)s] DIRTY= [%(is_dirty)s]>" % replacements
+        )
 
-# TODO: We should be able to safely assume that we won't get a change event for 
-#       a file until its been entirely updated, online. Therefore, the change 
-#       processor should checkin, here, and make sure that any handles are 
-#       closed for changed files.
-#
-#       We should also make sure to remove temporary file-paths in the OM temp-
-#       path (if one exists) if we get a "delete" change.
+    # TODO: We should be able to safely assume that we won't get a change event for
+    #       a file until it's been entirely updated online. Therefore, the change
+    #       processor should check in here and make sure that any handles are
+    #       closed for changed files.
+    #
+    #       We should also make sure to remove temporary file-paths in the OM temp-
+    #       path (if one exists) if we get a "delete" change.
 
     def __load_base_from_remote(self):
-        """Download the data for the entry that we represent. This is probably 
+        """Download the data for the entry that we represent. This is probably
         a file, but could also be a stub for -any- entry.
         """
 

          
@@ 268,68 285,75 @@ class OpenedFile:
 
         entry = self.__cache.get(self.__entry_id)
 
-        _LOGGER.debug("Ensuring local availability of [%s]: [%s]", 
-                      entry, self.__temp_filepath)
+        _LOGGER.debug(
+            "Ensuring local availability of [%s]: [%s]", entry, self.__temp_filepath
+        )
 
-        # Get the current version of the write-cache file, or note that we 
+        # Get the current version of the write-cache file, or note that we
         # don't have it.
 
-        _LOGGER.info("Attempting local cache update of file [%s] for entry "
-                     "[%s] and mime-type [%s].",
-                     self.__temp_filepath, entry, self.mime_type)
+        _LOGGER.info(
+            "Attempting local cache update of file [%s] for entry "
+            "[%s] and mime-type [%s].",
+            self.__temp_filepath,
+            entry,
+            self.mime_type,
+        )
 
         if entry.requires_mimetype:
             # length = DisplacedFile.file_size
 
             d = DisplacedFile(entry)
-            stub_data = d.deposit_file(self.mime_type).encode('utf-8')
+            stub_data = d.deposit_file(self.mime_type).encode("utf-8")
 
-            self.__fh = open(self.__temp_filepath, 'w+b')
+            self.__fh = open(self.__temp_filepath, "w+b")
             self.__fh.write(stub_data)
         else:
-            _LOGGER.debug("Executing the download: [%s] => [%s]", 
-                          entry.id, self.__temp_filepath)
-            
+            _LOGGER.debug(
+                "Executing the download: [%s] => [%s]", entry.id, self.__temp_filepath
+            )
+
             try:
-# TODO(dustin): We need to inherit a file that we might've already cached by 
-#               opening.
-# TODO(dustin): Any call to download_to_local should use a local, temporarily 
-#               file is already established. We can't use it in the reverse 
-#               order though: It's one thing to already have a cache from 
-#               having opened it, and it's a another thing to maintain a cache 
-#               of every file that is copied.
+                # TODO(dustin): We need to inherit a file that we might've already cached by
+                #               opening.
+                # TODO(dustin): Any call to download_to_local should use a local, temporary
+                #               file if one is already established. We can't use it in the reverse
+                #               order though: It's one thing to already have a cache from
+                #               having opened it, and it's another thing to maintain a cache
+                #               of every file that is copied.
                 gd = get_gdrive()
                 result = gd.download_to_local(
-                            self.__temp_filepath,
-                            entry,
-                            self.mime_type)
+                    self.__temp_filepath, entry, self.mime_type
+                )
 
                 # (length, cache_fault) = result
             except ExportFormatError:
                 _LOGGER.exception("There was an export-format error.")
                 raise fuse.FuseOSError(ENOENT)
 
-            self.__fh = open(self.__temp_filepath, 'r+b')
+            self.__fh = open(self.__temp_filepath, "r+b")
 
             self.__is_dirty = False
             self.__is_loaded = True
 
-        _LOGGER.debug("Established base file-data for [%s]: [%s]", 
-                      entry, self.__temp_filepath)
+        _LOGGER.debug(
+            "Established base file-data for [%s]: [%s]", entry, self.__temp_filepath
+        )
 
-    @dec_hint(['offset', 'data'], ['data'], 'OF')
+    @dec_hint(["offset", "data"], ["data"], "OF")
     def add_update(self, offset, data):
         """Queue an update to this file."""
 
-        _LOGGER.debug("Applying update for offset (%d) and length (%d).",
-                      offset, len(data))
+        _LOGGER.debug(
+            "Applying update for offset (%d) and length (%d).", offset, len(data)
+        )
 
         self.__is_dirty = True
         self.__fh.seek(offset)
         self.__fh.write(data)
         self.__fh.flush()
 
-    @dec_hint(prefix='OF')
+    @dec_hint(prefix="OF")
     def flush(self):
         """The OS wants to effect any changes made to the file."""
 

          
@@ 338,33 362,42 @@ class OpenedFile:
         entry = self.__cache.get(self.__entry_id)
 
         if self.__is_dirty is False:
-            _LOGGER.debug("Flush will be skipped for [%s] because there "
-                          "are no changes: [%s] IS_LOADED=[%s] "
-                          "IS_DIRTY=[%d]", 
-                          entry.id, self.file_path, self.__is_loaded, 
-                          self.__is_dirty)
+            _LOGGER.debug(
+                "Flush will be skipped for [%s] because there "
+                "are no changes: [%s] IS_LOADED=[%s] "
+                "IS_DIRTY=[%d]",
+                entry.id,
+                self.file_path,
+                self.__is_loaded,
+                self.__is_dirty,
+            )
             return
         else:
             st = os.stat(self.__temp_filepath)
 
-            _LOGGER.debug("Pushing (%d) bytes for entry with ID from [%s] to "
-                          "GD for file-path [%s].",
-                          st.st_size, entry.id, self.__temp_filepath)
+            _LOGGER.debug(
+                "Pushing (%d) bytes for entry with ID from [%s] to "
+                "GD for file-path [%s].",
+                st.st_size,
+                entry.id,
+                self.__temp_filepath,
+            )
 
-# TODO: Make sure we sync the mtime to remote.
+            # TODO: Make sure we sync the mtime to remote.
             gd = get_gdrive()
             entry = gd.update_entry(
-                        entry, 
-                        filename=entry.title, 
-                        data_filepath=self.__temp_filepath, 
-                        mime_type=self.mime_type, 
-                        parents=entry.parents, 
-                        is_hidden=self.__is_hidden)
+                entry,
+                filename=entry.title,
+                data_filepath=self.__temp_filepath,
+                mime_type=self.mime_type,
+                parents=entry.parents,
+                is_hidden=self.__is_hidden,
+            )
 
             self.__is_dirty = False
 
-# TODO(dustin): For now, we don't cleanup the temporary file. We need to 
-#               schedule this using LRU-semantics.
+            # TODO(dustin): For now, we don't cleanup the temporary file. We need to
+            #               schedule this using LRU-semantics.
 
             # Immediately update our current cached entry.
 

          
@@ 375,12 408,12 @@ class OpenedFile:
 
             _LOGGER.info("Update complete on entry with ID [%s].", entry.id)
 
-    @dec_hint(['offset', 'length'], prefix='OF')
+    @dec_hint(["offset", "length"], prefix="OF")
     def read(self, offset, length):
-        
+
         _LOGGER.debug("Reading (%d) bytes at offset (%d).", length, offset)
 
-        # We don't care if the cache file is dirty (not on this system, at 
+        # We don't care if the cache file is dirty (not on this system, at
         # least).
 
         st = os.stat(self.__temp_filepath)

          
@@ 390,12 423,21 @@ class OpenedFile:
 
         len_ = len(data)
 
-        _LOGGER.debug("(%d) bytes retrieved from slice (%d):(%d)/(%d).",
-                      len_, offset, length, st.st_size)
+        _LOGGER.debug(
+            "(%d) bytes retrieved from slice (%d):(%d)/(%d).",
+            len_,
+            offset,
+            length,
+            st.st_size,
+        )
 
         if len_ != length:
-            _LOGGER.warning("Read request is only returning (%d) bytes when "
-                            "(%d) bytes were requested.", len_, length)
+            _LOGGER.warning(
+                "Read request is only returning (%d) bytes when "
+                "(%d) bytes were requested.",
+                len_,
+                length,
+            )
 
         return data
 

          
@@ 413,9 455,10 @@ class OpenedFile:
 
         return build_filepath(self.__path, self.__filename)
 
+
 def create_for_existing_filepath(filepath):
-    """Process the file/path that was requested (potential export-type 
-    directive, dot-prefix, etc..), and build an opened-file object using 
+    """Process the file/path that was requested (potential export-type
+    directive, dot-prefix, etc.), and build an opened-file object using
     the information.
     """
 

          
@@ 426,8 469,7 @@ def create_for_existing_filepath(filepat
     try:
         result = split_path(filepath, path_resolver)
     except GdNotFoundError:
-        _LOGGER.exception("Could not process [%s] (create_for_requested).",
-                          filepath)
+        _LOGGER.exception("Could not process [%s] (create_for_requested).", filepath)
 
         raise fuse.FuseOSError(ENOENT)
 

          
@@ 439,11 481,12 @@ def create_for_existing_filepath(filepat
     path_relations = PathRelations.get_instance()
 
     try:
-        entry_clause = path_relations.get_clause_from_path(
-                        distilled_filepath)
+        entry_clause = path_relations.get_clause_from_path(distilled_filepath)
     except:
-        _LOGGER.exception("Could not try to get clause from path [%s] "
-                          "(OpenedFile).", distilled_filepath)
+        _LOGGER.exception(
+            "Could not try to get clause from path [%s] " "(OpenedFile).",
+            distilled_filepath,
+        )
 
         raise fuse.FuseOSError(EIO)
 

          
@@ 453,38 496,43 @@ def create_for_existing_filepath(filepat
 
     entry = entry_clause[CLAUSE_ENTRY]
 
-    # Normalize the mime-type by considering what's available for download. 
-    # We're going to let the requests that didn't provide a mime-type fail 
-    # right here. It will give us the opportunity to try a few options to 
+    # Normalize the mime-type by considering what's available for download.
+    # We're going to let the requests that didn't provide a mime-type fail
+    # right here. It will give us the opportunity to try a few options to
     # get the file.
 
     try:
         final_mimetype = entry.normalize_download_mimetype(mime_type)
     except ExportFormatError:
-        _LOGGER.exception("There was an export-format error "
-                          "(create_for_requested_filesystem).")
+        _LOGGER.exception(
+            "There was an export-format error " "(create_for_requested_filesystem)."
+        )
 
         raise fuse.FuseOSError(ENOENT)
     except:
-        _LOGGER.exception("Could not normalize mime-type [%s] for entry"
-                          "[%s].", mime_type, entry)
+        _LOGGER.exception(
+            "Could not normalize mime-type [%s] for entry" "[%s].", mime_type, entry
+        )
 
         raise fuse.FuseOSError(EIO)
 
     if final_mimetype != mime_type:
-        _LOGGER.info("Entry being opened will be opened as [%s] rather "
-                     "than [%s].", final_mimetype, mime_type)
+        _LOGGER.info(
+            "Entry being opened will be opened as [%s] rather " "than [%s].",
+            final_mimetype,
+            mime_type,
+        )
 
     # Build the object.
 
     return OpenedFile(
-            entry_clause[CLAUSE_ID], 
-            path, 
-            filename, 
-            is_hidden, 
-            final_mimetype)
+        entry_clause[CLAUSE_ID], path, filename, is_hidden, final_mimetype
+    )
+
 
 _management_instance = None
+
+
 def get_om():
     global _management_instance
     if _management_instance is None:

          
M gdrivefs/time_support.py +25 -19
@@ 2,46 2,50 @@ from datetime import datetime
 from dateutil.tz import tzlocal, tzutc
 from math import floor
 
-DTF_DATETIME = '%Y%m%d-%H%M%S'
-DTF_DATETIMET = '%Y-%m-%dT%H:%M:%S'
-DTF_DATE = '%Y%m%d'
-DTF_TIME = '%H%M%S'
+DTF_DATETIME = "%Y%m%d-%H%M%S"
+DTF_DATETIMET = "%Y-%m-%dT%H:%M:%S"
+DTF_DATE = "%Y%m%d"
+DTF_TIME = "%H%M%S"
+
 
 def get_normal_dt_from_rfc3339_phrase(phrase):
-    stripped = phrase[:phrase.rindex('.')]
+    stripped = phrase[: phrase.rindex(".")]
     dt = datetime.strptime(stripped, DTF_DATETIMET).replace(tzinfo=tzutc())
 
-#    print("get_normal_dt_from_rfc3339_phrase(%s) => %s" % (phrase, dt))
+    #    print("get_normal_dt_from_rfc3339_phrase(%s) => %s" % (phrase, dt))
 
     return dt
 
+
 def build_rfc3339_phrase(datetime_obj):
     datetime_phrase = datetime_obj.strftime(DTF_DATETIMET)
-    us = datetime_obj.strftime('%f')
+    us = datetime_obj.strftime("%f")
 
     seconds = datetime_obj.utcoffset().total_seconds()
 
     if seconds is None:
-        datetime_phrase += 'Z'
+        datetime_phrase += "Z"
     else:
         # Append: decimal, 6-digit uS, -/+, hours, minutes
-        datetime_phrase += ('.%.6s%s%02d:%02d' % (
-                            us.zfill(6),
-                            ('-' if seconds < 0 else '+'),
-                            abs(int(floor(seconds / 3600))),
-                            abs(seconds % 3600)
-                            ))
+        datetime_phrase += ".%.6s%s%02d:%02d" % (
+            us.zfill(6),
+            ("-" if seconds < 0 else "+"),
+            int(floor(abs(seconds) / 3600)),
+            int(floor((abs(seconds) % 3600) / 60)),
+        )
 
-#    print("build_rfc3339_phrase(%s) => %s" % (datetime_obj, datetime_phrase))
+    #    print("build_rfc3339_phrase(%s) => %s" % (datetime_obj, datetime_phrase))
     return datetime_phrase
 
+
 def get_normal_dt_from_epoch(epoch):
     dt = datetime.fromtimestamp(epoch, tzlocal())
     normal_dt = normalize_dt(dt)
 
-#    print("get_normal_dt_from_epoch(%s) => %s" % (epoch, normal_dt))
+    #    print("get_normal_dt_from_epoch(%s) => %s" % (epoch, normal_dt))
     return normal_dt
 
+
 def normalize_dt(dt=None):
     if dt is None:
         dt = datetime.now()

          
@@ 51,9 55,10 @@ def normalize_dt(dt=None):
 
     normal_dt = dt.astimezone(tzutc())
 
-#    print("normalize_dt(%s) => %s" % (dt, normal_dt))
+    #    print("normalize_dt(%s) => %s" % (dt, normal_dt))
     return normal_dt
 
+
 def get_flat_normal_fs_time_from_dt(dt=None):
     if dt is None:
         dt = datetime.now()

          
@@ 61,12 66,13 @@ def get_flat_normal_fs_time_from_dt(dt=N
     dt_normal = normalize_dt(dt)
     flat_normal = build_rfc3339_phrase(dt_normal)
 
-#    print("get_flat_normal_fs_time_from_dt(%s) => %s" % (dt, flat_normal))
+    #    print("get_flat_normal_fs_time_from_dt(%s) => %s" % (dt, flat_normal))
     return flat_normal
 
+
 def get_flat_normal_fs_time_from_epoch(epoch):
     dt_normal = get_normal_dt_from_epoch(epoch)
     flat_normal = build_rfc3339_phrase(dt_normal)
 
-#    print("get_flat_normal_fs_time_from_epoch(%s) => %s" % (epoch, flat_normal))
+    #    print("get_flat_normal_fs_time_from_epoch(%s) => %s" % (epoch, flat_normal))
     return flat_normal

          
M gdrivefs/utility.py +62 -61
@@ 12,36 12,36 @@ import sys
 class _DriveUtility:
     """General utility functions loosely related to GD."""
 
-#    # Mime-types to translate to, if they appear within the "exportLinks" list.
-#    gd_to_normal_mime_mappings = {
-#            'application/vnd.google-apps.document':        
-#                'text/plain',
-#            'application/vnd.google-apps.spreadsheet':     
-#                'application/vnd.ms-excel',
-#            'application/vnd.google-apps.presentation':    
-#/gd_to_normal_mime_mappings
-#                'application/vnd.ms-powerpoint',
-#            'application/vnd.google-apps.drawing':         
-#                'application/pdf',
-#            'application/vnd.google-apps.audio':           
-#                'audio/mpeg',
-#            'application/vnd.google-apps.photo':           
-#                'image/png',
-#            'application/vnd.google-apps.video':           
-#                'video/x-flv'
-#        }
+    #    # Mime-types to translate to, if they appear within the "exportLinks" list.
+    #    gd_to_normal_mime_mappings = {
+    #            'application/vnd.google-apps.document':
+    #                'text/plain',
+    #            'application/vnd.google-apps.spreadsheet':
+    #                'application/vnd.ms-excel',
+    #            'application/vnd.google-apps.presentation':
+    # /gd_to_normal_mime_mappings
+    #                'application/vnd.ms-powerpoint',
+    #            'application/vnd.google-apps.drawing':
+    #                'application/pdf',
+    #            'application/vnd.google-apps.audio':
+    #                'audio/mpeg',
+    #            'application/vnd.google-apps.photo':
+    #                'image/png',
+    #            'application/vnd.google-apps.video':
+    #                'video/x-flv'
+    #        }
 
     # Default extensions for mime-types.
-# TODO(dustin): !! Move this to the config directory.
-    default_extensions = { 
-            'text/plain':                       'txt',
-            'application/vnd.ms-excel':         'xls',
-            'application/vnd.ms-powerpoint':    'ppt',
-            'application/pdf':                  'pdf',
-            'audio/mpeg':                       'mp3',
-            'image/png':                        'png',
-            'video/x-flv':                      'flv'
-        }
+    # TODO(dustin): !! Move this to the config directory.
+    default_extensions = {
+        "text/plain": "txt",
+        "application/vnd.ms-excel": "xls",
+        "application/vnd.ms-powerpoint": "ppt",
+        "application/pdf": "pdf",
+        "audio/mpeg": "mp3",
+        "image/png": "png",
+        "video/x-flv": "flv",
+    }
 
     local_character_set = sys.getfilesystemencoding()
 

          
@@ 51,34 51,34 @@ class _DriveUtility:
     def __load_mappings(self):
         # Allow someone to override our default mappings of the GD types.
 
-# TODO(dustin): Isn't actually used, so commenting.
-#        gd_to_normal_mapping_filepath = \
-#            gdrivefs.conf.Conf.get('gd_to_normal_mapping_filepath')
-#
-#        try:
-#            with open(gd_to_normal_mapping_filepath, 'r') as f:
-#                self.gd_to_normal_mime_mappings.extend(json.load(f))
-#        except IOError:
-#            _logger.info("No mime-mapping was found.")
+        # TODO(dustin): Isn't actually used, so commenting.
+        #        gd_to_normal_mapping_filepath = \
+        #            gdrivefs.conf.Conf.get('gd_to_normal_mapping_filepath')
+        #
+        #        try:
+        #            with open(gd_to_normal_mapping_filepath, 'r') as f:
+        #                self.gd_to_normal_mime_mappings.extend(json.load(f))
+        #        except IOError:
+        #            _logger.info("No mime-mapping was found.")
 
-        # Allow someone to set file-extensions for mime-types, and not rely on 
+        # Allow someone to set file-extensions for mime-types, and not rely on
         # Python's educated guesses.
 
-        extension_mapping_filepath = \
-            gdrivefs.conf.Conf.get('extension_mapping_filepath')
+        extension_mapping_filepath = gdrivefs.conf.Conf.get(
+            "extension_mapping_filepath"
+        )
 
         try:
-            with open(extension_mapping_filepath, 'r') as f:
+            with open(extension_mapping_filepath, "r") as f:
                 self.default_extensions.extend(json.load(f))
         except IOError:
             _logger.info("No extension-mapping was found.")
 
     def get_first_mime_type_by_extension(self, extension):
 
-        found = [ 
-            mime_type 
-            for mime_type, temp_extension 
-            in list(self.default_extensions.items())
+        found = [
+            mime_type
+            for mime_type, temp_extension in list(self.default_extensions.items())
             if temp_extension == extension
         ]
 

          
@@ 93,24 93,25 @@ class _DriveUtility:
         # fusepy doesn't support the Python 2.x Unicode type. Expect a native
         # string (anything but a byte string).
         return original_filename
-       
-#        # If we're in an older version of Python that still defines the Unicode
-#        # class and the filename isn't unicode, translate it.
-#
-#        try:
-#            sys.modules['__builtin__'].unicode
-#        except AttributeError:
-#            pass
-#        else:
-#            if issubclass(original_filename.__class__, unicode) is False:
-#                return unicode(original_filename)#original_filename.decode(self.local_character_set)
-#
-#        # It's already unicode. Don't do anything.
-#        return original_filename
+
+    #        # If we're in an older version of Python that still defines the Unicode
+    #        # class and the filename isn't unicode, translate it.
+    #
+    #        try:
+    #            sys.modules['__builtin__'].unicode
+    #        except AttributeError:
+    #            pass
+    #        else:
+    #            if issubclass(original_filename.__class__, unicode) is False:
+    #                return unicode(original_filename)#original_filename.decode(self.local_character_set)
+    #
+    #        # It's already unicode. Don't do anything.
+    #        return original_filename
 
     def make_safe_for_filename(self, text):
         """Remove any filename-invalid characters."""
-    
-        return re.sub('[^a-z0-9\-_\.]+', '', text)
+
+        return re.sub("[^a-z0-9\-_\.]+", "", text)
+
 
 utility = _DriveUtility()

          
M gdrivefs/volume.py +200 -166
@@ 9,20 9,21 @@ from gdrivefs.utility import utility
 from threading import RLock
 import logging
 
-CLAUSE_ENTRY            = 0 # Normalized entry.
-CLAUSE_PARENT           = 1 # List of parent clauses.
-CLAUSE_CHILDREN         = 2 # List of 2-tuples describing children: (filename, clause)
-CLAUSE_ID               = 3 # Entry ID.
-CLAUSE_CHILDREN_LOADED  = 4 # All children loaded?
+CLAUSE_ENTRY = 0  # Normalized entry.
+CLAUSE_PARENT = 1  # List of parent clauses.
+CLAUSE_CHILDREN = 2  # List of 2-tuples describing children: (filename, clause)
+CLAUSE_ID = 3  # Entry ID.
+CLAUSE_CHILDREN_LOADED = 4  # All children loaded?
 
 _logger = logging.getLogger(__name__)
 
+
 def path_resolver(path):
     path_relations = PathRelations.get_instance()
 
     parent_clause = path_relations.get_clause_from_path(path)
     if not parent_clause:
-#        logging.debug("Path [%s] does not exist for split.", path)
+        #        logging.debug("Path [%s] does not exist for split.", path)
         raise GdNotFoundError()
 
     return (parent_clause[CLAUSE_ENTRY], parent_clause)

          
@@ 35,16 36,16 @@ class PathRelations:
 
     rlock = RLock()
 
-    entry_ll = { }
-    path_cache = { }
-    path_cache_byid = { }
+    entry_ll = {}
+    path_cache = {}
+    path_cache_byid = {}
 
     @staticmethod
     def get_instance():
 
         with PathRelations.rlock:
             try:
-                return PathRelations.__instance;
+                return PathRelations.__instance
             except:
                 pass
 

          
@@ 56,11 57,11 @@ class PathRelations:
 
         _logger.debug("Recursively pruning entry with ID [%s].", entry_id)
 
-        to_remove = deque([ entry_id ])
+        to_remove = deque([entry_id])
         stat_placeholders = 0
         stat_folders = 0
         stat_files = 0
-        removed = { }
+        removed = {}
         while 1:
             if not to_remove:
                 break

          
@@ 68,11 69,11 @@ class PathRelations:
             current_entry_id = to_remove.popleft()
             entry_clause = self.entry_ll[current_entry_id]
 
-            # Any entry that still has children will be transformed into a 
-            # placeholder, and not actually removed. Once the children are 
-            # removed in this recursive process, we'll naturally clean-up the 
-            # parent as a last step. Therefore, the number of placeholders will 
-            # overlap with the number of folders (a placeholder must represent 
+            # Any entry that still has children will be transformed into a
+            # placeholder, and not actually removed. Once the children are
+            # removed in this recursive process, we'll naturally clean-up the
+            # parent as a last step. Therefore, the number of placeholders will
+            # overlap with the number of folders (a placeholder must represent
             # a folder. It is only there because the entry had children).
 
             if not entry_clause[0]:

          
@@ 88,8 89,9 @@ class PathRelations:
 
             (current_orphan_ids, current_children_clauses) = result
 
-            children_ids_to_remove = [ children[3] for children 
-                                                in current_children_clauses ]
+            children_ids_to_remove = [
+                children[3] for children in current_children_clauses
+            ]
 
             to_remove.extend(current_orphan_ids)
             to_remove.extend(children_ids_to_remove)

          
@@ 97,9 99,9 @@ class PathRelations:
         return (list(removed.keys()), (stat_folders + stat_files))
 
     def __remove_entry(self, entry_id, is_update=False):
-        """Remove an entry. Updates references from linked entries, but does 
-        not remove any other entries. We return a tuple, where the first item 
-        is a list of any parents that, themselves, no longer have parents or 
+        """Remove an entry. Updates references from linked entries, but does
+        not remove any other entries. We return a tuple, where the first item
+        is a list of any parents that, themselves, no longer have parents or
         children, and the second item is a list of children to this entry.
         """
 

          
@@ 107,7 109,7 @@ class PathRelations:
             # Ensure that the entry-ID is valid.
 
             entry_clause = self.entry_ll[entry_id]
-            
+
             # Clip from path cache.
 
             if entry_id in self.path_cache_byid:

          
@@ 120,47 122,56 @@ class PathRelations:
             entry_parents = entry_clause[CLAUSE_PARENT]
             entry_children_tuples = entry_clause[CLAUSE_CHILDREN]
 
-            parents_to_remove = [ ]
+            parents_to_remove = []
             # children_to_remove = [ ]
             if entry_parents:
                 for parent_clause in entry_parents:
-                    # A placeholder has an entry and parents field (fields 
+                    # A placeholder has an entry and parents field (fields
                     # 0, 1) of None.
 
-                    (parent, parent_parents, parent_children, parent_id, \
-                        all_children_loaded) = parent_clause
+                    (
+                        parent,
+                        parent_parents,
+                        parent_children,
+                        parent_id,
+                        all_children_loaded,
+                    ) = parent_clause
 
                     if all_children_loaded and not is_update:
                         all_children_loaded = False
 
-                    # Integrity-check that the parent we're referencing is 
+                    # Integrity-check that the parent we're referencing is
                     # still in the list.
                     if parent_id not in self.entry_ll:
-                        _logger.warn("Parent with ID [%s] on entry with ID "
-                                     "[%s] is not valid." % 
-                                     (parent_id, entry_id))
+                        _logger.warning(
+                            "Parent with ID [%s] on entry with ID "
+                            "[%s] is not valid." % (parent_id, entry_id)
+                        )
                         continue
-            
+
                     # old_children_filenames = [ child_tuple[0] for child_tuple
                     #                            in parent_children ]
 
-                    updated_children = [ child_tuple for child_tuple 
-                                         in parent_children 
-                                         if child_tuple[1] != entry_clause ]
+                    updated_children = [
+                        child_tuple
+                        for child_tuple in parent_children
+                        if child_tuple[1] != entry_clause
+                    ]
 
                     if parent_children != updated_children:
                         parent_children[:] = updated_children
 
                     else:
-                        _logger.error("Entry with ID [%s] referenced parent "
-                                      "with ID [%s], but not vice-versa." % 
-                                      (entry_id, parent_id))
+                        _logger.error(
+                            "Entry with ID [%s] referenced parent "
+                            "with ID [%s], but not vice-versa." % (entry_id, parent_id)
+                        )
 
                     # updated_children_filenames = [ child_tuple[0]
                     #                                for child_tuple
                     #                                in parent_children ]
 
-                    # If the parent now has no children and is a placeholder, 
+                    # If the parent now has no children and is a placeholder,
                     # advise that we remove it.
                     if not parent_children and parent == None:
                         parents_to_remove.append(parent_id)

          
@@ 170,7 181,7 @@ class PathRelations:
             set_placeholder = len(entry_children_tuples) > 0
 
             if set_placeholder:
-                # Just nullify the entry information, but leave the clause. We 
+                # Just nullify the entry information, but leave the clause. We
                 # had children that still need a parent.
 
                 entry_clause[0] = None

          
@@ 178,24 189,25 @@ class PathRelations:
             else:
                 del self.entry_ll[entry_id]
 
-        children_entry_clauses = [ child_tuple[1] for child_tuple 
-                                    in entry_children_tuples ]
+        children_entry_clauses = [
+            child_tuple[1] for child_tuple in entry_children_tuples
+        ]
 
         return (parents_to_remove, children_entry_clauses)
 
     def remove_entry_all(self, entry_id, is_update=False):
-        """Remove the the entry from both caches. EntryCache is more of an 
-        entity look-up, whereas this (PathRelations) has a bunch of expanded 
-        data regarding relationships and paths. This call will first remove the 
+        """Remove the the entry from both caches. EntryCache is more of an
+        entity look-up, whereas this (PathRelations) has a bunch of expanded
+        data regarding relationships and paths. This call will first remove the
         relationships from here, and then the entry from the EntryCache.
 
         We do it in this order because if we were to remove entry from the core
-        library (EntryCache) first, then all of the relationships here will 
+        library (EntryCache) first, then all of the relationships here will
         suddenly become invalid, and although the entry will be disregistered,
         because it has references from this linked-list, those objects will be
-        very much alive. On the other hand, if we remove the entry from 
+        very much alive. On the other hand, if we remove the entry from
         PathRelations first, then, because of the locks, PathRelations will not
-        be able to touch the relationships until after we're done, here. Ergo, 
+        be able to touch the relationships until after we're done, here. Ergo,
         the only thing that can happen is that something may look at the entry
         in the library.
         """

          
@@ 203,15 215,16 @@ class PathRelations:
         with PathRelations.rlock:
             cache = EntryCache.get_instance().cache
 
-            removed_ids = [ entry_id ]
+            removed_ids = [entry_id]
             if self.is_cached(entry_id):
                 try:
-                    removed_tuple = self.remove_entry_recursive(entry_id, \
-                                                               is_update)
+                    removed_tuple = self.remove_entry_recursive(entry_id, is_update)
                 except:
-                    _logger.exception("Could not remove entry-ID from "
-                                      "PathRelations. Still continuing, "
-                                      "though.")
+                    _logger.exception(
+                        "Could not remove entry-ID from "
+                        "PathRelations. Still continuing, "
+                        "though."
+                    )
 
                 (removed_ids, number_removed) = removed_tuple
 

          
@@ 220,40 233,45 @@ class PathRelations:
                     try:
                         cache.remove(removed_id)
                     except:
-                        _logger.exception("Could not remove entry-ID from "
-                                          "the core cache. Still "
-                                          "continuing, though.")
+                        _logger.exception(
+                            "Could not remove entry-ID from "
+                            "the core cache. Still "
+                            "continuing, though."
+                        )
 
     def get_proper_filenames(self, entry_clause):
         """Return what was determined to be the unique filename for this "
-        particular entry for each of its respective parents. This will return 
-        the standard 'title' value as a scalar when the root entry, and a 
+        particular entry for each of its respective parents. This will return
+        the standard 'title' value as a scalar when the root entry, and a
         dictionary of parent-IDs to unique-filenames when not.
 
-        This call is necessary because GD allows duplicate filenames until any 
-        one folder. Note that a consequence of both this and the fact that GD 
-        allows the same file to be listed under multiple folders means that a 
-        file may look like "filename" under one and "filename (2)" under 
+        This call is necessary because GD allows duplicate filenames within any
+        one folder. Note that a consequence of both this and the fact that GD
+        allows the same file to be listed under multiple folders means that a
+        file may look like "filename" under one and "filename (2)" under
         another.
         """
 
         with PathRelations.rlock:
-            found = { }
+            found = {}
             parents = entry_clause[1]
             if not parents:
                 return entry_clause[0].title_fs
 
             else:
                 for parent_clause in parents:
-                    matching_children = [filename for filename, child_clause 
-                                                  in parent_clause[2] 
-                                                  if child_clause == entry_clause]
+                    matching_children = [
+                        filename
+                        for filename, child_clause in parent_clause[2]
+                        if child_clause == entry_clause
+                    ]
                     if not matching_children:
-                        _logger.error("No matching entry-ID [%s] was not "
-                                      "found among children of entry's "
-                                      "parent with ID [%s] for proper-"
-                                      "filename lookup." % 
-                                      (entry_clause[3], parent_clause[3]))
+                        _logger.error(
+                            "No matching entry-ID [%s] was not "
+                            "found among children of entry's "
+                            "parent with ID [%s] for proper-"
+                            "filename lookup." % (entry_clause[3], parent_clause[3])
+                        )
 
                     else:
                         found[parent_clause[3]] = matching_children[0]

          
@@ 267,14 285,15 @@ class PathRelations:
                 return None
 
             if normalized_entry.__class__ is not NormalEntry:
-                raise Exception("PathRelations expects to register an object "
-                                "of type NormalEntry, not [%s]." % 
-                                (type(normalized_entry)))
+                raise Exception(
+                    "PathRelations expects to register an object "
+                    "of type NormalEntry, not [%s]." % (type(normalized_entry))
+                )
 
             entry_id = normalized_entry.id
 
-#            self.__log.debug("Registering entry with ID [%s] within path-"
-#                             "relations.", entry_id)
+            #            self.__log.debug("Registering entry with ID [%s] within path-"
+            #                             "relations.", entry_id)
 
             if self.is_cached(entry_id, include_placeholders=False):
                 self.remove_entry_recursive(entry_id, True)

          
@@ 285,9 304,9 @@ class PathRelations:
 
             # We do a linked list using object references.
             # (
-            #   normalized_entry, 
-            #   [ parent clause, ... ], 
-            #   [ child clause, ... ], 
+            #   normalized_entry,
+            #   [ parent clause, ... ],
+            #   [ child clause, ... ],
             #   entry-ID,
             #   < boolean indicating that we know about all children >
             # )

          
@@ 295,16 314,17 @@ class PathRelations:
             if self.is_cached(entry_id, include_placeholders=True):
                 entry_clause = self.entry_ll[entry_id]
                 entry_clause[CLAUSE_ENTRY] = normalized_entry
-                entry_clause[CLAUSE_PARENT] = [ ]
+                entry_clause[CLAUSE_PARENT] = []
             else:
-                entry_clause = [normalized_entry, [ ], [ ], entry_id, False]
+                entry_clause = [normalized_entry, [], [], entry_id, False]
                 self.entry_ll[entry_id] = entry_clause
 
             entry_parents = entry_clause[CLAUSE_PARENT]
             title_fs = normalized_entry.title_fs
 
-            parent_ids = normalized_entry.parents if normalized_entry.parents \
-                                                  is not None else []
+            parent_ids = (
+                normalized_entry.parents if normalized_entry.parents is not None else []
+            )
 
             for parent_id in parent_ids:
 

          
@@ 312,7 332,7 @@ class PathRelations:
                 if self.is_cached(parent_id, include_placeholders=True):
                     parent_clause = self.entry_ll[parent_id]
                 else:
-                    parent_clause = [None, None, [ ], parent_id, False]
+                    parent_clause = [None, None, [], parent_id, False]
                     self.entry_ll[parent_id] = parent_clause
 
                 if parent_clause not in entry_parents:

          
@@ 321,32 341,36 @@ class PathRelations:
                 parent_children = parent_clause[CLAUSE_CHILDREN]
                 filename_base = title_fs
 
-                # Register among the children of this parent, but make sure we 
+                # Register among the children of this parent, but make sure we
                 # have a unique filename among siblings.
 
                 i = 0
                 current_variation = filename_base
                 elected_variation = None
                 while i <= 255:
-                    if not [ child_name_tuple 
-                             for child_name_tuple 
-                             in parent_children 
-                             if child_name_tuple[0] == current_variation ]:
+                    if not [
+                        child_name_tuple
+                        for child_name_tuple in parent_children
+                        if child_name_tuple[0] == current_variation
+                    ]:
                         elected_variation = current_variation
                         break
-                        
+
                     i += 1
-                    current_variation = filename_base + \
-                                        utility.translate_filename_charset(
-                                            ' (%d)' % (i))
+                    current_variation = (
+                        filename_base
+                        + utility.translate_filename_charset(" (%d)" % (i))
+                    )
 
                 if elected_variation == None:
-                    _logger.error("Could not register entry with ID [%s]. "
-                                  "There are too many duplicate names in "
-                                  "that directory." % (entry_id))
+                    _logger.error(
+                        "Could not register entry with ID [%s]. "
+                        "There are too many duplicate names in "
+                        "that directory." % (entry_id)
+                    )
                     return
 
-                # Register us in the list of children on this parents 
+                # Register us in the list of children on this parents
                 # child-tuple list.
                 parent_children.append((elected_variation, entry_clause))
 

          
@@ 361,7 385,7 @@ class PathRelations:
             # child_ids = [ ]
             if children:
                 for child in children:
-                        self.register_entry(child)
+                    self.register_entry(child)
 
                 parent_clause = self.__get_entry_clause_by_id(parent_id)
 

          
@@ 370,15 394,17 @@ class PathRelations:
         return children
 
     def get_children_from_entry_id(self, entry_id):
-        """Return the filenames contained in the folder with the given 
+        """Return the filenames contained in the folder with the given
         entry-ID.
         """
 
         with PathRelations.rlock:
             entry_clause = self.__get_entry_clause_by_id(entry_id)
             if not entry_clause:
-                message = ("Can not list the children for an unavailable "
-                           "entry with ID [%s]." % (entry_id))
+                message = (
+                    "Can not list the children for an unavailable "
+                    "entry with ID [%s]." % (entry_id)
+                )
 
                 _logger.error(message)
                 raise Exception(message)

          
@@ 387,14 413,16 @@ class PathRelations:
                 self.__load_all_children(entry_id)
 
             if not entry_clause[0].is_directory:
-                message = ("Could not get child filenames for non-directory with "
-                           "entry-ID [%s]." % (entry_id))
+                message = (
+                    "Could not get child filenames for non-directory with "
+                    "entry-ID [%s]." % (entry_id)
+                )
 
                 _logger.error(message)
                 raise Exception(message)
 
-#            self.__log.debug("(%d) children found.",
-#                             len(entry_clause[CLAUSE_CHILDREN]))
+            #            self.__log.debug("(%d) children found.",
+            #                             len(entry_clause[CLAUSE_CHILDREN]))
 
             return entry_clause[CLAUSE_CHILDREN]
 

          
@@ 402,15 430,16 @@ class PathRelations:
 
         children_tuples = self.get_children_from_entry_id(entry_id)
 
-        children_entries = [(child_tuple[0], child_tuple[1][CLAUSE_ENTRY]) 
-                                for child_tuple 
-                                in children_tuples]
+        children_entries = [
+            (child_tuple[0], child_tuple[1][CLAUSE_ENTRY])
+            for child_tuple in children_tuples
+        ]
 
         return children_entries
 
     def get_clause_from_path(self, filepath):
 
-#        self.__log.debug("Getting clause for path [%s].", filepath)
+        #        self.__log.debug("Getting clause for path [%s].", filepath)
 
         with PathRelations.rlock:
             path_results = self.find_path_components_goandget(filepath)

          
@@ 420,7 449,7 @@ class PathRelations:
                 return None
 
             entry_id = path_results[0][-1]
-#            self.__log.debug("Found entry with ID [%s].", entry_id)
+            #            self.__log.debug("Found entry with ID [%s].", entry_id)
 
             # Make sure the entry is more than a placeholder.
             self.__get_entry_clause_by_id(entry_id)

          
@@ 428,8 457,8 @@ class PathRelations:
             return self.entry_ll[entry_id]
 
     def find_path_components_goandget(self, path):
-        """Do the same thing that find_path_components() does, except that 
-        when we don't have record of a path-component, try to go and find it 
+        """Do the same thing that find_path_components() does, except that
+        when we don't have record of a path-component, try to go and find it
         among the children of the previous path component, and then try again.
         """
 

          
@@ 439,8 468,8 @@ class PathRelations:
             previous_results = []
             i = 0
             while 1:
-#                self.__log.debug("Attempting to find path-components (go and "
-#                                 "get) for path [%s].  CYCLE= (%d)", path, i)
+                #                self.__log.debug("Attempting to find path-components (go and "
+                #                                 "get) for path [%s].  CYCLE= (%d)", path, i)
 
                 # See how many components can be found in our current cache.
 

          
@@ 451,8 480,8 @@ class PathRelations:
                 if result[2] == True:
                     return result
 
-                # If we could not resolve the entire path, and we're no more 
-                # successful than a prior attempt, we'll just have to return a 
+                # If we could not resolve the entire path, and we're no more
+                # successful than a prior attempt, we'll just have to return a
                 # partial.
 
                 num_results = len(result[0])

          
@@ 461,91 490,95 @@ class PathRelations:
 
                 previous_results.append(num_results)
 
-                # Else, we've encountered a component/depth of the path that we 
+                # Else, we've encountered a component/depth of the path that we
                 # don't currently know about.
-# TODO: This is going to be the general area that we'd have to adjust to 
-#        support multiple, identical entries. This currently only considers the 
-#        first result. We should rewrite this to be recursive in order to make 
-#        it easier to keep track of a list of results.
+                # TODO: This is going to be the general area that we'd have to adjust to
+                #        support multiple, identical entries. This currently only considers the
+                #        first result. We should rewrite this to be recursive in order to make
+                #        it easier to keep track of a list of results.
                 # The parent is the last one found, or the root if none.
-                parent_id = result[0][num_results - 1] \
-                                if num_results \
-                                else AccountInfo.get_instance().root_id
+                parent_id = (
+                    result[0][num_results - 1]
+                    if num_results
+                    else AccountInfo.get_instance().root_id
+                )
 
                 # The child will be the first part that was not found.
                 child_name = result[1][num_results]
 
                 children = gd.list_files(
-                                parent_id=parent_id, 
-                                query_is_string=child_name)
-                
+                    parent_id=parent_id, query_is_string=child_name
+                )
+
                 for child in children:
                     self.register_entry(child)
 
                 # filenames_phrase = ', '.join([ candidate.id for candidate
                 #                                            in children ])
-#                self.__log.debug("(%d) candidate children were found: %s",
-#                                 len(children), filenames_phrase)
+                #                self.__log.debug("(%d) candidate children were found: %s",
+                #                                 len(children), filenames_phrase)
 
                 i += 1
 
     def __find_path_components(self, path):
-        """Given a path, return a list of all Google Drive entries that 
-        comprise each component, or as many as can be found. As we've ensured 
-        that all sibling filenames are unique, there can not be multiple 
+        """Given a path, return a list of all Google Drive entries that
+        comprise each component, or as many as can be found. As we've ensured
+        that all sibling filenames are unique, there can not be multiple
         matches.
         """
 
-        if path[0] == '/':
+        if path[0] == "/":
             path = path[1:]
 
-        if len(path) and path[-1] == '/':
+        if len(path) and path[-1] == "/":
             path = path[:-1]
 
         if path in self.path_cache:
             return self.path_cache[path]
 
         with PathRelations.rlock:
-#            self.__log.debug("Locating entry information for path [%s].", path)
+            #            self.__log.debug("Locating entry information for path [%s].", path)
 
             root_id = AccountInfo.get_instance().root_id
 
             # Ensure that the root node is loaded.
             self.__get_entry_clause_by_id(root_id)
 
-            path_parts = path.split('/')
+            path_parts = path.split("/")
 
             entry_ptr = root_id
             # parent_id = None
             i = 0
             num_parts = len(path_parts)
-            results = [ ]
+            results = []
             while i < num_parts:
-                child_filename_to_search_fs = utility. \
-                    translate_filename_charset(path_parts[i])
+                child_filename_to_search_fs = utility.translate_filename_charset(
+                    path_parts[i]
+                )
 
-#                self.__log.debug("Checking for part (%d) [%s] under parent "
-#                                 "with ID [%s].",
-#                                 i, child_filename_to_search_fs, entry_ptr)
+                #                self.__log.debug("Checking for part (%d) [%s] under parent "
+                #                                 "with ID [%s].",
+                #                                 i, child_filename_to_search_fs, entry_ptr)
 
                 current_clause = self.entry_ll[entry_ptr]
-            
-                # Search this entry's children for the next filename further down 
-                # in the path among this entry's children. Any duplicates should've 
-                # already beeen handled as entries were stored. We name the variable 
-                # just to emphasize that no ambiguity -as well as- no error will 
+
+                # Search this entry's children for the next filename further down
+                # in the path among this entry's children. Any duplicates should've
+                # already been handled as entries were stored. We name the variable
+                # just to emphasize that no ambiguity -as well as- no error will
                 # occur in the traversal process.
                 # first_matching_child_clause = None
                 children = current_clause[2]
-            
+
                 # If they just wanted the "" path (root), return the root-ID.
                 if path == "":
-                    found = [ root_id ]
+                    found = [root_id]
                 else:
-                    found = [ child_tuple[1][3] 
-                              for child_tuple 
-                              in children 
-                              if child_tuple[0] == child_filename_to_search_fs ]
+                    found = [
+                        child_tuple[1][3]
+                        for child_tuple in children
+                        if child_tuple[0] == child_filename_to_search_fs
+                    ]
 
                 if found:
                     results.append(found[0])

          
@@ 565,7 598,7 @@ class PathRelations:
                 i += 1
 
     def __get_entry_clause_by_id(self, entry_id):
-        """We may keep a linked-list of GD entries, but what we have may just 
+        """We may keep a linked-list of GD entries, but what we have may just
         be placeholders. This function will make sure the data is actually here.
         """
 

          
@@ 580,8 613,10 @@ class PathRelations:
 
     def is_cached(self, entry_id, include_placeholders=False):
 
-        return (entry_id in self.entry_ll and (include_placeholders or \
-                                               self.entry_ll[entry_id][0]))
+        return entry_id in self.entry_ll and (
+            include_placeholders or self.entry_ll[entry_id][0]
+        )
+
 
 class EntryCache(CacheClientBase):
     """Manages our knowledge of file entries."""

          
@@ 589,9 624,9 @@ class EntryCache(CacheClientBase):
     def __init__(self, *args, **kwargs):
         super(EntryCache, self).__init__(*args, **kwargs)
 
-# TODO(dustin): This isn't used, and we don't think that it necessarily needs 
-#               to be instantiated, now.
-#        about = AccountInfo.get_instance()
+        # TODO(dustin): This isn't used, and we don't think that it necessarily needs
+        #               to be instantiated, now.
+        #        about = AccountInfo.get_instance()
         self.__gd = get_gdrive()
 
     def __get_entries_to_update(self, requested_entry_id):

          
@@ 601,7 636,7 @@ class EntryCache(CacheClientBase):
 
         affected_entries = [requested_entry_id]
         considered_entries = {}
-        max_readahead_entries = Conf.get('max_readahead_entries')
+        max_readahead_entries = Conf.get("max_readahead_entries")
         for parent_id in parent_ids:
             child_ids = self.__gd.get_children_under_parent_id(parent_id)
 

          
@@ 639,9 674,9 @@ class EntryCache(CacheClientBase):
 
         # Read the entries, now.
 
-# TODO: We have to determine when this is called, and either remove it 
-# (if it's not), or find another way to not have to load them 
-# individually.
+        # TODO: We have to determine when this is called, and either remove it
+        # (if it's not), or find another way to not have to load them
+        # individually.
 
         retrieved = self.__gd.get_entries(affected_entries)
 

          
@@ 676,5 711,4 @@ class EntryCache(CacheClientBase):
             path_relations.remove_entry_recursive(entry_id)
 
     def get_max_cache_age_seconds(self):
-        return Conf.get('cache_entries_max_age')
-
+        return Conf.get("cache_entries_max_age")

          
M setup.py +28 -28
@@ 7,48 7,48 @@ import gdrivefs
 
 _APP_PATH = os.path.dirname(gdrivefs.__file__)
 
-with open(os.path.join(_APP_PATH, 'resources', 'README.rst')) as f:
-      long_description = f.read()
+with open(os.path.join(_APP_PATH, "resources", "README.rst")) as f:
+    long_description = f.read()
 
-with open(os.path.join(_APP_PATH, 'resources', 'requirements.txt')) as f:
-      install_requires = [s.strip() for s in f.readlines()]
+with open(os.path.join(_APP_PATH, "resources", "requirements.txt")) as f:
+    install_requires = [s.strip() for s in f.readlines()]
 
 setuptools.setup(
-    name='gdrivefs',
+    name="gdrivefs",
     version=gdrivefs.__version__,
     description="A complete FUSE adapter for Google Drive.",
     long_description=long_description,
     classifiers=[
-        'Topic :: System :: Filesystems',
-        'Development Status :: 4 - Beta',
-        'Environment :: Console',
-        'Intended Audience :: End Users/Desktop',
-        'Intended Audience :: System Administrators',
-        'License :: OSI Approved :: BSD License',
-        'Natural Language :: English',
-        'Operating System :: POSIX',
-        'Programming Language :: Python',
-        'Topic :: Internet',
-        'Topic :: Utilities'
+        "Topic :: System :: Filesystems",
+        "Development Status :: 4 - Beta",
+        "Environment :: Console",
+        "Intended Audience :: End Users/Desktop",
+        "Intended Audience :: System Administrators",
+        "License :: OSI Approved :: BSD License",
+        "Natural Language :: English",
+        "Operating System :: POSIX",
+        "Programming Language :: Python",
+        "Topic :: Internet",
+        "Topic :: Utilities",
     ],
-    keywords='google-drive google drive fuse filesystem',
-    author='Dustin Oprea',
-    author_email='myselfasunder@gmail.com',
-    url='https://github.com/dsoprea/GDriveFS',
-    license='GPL 2',
-    packages=setuptools.find_packages(exclude=['tests']),
+    keywords="google-drive google drive fuse filesystem",
+    author="Dustin Oprea",
+    author_email="myselfasunder@gmail.com",
+    url="https://github.com/dsoprea/GDriveFS",
+    license="GPL 2",
+    packages=setuptools.find_packages(exclude=["tests"]),
     include_package_data=True,
     package_data={
-        'gdrivefs': [
-            'resources/README.rst',
-            'resources/requirements.txt',
+        "gdrivefs": [
+            "resources/README.rst",
+            "resources/requirements.txt",
         ],
     },
     zip_safe=False,
     install_requires=install_requires,
     scripts=[
-        'gdrivefs/resources/scripts/gdfs',
-        'gdrivefs/resources/scripts/gdfstool',
-        'gdrivefs/resources/scripts/gdfsdumpentry',
+        "gdrivefs/resources/scripts/gdfs",
+        "gdrivefs/resources/scripts/gdfstool",
+        "gdrivefs/resources/scripts/gdfsdumpentry",
     ],
 )
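
The hunks above are consistent with stable Black output under the py39 target. Assuming the whole tree was formatted from the repository root (the exact invocation is not recorded in this patch), a command along these lines would reproduce the result:

    black --target-version py39 gdrivefs setup.py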