Reformat with black, targeting py39
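
The exact invocation isn't recorded in this patch; a minimal sketch of the command presumably used, assuming black is installed and run from the repository root:

    black --target-version py39 gdrivefs/
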
M gdrivefs/__init__.py +1 -1
@@ 1,1 1,1 @@ 
-__version__ = '0.14.9'
+__version__ = "0.14.9"

          
M gdrivefs/account_info.py +7 -6
@@ 8,10 8,12 @@ import logging
 class AccountInfo(LiveReaderBase):
     """Encapsulates our account info."""
 
-    __map = {'root_id': 'rootFolderId',
-             'largest_change_id': ('largestChangeId', int),
-             'quota_bytes_total': ('quotaBytesTotal', int),
-             'quota_bytes_used': ('quotaBytesUsed', int)}
+    __map = {
+        "root_id": "rootFolderId",
+        "largest_change_id": ("largestChangeId", int),
+        "quota_bytes_total": ("quotaBytesTotal", int),
+        "quota_bytes_used": ("quotaBytesUsed", int),
+    }
 
     def get_data(self):
         gd = get_gdrive()

          
@@ 20,7 22,7 @@ class AccountInfo(LiveReaderBase):
     def __getattr__(self, key):
         target = AccountInfo.__map[key]
         _type = None
-        
+
         if target.__class__ == tuple:
             (target, _type) = target
 

          
@@ 33,4 35,3 @@ class AccountInfo(LiveReaderBase):
     @property
     def keys(self):
         return list(AccountInfo.__map.keys())
-

          
M gdrivefs/auto_auth.py +21 -24
@@ 48,13 48,11 @@ class _WebserverMonitor:
         self.__server_state_e.clear()
 
     def stop(self):
-        assert \
-            self.__server_state_e is not None, \
-            "Thread doesn't appear to have ever been started."
+        assert (
+            self.__server_state_e is not None
+        ), "Thread doesn't appear to have ever been started."
 
-        assert \
-            self.__t.is_alive() is True, \
-            "Thread doesn't appear to be running."
+        assert self.__t.is_alive() is True, "Thread doesn't appear to be running."
 
         self.__server_state_e.clear()
         self.__s.shutdown()

          
@@ 95,39 93,39 @@ class _WebserverMonitor:
 
                 # It's not an authorization response. Bail with the same error
                 # the library would normally send for unhandled requests.
-                if 'code' not in arguments:
+                if "code" not in arguments:
                     self.send_error(
-                        501,
-                        "Unsupported method ({}): {}".format(
-                        self.command, hr.path))
+                        501, "Unsupported method ({}): {}".format(self.command, hr.path)
+                    )
 
                     return
 
-                authcode = arguments['code'][0]
+                authcode = arguments["code"][0]
                 _LOGGER.debug("Received authcode [{}]".format(authcode))
 
                 monitor._authcode = authcode
 
                 monitor._request_state_e.set()
 
-                self.send_response(200, message='OK')
+                self.send_response(200, message="OK")
 
-                self.send_header("Content-type", 'text/html')
+                self.send_header("Content-type", "text/html")
                 self.end_headers()
 
-                self.wfile.write(b"""\
+                self.wfile.write(
+                    b"""\
 <html>
 <head></head>
 <body>
 GDFS authorization recorded.
 </body>
 </html>
-""")
+"""
+                )
 
             def log_message(self, format, *args):
                 pass
 
-
         class Server(socketserver.TCPServer):
             def server_activate(self, *args, **kwargs):
                 r = socketserver.TCPServer.server_activate(self, *args, **kwargs)

          
@@ 139,7 137,7 @@ GDFS authorization recorded.
 
         # Our little webserver. (0) for the port will automatically assign it
         # to some unused port.
-        binding = ('localhost', 0)
+        binding = ("localhost", 0)
         self.__s = Server(binding, Handler)
 
         _LOGGER.debug("Created server.")

          
@@ 157,9 155,9 @@ GDFS authorization recorded.
 
     @property
     def port(self):
-        assert \
-            self._port is not None, \
-            "Thread hasn't been started or a port hasn't been assigned."
+        assert (
+            self._port is not None
+        ), "Thread hasn't been started or a port hasn't been assigned."
 
         return self._port
 

          
@@ 181,7 179,7 @@ class AutoAuth:
     def get_and_write_creds(self):
         _LOGGER.info("Requesting authorization.")
 
-        creds_filepath = gdrivefs.conf.Conf.get('auth_cache_filepath')
+        creds_filepath = gdrivefs.conf.Conf.get("auth_cache_filepath")
         wm = _WebserverMonitor(creds_filepath)
 
         # Start the webserver.

          
@@ 189,9 187,8 @@ class AutoAuth:
 
         # Open a browser window to request authorization.
 
-        redirect_uri = 'http://localhost:{}'.format(wm.port)
-        oa = gdrivefs.oauth_authorize.OauthAuthorize(
-                redirect_uri=redirect_uri)
+        redirect_uri = "http://localhost:{}".format(wm.port)
+        oa = gdrivefs.oauth_authorize.OauthAuthorize(redirect_uri=redirect_uri)
 
         url = oa.step1_get_auth_url()
         _LOGGER.debug("Opening browser: [{}]".format(url))

          
M gdrivefs/buffer_segments.py +105 -75
@@ 6,7 6,7 @@ import pprint
 
 
 class BufferSegments:
-    """Describe a series of strings that, when concatenated, represent the 
+    """Describe a series of strings that, when concatenated, represent the
     whole file. This is used to try and contain the amount of the data that has
     the be copied as updates are applied to the file.
     """

          
@@ 14,17 14,18 @@ class BufferSegments:
     __locker = Lock()
 
     def __init__(self, data, block_size):
-        # An array of 2-tuples: (offset, string). We should allow data to be 
-        # empty. Thus, we should allow a segment to be empty (useful, in 
+        # An array of 2-tuples: (offset, string). We should allow data to be
+        # empty. Thus, we should allow a segment to be empty (useful, in
         # general).
         self.__segments = [(0, data)]
 
         self.__block_size = block_size
 
     def __repr__(self):
-        return ("<BSEGS  SEGS= (%(segs)d) BLKSIZE= (%(block_size)d)>" % 
-                { 'segs': len(self.__segments), 
-                  'block_size': self.__block_size })
+        return "<BSEGS  SEGS= (%(segs)d) BLKSIZE= (%(block_size)d)>" % {
+            "segs": len(self.__segments),
+            "block_size": self.__block_size,
+        }
 
     def dump(self):
         pprint(self.__segments)

          
@@ 36,9 37,9 @@ class BufferSegments:
         while seg_index < len(self.__segments):
             seg_offset = self.__segments[seg_index][0]
 
-            # If the current segment starts after the point of insertion.        
+            # If the current segment starts after the point of insertion.
             if seg_offset > offset:
-                return (seg_index - 1)
+                return seg_index - 1
 
             # If the insertion point is at the beginning of this segment.
             elif seg_offset == offset:

          
@@ 46,18 47,18 @@ class BufferSegments:
 
             seg_index += 1
 
-        # If we get here, we never ran into a segment with an offset greater 
+        # If we get here, we never ran into a segment with an offset greater
         # that the insertion offset.
-        return (seg_index - 1)
+        return seg_index - 1
 
     def __split(self, seg_index, offset):
-        """Split the given segment at the given offset. Offset is relative to 
-        the particular segment (an offset of '0' refers to the beginning of the 
-        segment). At finish, seg_index will represent the segment containing 
-        the first half of the original data (and segment with index 
+        """Split the given segment at the given offset. Offset is relative to
+        the particular segment (an offset of '0' refers to the beginning of the
+        segment). At finish, seg_index will represent the segment containing
+        the first half of the original data (and segment with index
         (seg_index + 1) will contain the second).
         """
-    
+
         (seg_offset, seg_data) = self.__segments[seg_index]
 
         first_half = seg_data[0:offset]

          
@@ 65,19 66,20 @@ class BufferSegments:
         self.__segments.insert(seg_index, firsthalf_segment)
 
         second_half = seg_data[offset:]
-        if second_half == '':
-            raise IndexError("Can not use offset (%d) to split segment (%d) "
-                             "of length (%d)." % 
-                             (offset, seg_index, len(seg_data)))
-        
+        if second_half == "":
+            raise IndexError(
+                "Can not use offset (%d) to split segment (%d) "
+                "of length (%d)." % (offset, seg_index, len(seg_data))
+            )
+
         secondhalf_segment = (seg_offset + offset, second_half)
         self.__segments[seg_index + 1] = secondhalf_segment
 
         return (firsthalf_segment, secondhalf_segment)
 
     def apply_update(self, offset, data):
-        """Find the correct place to insert the data, splitting existing data 
-        segments into managable fragments ("segments"), overwriting a number of 
+        """Find the correct place to insert the data, splitting existing data
+        segments into managable fragments ("segments"), overwriting a number of
         bytes equal in size to the incoming data. If the incoming data will
         overflow the end of the list, grow the list.
         """

          
@@ 85,99 87,119 @@ class BufferSegments:
         with self.__locker:
             data_len = len(data)
 
-            if len(self.__segments) == 1 and self.__segments[0][1] == '':
+            if len(self.__segments) == 1 and self.__segments[0][1] == "":
                 self.__segments = []
                 simple_append = True
             else:
-                simple_append = (offset >= self.length)
+                simple_append = offset >= self.length
 
-            _logger.debug("Applying update of (%d) bytes at offset (%d). "
-                          "Current segment count is (%d). Total length is "
-                          "(%d). APPEND= [%s]",
-                          data_len, offset, len(self.__segments), self.length, 
-                          simple_append)
+            _logger.debug(
+                "Applying update of (%d) bytes at offset (%d). "
+                "Current segment count is (%d). Total length is "
+                "(%d). APPEND= [%s]",
+                data_len,
+                offset,
+                len(self.__segments),
+                self.length,
+                simple_append,
+            )
 
             if not simple_append:
                 seg_index = self.__find_segment(offset)
 
-                # Split the existing segment(s) rather than doing any 
-                # concatenation. Theoretically, the effort of writing to an 
+                # Split the existing segment(s) rather than doing any
+                # concatenation. Theoretically, the effort of writing to an
                 # existing file should shrink over time.
 
                 (seg_offset, seg_data) = self.__segments[seg_index]
                 # seg_len = len(seg_data)
-                
-                # If our data is to be written into the middle of the segment, 
-                # split the segment such that the unnecessary prefixing bytes are 
+
+                # If our data is to be written into the middle of the segment,
+                # split the segment such that the unnecessary prefixing bytes are
                 # moved to a new segment preceding the current.
                 if seg_offset < offset:
                     prefix_len = offset - seg_offset
-                    _logger.debug("Splitting-of PREFIX of segment (%d). Prefix "
-                                  "length is (%d). Segment offset is (%d) and "
-                                  "length is (%d).",
-                                  seg_index, prefix_len, seg_offset, 
-                                  len(seg_data))
+                    _logger.debug(
+                        "Splitting-of PREFIX of segment (%d). Prefix "
+                        "length is (%d). Segment offset is (%d) and "
+                        "length is (%d).",
+                        seg_index,
+                        prefix_len,
+                        seg_offset,
+                        len(seg_data),
+                    )
 
-                    (_, (seg_offset, seg_data)) = self.__split(seg_index, 
-                                                               prefix_len)
+                    (_, (seg_offset, seg_data)) = self.__split(seg_index, prefix_len)
 
                     # seg_len = prefix_len
                     seg_index += 1
 
-                # Now, apply the update. Collect the number of segments that will 
-                # be affected, and reduce to two (at most): the data that we're 
-                # applying, and the second part of the last affected one (if 
-                # applicable). If the incoming data exceeds the length of the 
+                # Now, apply the update. Collect the number of segments that will
+                # be affected, and reduce to two (at most): the data that we're
+                # applying, and the second part of the last affected one (if
+                # applicable). If the incoming data exceeds the length of the
                 # existing data, it is a trivial consideration.
 
                 stop_offset = offset + data_len
                 seg_stop = seg_index
                 while 1:
-                    # Since the insertion offset must be within the given data 
-                    # (otherwise it'd be an append, above), it looks like we're 
+                    # Since the insertion offset must be within the given data
+                    # (otherwise it'd be an append, above), it looks like we're
                     # inserting into the last segment.
                     if seg_stop >= len(self.__segments):
                         break
-                
+
                     # If our offset is within the current set of data, this is not
                     # going to be an append operation.
                     if self.__segments[seg_stop][0] >= stop_offset:
                         break
-                    
+
                     seg_stop += 1
 
                 seg_stop -= 1
 
-# TODO: Make sure that updates applied at the front of a segment are correct.
+                # TODO: Make sure that updates applied at the front of a segment are correct.
 
-                _logger.debug("Replacement interval is [%d, %d]. Current "
-                              "segments= (%d)",
-                              seg_index, seg_stop, len(self.__segments))
+                _logger.debug(
+                    "Replacement interval is [%d, %d]. Current " "segments= (%d)",
+                    seg_index,
+                    seg_stop,
+                    len(self.__segments),
+                )
 
                 # How much of the last segment that we touch will be affected?
-                (lastseg_offset, lastseg_data) = self.__segments[seg_stop] 
+                (lastseg_offset, lastseg_data) = self.__segments[seg_stop]
 
                 lastseg_len = len(lastseg_data)
                 affected_len = (offset + data_len) - lastseg_offset
                 if affected_len > 0 and affected_len < lastseg_len:
-                    _logger.debug("Splitting-of suffix of segment (%d). "
-                                  "Suffix length is (%d). Segment offset "
-                                  "is (%d) and length is (%d).",
-                                  seg_stop, lastseg_len - affected_len, 
-                                  lastseg_offset, lastseg_len)
+                    _logger.debug(
+                        "Splitting-of suffix of segment (%d). "
+                        "Suffix length is (%d). Segment offset "
+                        "is (%d) and length is (%d).",
+                        seg_stop,
+                        lastseg_len - affected_len,
+                        lastseg_offset,
+                        lastseg_len,
+                    )
 
                     self.__split(seg_stop, affected_len)
 
-                # We now have a distinct range of segments to replace with the new 
+                # We now have a distinct range of segments to replace with the new
                 # data. We are implicitly accounting for the situation in which our
                 # data is longer than the remaining number of bytes in the file.
 
-                _logger.debug("Replacing segment(s) (%d)->(%d) with new "
-                              "segment having offset (%d) and length "
-                              "(%d).", 
-                              seg_index, seg_stop + 1, seg_offset, len(data))
+                _logger.debug(
+                    "Replacing segment(s) (%d)->(%d) with new "
+                    "segment having offset (%d) and length "
+                    "(%d).",
+                    seg_index,
+                    seg_stop + 1,
+                    seg_offset,
+                    len(data),
+                )
 
-                self.__segments[seg_index:seg_stop + 1] = [(seg_offset, data)]
+                self.__segments[seg_index : seg_stop + 1] = [(seg_offset, data)]
             else:
                 self.__segments.append((offset, data))
 

          
@@ 187,8 209,12 @@ class BufferSegments:
         """
 
         with self.__locker:
-            _logger.debug("Reading at offset (%d) for length [%s]. Total "
-                          "length is [%s].", offset, length, self.length)
+            _logger.debug(
+                "Reading at offset (%d) for length [%s]. Total " "length is [%s].",
+                offset,
+                length,
+                self.length,
+            )
 
             if length is None:
                 length = self.length

          
@@ 198,7 224,7 @@ class BufferSegments:
 
             boundary_offset = offset + length
 
-            # The WHILE condition should only catch if the given length exceeds 
+            # The WHILE condition should only catch if the given length exceeds
             # the actual length. Else, the BREAK should always be sufficient.
             last_segindex = None
             (seg_offset, seg_data, seg_len) = (None, None, None)

          
@@ 211,14 237,19 @@ class BufferSegments:
                 grab_at = current_offset - seg_offset
                 remaining_bytes = boundary_offset - current_offset
 
-                # Determine how many bytes we're looking for, and how many we 
+                # Determine how many bytes we're looking for, and how many we
                 # can get from this segment.
 
-                grab_len = min(remaining_bytes,                         # Number of remaining, requested bytes.
-                               seg_len - (current_offset - seg_offset), # Number of available bytes in segment.
-                               self.__block_size)                       # Maximum block size.
+                grab_len = min(
+                    remaining_bytes,  # Number of remaining, requested bytes.
+                    seg_len
+                    - (
+                        current_offset - seg_offset
+                    ),  # Number of available bytes in segment.
+                    self.__block_size,
+                )  # Maximum block size.
 
-                grabbed = seg_data[grab_at:grab_at + grab_len]
+                grabbed = seg_data[grab_at : grab_at + grab_len]
                 current_offset += grab_len
                 yield grabbed
 

          
@@ 226,7 257,7 @@ class BufferSegments:
                 if current_offset >= boundary_offset:
                     break
 
-                # Are we going to have to read from the next segment, next 
+                # Are we going to have to read from the next segment, next
                 # time?
                 if current_offset >= (seg_offset + seg_len):
                     current_segindex += 1

          
@@ 238,4 269,3 @@ class BufferSegments:
 
         last_segment = self.__segments[-1]
         return last_segment[0] + len(last_segment[1])
-

          
M gdrivefs/cache_agent.py +86 -70
@@ 13,21 13,23 @@ import time
 class CacheAgent:
     """A particular namespace within the cache."""
 
-    registry        = None
-    resource_name   = None
-    max_age         = None
+    registry = None
+    resource_name = None
+    max_age = None
 
-    fault_handler       = None
-    cleanup_pretrigger  = None
+    fault_handler = None
+    cleanup_pretrigger = None
 
-    report              = None
-    report_source_name  = None
+    report = None
+    report_source_name = None
 
-    def __init__(self, resource_name, max_age, fault_handler=None, 
-                 cleanup_pretrigger=None):
-        _logger.debug("CacheAgent(%s,%s,%s,%s)" % (resource_name, max_age, 
-                                                   type(fault_handler), 
-                                                   cleanup_pretrigger))
+    def __init__(
+        self, resource_name, max_age, fault_handler=None, cleanup_pretrigger=None
+    ):
+        _logger.debug(
+            "CacheAgent(%s,%s,%s,%s)"
+            % (resource_name, max_age, type(fault_handler), cleanup_pretrigger)
+        )
 
         self.registry = CacheRegistry.get_instance(resource_name)
         self.resource_name = resource_name

          
@@ 36,8 38,8 @@ class CacheAgent:
         self.fault_handler = fault_handler
         self.cleanup_pretrigger = cleanup_pretrigger
 
-#        self.report = Report.get_instance()
-#        self.report_source_name = ("cache-%s" % (self.resource_name))
+        #        self.report = Report.get_instance()
+        #        self.report_source_name = ("cache-%s" % (self.resource_name))
 
         self.__t = None
         self.__t_quit_ev = threading.Event()

          
@@ 47,70 49,78 @@ class CacheAgent:
     def __del__(self):
         self.__stop_cleanup()
 
-# TODO(dustin): Currently disabled. The system doesn't rely on it, and it's 
-#               just another thread that unnecessarily runs, and trips up our 
-#               ability to test individual components in simple isolation. It
-#               needs to be refactored.
-#
-#               We'd like to either refactor into a multiprocessing worker, or
-#               just send to statsd (which would be kindof cool).
-#        self.__post_status()
+    # TODO(dustin): Currently disabled. The system doesn't rely on it, and it's
+    #               just another thread that unnecessarily runs, and trips up our
+    #               ability to test individual components in simple isolation. It
+    #               needs to be refactored.
+    #
+    #               We'd like to either refactor into a multiprocessing worker, or
+    #               just send to statsd (which would be kindof cool).
+    #        self.__post_status()
 
-#    def __del__(self):
-#
-#        if self.report.is_source(self.report_source_name):
-#            self.report.remove_all_values(self.report_source_name)
-#        pass
+    #    def __del__(self):
+    #
+    #        if self.report.is_source(self.report_source_name):
+    #            self.report.remove_all_values(self.report_source_name)
+    #        pass
 
-#    def __post_status(self):
-#        """Send the current status to our reporting tool."""
-#
-#        num_values = self.registry.count(self.resource_name)
-#
-#        self.report.set_values(self.report_source_name, 'count', 
-#                               num_values)
-#
-#        status_post_interval_s = Conf.get('cache_status_post_frequency_s')
-#        status_timer = Timer(status_post_interval_s, self.__post_status)
-#
-#        Timers.get_instance().register_timer('status', status_timer)
+    #    def __post_status(self):
+    #        """Send the current status to our reporting tool."""
+    #
+    #        num_values = self.registry.count(self.resource_name)
+    #
+    #        self.report.set_values(self.report_source_name, 'count',
+    #                               num_values)
+    #
+    #        status_post_interval_s = Conf.get('cache_status_post_frequency_s')
+    #        status_timer = Timer(status_post_interval_s, self.__post_status)
+    #
+    #        Timers.get_instance().register_timer('status', status_timer)
 
     def __cleanup(self):
-        """Scan the current cache and determine items old-enough to be 
+        """Scan the current cache and determine items old-enough to be
         removed.
         """
 
-        cleanup_interval_s = Conf.get('cache_cleanup_check_frequency_s')
+        cleanup_interval_s = Conf.get("cache_cleanup_check_frequency_s")
 
         _logger.info("Cache-cleanup thread running: %s", self)
 
-        while self.__t_quit_ev.is_set() is False and \
-                  gdrivefs.state.GLOBAL_EXIT_EVENT.is_set() is False:
-            _logger.debug("Doing clean-up for cache resource with name [%s]." % 
-                          (self.resource_name))
+        while (
+            self.__t_quit_ev.is_set() is False
+            and gdrivefs.state.GLOBAL_EXIT_EVENT.is_set() is False
+        ):
+            _logger.debug(
+                "Doing clean-up for cache resource with name [%s]."
+                % (self.resource_name)
+            )
 
             cache_dict = self.registry.list_raw(self.resource_name)
 
             # total_keys = [ (key, value_tuple[1]) for key, value_tuple \
             #                    in cache_dict.iteritems() ]
 
-            cleanup_keys = [ key for key, value_tuple \
-                                in list(cache_dict.items()) \
-                                if (datetime.datetime.now() - value_tuple[1]).seconds > \
-                                        self.max_age ]
+            cleanup_keys = [
+                key
+                for key, value_tuple in list(cache_dict.items())
+                if (datetime.datetime.now() - value_tuple[1]).seconds > self.max_age
+            ]
 
-            _logger.debug("Found (%d) entries to clean-up from entry-cache." % 
-                          (len(cleanup_keys)))
+            _logger.debug(
+                "Found (%d) entries to clean-up from entry-cache." % (len(cleanup_keys))
+            )
 
             if cleanup_keys:
                 for key in cleanup_keys:
-                    _logger.debug("Cache entry [%s] under resource-name [%s] "
-                                  "will be cleaned-up." % 
-                                  (key, self.resource_name))
+                    _logger.debug(
+                        "Cache entry [%s] under resource-name [%s] "
+                        "will be cleaned-up." % (key, self.resource_name)
+                    )
 
                     if self.exists(key, no_fault_check=True) == False:
-                        _logger.debug("Entry with ID [%s] has already been "
-                                      "cleaned-up." % (key))
+                        _logger.debug(
+                            "Entry with ID [%s] has already been " "cleaned-up." % (key)
+                        )
                     else:
                         self.remove(key)
             else:

          
@@ 139,11 149,11 @@ class CacheAgent:
     def remove(self, key):
         _logger.debug("CacheAgent.remove(%s)" % (key))
 
-        return self.registry.remove(self.resource_name, 
-                                    key, 
-                                    cleanup_pretrigger=self.cleanup_pretrigger)
+        return self.registry.remove(
+            self.resource_name, key, cleanup_pretrigger=self.cleanup_pretrigger
+        )
 
-    def get(self, key, handle_fault = None):
+    def get(self, key, handle_fault=None):
 
         if handle_fault == None:
             handle_fault = True

          
@@ 151,13 161,16 @@ class CacheAgent:
         _logger.debug("CacheAgent.get(%s)" % (key))
 
         try:
-            result = self.registry.get(self.resource_name, 
-                                       key, 
-                                       max_age=self.max_age, 
-                                       cleanup_pretrigger=self.cleanup_pretrigger)
+            result = self.registry.get(
+                self.resource_name,
+                key,
+                max_age=self.max_age,
+                cleanup_pretrigger=self.cleanup_pretrigger,
+            )
         except CacheFault:
-            _logger.debug("There was a cache-miss while requesting item with "
-                          "ID (key).")
+            _logger.debug(
+                "There was a cache-miss while requesting item with " "ID (key)."
+            )
 
             if self.fault_handler == None or not handle_fault:
                 raise

          
@@ 171,10 184,13 @@ class CacheAgent:
     def exists(self, key, no_fault_check=False):
         _logger.debug("CacheAgent.exists(%s)" % (key))
 
-        return self.registry.exists(self.resource_name, key, 
-                                    max_age=self.max_age,
-                                    cleanup_pretrigger=self.cleanup_pretrigger,
-                                    no_fault_check=no_fault_check)
+        return self.registry.exists(
+            self.resource_name,
+            key,
+            max_age=self.max_age,
+            cleanup_pretrigger=self.cleanup_pretrigger,
+            no_fault_check=no_fault_check,
+        )
 
     def __getitem__(self, key):
         return self.get(key)

          
M gdrivefs/cache_registry.py +47 -38
@@ 16,26 16,25 @@ class CacheRegistry:
     __rlock = RLock()
 
     def __init__(self):
-        self.__cache = { }
+        self.__cache = {}
 
     @staticmethod
     def get_instance(resource_name):
-    
+
         with CacheRegistry.__rlock:
             try:
-                CacheRegistry.__instance;
+                CacheRegistry.__instance
             except:
                 CacheRegistry.__instance = CacheRegistry()
 
             if resource_name not in CacheRegistry.__instance.__cache:
-                CacheRegistry.__instance.__cache[resource_name] = { }
+                CacheRegistry.__instance.__cache[resource_name] = {}
 
         return CacheRegistry.__instance
 
     def set(self, resource_name, key, value):
 
-        _logger.debug("CacheRegistry.set(%s,%s,%s)" % 
-                      (resource_name, key, value))
+        _logger.debug("CacheRegistry.set(%s,%s,%s)" % (resource_name, key, value))
 
         with CacheRegistry.__rlock:
             try:

          
@@ 49,28 48,28 @@ class CacheRegistry:
 
     def remove(self, resource_name, key, cleanup_pretrigger=None):
 
-        _logger.debug("CacheRegistry.remove(%s,%s,%s)" % 
-                      (resource_name, key, type(cleanup_pretrigger)))
+        _logger.debug(
+            "CacheRegistry.remove(%s,%s,%s)"
+            % (resource_name, key, type(cleanup_pretrigger))
+        )
 
         with CacheRegistry.__rlock:
             old_tuple = self.__cache[resource_name][key]
 
             self.__cleanup_entry(
-                resource_name, 
-                key, 
-                True, 
-                cleanup_pretrigger=cleanup_pretrigger)
+                resource_name, key, True, cleanup_pretrigger=cleanup_pretrigger
+            )
 
         return old_tuple[0]
 
     def get(self, resource_name, key, max_age, cleanup_pretrigger=None):
-        
-        trigger_given_phrase = ('None' 
-                                if cleanup_pretrigger == None 
-                                else '<given>')
+
+        trigger_given_phrase = "None" if cleanup_pretrigger == None else "<given>"
 
-        _logger.debug("CacheRegistry.get(%s,%s,%s,%s)" % 
-                      (resource_name, key, max_age, trigger_given_phrase))
+        _logger.debug(
+            "CacheRegistry.get(%s,%s,%s,%s)"
+            % (resource_name, key, max_age, trigger_given_phrase)
+        )
 
         with CacheRegistry.__rlock:
             try:

          
@@ 78,37 77,44 @@ class CacheRegistry:
             except:
                 raise CacheFault("NonExist")
 
-            if max_age != None and \
-               (datetime.now() - timestamp).seconds > max_age:
-                self.__cleanup_entry(resource_name, key, False, 
-                                     cleanup_pretrigger=cleanup_pretrigger)
+            if max_age != None and (datetime.now() - timestamp).seconds > max_age:
+                self.__cleanup_entry(
+                    resource_name, key, False, cleanup_pretrigger=cleanup_pretrigger
+                )
                 raise CacheFault("Stale")
 
         return value
 
     def list_raw(self, resource_name):
-        
+
         _logger.debug("CacheRegistry.list(%s)" % (resource_name))
 
         with CacheRegistry.__rlock:
             return self.__cache[resource_name]
 
-    def exists(self, resource_name, key, max_age, cleanup_pretrigger=None, 
-               no_fault_check=False):
+    def exists(
+        self, resource_name, key, max_age, cleanup_pretrigger=None, no_fault_check=False
+    ):
 
-        _logger.debug("CacheRegistry.exists(%s,%s,%s,%s)" % 
-                      (resource_name, key, max_age, cleanup_pretrigger))
-        
+        _logger.debug(
+            "CacheRegistry.exists(%s,%s,%s,%s)"
+            % (resource_name, key, max_age, cleanup_pretrigger)
+        )
+
         with CacheRegistry.__rlock:
             try:
                 (value, timestamp) = self.__cache[resource_name][key]
             except:
                 return False
 
-            if max_age is not None and not no_fault_check and \
-                    (datetime.now() - timestamp).seconds > max_age:
-                self.__cleanup_entry(resource_name, key, False, 
-                                     cleanup_pretrigger=cleanup_pretrigger)
+            if (
+                max_age is not None
+                and not no_fault_check
+                and (datetime.now() - timestamp).seconds > max_age
+            ):
+                self.__cleanup_entry(
+                    resource_name, key, False, cleanup_pretrigger=cleanup_pretrigger
+                )
                 return False
 
         return True

          
@@ 117,15 123,18 @@ class CacheRegistry:
 
         return len(self.__cache[resource_name])
 
-    def __cleanup_entry(self, resource_name, key, force, 
-                        cleanup_pretrigger=None):
+    def __cleanup_entry(self, resource_name, key, force, cleanup_pretrigger=None):
 
-        _logger.debug("Doing clean-up for resource_name [%s] and key "
-                      "[%s]." % (resource_name, key))
+        _logger.debug(
+            "Doing clean-up for resource_name [%s] and key "
+            "[%s]." % (resource_name, key)
+        )
 
         if cleanup_pretrigger is not None:
-            _logger.debug("Running pre-cleanup trigger for resource_name "
-                          "[%s] and key [%s]." % (resource_name, key))
+            _logger.debug(
+                "Running pre-cleanup trigger for resource_name "
+                "[%s] and key [%s]." % (resource_name, key)
+            )
 
             cleanup_pretrigger(resource_name, key, force)
 

          
M gdrivefs/cacheclient_base.py +17 -19
@@ 5,19 5,15 @@ import logging
 
 
 class CacheClientBase:
-    """Meant to be inherited by a class. Is used to configure a particular 
+    """Meant to be inherited by a class. Is used to configure a particular
     namespace within the cache.
     """
 
-
-
-# TODO(dustin): This is a terrible object, and needs to be refactored. It 
-#               doesn't provide any way to cleanup itself or CacheAgent, or any 
-#               way to invoke a singleton of CacheAgent whose thread we can 
-#               easier start or stop. Since this larger *wraps* CacheAgent, we 
-#               might just dispose of it.
-
-
+    # TODO(dustin): This is a terrible object, and needs to be refactored. It
+    #               doesn't provide any way to cleanup itself or CacheAgent, or any
+    #               way to invoke a singleton of CacheAgent whose thread we can
+    #               easier start or stop. Since this larger *wraps* CacheAgent, we
+    #               might just dispose of it.
 
     @property
     def cache(self):

          
@@ 26,16 22,19 @@ class CacheClientBase:
         except:
             pass
 
-        self.__cache = CacheAgent(self.child_type, self.max_age, 
-                                 fault_handler=self.fault_handler, 
-                                 cleanup_pretrigger=self.cleanup_pretrigger)
+        self.__cache = CacheAgent(
+            self.child_type,
+            self.max_age,
+            fault_handler=self.fault_handler,
+            cleanup_pretrigger=self.cleanup_pretrigger,
+        )
 
         return self.__cache
 
     def __init__(self):
         child_type = self.__class__.__bases__[0].__name__
         max_age = self.get_max_cache_age_seconds()
-        
+
         _logger.debug("CacheClientBase(%s,%s)" % (child_type, max_age))
 
         self.child_type = child_type

          
@@ 56,8 55,9 @@ class CacheClientBase:
         pass
 
     def get_max_cache_age_seconds(self):
-        raise NotImplementedError("get_max_cache_age() must be implemented in "
-                                  "the CacheClientBase child.")
+        raise NotImplementedError(
+            "get_max_cache_age() must be implemented in " "the CacheClientBase child."
+        )
 
     @classmethod
     def get_instance(cls):

          
@@ 70,12 70,10 @@ class CacheClientBase:
         try:
             CacheClientBase.__instances
         except:
-            CacheClientBase.__instances = { }
+            CacheClientBase.__instances = {}
 
         try:
             return CacheClientBase.__instances[class_name]
         except:
             CacheClientBase.__instances[class_name] = cls()
             return CacheClientBase.__instances[class_name]
-
-

          
M gdrivefs/change.py +54 -35
@@ 14,8 14,7 @@ import time
 class _ChangeManager:
     def __init__(self):
         self.at_change_id = AccountInfo.get_instance().largest_change_id
-        _logger.debug("Latest change-ID at startup is (%d)." % 
-                      (self.at_change_id))
+        _logger.debug("Latest change-ID at startup is (%d)." % (self.at_change_id))
 
         self.__t = None
         self.__t_quit_ev = threading.Event()

          
@@ 33,30 32,35 @@ class _ChangeManager:
     def __check_changes(self):
         _logger.info("Change-processing thread running.")
 
-        interval_s = Conf.get('change_check_frequency_s')
+        interval_s = Conf.get("change_check_frequency_s")
         cm = get_change_manager()
 
-        while self.__t_quit_ev.is_set() is False and \
-                gdrivefs.state.GLOBAL_EXIT_EVENT.is_set() is False:
+        while (
+            self.__t_quit_ev.is_set() is False
+            and gdrivefs.state.GLOBAL_EXIT_EVENT.is_set() is False
+        ):
             _logger.debug("Checking for changes.")
 
             try:
                 is_done = cm.process_updates()
             except:
-                _logger.exception("Squelching an exception that occurred "
-                                  "while reading/processing changes.")
+                _logger.exception(
+                    "Squelching an exception that occurred "
+                    "while reading/processing changes."
+                )
 
                 # Force another check, soon.
                 is_done = False
 
-            # If there are still more changes, take them as quickly as 
+            # If there are still more changes, take them as quickly as
             # possible.
             if is_done is True:
                 _logger.debug("No more changes. Waiting.")
                 time.sleep(interval_s)
             else:
-                _logger.debug("There are more changes to be applied. Cycling "
-                              "immediately.")
+                _logger.debug(
+                    "There are more changes to be applied. Cycling " "immediately."
+                )
 
         _logger.info("Change-processing thread terminating.")
 

          
@@ 76,64 80,76 @@ class _ChangeManager:
         """Process any changes to our files. Return True if everything is up to
         date or False if we need to be run again.
         """
-# TODO(dustin): Reimplement using the "watch" interface. We'll have to find 
-#               more documentation:
-#
-#               https://developers.google.com/drive/v2/reference/changes/watch
-#
-        start_at_id = (self.at_change_id + 1)
+        # TODO(dustin): Reimplement using the "watch" interface. We'll have to find
+        #               more documentation:
+        #
+        #               https://developers.google.com/drive/v2/reference/changes/watch
+        #
+        start_at_id = self.at_change_id + 1
 
         gd = get_gdrive()
         result = gd.list_changes(start_change_id=start_at_id)
 
         (largest_change_id, next_page_token, changes) = result
 
-        _logger.debug("The latest reported change-ID is (%d) and we're "
-                      "currently at change-ID (%d).",
-                      largest_change_id, self.at_change_id)
+        _logger.debug(
+            "The latest reported change-ID is (%d) and we're "
+            "currently at change-ID (%d).",
+            largest_change_id,
+            self.at_change_id,
+        )
 
         _logger.info("(%d) changes will now be applied." % (len(changes)))
 
         for change_id, change_tuple in changes:
-            # Apply the changes. We expect to be running them from oldest to 
+            # Apply the changes. We expect to be running them from oldest to
             # newest.
 
-            _logger.info("========== Change with ID (%d) will now be applied. ==========" %
-                            (change_id))
+            _logger.info(
+                "========== Change with ID (%d) will now be applied. =========="
+                % (change_id)
+            )
 
             try:
                 self.__apply_change(change_id, change_tuple)
             except:
-                _logger.exception("There was a problem while processing change"
-                                  " with ID (%d). No more changes will be "
-                                  "applied." % (change_id))
+                _logger.exception(
+                    "There was a problem while processing change"
+                    " with ID (%d). No more changes will be "
+                    "applied." % (change_id)
+                )
                 return False
 
             self.at_change_id = change_id
 
-        return (next_page_token is None)
+        return next_page_token is None
 
     def __apply_change(self, change_id, change_tuple):
-        """Apply changes to our filesystem reported by GD. All we do is remove 
-        the current record components, if it's valid, and then reload it with 
+        """Apply changes to our filesystem reported by GD. All we do is remove
+        the current record components, if it's valid, and then reload it with
         what we were given. Note that since we don't necessarily know
         about the entries that have been changed, this also allows us to slowly
-        increase our knowledge of the filesystem (of, obviously, only those 
+        increase our knowledge of the filesystem (of, obviously, only those
         things that change).
         """
 
         (entry_id, was_deleted, entry) = change_tuple
-        
+
         is_visible = entry.is_visible if entry else None
 
-        _logger.info("Applying change with change-ID (%d), entry-ID [%s], "
-                     "and is-visible of [%s]",
-                     change_id, entry_id, is_visible)
+        _logger.info(
+            "Applying change with change-ID (%d), entry-ID [%s], "
+            "and is-visible of [%s]",
+            change_id,
+            entry_id,
+            is_visible,
+        )
 
         # First, remove any current knowledge from the system.
 
-        _logger.debug("Removing all trace of entry with ID [%s] "
-                      "(apply_change).", entry_id)
+        _logger.debug(
+            "Removing all trace of entry with ID [%s] " "(apply_change).", entry_id
+        )
 
         PathRelations.get_instance().remove_entry_all(entry_id)
 

          
@@ 145,7 161,10 @@ class _ChangeManager:
             path_relations = PathRelations.get_instance()
             path_relations.register_entry(entry)
 
+
 _instance = None
+
+
 def get_change_manager():
     global _instance
 

          
M gdrivefs/chunked_download.py +53 -42
@@ 4,6 4,7 @@ import gdrivefs.config
 import logging
 import random
 import time
+
 try:
     from oauth2client import util
 except ImportError:

          
@@ 17,7 18,7 @@ DEFAULT_CHUNK_SIZE = 1024 * 512
 
 
 class ChunkedDownload:
-    """"Download an entry, chunk by chunk. This code is mostly identical to
+    """ "Download an entry, chunk by chunk. This code is mostly identical to
     MediaIoBaseDownload, which couldn't be used because we have a specific URL
     that needs to be downloaded (not a request object, which doesn't apply here).
     """

          
@@ 67,20 68,25 @@ class ChunkedDownload:
         """
 
         headers = {
-            'range': 'bytes=%d-%d' % (
-                self._progress, self._progress + self._chunksize)
-            }
+            "range": "bytes=%d-%d" % (self._progress, self._progress + self._chunksize)
+        }
 
         for retry_num in range(num_retries + 1):
-            _logger.debug("Attempting to read chunk. ATTEMPT=(%d)/(%d)", 
-                          retry_num + 1, num_retries + 1)
+            _logger.debug(
+                "Attempting to read chunk. ATTEMPT=(%d)/(%d)",
+                retry_num + 1,
+                num_retries + 1,
+            )
 
             resp, content = self._http.request(self._uri, headers=headers)
             if retry_num > 0:
-                self._sleep(self._rand() * 2**retry_num)
-                _logger.warning("Retry #%d for media download: GET %s, "
-                                "following status: %d", 
-                                retry_num, self._uri, resp.status)
+                self._sleep(self._rand() * 2 ** retry_num)
+                _logger.warning(
+                    "Retry #%d for media download: GET %s, " "following status: %d",
+                    retry_num,
+                    self._uri,
+                    resp.status,
+                )
 
             if resp.status < 500:
                 break

          
@@ 89,8 95,8 @@ class ChunkedDownload:
 
         if resp.status in [200, 206]:
             try:
-                if resp['content-location'] != self._uri:
-                    self._uri = resp['content-location']
+                if resp["content-location"] != self._uri:
+                    self._uri = resp["content-location"]
             except KeyError:
                 pass
 

          
@@ 98,55 104,60 @@ class ChunkedDownload:
             self._progress += received_size_b
             self._fd.write(content)
 
-            # This seems to be the most correct method to get the filesize, but 
+            # This seems to be the most correct method to get the filesize, but
             # we've seen it not exist.
-            if 'content-range' in resp:
+            if "content-range" in resp:
                 if self._total_size is None:
-                    content_range = resp['content-range']
-                    length = content_range.rsplit('/', 1)[1]
+                    content_range = resp["content-range"]
+                    length = content_range.rsplit("/", 1)[1]
                     length = int(length)
 
                     self._total_size = length
 
-                    _logger.debug("Received download size (content-range): "
-                                  "(%d)", self._total_size)
+                    _logger.debug(
+                        "Received download size (content-range): " "(%d)",
+                        self._total_size,
+                    )
 
             # There's a chance that "content-range" will be omitted for zero-
-            # length files (or maybe files that are complete within the first 
+            # length files (or maybe files that are complete within the first
             # chunk).
 
             else:
-# TODO(dustin): Is this a valid assumption, or should it be an error?
-                _logger.warning("No 'content-range' found in response. "
-                                "Assuming that we've received all data.")
+                # TODO(dustin): Is this a valid assumption, or should it be an error?
+                _logger.warning(
+                    "No 'content-range' found in response. "
+                    "Assuming that we've received all data."
+                )
 
                 self._total_size = received_size_b
 
-# TODO(dustin): We were using this for a while, but it appears to be no larger 
-#               then a single chunk.
-#
-#            # This method doesn't seem documented, but we've seen cases where 
-#            # this is available, but "content-range" isn't.
-#            if 'content-length' in resp:
-#                self._total_size = int(resp['content-length'])
-#
-#                _logger.debug("Received download size (content-length): "
-#                              "(%d)", self._total_size)
+            # TODO(dustin): We were using this for a while, but it appears to be no larger
+            #               then a single chunk.
+            #
+            #            # This method doesn't seem documented, but we've seen cases where
+            #            # this is available, but "content-range" isn't.
+            #            if 'content-length' in resp:
+            #                self._total_size = int(resp['content-length'])
+            #
+            #                _logger.debug("Received download size (content-length): "
+            #                              "(%d)", self._total_size)
 
+            assert self._total_size is not None, "File-size was not provided."
 
-            assert self._total_size is not None, \
-                   "File-size was not provided."
-
-            _logger.debug("Checking if done. PROGRESS=(%d) TOTAL-SIZE=(%d)", 
-                          self._progress, self._total_size)
+            _logger.debug(
+                "Checking if done. PROGRESS=(%d) TOTAL-SIZE=(%d)",
+                self._progress,
+                self._total_size,
+            )
 
             if self._progress == self._total_size:
                 self._done = True
 
-            return (apiclient.http.MediaDownloadProgress(
-                        self._progress, 
-                        self._total_size), \
-                    self._done, \
-                    self._total_size)
+            return (
+                apiclient.http.MediaDownloadProgress(self._progress, self._total_size),
+                self._done,
+                self._total_size,
+            )
         else:
             raise apiclient.errors.HttpError(resp, content, uri=self._uri)

          
M gdrivefs/conf.py +29 -27
@@ 11,36 11,38 @@ class Conf:
     """Manages options."""
 
     api_credentials = {
-        "web": { "client_id": "1004122597540-ne2btnejcbr319ukdh9soke1rrldl27f.apps.googleusercontent.com",
-                 "client_secret": "TwkunuaxFi9IMs218VkJEkCX",
-                 "redirect_uris": [],
-                 "auth_uri": "https://accounts.google.com/o/oauth2/auth",
-                 "token_uri": "https://accounts.google.com/o/oauth2/token"
-               }}
+        "web": {
+            "client_id": "1004122597540-ne2btnejcbr319ukdh9soke1rrldl27f.apps.googleusercontent.com",
+            "client_secret": "TwkunuaxFi9IMs218VkJEkCX",
+            "redirect_uris": [],
+            "auth_uri": "https://accounts.google.com/o/oauth2/auth",
+            "token_uri": "https://accounts.google.com/o/oauth2/token",
+        }
+    }
 
-    auth_cache_filepath                 = None
-#    gd_to_normal_mapping_filepath       = '/etc/gdfs/mime_mapping.json'
-    extension_mapping_filepath          = '/etc/gdfs/extension_mapping.json'
-    query_decay_intermed_prefix_length  = 7
-    file_jobthread_max_idle_time        = 60
-    file_chunk_size_kb                  = 1024
-    file_download_temp_max_age_s        = 86400
-    change_check_frequency_s            = 3
-    hidden_flags_list_local             = ['trashed', 'restricted']
-    hidden_flags_list_remote            = ['trashed']
-    cache_cleanup_check_frequency_s     = 60
-    cache_entries_max_age               = 8 * 60 * 60
-    cache_status_post_frequency_s       = 10
+    auth_cache_filepath = None
+    #    gd_to_normal_mapping_filepath       = '/etc/gdfs/mime_mapping.json'
+    extension_mapping_filepath = "/etc/gdfs/extension_mapping.json"
+    query_decay_intermed_prefix_length = 7
+    file_jobthread_max_idle_time = 60
+    file_chunk_size_kb = 1024
+    file_download_temp_max_age_s = 86400
+    change_check_frequency_s = 3
+    hidden_flags_list_local = ["trashed", "restricted"]
+    hidden_flags_list_remote = ["trashed"]
+    cache_cleanup_check_frequency_s = 60
+    cache_entries_max_age = 8 * 60 * 60
+    cache_status_post_frequency_s = 10
 
-# Deimplementing report functionality.
-#    report_emit_frequency_s             = 60
+    # Deimplementing report functionality.
+    #    report_emit_frequency_s             = 60
 
-    google_discovery_service_url        = DISCOVERY_URI
-    default_buffer_read_blocksize       = 65536
-    directory_mimetype                  = 'application/vnd.google-apps.folder'
-    default_perm_folder                 = '777'
-    default_perm_file_editable          = '666'
-    default_perm_file_noneditable       = '444'
+    google_discovery_service_url = DISCOVERY_URI
+    default_buffer_read_blocksize = 65536
+    directory_mimetype = "application/vnd.google-apps.folder"
+    default_perm_folder = "777"
+    default_perm_file_editable = "666"
+    default_perm_file_noneditable = "444"
 
     # How many extra entries to retrieve when an entry is accessed that is not
     # currently cached.

          
M gdrivefs/config/__init__.py +5 -5
@@ 1,7 1,7 @@ 
 import os
 
-IS_DEBUG = bool(int(os.environ.get('GD_DEBUG', '0')))
-NO_THREADS = bool(int(os.environ.get('GD_NOTHREADS', '0')))
-DO_LOG_FUSE_MESSAGES = bool(int(os.environ.get('GD_DO_LOG_FUSE_MESSAGES', '0')))
-DEFAULT_CREDENTIALS_FILEPATH = os.path.expandvars('$HOME/.gdfs/creds')
-DEFAULT_RETRIES = int(os.environ.get('GD_RETRIES', '3'))
+IS_DEBUG = bool(int(os.environ.get("GD_DEBUG", "0")))
+NO_THREADS = bool(int(os.environ.get("GD_NOTHREADS", "0")))
+DO_LOG_FUSE_MESSAGES = bool(int(os.environ.get("GD_DO_LOG_FUSE_MESSAGES", "0")))
+DEFAULT_CREDENTIALS_FILEPATH = os.path.expandvars("$HOME/.gdfs/creds")
+DEFAULT_RETRIES = int(os.environ.get("GD_RETRIES", "3"))

          
M gdrivefs/config/changes.py +1 -1
@@ 1,3 1,3 @@ 
 import os
 
-MONITOR_CHANGES = bool(int(os.environ.get('GD_MONITOR_CHANGES', '1')))
+MONITOR_CHANGES = bool(int(os.environ.get("GD_MONITOR_CHANGES", "1")))

          
M gdrivefs/config/log.py +9 -4
@@ 5,6 5,7 @@ import gdrivefs.config
 
 logger = logging.getLogger()
 
+
 def configure(is_debug=gdrivefs.config.IS_DEBUG):
     if is_debug:
         logger.setLevel(logging.DEBUG)

          
@@ 14,20 15,24 @@ def configure(is_debug=gdrivefs.config.I
     def _configure_syslog():
         facility = logging.handlers.SysLogHandler.LOG_LOCAL0
         sh = logging.handlers.SysLogHandler(facility=facility)
-        formatter = logging.Formatter('GD: %(name)-12s %(levelname)-7s %(message)s')
+        formatter = logging.Formatter("GD: %(name)-12s %(levelname)-7s %(message)s")
         sh.setFormatter(formatter)
         logger.addHandler(sh)
 
     def _configure_file():
-        filepath = os.environ.get('GD_LOG_FILEPATH', '/tmp/gdrivefs.log')
+        filepath = os.environ.get("GD_LOG_FILEPATH", "/tmp/gdrivefs.log")
         fh = logging.FileHandler(filepath)
-        formatter = logging.Formatter('%(asctime)s [%(name)s %(levelname)s] %(message)s')
+        formatter = logging.Formatter(
+            "%(asctime)s [%(name)s %(levelname)s] %(message)s"
+        )
         fh.setFormatter(formatter)
         logger.addHandler(fh)
 
     def _configure_console():
         sh = logging.StreamHandler()
-        formatter = logging.Formatter('%(asctime)s [%(name)s %(levelname)s] %(message)s')
+        formatter = logging.Formatter(
+            "%(asctime)s [%(name)s %(levelname)s] %(message)s"
+        )
         sh.setFormatter(formatter)
         logger.addHandler(sh)
 

          
M gdrivefs/constants.py +1 -2
@@ 1,2 1,1 @@ 
-OCTET_STREAM_MIMETYPE = 'application/octet-stream'
-
+OCTET_STREAM_MIMETYPE = "application/octet-stream"

          
M gdrivefs/displaced_file.py +27 -24
@@ 13,8 13,9 @@ class DisplacedFile:
     file_size = 1000
 
     def __init__(self, normalized_entry):
-        assert issubclass(normalized_entry.__class__, NormalEntry) is True, \
-               "DisplacedFile can not wrap a non-NormalEntry object."
+        assert (
+            issubclass(normalized_entry.__class__, NormalEntry) is True
+        ), "DisplacedFile can not wrap a non-NormalEntry object."
 
         self.__normalized_entry = normalized_entry
         self.__filepath = tempfile.NamedTemporaryFile(delete=False).name

          
@@ 23,48 24,50 @@ class DisplacedFile:
         os.unlink(self.__filepath)
 
     def deposit_file(self, mime_type):
-        """Write the file to a temporary path, and present a stub (JSON) to the 
-        user. This is the only way of getting files that don't have a 
+        """Write the file to a temporary path, and present a stub (JSON) to the
+        user. This is the only way of getting files that don't have a
         well-defined filesize without providing a type, ahead of time.
         """
 
         gd = get_gdrive()
 
         result = gd.download_to_local(
-                    self.__filepath, 
-                    self.__normalized_entry,
-                    mime_type)
+            self.__filepath, self.__normalized_entry, mime_type
+        )
 
         (length, cache_fault) = result
 
-        _logger.debug("Displaced entry [%s] deposited to [%s] with length "
-                      "(%d).", self.__normalized_entry, self.__filepath, length)
+        _logger.debug(
+            "Displaced entry [%s] deposited to [%s] with length " "(%d).",
+            self.__normalized_entry,
+            self.__filepath,
+            length,
+        )
 
         return self.get_stub(mime_type, length, self.__filepath)
 
     def get_stub(self, mime_type, file_size=0, file_path=None):
         """Return the content for an info ("stub") file."""
 
-        if file_size == 0 and \
-           self.__normalized_entry.requires_displaceable is False:
+        if file_size == 0 and self.__normalized_entry.requires_displaceable is False:
             file_size = self.__normalized_entry.file_size
 
         stub_data = {
-                'EntryId':              self.__normalized_entry.id,
-                'OriginalMimeType':     self.__normalized_entry.mime_type,
-                'ExportTypes':          self.__normalized_entry.download_types,
-                'Title':                self.__normalized_entry.title,
-                'Labels':               self.__normalized_entry.labels,
-                'FinalMimeType':        mime_type,
-                'Length':               file_size,
-                'RequiresMimeType':     self.__normalized_entry.requires_mimetype,
-                'ImageMediaMetadata':   self.__normalized_entry.image_media_metadata
-            }
+            "EntryId": self.__normalized_entry.id,
+            "OriginalMimeType": self.__normalized_entry.mime_type,
+            "ExportTypes": self.__normalized_entry.download_types,
+            "Title": self.__normalized_entry.title,
+            "Labels": self.__normalized_entry.labels,
+            "FinalMimeType": mime_type,
+            "Length": file_size,
+            "RequiresMimeType": self.__normalized_entry.requires_mimetype,
+            "ImageMediaMetadata": self.__normalized_entry.image_media_metadata,
+        }
 
         if file_path:
-            stub_data['FilePath'] = file_path
+            stub_data["FilePath"] = file_path
 
             result = json.dumps(stub_data)
-            padding = (' ' * (self.file_size - len(result) - 1))
+            padding = " " * (self.file_size - len(result) - 1)
 
-            return ("%s%s\n" % (result, padding))
+            return "%s%s\n" % (result, padding)

          
M gdrivefs/drive.py +371 -298
@@ 24,58 24,68 @@ import time
 
 httplib2shim.patch()
 
-_CONF_SERVICE_NAME = 'drive'
-_CONF_SERVICE_VERSION = 'v2'
+_CONF_SERVICE_NAME = "drive"
+_CONF_SERVICE_VERSION = "v2"
 
 _MAX_EMPTY_CHUNKS = 3
 _DEFAULT_UPLOAD_CHUNK_SIZE_B = 1024 * 1024
 
-logging.getLogger('apiclient.discovery').setLevel(logging.WARNING)
+logging.getLogger("apiclient.discovery").setLevel(logging.WARNING)
 
 _logger = logging.getLogger(__name__)
 
+
 def _marshall(f):
-    """A method wrapper that will reauth and/or reattempt where reasonable.
-    """
+    """A method wrapper that will reauth and/or reattempt where reasonable."""
 
     auto_refresh = True
 
     @functools.wraps(f)
     def wrapper(*args, **kwargs):
-        # Now, try to invoke the mechanism. If we succeed, return 
-        # immediately. If we get an authorization-fault (a resolvable 
-        # authorization problem), fall through and attempt to fix it. Allow 
+        # Now, try to invoke the mechanism. If we succeed, return
+        # immediately. If we get an authorization-fault (a resolvable
+        # authorization problem), fall through and attempt to fix it. Allow
         # any other error to bubble up.
-        
+
         for n in range(0, 5):
             try:
                 return f(*args, **kwargs)
             except (ssl.SSLError, http.client.BadStatusLine) as e:
                 # These happen sporadically. Use backoff.
-                _logger.exception("There was a transient connection "
-                                  "error (%s). Trying again [%s]: %s",
-                                  e.__class__.__name__, str(e), n)
+                _logger.exception(
+                    "There was a transient connection "
+                    "error (%s). Trying again [%s]: %s",
+                    e.__class__.__name__,
+                    str(e),
+                    n,
+                )
 
                 time.sleep((2 ** n) + random.randint(0, 1000) / 1000)
             except apiclient.errors.HttpError as e:
-                if e.content == '':
+                if e.content == "":
                     raise
 
                 try:
                     error = json.loads(e.content)
                 except ValueError:
-                    _logger.error("Non-JSON error while doing chunked "
-                                  "download: [%s]", e.content) 
+                    _logger.error(
+                        "Non-JSON error while doing chunked " "download: [%s]",
+                        e.content,
+                    )
                     raise e
 
-                if error.get('code') == 403 and \
-                   error.get('errors')[0].get('reason') \
-                        in ['rateLimitExceeded', 'userRateLimitExceeded']:
+                if error.get("code") == 403 and error.get("errors")[0].get(
+                    "reason"
+                ) in ["rateLimitExceeded", "userRateLimitExceeded"]:
                     # Apply exponential backoff.
-                    _logger.exception("There was a transient HTTP "
-                                      "error (%s). Trying again (%d): "
-                                      "%s",
-                                      e.__class__.__name__, str(e), n)
+                    _logger.exception(
+                        "There was a transient HTTP "
+                        "error (%s). Trying again (%d): "
+                        "%s",
+                        e.__class__.__name__,
+                        str(e),
+                        n,
+                    )
 
                     time.sleep((2 ** n) + random.randint(0, 1000) / 1000)
                 else:

          
@@ 89,16 99,20 @@ def _marshall(f):
 
                 # We had a resolvable authorization problem.
 
-                _logger.info("There was an authorization fault under "
-                             "action [%s]. Attempting refresh.", n)
-                
+                _logger.info(
+                    "There was an authorization fault under "
+                    "action [%s]. Attempting refresh.",
+                    n,
+                )
+
                 authorize = gdrivefs.oauth_authorize.get_auth()
                 authorize.check_credential_state()
 
                 # Re-attempt the action.
 
-                _logger.info("Refresh seemed successful. Reattempting "
-                             "action [%s].", n)
+                _logger.info(
+                    "Refresh seemed successful. Reattempting " "action [%s].", n
+                )
 
     return wrapper
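
Both retry paths above sleep with exponential backoff plus sub-second jitter before reattempting. Extracted as a standalone sketch (backoff_retry and the ConnectionError stand-in are illustrative, not part of the module):

    import random
    import time

    def backoff_retry(call, attempts=5):
        # Sleep 2**n seconds plus up to one second of jitter between attempts,
        # mirroring the sleep used by the _marshall wrapper.
        for n in range(attempts):
            try:
                return call()
            except ConnectionError:  # stand-in for the transient errors handled above
                time.sleep((2 ** n) + random.randint(0, 1000) / 1000)
        return call()  # last try; let any error propagate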
 

          
@@ 117,7 131,7 @@ class GdriveAuth:
         if self.__http is None:
             self.__check_authorization()
             _logger.debug("Getting authorized HTTP tunnel.")
-                
+
             http = httplib2shim.Http()
             self.__credentials.authorize(http)
 

          
@@ 130,33 144,35 @@ class GdriveAuth:
     def get_client(self):
         if self.__client is None:
             authed_http = self.get_authed_http()
-        
+
             # Build a client from the passed discovery document path
-            
-            discoveryUrl = \
-                gdrivefs.conf.Conf.get('google_discovery_service_url')
-# TODO: We should cache this, since we have, so often, had a problem 
-#       retrieving it. If there's no other way, grab it directly, and then pass
-#       via a file:// URI.
-        
+
+            discoveryUrl = gdrivefs.conf.Conf.get("google_discovery_service_url")
+            # TODO: We should cache this, since we have, so often, had a problem
+            #       retrieving it. If there's no other way, grab it directly, and then pass
+            #       via a file:// URI.
+
             try:
-                client = \
-                    apiclient.discovery.build(
-                        _CONF_SERVICE_NAME, 
-                        _CONF_SERVICE_VERSION, 
-                        http=authed_http, 
-                        discoveryServiceUrl=discoveryUrl)
+                client = apiclient.discovery.build(
+                    _CONF_SERVICE_NAME,
+                    _CONF_SERVICE_VERSION,
+                    http=authed_http,
+                    discoveryServiceUrl=discoveryUrl,
+                )
             except apiclient.errors.HttpError as e:
                 # We've seen situations where the discovery URL's server is down,
                 # with an alternate one to be used.
                 #
-                # An error here shouldn't leave GDFS in an unstable state (the 
-                # current command should just fail). Hoepfully, the failure is 
+                # An error here shouldn't leave GDFS in an unstable state (the
+                # current command should just fail). Hopefully, the failure is
                 # momentary, and the next command succeeds.
 
-                _logger.exception("There was an HTTP response-code of (%d) while "
-                                  "building the client with discovery URL [%s].",
-                                  e.resp.status, discoveryUrl)
+                _logger.exception(
+                    "There was an HTTP response-code of (%d) while "
+                    "building the client with discovery URL [%s].",
+                    e.resp.status,
+                    discoveryUrl,
+                )
                 raise
 
             self.__client = client

          
@@ 166,7 182,7 @@ class GdriveAuth:
 
 class _GdriveManager:
     """Handles all basic communication with Google Drive. All methods should
-    try to invoke only one call, or make sure they handle authentication 
+    try to invoke only one call, or make sure they handle authentication
     refreshing when necessary.
     """
 

          
@@ 174,10 190,12 @@ class _GdriveManager:
         self.__auth = GdriveAuth()
 
     def __assert_response_kind(self, response, expected_kind):
-        actual_kind = response['kind']
+        actual_kind = response["kind"]
         if actual_kind != str(expected_kind):
-            raise ValueError("Received response of type [%s] instead of "
-                             "[%s]." % (actual_kind, expected_kind))
+            raise ValueError(
+                "Received response of type [%s] instead of "
+                "[%s]." % (actual_kind, expected_kind)
+            )
 
     @_marshall
     def get_about_info(self):

          
@@ 185,59 203,59 @@ class _GdriveManager:
 
         client = self.__auth.get_client()
         response = client.about().get().execute()
-        self.__assert_response_kind(response, 'drive#about')
+        self.__assert_response_kind(response, "drive#about")
 
         return response
 
     @_marshall
     def list_changes(self, start_change_id=None, page_token=None):
-        """Get a list of the most recent changes from GD, with the earliest 
-        changes first. This only returns one page at a time. start_change_id 
-        doesn't have to be valid.. It's just the lower limit to what you want 
+        """Get a list of the most recent changes from GD, with the earliest
+        changes first. This only returns one page at a time. start_change_id
+        doesn't have to be valid. It's just the lower limit to what you want
         back. Change-IDs are integers, but are not necessarily sequential.
         """
 
         client = self.__auth.get_client()
 
-        response = client.changes().list(
-                    pageToken=page_token, 
-                    startChangeId=start_change_id).execute()
+        response = (
+            client.changes()
+            .list(pageToken=page_token, startChangeId=start_change_id)
+            .execute()
+        )
 
-        self.__assert_response_kind(response, 'drive#changeList')
+        self.__assert_response_kind(response, "drive#changeList")
 
-        items = response['items']
+        items = response["items"]
 
         if items:
             _logger.debug("We received (%d) changes to apply.", len(items))
 
-        largest_change_id = int(response['largestChangeId'])
-        next_page_token = response.get('nextPageToken')
+        largest_change_id = int(response["largestChangeId"])
+        next_page_token = response.get("nextPageToken")
 
         changes = []
         # last_change_id = None
         for item in items:
-            change_id = int(item['id'])
-            entry_id = item['fileId']
+            change_id = int(item["id"])
+            entry_id = item["fileId"]
 
-            if item['deleted']:
+            if item["deleted"]:
                 was_deleted = True
                 entry = None
 
                 _logger.debug("CHANGE: [%s] (DELETED)", entry_id)
             else:
                 was_deleted = False
-                entry = item['file']
+                entry = item["file"]
 
-                _logger.debug("CHANGE: [%s] [%s] (UPDATED)", 
-                              entry_id, entry['title'])
+                _logger.debug("CHANGE: [%s] [%s] (UPDATED)", entry_id, entry["title"])
 
             if was_deleted:
                 normalized_entry = None
             else:
-                normalized_entry = \
-                    gdrivefs.normal_entry.NormalEntry(
-                        'list_changes', 
-                        entry)
+                normalized_entry = gdrivefs.normal_entry.NormalEntry(
+                    "list_changes", entry
+                )
 
             changes.append((change_id, (entry_id, was_deleted, normalized_entry)))
             # last_change_id = change_id
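
For context, each change item consumed in the loop above has this rough shape (illustrative values); deletions carry no "file" payload:

    item = {"id": "12345", "fileId": "abc", "deleted": False,
            "file": {"title": "notes.txt"}}

    change_id = int(item["id"])
    entry_id = item["fileId"]
    entry = None if item["deleted"] else item["file"]   # None means the entry was deleted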

          
@@ 246,7 264,7 @@ class _GdriveManager:
 
     @_marshall
     def get_parents_containing_id(self, child_id, max_results=None):
-        
+
         _logger.info("Getting client for parent-listing.")
 
         client = self.__auth.get_client()

          
@@ 254,51 272,60 @@ class _GdriveManager:
         _logger.info("Listing entries over child with ID [%s].", child_id)
 
         response = client.parents().list(fileId=child_id).execute()
-        self.__assert_response_kind(response, 'drive#parentList')
+        self.__assert_response_kind(response, "drive#parentList")
 
-        return [ entry['id'] for entry in response['items'] ]
+        return [entry["id"] for entry in response["items"]]
 
     @_marshall
-    def get_children_under_parent_id(self,
-                                     parent_id,
-                                     query_contains_string=None,
-                                     query_is_string=None,
-                                     max_results=None):
+    def get_children_under_parent_id(
+        self,
+        parent_id,
+        query_contains_string=None,
+        query_is_string=None,
+        max_results=None,
+    ):
 
         _logger.info("Getting client for child-listing.")
 
         client = self.__auth.get_client()
 
-        assert \
-            (query_contains_string is not None and \
-             query_is_string is not None) is False, \
-            "The query_contains_string and query_is_string parameters are "\
+        assert (
+            query_contains_string is not None and query_is_string is not None
+        ) is False, (
+            "The query_contains_string and query_is_string parameters are "
             "mutually exclusive."
+        )
 
         if query_is_string:
-            query = ("title='%s'" % 
-                     (gdrivefs.fsutility.escape_filename_for_query(query_is_string)))
+            query = "title='%s'" % (
+                gdrivefs.fsutility.escape_filename_for_query(query_is_string)
+            )
         elif query_contains_string:
-            query = ("title contains '%s'" % 
-                     (gdrivefs.fsutility.escape_filename_for_query(query_contains_string)))
+            query = "title contains '%s'" % (
+                gdrivefs.fsutility.escape_filename_for_query(query_contains_string)
+            )
         else:
             query = None
 
-        _logger.info("Listing entries under parent with ID [%s].  QUERY= "
-                     "[%s]", parent_id, query)
+        _logger.info(
+            "Listing entries under parent with ID [%s].  QUERY= " "[%s]",
+            parent_id,
+            query,
+        )
 
-        response = client.children().list(
-                    q=query, 
-                    folderId=parent_id,
-                    maxResults=max_results).execute()
+        response = (
+            client.children()
+            .list(q=query, folderId=parent_id, maxResults=max_results)
+            .execute()
+        )
 
-        self.__assert_response_kind(response, 'drive#childList')
+        self.__assert_response_kind(response, "drive#childList")
 
-        return [ entry['id'] for entry in response['items'] ]
+        return [entry["id"] for entry in response["items"]]
 
     @_marshall
     def get_entries(self, entry_ids):
-        retrieved = { }
+        retrieved = {}
         for entry_id in entry_ids:
             retrieved[entry_id] = self.get_entry(entry_id)
 

          
@@ 311,26 338,21 @@ class _GdriveManager:
         client = self.__auth.get_client()
 
         response = client.files().get(fileId=entry_id).execute()
-        self.__assert_response_kind(response, 'drive#file')
+        self.__assert_response_kind(response, "drive#file")
 
-        return \
-            gdrivefs.normal_entry.NormalEntry('direct_read', response)
+        return gdrivefs.normal_entry.NormalEntry("direct_read", response)
 
     @_marshall
-    def list_files(self, query_contains_string=None, query_is_string=None, 
-                   parent_id=None):
-        
-        _logger.info("Listing all files. CONTAINS=[%s] IS=[%s] "
-                     "PARENT_ID=[%s]",
-                     query_contains_string 
-                        if query_contains_string is not None 
-                        else '<none>', 
-                     query_is_string 
-                        if query_is_string is not None 
-                        else '<none>', 
-                     parent_id 
-                        if parent_id is not None 
-                        else '<none>')
+    def list_files(
+        self, query_contains_string=None, query_is_string=None, parent_id=None
+    ):
+
+        _logger.info(
+            "Listing all files. CONTAINS=[%s] IS=[%s] " "PARENT_ID=[%s]",
+            query_contains_string if query_contains_string is not None else "<none>",
+            query_is_string if query_is_string is not None else "<none>",
+            parent_id if parent_id is not None else "<none>",
+        )
 
         client = self.__auth.get_client()
 

          
@@ 340,96 362,120 @@ class _GdriveManager:
             query_components.append("'%s' in parents" % (parent_id))
 
         if query_is_string:
-            query_components.append("title='%s'" % 
-                                    (gdrivefs.fsutility.escape_filename_for_query(query_is_string)))
+            query_components.append(
+                "title='%s'"
+                % (gdrivefs.fsutility.escape_filename_for_query(query_is_string))
+            )
         elif query_contains_string:
-            query_components.append("title contains '%s'" % 
-                                    (gdrivefs.fsutility.escape_filename_for_query(query_contains_string)))
+            query_components.append(
+                "title contains '%s'"
+                % (gdrivefs.fsutility.escape_filename_for_query(query_contains_string))
+            )
 
         # Make sure that we don't get any entries that we would have to ignore.
 
-        hidden_flags = gdrivefs.conf.Conf.get('hidden_flags_list_remote')
+        hidden_flags = gdrivefs.conf.Conf.get("hidden_flags_list_remote")
         if hidden_flags:
             for hidden_flag in hidden_flags:
                 query_components.append("%s = false" % (hidden_flag))
 
-        query = ' and '.join(query_components) if query_components else None
+        query = " and ".join(query_components) if query_components else None
 
         page_token = None
         page_num = 0
         entries = []
         while 1:
-            _logger.debug("Doing request for listing of files with page-"
-                          "token [%s] and page-number (%d): %s",
-                          page_token, page_num, query)
+            _logger.debug(
+                "Doing request for listing of files with page-"
+                "token [%s] and page-number (%d): %s",
+                page_token,
+                page_num,
+                query,
+            )
 
-            result = client.files().list(q=query, pageToken=page_token).\
-                        execute()
-
-            self.__assert_response_kind(result, 'drive#fileList')
+            result = client.files().list(q=query, pageToken=page_token).execute()
 
-            _logger.debug("(%d) entries were presented for page-number "
-                          "(%d).", len(result['items']), page_num)
+            self.__assert_response_kind(result, "drive#fileList")
 
-            for entry_raw in result['items']:
-                entry = \
-                    gdrivefs.normal_entry.NormalEntry(
-                        'list_files', 
-                        entry_raw)
+            _logger.debug(
+                "(%d) entries were presented for page-number " "(%d).",
+                len(result["items"]),
+                page_num,
+            )
+
+            for entry_raw in result["items"]:
+                entry = gdrivefs.normal_entry.NormalEntry("list_files", entry_raw)
 
                 entries.append(entry)
 
-            if 'nextPageToken' not in result:
+            if "nextPageToken" not in result:
                 _logger.debug("No more pages in file listing.")
                 break
 
-            _logger.debug("Next page-token in file-listing is [%s].", 
-                          result['nextPageToken'])
+            _logger.debug(
+                "Next page-token in file-listing is [%s].", result["nextPageToken"]
+            )
 
-            page_token = result['nextPageToken']
+            page_token = result["nextPageToken"]
             page_num += 1
 
         return entries
 
     @_marshall
-    def download_to_local(self, output_file_path, normalized_entry, 
-                          mime_type=None, allow_cache=True):
-        """Download the given file. If we've cached a previous download and the 
-        mtime hasn't changed, re-use. The third item returned reflects whether 
+    def download_to_local(
+        self, output_file_path, normalized_entry, mime_type=None, allow_cache=True
+    ):
+        """Download the given file. If we've cached a previous download and the
+        mtime hasn't changed, re-use. The third item returned reflects whether
         the data has changed since any prior attempts.
         """
 
-        _logger.info("Downloading entry with ID [%s] and mime-type [%s] to "
-                     "[%s].", normalized_entry.id, mime_type, output_file_path)
+        _logger.info(
+            "Downloading entry with ID [%s] and mime-type [%s] to " "[%s].",
+            normalized_entry.id,
+            mime_type,
+            output_file_path,
+        )
 
         if mime_type is None:
             if normalized_entry.mime_type in normalized_entry.download_links:
                 mime_type = normalized_entry.mime_type
 
-                _logger.debug("Electing file mime-type for download: [%s]", 
-                              normalized_entry.mime_type)
-            elif gdrivefs.constants.OCTET_STREAM_MIMETYPE \
-                    in normalized_entry.download_links:
+                _logger.debug(
+                    "Electing file mime-type for download: [%s]",
+                    normalized_entry.mime_type,
+                )
+            elif (
+                gdrivefs.constants.OCTET_STREAM_MIMETYPE
+                in normalized_entry.download_links
+            ):
                 mime_type = gdrivefs.constants.OCTET_STREAM_MIMETYPE
 
                 _logger.debug("Electing octet-stream for download.")
             else:
-                raise ValueError("Could not determine what to fallback to for "
-                                 "the mimetype: {}".format(
-                                 normalized_entry.mime_type))
+                raise ValueError(
+                    "Could not determine what to fallback to for "
+                    "the mimetype: {}".format(normalized_entry.mime_type)
+                )
 
-        if mime_type != normalized_entry.mime_type and \
-                mime_type not in normalized_entry.download_links:
-            message = ("Entry with ID [%s] can not be exported to type [%s]. "
-                       "The available types are: %s" % 
-                       (normalized_entry.id, mime_type, 
-                        ', '.join(list(normalized_entry.download_links.keys()))))
+        if (
+            mime_type != normalized_entry.mime_type
+            and mime_type not in normalized_entry.download_links
+        ):
+            message = (
+                "Entry with ID [%s] can not be exported to type [%s]. "
+                "The available types are: %s"
+                % (
+                    normalized_entry.id,
+                    mime_type,
+                    ", ".join(list(normalized_entry.download_links.keys())),
+                )
+            )
 
             _logger.warning(message)
             raise gdrivefs.errors.ExportFormatError(message)
 
-        gd_mtime_epoch = time.mktime(
-                            normalized_entry.modified_date.timetuple())
+        gd_mtime_epoch = time.mktime(normalized_entry.modified_date.timetuple())
 
         _logger.info("File will be downloaded to [%s].", output_file_path)
 

          
@@ 444,8 490,9 @@ class _GdriveManager:
         if use_cache:
             # Use the cache. It's fine.
 
-            _logger.info("File retrieved from the previously downloaded, "
-                         "still-current file.")
+            _logger.info(
+                "File retrieved from the previously downloaded, " "still-current file."
+            )
 
             return (stat_info.st_size, False)
 

          
@@ 455,37 502,40 @@ class _GdriveManager:
 
         url = normalized_entry.download_links[mime_type]
 
-        with open(output_file_path, 'wb') as f:
-            downloader = gdrivefs.chunked_download.ChunkedDownload(
-                            f, 
-                            authed_http, 
-                            url)
+        with open(output_file_path, "wb") as f:
+            downloader = gdrivefs.chunked_download.ChunkedDownload(f, authed_http, url)
 
             progresses = []
 
             while 1:
                 status, done, total_size = downloader.next_chunk()
-                assert status.total_size is not None, \
-                       "total_size is None"
+                assert status.total_size is not None, "total_size is None"
 
-                _logger.debug("Read chunk: STATUS=[%s] DONE=[%s] "
-                              "TOTAL_SIZE=[%s]", status, done, total_size)
+                _logger.debug(
+                    "Read chunk: STATUS=[%s] DONE=[%s] " "TOTAL_SIZE=[%s]",
+                    status,
+                    done,
+                    total_size,
+                )
 
                 if status.total_size > 0:
                     percent = status.progress()
                 else:
                     percent = 100.0
 
-                _logger.debug("Chunk: PROGRESS=[%s] TOTAL-SIZE=[%s] "
-                              "RESUMABLE-PROGRESS=[%s]",
-                              percent, status.total_size, 
-                              status.resumable_progress)
+                _logger.debug(
+                    "Chunk: PROGRESS=[%s] TOTAL-SIZE=[%s] " "RESUMABLE-PROGRESS=[%s]",
+                    percent,
+                    status.total_size,
+                    status.resumable_progress,
+                )
 
-# TODO(dustin): This just places an arbitrary limit on the number of empty 
-#               chunks we can receive. Can we drop this to 1?
+                # TODO(dustin): This just places an arbitrary limit on the number of empty
+                #               chunks we can receive. Can we drop this to 1?
                 if len(progresses) >= _MAX_EMPTY_CHUNKS:
-                    assert percent > progresses[0], \
-                           "Too many empty chunks have been received."
+                    assert (
+                        percent > progresses[0]
+                    ), "Too many empty chunks have been received."
 
                 progresses.append(percent)
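
The assertion above is a stall guard: once _MAX_EMPTY_CHUNKS progress samples have been recorded, the newest must exceed the oldest. Extracted as a checkable sketch (check_progress is illustrative; the real loop asserts in place):

    _MAX_EMPTY_CHUNKS = 3

    def check_progress(progresses, percent):
        if len(progresses) >= _MAX_EMPTY_CHUNKS:
            assert percent > progresses[0], "Too many empty chunks have been received."
        progresses.append(percent)

    history = []
    for sample in (10.0, 10.0, 25.0, 60.0):
        check_progress(history, sample)   # guard first fires on the fourth sample; 60.0 > 10.0 passes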
 

          
@@ 505,35 555,35 @@ class _GdriveManager:
     @_marshall
     def create_directory(self, filename, parents, **kwargs):
 
-        mimetype_directory = gdrivefs.conf.Conf.get('directory_mimetype')
+        mimetype_directory = gdrivefs.conf.Conf.get("directory_mimetype")
         return self.__insert_entry(
-                False,
-                filename, 
-                parents,
-                mimetype_directory, 
-                **kwargs)
+            False, filename, parents, mimetype_directory, **kwargs
+        )
 
     @_marshall
-    def create_file(self, filename, parents, mime_type, data_filepath=None, 
-                    **kwargs):
-# TODO: It doesn't seem as if the created file is being registered.
-        # Even though we're supposed to provide an extension, we can get away 
-        # without having one. We don't want to impose this when acting like a 
+    def create_file(self, filename, parents, mime_type, data_filepath=None, **kwargs):
+        # TODO: It doesn't seem as if the created file is being registered.
+        # Even though we're supposed to provide an extension, we can get away
+        # without having one. We don't want to impose this when acting like a
         # normal FS.
 
         return self.__insert_entry(
-                True,
-                filename,
-                parents,
-                mime_type,
-                data_filepath=data_filepath,
-                **kwargs)
+            True, filename, parents, mime_type, data_filepath=data_filepath, **kwargs
+        )
 
     @_marshall
-    def __insert_entry(self, is_file, filename, parents, mime_type, 
-                       data_filepath=None, modified_datetime=None, 
-                       accessed_datetime=None, is_hidden=False, 
-                       description=None):
+    def __insert_entry(
+        self,
+        is_file,
+        filename,
+        parents,
+        mime_type,
+        data_filepath=None,
+        modified_datetime=None,
+        accessed_datetime=None,
+        is_hidden=False,
+        description=None,
+    ):
 
         if parents is None:
             parents = []

          
@@ 541,72 591,73 @@ class _GdriveManager:
         now_phrase = gdrivefs.time_support.get_flat_normal_fs_time_from_dt()
 
         if modified_datetime is None:
-            modified_datetime = now_phrase 
-    
+            modified_datetime = now_phrase
+
         if accessed_datetime is None:
-            accessed_datetime = now_phrase 
+            accessed_datetime = now_phrase
 
-        _logger.info("Creating entry with filename [%s] under parent(s) "
-                     "[%s] with mime-type [%s]. MTIME=[%s] ATIME=[%s] "
-                     "DATA_FILEPATH=[%s]",
-                     filename, ', '.join(parents), mime_type, 
-                     modified_datetime, accessed_datetime, data_filepath)
+        _logger.info(
+            "Creating entry with filename [%s] under parent(s) "
+            "[%s] with mime-type [%s]. MTIME=[%s] ATIME=[%s] "
+            "DATA_FILEPATH=[%s]",
+            filename,
+            ", ".join(parents),
+            mime_type,
+            modified_datetime,
+            accessed_datetime,
+            data_filepath,
+        )
 
         client = self.__auth.get_client()
 
         ## Create request-body.
 
-        body = { 
-                'title': filename, 
-                'parents': [dict(id=parent) for parent in parents], 
-                'labels': { "hidden": is_hidden }, 
-                'mimeType': mime_type,
-            }
+        body = {
+            "title": filename,
+            "parents": [dict(id=parent) for parent in parents],
+            "labels": {"hidden": is_hidden},
+            "mimeType": mime_type,
+        }
 
         if description is not None:
-            body['description'] = description
+            body["description"] = description
 
         if modified_datetime is not None:
-            body['modifiedDate'] = modified_datetime
+            body["modifiedDate"] = modified_datetime
 
         if accessed_datetime is not None:
-            body['lastViewedByMeDate'] = accessed_datetime
+            body["lastViewedByMeDate"] = accessed_datetime
 
         ## Create request-arguments.
 
         args = {
-            'body': body,
+            "body": body,
         }
 
         if data_filepath:
-            args.update({
-                'media_body': 
-                    apiclient.http.MediaFileUpload(
-                        data_filepath, 
-                        mimetype=mime_type, 
+            args.update(
+                {
+                    "media_body": apiclient.http.MediaFileUpload(
+                        data_filepath,
+                        mimetype=mime_type,
                         resumable=True,
-                        chunksize=_DEFAULT_UPLOAD_CHUNK_SIZE_B),
-# TODO(dustin): Documented, but does not exist.
-#                'uploadType': 'resumable',
-            })
+                        chunksize=_DEFAULT_UPLOAD_CHUNK_SIZE_B,
+                    ),
+                    # TODO(dustin): Documented, but does not exist.
+                    #                'uploadType': 'resumable',
+                }
+            )
 
         if gdrivefs.config.IS_DEBUG is True:
-            _logger.debug("Doing file-insert with:\n%s", 
-                          pprint.pformat(args))
+            _logger.debug("Doing file-insert with:\n%s", pprint.pformat(args))
 
         request = client.files().insert(**args)
 
-        response = self.__finish_upload(
-                    filename,
-                    request,
-                    data_filepath is not None)
+        response = self.__finish_upload(filename, request, data_filepath is not None)
 
-        self.__assert_response_kind(response, 'drive#file')
+        self.__assert_response_kind(response, "drive#file")
 
-        normalized_entry = \
-            gdrivefs.normal_entry.NormalEntry(
-                'insert_entry', 
-                response)
+        normalized_entry = gdrivefs.normal_entry.NormalEntry("insert_entry", response)
 
         _logger.info("New entry created with ID [%s].", normalized_entry.id)
 

          
@@ 619,120 670,130 @@ class _GdriveManager:
 
         client = self.__auth.get_client()
 
-        file_ = \
-            apiclient.http.MediaFileUpload(
-                '/dev/null',
-                mimetype=normalized_entry.mime_type)
+        file_ = apiclient.http.MediaFileUpload(
+            "/dev/null", mimetype=normalized_entry.mime_type
+        )
 
-        args = { 
-            'fileId': normalized_entry.id, 
-# TODO(dustin): Can we omit 'body'?
-            'body': {}, 
-            'media_body': file_,
+        args = {
+            "fileId": normalized_entry.id,
+            # TODO(dustin): Can we omit 'body'?
+            "body": {},
+            "media_body": file_,
         }
 
         response = client.files().update(**args).execute()
-        self.__assert_response_kind(response, 'drive#file')
+        self.__assert_response_kind(response, "drive#file")
 
         _logger.debug("Truncate complete: [%s]", normalized_entry.id)
 
         return response
 
     @_marshall
-    def update_entry(self, normalized_entry, filename=None, data_filepath=None, 
-                     mime_type=None, parents=None, modified_datetime=None, 
-                     accessed_datetime=None, is_hidden=False, 
-                     description=None):
+    def update_entry(
+        self,
+        normalized_entry,
+        filename=None,
+        data_filepath=None,
+        mime_type=None,
+        parents=None,
+        modified_datetime=None,
+        accessed_datetime=None,
+        is_hidden=False,
+        description=None,
+    ):
 
         _logger.info("Updating entry [%s].", normalized_entry)
 
         client = self.__auth.get_client()
 
         # Build request-body.
-        
+
         body = {}
 
         if mime_type is None:
             mime_type = normalized_entry.mime_type
 
-        body['mimeType'] = mime_type 
+        body["mimeType"] = mime_type
 
         if filename is not None:
-            body['title'] = filename
-        
+            body["title"] = filename
+
         if parents is not None:
-            body['parents'] = parents
+            body["parents"] = parents
 
         if is_hidden is not None:
-            body['labels'] = { "hidden": is_hidden }
+            body["labels"] = {"hidden": is_hidden}
 
         if description is not None:
-            body['description'] = description
+            body["description"] = description
 
         set_mtime = True
         if modified_datetime is not None:
-            body['modifiedDate'] = modified_datetime
+            body["modifiedDate"] = modified_datetime
         else:
-            body['modifiedDate'] = \
-                gdrivefs.time_support.get_flat_normal_fs_time_from_dt()
+            body[
+                "modifiedDate"
+            ] = gdrivefs.time_support.get_flat_normal_fs_time_from_dt()
 
         if accessed_datetime is not None:
             set_atime = True
-            body['lastViewedByMeDate'] = accessed_datetime
+            body["lastViewedByMeDate"] = accessed_datetime
         else:
             set_atime = False
 
         # Build request-arguments.
 
-        args = { 
-            'fileId': normalized_entry.id, 
-            'body': body, 
-            'setModifiedDate': set_mtime, 
-            'updateViewedDate': set_atime,
+        args = {
+            "fileId": normalized_entry.id,
+            "body": body,
+            "setModifiedDate": set_mtime,
+            "updateViewedDate": set_atime,
         }
 
         if data_filepath is not None:
-            _logger.debug("We'll be sending a file in the update: [%s] [%s]", 
-                          normalized_entry.id, data_filepath)
+            _logger.debug(
+                "We'll be sending a file in the update: [%s] [%s]",
+                normalized_entry.id,
+                data_filepath,
+            )
 
             # We can only upload large files using resumable-uploads.
-            args.update({
-                'media_body': 
-                    apiclient.http.MediaFileUpload(
-                        data_filepath, 
-                        mimetype=mime_type, 
+            args.update(
+                {
+                    "media_body": apiclient.http.MediaFileUpload(
+                        data_filepath,
+                        mimetype=mime_type,
                         resumable=True,
-                        chunksize=_DEFAULT_UPLOAD_CHUNK_SIZE_B),
-# TODO(dustin): Documented, but does not exist.
-#                'uploadType': 'resumable',
-            })
+                        chunksize=_DEFAULT_UPLOAD_CHUNK_SIZE_B,
+                    ),
+                    # TODO(dustin): Documented, but does not exist.
+                    #                'uploadType': 'resumable',
+                }
+            )
 
         _logger.debug("Sending entry update: [%s]", normalized_entry.id)
 
         request = client.files().update(**args)
 
         result = self.__finish_upload(
-                    normalized_entry.title,
-                    request,
-                    data_filepath is not None)
+            normalized_entry.title, request, data_filepath is not None
+        )
 
-        normalized_entry = \
-            gdrivefs.normal_entry.NormalEntry('update_entry', result)
+        normalized_entry = gdrivefs.normal_entry.NormalEntry("update_entry", result)
 
         _logger.debug("Entry updated: [%s]", normalized_entry)
 
         return normalized_entry
 
     def __finish_upload(self, filename, request, has_file):
-        """Finish a resumable-upload is a file was given, or just execute the 
+        """Finish a resumable-upload is a file was given, or just execute the
         request if not.
         """
 
         if has_file is False:
             return request.execute()
 
-        _logger.debug("We need to finish updating the entry's data: [%s]", 
-                      filename)
+        _logger.debug("We need to finish updating the entry's data: [%s]", filename)
 
         result = None
         while result is None:

          
@@ 742,8 803,9 @@ class _GdriveManager:
                 if status.total_size == 0:
                     _logger.debug("Uploaded (zero-length): [%s]", filename)
                 else:
-                    _logger.debug("Uploaded [%s]: %.2f%%", 
-                                  filename, status.progress() * 100)
+                    _logger.debug(
+                        "Uploaded [%s]: %.2f%%", filename, status.progress() * 100
+                    )
 
         return result
 

          
@@ 753,11 815,16 @@ class _GdriveManager:
         result = gdrivefs.fsutility.split_path_nolookups(new_filename)
         (path, filename_stripped, mime_type, is_hidden) = result
 
-        _logger.debug("Renaming entry [%s] to [%s]. IS_HIDDEN=[%s]",
-                      normalized_entry, filename_stripped, is_hidden)
+        _logger.debug(
+            "Renaming entry [%s] to [%s]. IS_HIDDEN=[%s]",
+            normalized_entry,
+            filename_stripped,
+            is_hidden,
+        )
 
-        return self.update_entry(normalized_entry, filename=filename_stripped, 
-                                 is_hidden=is_hidden)
+        return self.update_entry(
+            normalized_entry, filename=filename_stripped, is_hidden=is_hidden
+        )
 
     @_marshall
     def remove_entry(self, normalized_entry):

          
@@ 766,24 833,30 @@ class _GdriveManager:
 
         client = self.__auth.get_client()
 
-        args = { 'fileId': normalized_entry.id }
+        args = {"fileId": normalized_entry.id}
 
         try:
             result = client.files().delete(**args).execute()
         except Exception as e:
-            if e.__class__.__name__ == 'HttpError' and \
-               str(e).find('File not found') != -1:
+            if (
+                e.__class__.__name__ == "HttpError"
+                and str(e).find("File not found") != -1
+            ):
                 raise NameError(normalized_entry.id)
 
-            _logger.exception("Could not send delete for entry with ID [%s].",
-                              normalized_entry.id)
+            _logger.exception(
+                "Could not send delete for entry with ID [%s].", normalized_entry.id
+            )
             raise
 
         _logger.info("Entry deleted successfully.")
 
+
 _THREAD_STORAGE = None
+
+
 def get_gdrive():
-    """Return an instance of _GdriveManager unique to each thread (we can't 
+    """Return an instance of _GdriveManager unique to each thread (we can't
     reuse sockets between threads).
     """
 

          
M gdrivefs/errors.py +8 -1
@@ 4,34 4,41 @@ class GdFsError(Exception):
 
 class AuthorizationError(GdFsError):
     """All authorization-related errors inherit from this."""
+
     pass
 
 
 class AuthorizationFailureError(AuthorizationError):
     """There was a general authorization failure."""
+
     pass
-        
+
 
 class AuthorizationFaultError(AuthorizationError):
     """Our authorization is not available or has expired."""
+
     pass
 
 
 class MustIgnoreFileError(GdFsError):
     """An error requiring us to ignore the file."""
+
     pass
 
 
 class FilenameQuantityError(MustIgnoreFileError):
     """Too many filenames share the same name in a single directory."""
+
     pass
 
 
 class ExportFormatError(GdFsError):
     """A format was not available for export."""
+
     pass
 
 
 class GdNotFoundError(GdFsError):
     """A file/path was not found."""
+
     pass

          
M gdrivefs/fsutility.py +56 -43
@@ 7,41 7,42 @@ import re
 
 _logger = logging.getLogger(__name__)
 
-def dec_hint(argument_names=[], excluded=[], prefix='', otherdata_cb=None):
-    """A decorator for the calling of functions to be emphasized in the 
+
+def dec_hint(argument_names=[], excluded=[], prefix="", otherdata_cb=None):
+    """A decorator for the calling of functions to be emphasized in the
     logging. Displays prefix and suffix information in the logs.
     """
 
     # We use a serial-number so that we can eyeball corresponding pairs of
     # beginning and ending statements in the logs.
-    sn = getattr(dec_hint, 'sn', 0) + 1
+    sn = getattr(dec_hint, "sn", 0) + 1
     dec_hint.sn = sn
 
-    prefix = ("%s: " % (prefix)) if prefix else ''
+    prefix = ("%s: " % (prefix)) if prefix else ""
 
     def real_decorator(f):
         def wrapper(*args, **kwargs):
-        
+
             try:
                 pid = fuse_get_context()[2]
             except:
                 # Just in case.
                 pid = 0
-        
+
             if not prefix:
-                _logger.debug("-----------------------------------------------"
-                              "---")
+                _logger.debug("-----------------------------------------------" "---")
 
-            _logger.debug("%s>>>>>>>>>> %s(%d) >>>>>>>>>> (%d)",
-                          prefix, f.__name__, sn, pid)
-        
+            _logger.debug(
+                "%s>>>>>>>>>> %s(%d) >>>>>>>>>> (%d)", prefix, f.__name__, sn, pid
+            )
+
             if args or kwargs:
                 condensed = {}
                 for i in range(len(args)):
                     # Skip the 'self' argument.
                     if i == 0:
                         continue
-                
+
                     if i - 1 >= len(argument_names):
                         break
 

          
@@ 50,67 51,78 @@ def dec_hint(argument_names=[], excluded
                 for k, v in list(kwargs.items()):
                     condensed[k] = v
 
-                values_nice = [("%s= [%s]" % (k, v)) for k, v \
-                                                     in list(condensed.items()) \
-                                                     if k not in excluded]
-                
+                values_nice = [
+                    ("%s= [%s]" % (k, v))
+                    for k, v in list(condensed.items())
+                    if k not in excluded
+                ]
+
                 if otherdata_cb:
                     data = otherdata_cb(*args, **kwargs)
                     for k, v in list(data.items()):
                         values_nice[k] = v
-                
+
                 if values_nice:
-                    values_string = '  '.join(values_nice)
+                    values_string = "  ".join(values_nice)
                     _logger.debug("DATA: %s", values_string)
 
-            suffix = ''
+            suffix = ""
 
             try:
                 result = f(*args, **kwargs)
             except FuseOSError as e:
                 if e.errno not in (errno.ENOENT,):
-                    _logger.error("FUSE error [%s] (%s) will be forwarded "
-                                  "back to GDFS from [%s]: %s", 
-                                  e.__class__.__name__, e.errno, f.__name__, 
-                                  str(e))
+                    _logger.error(
+                        "FUSE error [%s] (%s) will be forwarded "
+                        "back to GDFS from [%s]: %s",
+                        e.__class__.__name__,
+                        e.errno,
+                        f.__name__,
+                        str(e),
+                    )
                 raise
             except Exception as e:
                 _logger.exception("There was an exception in [%s]", f.__name__)
-                suffix = (' (E(%s): "%s")' % (e.__class__.__name__, str(e)))
+                suffix = ' (E(%s): "%s")' % (e.__class__.__name__, str(e))
                 raise
             finally:
-                _logger.debug("%s<<<<<<<<<< %s(%d) (%d)%s", 
-                              prefix, f.__name__, sn, pid, suffix)
-            
+                _logger.debug(
+                    "%s<<<<<<<<<< %s(%d) (%d)%s", prefix, f.__name__, sn, pid, suffix
+                )
+
             return result
+
         return wrapper
+
     return real_decorator
 
+
 def strip_export_type(path):
 
     matched = re.search(
-                r'#([a-zA-Z0-9\-]+\\+[a-zA-Z0-9\-]+)?$'.encode('utf-8'),
-                path.encode('utf-8'))
+        r"#([a-zA-Z0-9\-]+\\+[a-zA-Z0-9\-]+)?$".encode("utf-8"), path.encode("utf-8")
+    )
 
     mime_type = None
 
     if matched:
         fragment = matched.group(0)
         mime_type = matched.group(1)
-        
+
         if mime_type is not None:
-            mime_type = mime_type.replace('+', '/')
+            mime_type = mime_type.replace("+", "/")
 
-        path = path[:-len(fragment)]
+        path = path[: -len(fragment)]
 
     return (path, mime_type)
 
+
 def split_path(filepath_original, pathresolver_cb):
     """Completely process and distill the requested file-path. The filename can"
-    be padded to adjust what's being requested. This will remove all such 
+    be padded to adjust what's being requested. This will remove all such
     information, and return the actual file-path along with the extra meta-
     information. pathresolver_cb should expect a single parameter of a path,
-    and return a NormalEntry object. This can be used for both directories and 
+    and return a NormalEntry object. This can be used for both directories and
     files.
     """
 

          
@@ 124,10 136,9 @@ def split_path(filepath_original, pathre
 
     try:
         path_resolution = pathresolver_cb(path)
-# TODO(dustin): We need to specify the exception for when a file doesn't exist.
+    # TODO(dustin): We need to specify the exception for when a file doesn't exist.
     except:
-        _logger.exception("Exception while getting entry from path [%s].", 
-                          path)
+        _logger.exception("Exception while getting entry from path [%s].", path)
 
         raise GdNotFoundError()
 

          
@@ 136,12 147,13 @@ def split_path(filepath_original, pathre
 
     (parent_entry, parent_clause) = path_resolution
 
-    is_hidden = (filename[0] == '.') if filename else False
+    is_hidden = (filename[0] == ".") if filename else False
 
     return (parent_clause, path, filename, mime_type, is_hidden)
 
+
 def split_path_nolookups(filepath_original):
-    """This allows us to get the is-hidden flag, mimetype info, path, and 
+    """This allows us to get the is-hidden flag, mimetype info, path, and
     filename, without doing the [time consuming] lookup if unnecessary.
     """
 

          
@@ 152,15 164,16 @@ def split_path_nolookups(filepath_origin
     (path, filename) = split(filepath)
 
     # We don't remove the period, if we will mark it as hidden, as appropriate.
-    is_hidden = (filename[0] == '.') if filename else False
+    is_hidden = (filename[0] == ".") if filename else False
 
     return (path, filename, mime_type, is_hidden)
 
+
 def build_filepath(path, filename):
-    separator = '/' if path != '/' else ''
+    separator = "/" if path != "/" else ""
 
-    return ('%s%s%s' % (path, separator, filename))
+    return "%s%s%s" % (path, separator, filename)
+
 
 def escape_filename_for_query(filename):
     return filename.replace("\\", "\\\\").replace("'", "\\'")
-
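
For reference, the escaping above doubles backslashes first and then protects single quotes, so the value stays unambiguous inside a Drive query:

    assert escape_filename_for_query("it's") == "it\\'s"
    assert escape_filename_for_query("a\\b") == "a\\\\b"
    query = "title='%s'" % (escape_filename_for_query("it's"),)   # "title='it\\'s'"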

          
M gdrivefs/gdfuse.py +241 -212
@@ 26,28 26,30 @@ import stat
 
 # TODO: make sure strip_extension and split_path are used when each are relevant
 # TODO: make sure create path reserves a file-handle, uploads the data, and then registers the open-file with the file-handle.
-# TODO: Make sure that we rely purely on the FH, whenever it is given, 
+# TODO: Make sure that we rely purely on the FH, whenever it is given,
 #       whereever it appears. This will be to accomodate system calls that can work either via file-path or file-handle.
 
+
 def set_datetime_tz(datetime_obj, tz):
     return datetime_obj.replace(tzinfo=tz)
 
+
 def get_entry_or_raise(raw_path, allow_normal_for_missing=False):
     try:
         result = split_path(raw_path, path_resolver)
         (parent_clause, path, filename, mime_type, is_hidden) = result
     except GdNotFoundError:
-        _logger.exception("Could not retrieve clause for non-existent "
-                          "file-path [%s] (parent does not exist)." % 
-                          (raw_path))
+        _logger.exception(
+            "Could not retrieve clause for non-existent "
+            "file-path [%s] (parent does not exist)." % (raw_path)
+        )
 
         if allow_normal_for_missing is True:
             raise
         else:
             raise FuseOSError(ENOENT)
     except:
-        _logger.exception("Could not process file-path [%s]." % 
-                          (raw_path))
+        _logger.exception("Could not process file-path [%s]." % (raw_path))
         raise FuseOSError(EIO)
 
     filepath = build_filepath(path, filename)

          
@@ 56,17 58,17 @@ def get_entry_or_raise(raw_path, allow_n
     try:
         entry_clause = path_relations.get_clause_from_path(filepath)
     except GdNotFoundError:
-        _logger.exception("Could not retrieve clause for non-existent "
-                          "file-path [%s] (parent exists)." % 
-                          (filepath))
+        _logger.exception(
+            "Could not retrieve clause for non-existent "
+            "file-path [%s] (parent exists)." % (filepath)
+        )
 
         if allow_normal_for_missing is True:
             raise
         else:
             raise FuseOSError(ENOENT)
     except:
-        _logger.exception("Could not retrieve clause for path [%s]. " %
-                          (filepath))
+        _logger.exception("Could not retrieve clause for path [%s]. " % (filepath))
         raise FuseOSError(EIO)
 
     if not entry_clause:

          
@@ 102,62 104,64 @@ class _GdfsMixin:
         block_size_b = gdrivefs.config.fs.CALCULATION_BLOCK_SIZE
 
         if entry.is_directory:
-            effective_permission = int(Conf.get('default_perm_folder'), 
-                                       8)
+            effective_permission = int(Conf.get("default_perm_folder"), 8)
         elif entry.editable:
-            effective_permission = int(Conf.get('default_perm_file_editable'), 
-                                       8)
+            effective_permission = int(Conf.get("default_perm_file_editable"), 8)
         else:
-            effective_permission = int(Conf.get(
-                                            'default_perm_file_noneditable'), 
-                                       8)
+            effective_permission = int(Conf.get("default_perm_file_noneditable"), 8)
 
-        stat_result = { "st_mtime": entry.modified_date_epoch, # modified time.
-                        "st_ctime": entry.modified_date_epoch, # changed time.
-                        "st_atime": time(),
-                        "st_uid":   uid,
-                        "st_gid":   gid }
-        
+        stat_result = {
+            "st_mtime": entry.modified_date_epoch,  # modified time.
+            "st_ctime": entry.modified_date_epoch,  # changed time.
+            "st_atime": time(),
+            "st_uid": uid,
+            "st_gid": gid,
+        }
+
         if entry.is_directory:
-            # Per http://sourceforge.net/apps/mediawiki/fuse/index.php?title=SimpleFilesystemHowto, 
+            # Per http://sourceforge.net/apps/mediawiki/fuse/index.php?title=SimpleFilesystemHowto,
             # default size should be 4K.
-# TODO(dustin): Should we just make this (0), since that's what it is?
+            # TODO(dustin): Should we just make this (0), since that's what it is?
             stat_result["st_size"] = 1024 * 4
-            stat_result["st_mode"] = (stat.S_IFDIR | effective_permission)
+            stat_result["st_mode"] = stat.S_IFDIR | effective_permission
             stat_result["st_nlink"] = 2
         else:
-            stat_result["st_size"] = DisplacedFile.file_size \
-                                        if entry.requires_mimetype \
-                                        else entry.file_size
+            stat_result["st_size"] = (
+                DisplacedFile.file_size if entry.requires_mimetype else entry.file_size
+            )
 
-            stat_result["st_mode"] = (stat.S_IFREG | effective_permission)
+            stat_result["st_mode"] = stat.S_IFREG | effective_permission
             stat_result["st_nlink"] = 1
 
-        stat_result["st_blocks"] = \
-            int(math.ceil(float(stat_result["st_size"]) / block_size_b))
-  
+        stat_result["st_blocks"] = int(
+            math.ceil(float(stat_result["st_size"]) / block_size_b)
+        )
+
         return stat_result
 
-    @dec_hint(['raw_path', 'fh'])
+    @dec_hint(["raw_path", "fh"])
     def getattr(self, raw_path, fh=None):
         """Return a stat() structure."""
-# TODO: Implement handle.
+        # TODO: Implement handle.
 
         (entry, path, filename) = get_entry_or_raise(raw_path)
         return self.__build_stat_from_entry(entry)
 
-    @dec_hint(['path', 'offset'])
+    @dec_hint(["path", "offset"])
     def readdir(self, path, offset):
         """A generator returning one base filename at a time."""
 
         # We expect "offset" to always be (0).
         if offset != 0:
-            _logger.warning("readdir() has been invoked for path [%s] and "
-                            "non-zero offset (%d). This is not allowed.",
-                            path, offset)
+            _logger.warning(
+                "readdir() has been invoked for path [%s] and "
+                "non-zero offset (%d). This is not allowed.",
+                path,
+                offset,
+            )
 
-# TODO: Once we start working on the cache, make sure we don't make this call, 
-#       constantly.
+        # TODO: Once we start working on the cache, make sure we don't make this call,
+        #       constantly.
 
         path_relations = PathRelations.get_instance()
 

          
@@ 167,37 171,38 @@ class _GdfsMixin:
             _logger.exception("Could not process [%s] (readdir).")
             raise FuseOSError(ENOENT)
         except:
-            _logger.exception("Could not get clause from path [%s] "
-                              "(readdir)." % (path))
+            _logger.exception(
+                "Could not get clause from path [%s] " "(readdir)." % (path)
+            )
             raise FuseOSError(EIO)
 
         if not entry_clause:
             raise FuseOSError(ENOENT)
 
         try:
-            entry_tuples = path_relations.get_children_entries_from_entry_id \
-                            (entry_clause[CLAUSE_ID])
+            entry_tuples = path_relations.get_children_entries_from_entry_id(
+                entry_clause[CLAUSE_ID]
+            )
         except:
-            _logger.exception("Could not render list of filenames under path "
-                              "[%s].", path)
+            _logger.exception(
+                "Could not render list of filenames under path " "[%s].", path
+            )
 
             raise FuseOSError(EIO)
 
-        yield utility.translate_filename_charset('.')
-        yield utility.translate_filename_charset('..')
+        yield utility.translate_filename_charset(".")
+        yield utility.translate_filename_charset("..")
 
         for (filename, entry) in entry_tuples:
 
-            # Decorate any file that -requires- a mime-type (all files can 
+            # Decorate any file that -requires- a mime-type (all files can
             # merely accept a mime-type)
             if entry.requires_mimetype:
-                filename += utility.translate_filename_charset('#')
-        
-            yield (filename,
-                   self.__build_stat_from_entry(entry),
-                   0)
+                filename += utility.translate_filename_charset("#")
 
-    @dec_hint(['raw_path', 'length', 'offset', 'fh'])
+            yield (filename, self.__build_stat_from_entry(entry), 0)
+
+    @dec_hint(["raw_path", "length", "offset", "fh"])
     def read(self, raw_path, length, offset, fh):
 
         om = gdrivefs.opened_file.get_om()
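
One cosmetic artifact visible throughout these hunks: where a log message used to be wrapped across two source lines, black joins the pieces onto one line but leaves them as adjacent string literals (implicit concatenation); it never merges them. Merging is a manual follow-up if anyone cares, e.g. for the readdir case above:

    # Hypothetical manual cleanup; black will not do this by itself.
    _logger.exception("Could not get clause from path [%s] (readdir)." % (path))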

          
@@ 205,8 210,9 @@ class _GdfsMixin:
         try:
             opened_file = om.get_by_fh(fh)
         except:
-            _logger.exception("Could not retrieve OpenedFile for handle with"
-                              "ID (%d) (read).", fh)
+            _logger.exception(
+                "Could not retrieve OpenedFile for handle with" "ID (%d) (read).", fh
+            )
 
             raise FuseOSError(EIO)
 

          
@@ 216,11 222,11 @@ class _GdfsMixin:
             _logger.exception("Could not read data.")
             raise FuseOSError(EIO)
 
-    @dec_hint(['filepath', 'mode'])
+    @dec_hint(["filepath", "mode"])
     def mkdir(self, filepath, mode):
         """Create the given directory."""
 
-# TODO: Implement the "mode".
+        # TODO: Implement the "mode".
 
         try:
             result = split_path(filepath, path_resolver)

          
@@ 236,20 242,23 @@ class _GdfsMixin:
         gd = get_gdrive()
 
         try:
-            entry = gd.create_directory(
-                        filename, 
-                        [parent_id], 
-                        is_hidden=is_hidden)
+            entry = gd.create_directory(filename, [parent_id], is_hidden=is_hidden)
         except:
-            _logger.exception("Could not create directory with name [%s] "
-                              "and parent with ID [%s].",
-                              filename, parent_clause[0].id)
+            _logger.exception(
+                "Could not create directory with name [%s] " "and parent with ID [%s].",
+                filename,
+                parent_clause[0].id,
+            )
             raise FuseOSError(EIO)
 
-        _logger.info("Directory [%s] created as ID [%s] under parent with "
-                     "ID [%s].", filepath, entry.id, parent_id)
+        _logger.info(
+            "Directory [%s] created as ID [%s] under parent with " "ID [%s].",
+            filepath,
+            entry.id,
+            parent_id,
+        )
 
-        #parent_clause[4] = False
+        # parent_clause[4] = False
 
         path_relations = PathRelations.get_instance()
 

          
@@ 259,15 268,15 @@ class _GdfsMixin:
             _logger.exception("Could not register new directory in cache.")
             raise FuseOSError(EIO)
 
-# TODO: Find a way to implement or enforce 'mode'.
+    # TODO: Find a way to implement or enforce 'mode'.
     def __create(self, filepath, mode=None):
         """Create a new file.
-                
-        We don't implement "mode" (permissions) because the model doesn't agree 
+
+        We don't implement "mode" (permissions) because the model doesn't agree
         with GD.
         """
 
-# TODO: Fail if it already exists.
+        # TODO: Fail if it already exists.
 
         try:
             result = split_path(filepath, path_resolver)

          
@@ 276,13 285,12 @@ class _GdfsMixin:
             _logger.exception("Could not process [%s] (i-create).", filepath)
             raise FuseOSError(ENOENT)
         except:
-            _logger.exception("Could not split path [%s] (i-create).",
-                              filepath)
+            _logger.exception("Could not split path [%s] (i-create).", filepath)
             raise FuseOSError(EIO)
 
         if mime_type is None:
             _, ext = os.path.splitext(filename)
-            if ext != '':
+            if ext != "":
                 ext = ext[1:]
 
             mime_type = utility.get_first_mime_type_by_extension(ext)

          
@@ 293,14 301,14 @@ class _GdfsMixin:
 
         try:
             entry = gd.create_file(
-                        filename, 
-                        [parent_clause[3]], 
-                        mime_type,
-                        is_hidden=is_hidden)
+                filename, [parent_clause[3]], mime_type, is_hidden=is_hidden
+            )
         except:
-            _logger.exception("Could not create empty file [%s] under "
-                              "parent with ID [%s].",
-                              filename, parent_clause[3])
+            _logger.exception(
+                "Could not create empty file [%s] under " "parent with ID [%s].",
+                filename,
+                parent_clause[3],
+            )
 
             raise FuseOSError(EIO)
 

          
@@ 316,7 324,7 @@ class _GdfsMixin:
 
         return (entry, path, filename, mime_type)
 
-    @dec_hint(['filepath', 'mode'])
+    @dec_hint(["filepath", "mode"])
     def create(self, raw_filepath, mode):
         """Create a new file. This always precedes a write."""
 

          
@@ 325,8 333,9 @@ class _GdfsMixin:
         try:
             fh = om.get_new_handle()
         except:
-            _logger.exception("Could not acquire file-handle for create of "
-                              "[%s].", raw_filepath)
+            _logger.exception(
+                "Could not acquire file-handle for create of " "[%s].", raw_filepath
+            )
 
             raise FuseOSError(EIO)
 

          
@@ 334,27 343,29 @@ class _GdfsMixin:
 
         try:
             opened_file = gdrivefs.opened_file.OpenedFile(
-                            entry.id, 
-                            path, 
-                            filename, 
-                            not entry.is_visible, 
-                            mime_type)
+                entry.id, path, filename, not entry.is_visible, mime_type
+            )
         except:
-            _logger.exception("Could not create OpenedFile object for "
-                              "created file.")
+            _logger.exception("Could not create OpenedFile object for " "created file.")
 
             raise FuseOSError(EIO)
 
-        _logger.debug("Registering OpenedFile object with handle (%d), "
-                      "path [%s], and ID [%s].", fh, raw_filepath, entry.id)
+        _logger.debug(
+            "Registering OpenedFile object with handle (%d), "
+            "path [%s], and ID [%s].",
+            fh,
+            raw_filepath,
+            entry.id,
+        )
 
         om = gdrivefs.opened_file.get_om()
 
         try:
             om.add(opened_file, fh=fh)
         except:
-            _logger.exception("Could not register OpenedFile for created "
-                              "file: [%s]", opened_file)
+            _logger.exception(
+                "Could not register OpenedFile for created " "file: [%s]", opened_file
+            )
 
             raise FuseOSError(EIO)
 

          
@@ 362,20 373,22 @@ class _GdfsMixin:
 
         return fh
 
-    @dec_hint(['filepath', 'flags'])
+    @dec_hint(["filepath", "flags"])
     def open(self, filepath, flags):
-# TODO: Fail if does not exist and the mode/flags is read only.
+        # TODO: Fail if does not exist and the mode/flags is read only.
 
         try:
-            opened_file = gdrivefs.opened_file.\
-                            create_for_existing_filepath(filepath)
+            opened_file = gdrivefs.opened_file.create_for_existing_filepath(filepath)
         except GdNotFoundError:
-            _logger.exception("Could not create handle for requested [%s] "
-                              "(open)." % (filepath))
+            _logger.exception(
+                "Could not create handle for requested [%s] " "(open)." % (filepath)
+            )
             raise FuseOSError(ENOENT)
         except:
-            _logger.exception("Could not create OpenedFile object for "
-                                 "opened filepath [%s].", filepath)
+            _logger.exception(
+                "Could not create OpenedFile object for " "opened filepath [%s].",
+                filepath,
+            )
             raise FuseOSError(EIO)
 
         om = gdrivefs.opened_file.get_om()

          
@@ 383,8 396,7 @@ class _GdfsMixin:
         try:
             fh = om.add(opened_file)
         except:
-            _logger.exception("Could not register OpenedFile for opened "
-                              "file.")
+            _logger.exception("Could not register OpenedFile for opened " "file.")
 
             raise FuseOSError(EIO)
 

          
@@ 392,7 404,7 @@ class _GdfsMixin:
 
         return fh
 
-    @dec_hint(['filepath', 'fh'])
+    @dec_hint(["filepath", "fh"])
     def release(self, filepath, fh):
         """Close a file."""
 

          
@@ 401,12 413,13 @@ class _GdfsMixin:
         try:
             om.remove_by_fh(fh)
         except:
-            _logger.exception("Could not remove OpenedFile for handle with "
-                              "ID (%d) (release).", fh)
+            _logger.exception(
+                "Could not remove OpenedFile for handle with " "ID (%d) (release).", fh
+            )
 
             raise FuseOSError(EIO)
 
-    @dec_hint(['filepath', 'data', 'offset', 'fh'], ['data'])
+    @dec_hint(["filepath", "data", "offset", "fh"], ["data"])
     def write(self, filepath, data, offset, fh):
         om = gdrivefs.opened_file.get_om()
 

          
@@ 424,9 437,9 @@ class _GdfsMixin:
 
         return len(data)
 
-    @dec_hint(['filepath', 'fh'])
+    @dec_hint(["filepath", "fh"])
     def flush(self, filepath, fh):
-        
+
         om = gdrivefs.opened_file.get_om()
 
         try:

          
@@ 441,7 454,7 @@ class _GdfsMixin:
             _logger.exception("Could not flush local updates.")
             raise FuseOSError(EIO)
 
-    @dec_hint(['filepath'])
+    @dec_hint(["filepath"])
     def rmdir(self, filepath):
         """Remove a directory."""
 

          
@@ 453,8 466,9 @@ class _GdfsMixin:
             _logger.exception("Could not process [%s] (rmdir).", filepath)
             raise FuseOSError(ENOENT)
         except:
-            _logger.exception("Could not get clause from file-path [%s] "
-                              "(rmdir).", filepath)
+            _logger.exception(
+                "Could not get clause from file-path [%s] " "(rmdir).", filepath
+            )
             raise FuseOSError(EIO)
 
         if not entry_clause:

          
@@ 467,8 481,9 @@ class _GdfsMixin:
         # Check if not a directory.
 
         if not normalized_entry.is_directory:
-            _logger.error("Can not rmdir() non-directory [%s] with ID [%s].", 
-                          filepath, entry_id)
+            _logger.error(
+                "Can not rmdir() non-directory [%s] with ID [%s].", filepath, entry_id
+            )
 
             raise FuseOSError(ENOTDIR)
 

          
@@ 477,12 492,12 @@ class _GdfsMixin:
         gd = get_gdrive()
 
         try:
-            found = gd.get_children_under_parent_id(
-                        entry_id,
-                        max_results=1)
+            found = gd.get_children_under_parent_id(entry_id, max_results=1)
         except:
-            _logger.exception("Could not determine if directory to be removed "
-                              "has children.", entry_id)
+            _logger.exception(
+                "Could not determine if directory to be removed " "has children.",
+                entry_id,
+            )
 
             raise FuseOSError(EIO)
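
Also pre-existing and untouched by the reformat: this exception log passes entry_id, but the message has no %s placeholder, so the logging module cannot format the record and reports a logging error instead of the intended text. A candidate follow-up, hypothetical and separate from this change:

    _logger.exception(
        "Could not determine if directory [%s] to be removed has children.",
        entry_id,
    )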
 

          
@@ 494,39 509,41 @@ class _GdfsMixin:
         except (NameError):
             raise FuseOSError(ENOENT)
         except:
-            _logger.exception("Could not remove directory [%s] with ID [%s].",
-                              filepath, entry_id)
+            _logger.exception(
+                "Could not remove directory [%s] with ID [%s].", filepath, entry_id
+            )
 
             raise FuseOSError(EIO)
-# TODO: Remove from cache.
+
+    # TODO: Remove from cache.
 
     # Not supported. Google Drive doesn't fit within this model.
-    @dec_hint(['filepath', 'mode'])
+    @dec_hint(["filepath", "mode"])
     def chmod(self, filepath, mode):
         # Return successfully, or rsync might have a problem.
-#        raise FuseOSError(EPERM) # Operation not permitted.
+        #        raise FuseOSError(EPERM) # Operation not permitted.
         pass
 
     # Not supported. Google Drive doesn't fit within this model.
-    @dec_hint(['filepath', 'uid', 'gid'])
+    @dec_hint(["filepath", "uid", "gid"])
     def chown(self, filepath, uid, gid):
         # Return successfully, or rsync might have a problem.
-#        raise FuseOSError(EPERM) # Operation not permitted.
+        #        raise FuseOSError(EPERM) # Operation not permitted.
         pass
 
     # Not supported.
-    @dec_hint(['target', 'source'])
+    @dec_hint(["target", "source"])
     def symlink(self, target, source):
 
         raise FuseOSError(EPERM)
 
     # Not supported.
-    @dec_hint(['filepath'])
+    @dec_hint(["filepath"])
     def readlink(self, filepath):
 
         raise FuseOSError(EPERM)
 
-    @dec_hint(['filepath'])
+    @dec_hint(["filepath"])
     def statfs(self, filepath):
         """Return filesystem status info (for df).
 

          
@@ 549,31 566,24 @@ class _GdfsMixin:
 
         return {
             # Optimal transfer block size.
-            'f_bsize': block_size_b,
-
+            "f_bsize": block_size_b,
             # Total data blocks in file system.
-            'f_blocks': total,
-
+            "f_blocks": total,
             # Fragment size.
-            'f_frsize': block_size_b,
-
+            "f_frsize": block_size_b,
             # Free blocks in filesystem.
-            'f_bfree': free,
-
+            "f_bfree": free,
             # Free blocks avail to non-superuser.
-            'f_bavail': free
-
+            "f_bavail": free
             # Total file nodes in filesystem.
-#            'f_files': 0,
-
+            #            'f_files': 0,
             # Free file nodes in filesystem.
-#            'f_ffree': 0,
-
+            #            'f_ffree': 0,
             # Free inodes for unprivileged users.
-#            'f_favail': 0
+            #            'f_favail': 0
         }
 
-    @dec_hint(['filepath_old', 'filepath_new'])
+    @dec_hint(["filepath_old", "filepath_new"])
     def rename(self, filepath_old, filepath_new):
         # Make sure the old filepath exists.
         (entry, path, filename_old) = get_entry_or_raise(filepath_old)
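
For context on the statfs() dict black just reflowed: those f_* keys are the standard statvfs fields the FUSE layer hands back to df. The total/free block counts are presumably derived from the Drive account quota a little above this hunk (not shown here); a rough sketch of that mapping, with made-up numbers:

    # Hypothetical numbers; the real values come from the account quota info.
    block_size_b = 4096
    quota_bytes_total = 15 * 1024 ** 3                 # e.g. a 15 GiB quota
    quota_bytes_used = 4 * 1024 ** 3                   # e.g. 4 GiB used

    total = quota_bytes_total // block_size_b          # feeds f_blocks
    free = (quota_bytes_total - quota_bytes_used) // block_size_b   # feeds f_bfree / f_bavail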

          
@@ 607,7 617,7 @@ class _GdfsMixin:
             _logger.exception("Could not register renamed entry: %s", entry)
             raise FuseOSError(EIO)
 
-    @dec_hint(['filepath', 'length', 'fh'])
+    @dec_hint(["filepath", "length", "fh"])
     def truncate(self, filepath, length, fh=None):
         if fh is not None:
             om = gdrivefs.opened_file.get_om()

          
@@ 615,8 625,11 @@ class _GdfsMixin:
             try:
                 opened_file = om.get_by_fh(fh)
             except:
-                _logger.exception("Could not retrieve OpenedFile for handle "
-                                  "with ID (%d) (truncate).", fh)
+                _logger.exception(
+                    "Could not retrieve OpenedFile for handle "
+                    "with ID (%d) (truncate).",
+                    fh,
+                )
 
                 raise FuseOSError(EIO)
 

          
@@ 640,17 653,17 @@ class _GdfsMixin:
             _logger.exception("Could not truncate entry [%s].", entry)
             raise FuseOSError(EIO)
 
-# TODO(dustin): It would be a lot quicker if we truncate our temporary file 
-#               here, and make sure its mtime matches.
+    # TODO(dustin): It would be a lot quicker if we truncate our temporary file
+    #               here, and make sure its mtime matches.
 
-        # We don't need to update our internal representation of the file (just 
-        # our file-handle and its related buffering).
+    # We don't need to update our internal representation of the file (just
+    # our file-handle and its related buffering).
 
-    @dec_hint(['file_path'])
+    @dec_hint(["file_path"])
     def unlink(self, file_path):