From 12857c73ef7c46f6d01d0e9d0b995564eb0e013a Mon Sep 17 00:00:00 2001
From: Manuel Fritsch
Date: Sun, 14 Mar 2021 11:52:46 +0100
Subject: [PATCH] mqtt added

---
 cbpi/__init__.py                              |    2 +-
 cbpi/api/dataclasses.py                       |   13 +-
 cbpi/api/sensor.py                            |    1 +
 cbpi/config/config.yaml                       |    6 +
 cbpi/controller/actor_controller.py           |    9 +-
 cbpi/controller/basic_controller2.py          |    4 +-
 cbpi/controller/dashboard_controller.py       |    4 +-
 cbpi/controller/notification_controller.py    |   32 +-
 cbpi/controller/satellite_controller.py       |  127 +-
 cbpi/controller/step_controller.py            |    3 +-
 cbpi/craftbeerpi.py                           |   21 +-
 cbpi/extension/dummyactor/__init__.py         |    9 +-
 cbpi/extension/mashstep/__init__.py           |    2 +-
 cbpi/extension/mqtt/__init__.py               |   42 -
 cbpi/extension/mqtt/config.yaml               |    3 -
 cbpi/extension/mqtt_sensor/__init__.py        |   49 +
 cbpi/extension/mqtt_sensor/config.yaml        |    3 +
 cbpi/http_endpoints/http_dashboard.py         |    1 +
 venv3/bin/autopep8                            |   11 +
 venv3/bin/pycodestyle                         |   11 +
 .../autopep8-1.5.5.dist-info/AUTHORS.rst      |   48 +
 .../autopep8-1.5.5.dist-info/INSTALLER        |    1 +
 .../autopep8-1.5.5.dist-info/LICENSE          |   23 +
 .../autopep8-1.5.5.dist-info/METADATA         |  453 ++
 .../autopep8-1.5.5.dist-info/RECORD           |   11 +
 .../autopep8-1.5.5.dist-info/WHEEL            |    6 +
 .../autopep8-1.5.5.dist-info/entry_points.txt |    3 +
 .../autopep8-1.5.5.dist-info/top_level.txt    |    1 +
 venv3/lib/python3.7/site-packages/autopep8.py | 4469 +++++++++++++++++
 .../pycodestyle-2.6.0.dist-info/INSTALLER     |    1 +
 .../pycodestyle-2.6.0.dist-info/LICENSE       |   25 +
 .../pycodestyle-2.6.0.dist-info/METADATA      | 1035 ++++
 .../pycodestyle-2.6.0.dist-info/RECORD        |   11 +
 .../pycodestyle-2.6.0.dist-info/WHEEL         |    6 +
 .../entry_points.txt                          |    3 +
 .../namespace_packages.txt                    |    1 +
 .../pycodestyle-2.6.0.dist-info/top_level.txt |    1 +
 .../python3.7/site-packages/pycodestyle.py    | 2763 ++++++++++
 .../toml-0.10.2.dist-info/INSTALLER           |    1 +
 .../toml-0.10.2.dist-info/LICENSE             |   27 +
 .../toml-0.10.2.dist-info/METADATA            |  255 +
 .../toml-0.10.2.dist-info/RECORD              |   16 +
 .../site-packages/toml-0.10.2.dist-info/WHEEL |    6 +
 .../toml-0.10.2.dist-info/top_level.txt       |    1 +
 .../python3.7/site-packages/toml/__init__.py  |   25 +
 .../python3.7/site-packages/toml/decoder.py   | 1057 ++++
 .../python3.7/site-packages/toml/encoder.py   |  304 ++
 .../python3.7/site-packages/toml/ordered.py   |   15 +
 venv3/pip-selfcheck.json                      |    2 +-
 50 files changed, 10834 insertions(+), 113 deletions(-)
 delete mode 100644 cbpi/extension/mqtt/__init__.py
 delete mode 100644 cbpi/extension/mqtt/config.yaml
 create mode 100644 cbpi/extension/mqtt_sensor/__init__.py
 create mode 100644 cbpi/extension/mqtt_sensor/config.yaml
 create mode 100755 venv3/bin/autopep8
 create mode 100755 venv3/bin/pycodestyle
 create mode 100644 venv3/lib/python3.7/site-packages/autopep8-1.5.5.dist-info/AUTHORS.rst
 create mode 100644 venv3/lib/python3.7/site-packages/autopep8-1.5.5.dist-info/INSTALLER
 create mode 100644 venv3/lib/python3.7/site-packages/autopep8-1.5.5.dist-info/LICENSE
 create mode 100644 venv3/lib/python3.7/site-packages/autopep8-1.5.5.dist-info/METADATA
 create mode 100644 venv3/lib/python3.7/site-packages/autopep8-1.5.5.dist-info/RECORD
 create mode 100644 venv3/lib/python3.7/site-packages/autopep8-1.5.5.dist-info/WHEEL
 create mode 100644 venv3/lib/python3.7/site-packages/autopep8-1.5.5.dist-info/entry_points.txt
 create mode 100644 venv3/lib/python3.7/site-packages/autopep8-1.5.5.dist-info/top_level.txt
 create mode 100644 venv3/lib/python3.7/site-packages/autopep8.py
 create mode 100644 venv3/lib/python3.7/site-packages/pycodestyle-2.6.0.dist-info/INSTALLER
 create mode 100644 venv3/lib/python3.7/site-packages/pycodestyle-2.6.0.dist-info/LICENSE
 create mode 100644 venv3/lib/python3.7/site-packages/pycodestyle-2.6.0.dist-info/METADATA
 create mode 100644 venv3/lib/python3.7/site-packages/pycodestyle-2.6.0.dist-info/RECORD
 create mode 100644 venv3/lib/python3.7/site-packages/pycodestyle-2.6.0.dist-info/WHEEL
 create mode 100644 venv3/lib/python3.7/site-packages/pycodestyle-2.6.0.dist-info/entry_points.txt
 create mode 100644 venv3/lib/python3.7/site-packages/pycodestyle-2.6.0.dist-info/namespace_packages.txt
 create mode 100644 venv3/lib/python3.7/site-packages/pycodestyle-2.6.0.dist-info/top_level.txt
 create mode 100644 venv3/lib/python3.7/site-packages/pycodestyle.py
 create mode 100644 venv3/lib/python3.7/site-packages/toml-0.10.2.dist-info/INSTALLER
 create mode 100644 venv3/lib/python3.7/site-packages/toml-0.10.2.dist-info/LICENSE
 create mode 100644 venv3/lib/python3.7/site-packages/toml-0.10.2.dist-info/METADATA
 create mode 100644 venv3/lib/python3.7/site-packages/toml-0.10.2.dist-info/RECORD
 create mode 100644 venv3/lib/python3.7/site-packages/toml-0.10.2.dist-info/WHEEL
 create mode 100644 venv3/lib/python3.7/site-packages/toml-0.10.2.dist-info/top_level.txt
 create mode 100644 venv3/lib/python3.7/site-packages/toml/__init__.py
 create mode 100644 venv3/lib/python3.7/site-packages/toml/decoder.py
 create mode 100644 venv3/lib/python3.7/site-packages/toml/encoder.py
 create mode 100644 venv3/lib/python3.7/site-packages/toml/ordered.py
 create mode 100644 venv3/lib/python3.7/site-packages/toml/tz.py

diff --git a/cbpi/__init__.py b/cbpi/__init__.py
index 69ec4a5..5cd23c6 100644
--- a/cbpi/__init__.py
+++ b/cbpi/__init__.py
@@ -1 +1 @@
-__version__ = "4.0.0.31"
\ No newline at end of file
+__version__ = "4.0.0.32"
\ No newline at end of file
diff --git a/cbpi/api/dataclasses.py b/cbpi/api/dataclasses.py
index f866d46..dcac674 100644
--- a/cbpi/api/dataclasses.py
+++ b/cbpi/api/dataclasses.py
@@ -60,7 +60,7 @@ class Actor:
     def __str__(self):
         return "name={} props={}, state={}, type={}".format(self.name, self.props, self.state, self.type)
     def to_dict(self):
-        return dict(id=self.id, name=self.name, type=self.type, props=self.props.to_dict(), state=self.instance.get_state())
+        return dict(id=self.id, name=self.name, type=self.type, props=self.props.to_dict(), state2="HELLO WORLD", state=self.instance.get_state())
@@ -149,4 +149,13 @@ class NotificationAction:
     id: str = None
 
     def to_dict(self):
-        return dict(id=self.id, label=self.label)
\ No newline at end of file
+        return dict(id=self.id, label=self.label)
+
+class NotificationType(Enum):
+    INFO = "info"
+    WARNING = "warning"
+    ERROR = "error"
+    SUCCESS = "success"
+
+    def __str__(self):
+        return self.value
\ No newline at end of file
diff --git a/cbpi/api/sensor.py b/cbpi/api/sensor.py
index b1211c0..a71b75a 100644
--- a/cbpi/api/sensor.py
+++ b/cbpi/api/sensor.py
@@ -35,6 +35,7 @@ class CBPiSensor(CBPiBase, metaclass=ABCMeta):
     def push_update(self, value):
         try:
             self.cbpi.ws.send(dict(topic="sensorstate", id=self.id, value=value))
+            self.cbpi.push_update("cbpi/sensor/{}/update".format(self.id), dict(id=self.id, value=value), retain=True)
         except:
             logging.error("Faild to push sensor update")
diff --git a/cbpi/config/config.yaml b/cbpi/config/config.yaml
index a7116f8..f7a5abe 100644
--- a/cbpi/config/config.yaml
+++ b/cbpi/config/config.yaml
@@ -6,6 +6,12 @@ index_url: /cbpi_ui/static/index.html
 
 port: 8000
 
+mqtt: false
+mqtt_host: localhost
+mqtt_port: 1883
+mqtt_username: ""
+mqtt_password: ""
+
 username: cbpi
 password: 123
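Taken together, the sensor.py and config.yaml hunks above mean that once `mqtt: true` is set and the server restarted, every sensor reading is mirrored as retained JSON on cbpi/sensor/<id>/update. A minimal external consumer might look like the following sketch; it assumes a broker on localhost:1883 and uses the same asyncio-mqtt library the patch itself builds on:

    import asyncio
    import json

    from asyncio_mqtt import Client


    async def main():
        async with Client("localhost", port=1883) as client:
            # Matches the retained per-sensor topic published by CBPiSensor.push_update()
            async with client.filtered_messages("cbpi/sensor/+/update") as messages:
                await client.subscribe("cbpi/sensor/+/update")
                async for message in messages:
                    data = json.loads(message.payload.decode())
                    print(message.topic, data["id"], data["value"])

    asyncio.run(main())

Because the server publishes with retain=True, a late subscriber immediately receives the last known value of every sensor.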
diff --git a/cbpi/controller/actor_controller.py b/cbpi/controller/actor_controller.py
index 67f26c9..e08b02e 100644
--- a/cbpi/controller/actor_controller.py
+++ b/cbpi/controller/actor_controller.py
@@ -8,15 +8,14 @@ class ActorController(BasicController):
         super(ActorController, self).__init__(cbpi, Actor,"actor.json")
         self.update_key = "actorupdate"
-
     async def on(self, id):
         try:
             item = self.find_by_id(id)
-
             if item.instance.state is False:
                 await item.instance.on()
                 await self.push_udpate()
-            #await self.cbpi.satellite.publish("cbpi/actor/on", "ACTOR ON")
+            self.cbpi.push_update("cbpi/actor/"+id, item.to_dict(), True)
+
         except Exception as e:
             logging.error("Faild to switch on Actor {} {}".format(id, e))
@@ -26,14 +25,16 @@ class ActorController(BasicController):
             if item.instance.state is True:
                 await item.instance.off()
                 await self.push_udpate()
+                self.cbpi.push_update("cbpi/actor/"+id, item.to_dict())
         except Exception as e:
-            logging.error("Faild to switch on Actor {} {}".format(id, e))
+            logging.error("Failed to switch off Actor {} {}".format(id, e))
 
     async def toogle(self, id):
         try:
             item = self.find_by_id(id)
             instance = item.get("instance")
             await instance.toggle()
+            self.cbpi.push_update("cbpi/actor/update", item.to_dict())
         except Exception as e:
             logging.error("Faild to switch on Actor {} {}".format(id, e))
\ No newline at end of file
diff --git a/cbpi/controller/basic_controller2.py b/cbpi/controller/basic_controller2.py
index 4ac86db..67d7b87 100644
--- a/cbpi/controller/basic_controller2.py
+++ b/cbpi/controller/basic_controller2.py
@@ -30,7 +30,6 @@ class BasicController:
         await self.load()
 
     def create(self, data):
-
         return self.resource(data.get("id"), data.get("name"), type=data.get("type"), props=Props(data.get("props", {})) )
 
     async def load(self):
@@ -53,9 +52,10 @@ class BasicController:
         with open(self.path, "w") as file:
             json.dump(data, file, indent=4, sort_keys=True)
         await self.push_udpate()
-    
+
     async def push_udpate(self):
         self.cbpi.ws.send(dict(topic=self.update_key, data=list(map(lambda item: item.to_dict(), self.data))))
+        self.cbpi.push_update("cbpi/{}/update".format(self.update_key), list(map(lambda item: item.to_dict(), self.data)))
 
     def find_by_id(self, id):
         return next((item for item in self.data if item.id == id), None)
diff --git a/cbpi/controller/dashboard_controller.py b/cbpi/controller/dashboard_controller.py
index 2c130b8..7cde13a 100644
--- a/cbpi/controller/dashboard_controller.py
+++ b/cbpi/controller/dashboard_controller.py
@@ -1,3 +1,4 @@
+from cbpi.api.dataclasses import NotificationType
 import logging
 import json
 import os
@@ -29,9 +30,9 @@ class DashboardController:
         return {}
 
     async def add_content(self, dashboard_id, data):
         with open(self.path, 'w') as outfile:
             json.dump(data, outfile, indent=4, sort_keys=True)
-        self.cbpi.notify(title="Dashboard", message="Saved Successfully", type="success")
+        self.cbpi.notify(title="Dashboard", message="Saved Successfully", type=NotificationType.SUCCESS)
         return {"status": "OK"}
 
     async def delete_content(self, dashboard_id):
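Notification types are now carried by the NotificationType enum from cbpi/api/dataclasses.py instead of bare strings; notify() reads type.value, so only enum members are valid. A plugin call site then looks like this sketch (any object holding the core `cbpi` reference can send one):

    from cbpi.api.dataclasses import NotificationType

    # str()/.value both yield the wire format the UI already expects:
    assert NotificationType.SUCCESS.value == "success"

    cbpi.notify(title="Dashboard", message="Saved Successfully",
                type=NotificationType.SUCCESS)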
diff --git a/cbpi/controller/notification_controller.py b/cbpi/controller/notification_controller.py
index d7950af..0e9fbfe 100644
--- a/cbpi/controller/notification_controller.py
+++ b/cbpi/controller/notification_controller.py
@@ -1,18 +1,35 @@
 import asyncio
+from cbpi.api.dataclasses import NotificationType
 import logging
 import shortuuid
 
 class NotificationController:
 
     def __init__(self, cbpi):
         '''
-
         :param cbpi: craftbeerpi object
         '''
         self.cbpi = cbpi
         self.logger = logging.getLogger(__name__)
-        self.callback_cache = {}
+        self.callback_cache = {}
+        self.listener = {}
+
+    def add_listener(self, method):
+        listener_id = shortuuid.uuid()
+        self.listener[listener_id] = method
+        return listener_id
 
-    def notify(self, title, message: str, type: str = "info", action=[]) -> None:
+    def remove_listener(self, listener_id):
+        try:
+            del self.listener[listener_id]
+        except:
+            self.logger.error("Failed to remove listener {}".format(listener_id))
+
+    async def _call_listener(self, title, message, type, action):
+        for id, method in self.listener.items():
+            asyncio.create_task(method(self.cbpi, title, message, type, action))
+
+    def notify(self, title, message: str, type: NotificationType = NotificationType.INFO, action=[]) -> None:
         '''
         This is a convinience method to send notification to the client
@@ -22,14 +39,18 @@ class NotificationController:
         :return:
         '''
         notifcation_id = shortuuid.uuid()
-
+
         def prepare_action(item):
             item.id = shortuuid.uuid()
             return item.to_dict()
 
         actions = list(map(lambda item: prepare_action(item), action))
         self.callback_cache[notifcation_id] = action
-        self.cbpi.ws.send(dict(id=notifcation_id, topic="notifiaction", type=type, title=title, message=message, action=actions))
+        self.cbpi.ws.send(dict(id=notifcation_id, topic="notifiaction", type=type.value, title=title, message=message, action=actions))
+        data = dict(type=type.value, title=title, message=message, action=actions)
+        self.cbpi.push_update(topic="cbpi/notification", data=data)
+        asyncio.create_task(self._call_listener(title, message, type, action))
 
     def notify_callback(self, notification_id, action_id) -> None:
         try:
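The listener registry added above gives plugins a hook into every notification: _call_listener() schedules each registered coroutine with (cbpi, title, message, type, action). A sketch of a listener that simply logs, registered from code that has access to the core `cbpi` object:

    async def log_notification(cbpi, title, message, type, action):
        # type is a NotificationType; str(type) yields "info", "error", ...
        print("[{}] {}: {}".format(type, title, message))

    listener_id = cbpi.notification.add_listener(log_notification)
    # later, to detach:
    cbpi.notification.remove_listener(listener_id)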
diff --git a/cbpi/controller/satellite_controller.py b/cbpi/controller/satellite_controller.py
index 48c2214..6cd47b7 100644
--- a/cbpi/controller/satellite_controller.py
+++ b/cbpi/controller/satellite_controller.py
@@ -1,38 +1,81 @@
-
-
 import asyncio
-
+import json
 from asyncio_mqtt import Client, MqttError, Will
 from contextlib import AsyncExitStack, asynccontextmanager
+from cbpi import __version__
+import logging
+
 
 class SatelliteController:
 
     def __init__(self, cbpi):
         self.cbpi = cbpi
+        self.logger = logging.getLogger(__name__)
+        self.host = cbpi.static_config.get("mqtt_host", "localhost")
+        self.port = cbpi.static_config.get("mqtt_port", 1883)
+        self.username = cbpi.static_config.get("mqtt_username", None)
+        self.password = cbpi.static_config.get("mqtt_password", None)
         self.client = None
+        self.topic_filters = [
+            ("cbpi/actor/+/on", self._actor_on),
+            ("cbpi/actor/+/off", self._actor_off)
+        ]
+        self.tasks = set()
 
     async def init(self):
         asyncio.create_task(self.init_client(self.cbpi))
 
-    async def publish(self, topic, message):
-        print("MQTT ON")
-        await self.client.publish(topic, message, qos=1)
-
-    async def handle_message(self, messages):
-        async for message in messages:
-            print("FILTERED", message.payload.decode())
+    async def publish(self, topic, message, retain=False):
+        if self.client is not None and self.client._connected:
+            try:
+                await self.client.publish(topic, message, qos=1, retain=retain)
+            except:
+                self.logger.warning("Failed to push data via MQTT")
 
-    async def handle_unfilterd_message(self, messages):
+    async def _actor_on(self, messages):
         async for message in messages:
-            print("UNFILTERED", message.payload.decode())
-
+            try:
+                topic_key = message.topic.split("/")
+                await self.cbpi.actor.on(topic_key[2])
+            except:
+                self.logger.warning("Failed to process actor on via MQTT")
+
+    async def _actor_off(self, messages):
+        async for message in messages:
+            try:
+                topic_key = message.topic.split("/")
+                await self.cbpi.actor.off(topic_key[2])
+            except:
+                self.logger.warning("Failed to process actor off via MQTT")
+
+    def subscribe(self, topic, method):
+        task = asyncio.create_task(self._subscribe(topic, method))
+        return task
+
+    async def _subscribe(self, topic, method):
+        while True:
+            try:
+                if self.client._connected.done():
+                    async with self.client.filtered_messages(topic) as messages:
+                        await self.client.subscribe(topic)
+                        async for message in messages:
+                            await method(message.payload.decode())
+            except asyncio.CancelledError as e:
+                # Task was cancelled (e.g. the subscribing sensor was stopped)
+                self.logger.warning("Sub CancelledError Exception: {}".format(e))
+                return
+            except MqttError as e:
+                self.logger.error("Sub MQTT Exception: {}".format(e))
+            except Exception as e:
+                self.logger.error("Sub Exception: {}".format(e))
+
+            # wait before trying to resubscribe
+            await asyncio.sleep(5)
 
     async def init_client(self, cbpi):
-        async def log_messages(messages, template):
-
-            async for message in messages:
-                print(template.format(message.payload.decode()))
 
         async def cancel_tasks(tasks):
             for task in tasks:
@@ -44,31 +87,32 @@ class SatelliteController:
                 except asyncio.CancelledError:
                     pass
 
+        while True:
+            try:
+                async with AsyncExitStack() as stack:
+                    self.tasks = set()
+                    stack.push_async_callback(cancel_tasks, self.tasks)
+                    self.client = Client(self.host, port=self.port, username=self.username, password=self.password, will=Will(topic="cbpi/disconnect", payload="CBPi Server Disconnected"))
 
-        async with AsyncExitStack() as stack:
+                    await stack.enter_async_context(self.client)
 
-            tasks = set()
-            stack.push_async_callback(cancel_tasks, tasks)
+                    for topic_filter in self.topic_filters:
+                        topic = topic_filter[0]
+                        method = topic_filter[1]
+                        manager = self.client.filtered_messages(topic)
+                        messages = await stack.enter_async_context(manager)
+                        task = asyncio.create_task(method(messages))
+                        self.tasks.add(task)
 
-            self.client = Client("localhost", will=Will(topic="cbpi/diconnect", payload="CBPi Server Disconnected"))
-            await stack.enter_async_context(self.client)
+                    for topic_filter in self.topic_filters:
+                        topic = topic_filter[0]
+                        await self.client.subscribe(topic)
 
-            topic_filters = (
-                "cbpi/sensor/#",
-                "cbpi/actor/#"
-            )
-            for topic_filter in topic_filters:
-                # Log all messages that matches the filter
-                manager = self.client.filtered_messages(topic_filter)
-                messages = await stack.enter_async_context(manager)
-                task = asyncio.create_task(self.handle_message(messages))
-                tasks.add(task)
+                    self.logger.info("MQTT Connected to {}:{}".format(self.host, self.port))
+                    await asyncio.gather(*self.tasks)
 
-            messages = await stack.enter_async_context(self.client.unfiltered_messages())
-            task = asyncio.create_task(self.handle_unfilterd_message(messages))
-            tasks.add(task)
-
-            await self.client.subscribe("cbpi/#")
-            await asyncio.gather(*tasks)
-
+            except MqttError as e:
+                self.logger.error("MQTT Exception: {}".format(e))
+            except Exception as e:
+                self.logger.error("MQTT General Exception: {}".format(e))
+            await asyncio.sleep(5)
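The rewritten controller also makes the server remotely controllable: it subscribes to cbpi/actor/+/on and cbpi/actor/+/off, and _actor_on()/_actor_off() take the actor id from the third topic segment. A sketch of switching an actor from an external client, again with asyncio-mqtt; the broker address is assumed and the actor id below is a made-up example (real ids are the shortuuid strings stored in actor.json):

    import asyncio

    from asyncio_mqtt import Client


    async def main():
        actor_id = "YwGzXvWMpmbLb6XobesL8n"  # hypothetical id from actor.json
        async with Client("localhost", port=1883) as client:
            # The third topic segment is parsed as the actor id by _actor_on()
            await client.publish("cbpi/actor/{}/on".format(actor_id), payload="", qos=1)

    asyncio.run(main())

After the switch, the actor controller publishes the actor's new state back to cbpi/actor/<id>, so the same client can confirm the change by subscribing there.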
diff --git a/cbpi/controller/step_controller.py b/cbpi/controller/step_controller.py
index 081f9cc..f7d9c57 100644
--- a/cbpi/controller/step_controller.py
+++ b/cbpi/controller/step_controller.py
@@ -78,7 +78,6 @@ class StepController:
         try:
             type_cfg = self.types.get(item.type)
             clazz = type_cfg.get("class")
-            print("CLASS", clazz)
             item.instance = clazz(self.cbpi, item.id, item.name, item.props, self.done)
         except Exception as e:
             logging.warning("Failed to create step instance %s - %s " % (id, e))
@@ -258,6 +257,8 @@ class StepController:
         else:
             self.cbpi.ws.send(dict(topic="step_update", data=list(map(lambda item: item.to_dict(), self.profile))))
 
+        self.cbpi.push_update(topic="cbpi/stepupdate", data=list(map(lambda item: item.to_dict(), self.profile)))
+
     async def start_step(self,step):
         try:
             logging.info("Try to start step %s" % step)
diff --git a/cbpi/craftbeerpi.py b/cbpi/craftbeerpi.py
index a7d1f11..feeeb88 100644
--- a/cbpi/craftbeerpi.py
+++ b/cbpi/craftbeerpi.py
@@ -1,4 +1,7 @@
+import asyncio
+import json
+from cbpi.api.dataclasses import NotificationType
 from cbpi.controller.notification_controller import NotificationController
 import logging
 from os import urandom
@@ -22,6 +26,7 @@ from cbpi.controller.sensor_controller import SensorController
 from cbpi.controller.step_controller import StepController
 from cbpi.controller.recipe_controller import RecipeController
 from cbpi.controller.system_controller import SystemController
+from cbpi.controller.satellite_controller import SatelliteController
 from cbpi.controller.log_file_controller import LogController
@@ -102,7 +107,10 @@ class CraftBeerPi:
         self.step : StepController = StepController(self)
         self.recipe : RecipeController = RecipeController(self)
         self.notification : NotificationController = NotificationController(self)
-        #self.satellite: SatelliteController = SatelliteController(self)
+        self.satellite = None
+        if self.static_config.get("mqtt", False) is True:
+            self.satellite: SatelliteController = SatelliteController(self)
+
         self.dashboard = DashboardController(self)
         self.http_step = StepHttpEndpoints(self)
         self.http_recipe = RecipeHttpEndpoints(self)
@@ -212,9 +220,13 @@ class CraftBeerPi:
 
-    def notify(self, title: str, message: str, type: str = "info", action=[]) -> None:
+    def notify(self, title: str, message: str, type: NotificationType = NotificationType.INFO, action=[]) -> None:
         self.notification.notify(title, message, type, action)
-    
+
+    def push_update(self, topic, data, retain=False) -> None:
+        # Guard: satellite is None when mqtt is disabled in config.yaml
+        if self.satellite is not None:
+            asyncio.create_task(self.satellite.publish(topic=topic, message=json.dumps(data), retain=retain))
 
     async def call_initializer(self, app):
         self.initializer = sorted(self.initializer, key=lambda k: k['order'])
@@ -249,6 +259,8 @@ class CraftBeerPi:
 
         await self.job.init()
         await self.config.init()
+        if self.satellite is not None:
+            await self.satellite.init()
         self._setup_http_index()
         self.plugin.load_plugins()
         self.plugin.load_plugins_from_evn()
@@ -259,7 +271,8 @@ class CraftBeerPi:
         await self.kettle.init()
         await self.call_initializer(self.app)
         await self.dashboard.init()
-        #await self.satellite.init()
+
         self._swagger_setup()
 
         return self.app
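The new CraftBeerPi.push_update() is the single funnel through which the controllers above publish: it JSON-encodes the payload and hands it to the satellite, and with the None-guard it is safe to call even when MQTT is disabled. A hypothetical plugin-side use (topic and payload invented for illustration):

    # self.cbpi is available in every actor/sensor/step plugin
    self.cbpi.push_update("cbpi/myplugin/state", dict(step="mash", target=67.5), retain=True)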
diff --git a/cbpi/extension/dummyactor/__init__.py b/cbpi/extension/dummyactor/__init__.py
index f0c5ab4..cc8a0cf 100644
--- a/cbpi/extension/dummyactor/__init__.py
+++ b/cbpi/extension/dummyactor/__init__.py
@@ -2,7 +2,7 @@ from socket import timeout
 from typing import KeysView
 from voluptuous.schema_builder import message
-from cbpi.api.dataclasses import NotificationAction
+from cbpi.api.dataclasses import NotificationAction, NotificationType
 import logging
 from unittest.mock import MagicMock, patch
 from datetime import datetime
@@ -17,10 +17,9 @@ class DummyActor(CBPiActor):
     def __init__(self, cbpi, id, props):
         super().__init__(cbpi, id, props)
 
-    async def yes(self, **kwargs):
-        print("YES!")
-        await self.cbpi.step.next()
-
+    @action("SAY HELLO", {})
+    async def helloWorld(self, **kwargs):
+        self.cbpi.notify("HELLO", "WOOHO", NotificationType.ERROR)
 
     async def start(self):
         await super().start()
diff --git a/cbpi/extension/mashstep/__init__.py b/cbpi/extension/mashstep/__init__.py
index 90d55f6..b4547ef 100644
--- a/cbpi/extension/mashstep/__init__.py
+++ b/cbpi/extension/mashstep/__init__.py
@@ -38,7 +38,7 @@ class MashStep(CBPiStep):
         self.timer = Timer(int(self.props.Timer) *60 ,on_update=self.on_timer_update, on_done=self.on_timer_done)
 
     async def run(self):
-        while self.running == True:
+        while True:
             await asyncio.sleep(1)
             sensor_value = self.get_sensor_value(self.props.Sensor)
             if sensor_value.get("value") >= int(self.props.Temp) and self.timer.is_running is not True:
diff --git a/cbpi/extension/mqtt/__init__.py b/cbpi/extension/mqtt/__init__.py
deleted file mode 100644
index f050344..0000000
--- a/cbpi/extension/mqtt/__init__.py
+++ /dev/null
@@ -1,42 +0,0 @@
-import json
-
-from cbpi.utils.encoder import ComplexEncoder
-from hbmqtt.mqtt.constants import QOS_0
-from hbmqtt.client import MQTTClient
-from hbmqtt.mqtt.constants import QOS_1, QOS_2
-from asyncio_mqtt import Client, MqttError, Will
-import asyncio
-
-class CBPiMqttClient:
-    def __init__(self, cbpi):
-        self.cbpi = cbpi
-        self.cbpi.bus.register("#", self.listen)
-        self.client = None
-        self._loop = asyncio.get_event_loop()
-        self._loop.create_task(self.init_client(self.cbpi))
-
-    async def init_client(self, cbpi):
-
-        async with Client("localhost", will=Will(topic="cbpi/diconnect", payload="MY CLIENT")) as client:
-            async with client.filtered_messages("cbpi/#") as messages:
-                await client.subscribe("cbpi/#")
-                async for message in messages:
-                    await self.cbpi.actor.on("YwGzXvWMpmbLb6XobesL8n")
-
-
-
-    async def listen(self, topic, **kwargs):
-        if self.client is not None:
-            await self.client.publish(topic, str.encode(json.dumps(kwargs, cls=ComplexEncoder)), QOS_0)
-
-def setup(cbpi):
-    '''
-    This method is called by the server during startup
-    Here you need to register your plugins at the server
-
-    :param cbpi: the cbpi core
-    :return:
-    '''
-
-    client = CBPiMqttClient(cbpi)
-
diff --git a/cbpi/extension/mqtt/config.yaml b/cbpi/extension/mqtt/config.yaml
deleted file mode 100644
index b10dc5d..0000000
--- a/cbpi/extension/mqtt/config.yaml
+++ /dev/null
@@ -1,3 +0,0 @@
-name: MQTT
-version: 4
-active: false
\ No newline at end of file
diff --git a/cbpi/extension/mqtt_sensor/__init__.py b/cbpi/extension/mqtt_sensor/__init__.py
new file mode 100644
index 0000000..4b6f389
--- /dev/null
+++ b/cbpi/extension/mqtt_sensor/__init__.py
@@ -0,0 +1,47 @@
+# -*- coding: utf-8 -*-
+import asyncio
+from cbpi.api import *
+
+
+@parameters([Property.Text(label="Topic", configurable=True)])
+class MQTTSensor(CBPiSensor):
+
+    async def on_message(self, message):
+        try:
+            self.value = message
+            self.log_data(self.value)
+            self.push_update(message)
+        except Exception as e:
+            print(e)
+
+    def __init__(self, cbpi, id, props):
+        super(MQTTSensor, self).__init__(cbpi, id, props)
+        self.value = None
+        self.mqtt_task = self.cbpi.satellite.subscribe(self.props.Topic, self.on_message)
+
+    async def run(self):
+        while self.running:
+            await asyncio.sleep(1)
+
+    def get_state(self):
+        return dict(value=self.value)
+
+    async def on_stop(self):
+        if self.mqtt_task.done() is False:
+            self.mqtt_task.cancel()
+            try:
+                await self.mqtt_task
+            except asyncio.CancelledError:
+                pass
+
+
+def setup(cbpi):
+    '''
+    This method is called by the server during startup
+    Here you need to register your plugins at the server
+
+    :param cbpi: the cbpi core
+    :return:
+    '''
+    if cbpi.static_config.get("mqtt", False) is True:
+        cbpi.plugin.register("MQTTSensor", MQTTSensor)
diff --git a/cbpi/extension/mqtt_sensor/config.yaml b/cbpi/extension/mqtt_sensor/config.yaml
new file mode 100644
index 0000000..7f0aa15
--- /dev/null
+++ b/cbpi/extension/mqtt_sensor/config.yaml
@@ -0,0 +1,3 @@
+name: MQTTSensor
+version: 4
+active: true
\ No newline at end of file
diff --git a/cbpi/http_endpoints/http_dashboard.py b/cbpi/http_endpoints/http_dashboard.py
index 891fd0d..d28ba5e 100644
--- a/cbpi/http_endpoints/http_dashboard.py
+++ b/cbpi/http_endpoints/http_dashboard.py
@@ -69,6 +69,7 @@ class DashBoardHttpEndpoints:
         data = await request.json()
         dashboard_id = int(request.match_info['id'])
         await self.cbpi.dashboard.add_content(dashboard_id, data)
+        print("##### SAVE")
         return web.Response(status=204)
 
     @request_mapping(path="/{id:\d+}/content", method="DELETE", auth_required=False)
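The new MQTTSensor closes the loop in the other direction: it registers its configured "Topic" property with the satellite controller and treats each raw payload as the sensor value. Feeding it from an external device could look like this sketch (topic and value are made-up examples; the topic must match what was configured on the sensor):

    import asyncio

    from asyncio_mqtt import Client


    async def main():
        async with Client("localhost", port=1883) as client:
            # "brewery/kettle/temp" is a hypothetical sensor topic
            await client.publish("brewery/kettle/temp", payload="67.5", qos=1)

    asyncio.run(main())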
Bray (https://github.com/embray) +- Thomas Hisch (https://github.com/thisch) +- Florian Best (https://github.com/spaceone) +- Ian Clark (https://github.com/evenicoulddoit) +- Khairi Hafsham (https://github.com/khairihafsham) +- Neil Halelamien (https://github.com/neilsh) +- Hashem Nasarat (https://github.com/Hnasar) +- Hugo van Kemenade (https://github.com/hugovk) +- gmbnomis (https://github.com/gmbnomis) +- Samuel Lelièvre (https://github.com/slel) +- bigredengineer (https://github.com/bigredengineer) +- Kai Chen (https://github.com/kx-chen) +- Anthony Sottile (https://github.com/asottile) +- 秋葉 (https://github.com/Hanaasagi) +- Christian Clauss (https://github.com/cclauss) +- tobixx (https://github.com/tobixx) +- bigredengineer (https://github.com/bigredengineer) +- Bastien Gérard (https://github.com/bagerard) +- nicolasbonifas (https://github.com/nicolasbonifas) +- Andrii Yurchuk (https://github.com/Ch00k) +- José M. Guisado (https://github.com/pvxe) +- Dai Truong (https://github.com/NovaDev94) +- jnozsc (https://github.com/jnozsc) +- Edwin Shepherd (https://github.com/shardros) diff --git a/venv3/lib/python3.7/site-packages/autopep8-1.5.5.dist-info/INSTALLER b/venv3/lib/python3.7/site-packages/autopep8-1.5.5.dist-info/INSTALLER new file mode 100644 index 0000000..a1b589e --- /dev/null +++ b/venv3/lib/python3.7/site-packages/autopep8-1.5.5.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/venv3/lib/python3.7/site-packages/autopep8-1.5.5.dist-info/LICENSE b/venv3/lib/python3.7/site-packages/autopep8-1.5.5.dist-info/LICENSE new file mode 100644 index 0000000..df9738f --- /dev/null +++ b/venv3/lib/python3.7/site-packages/autopep8-1.5.5.dist-info/LICENSE @@ -0,0 +1,23 @@ +Copyright (C) 2010-2011 Hideo Hattori +Copyright (C) 2011-2013 Hideo Hattori, Steven Myint +Copyright (C) 2013-2016 Hideo Hattori, Steven Myint, Bill Wendling + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS +BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN +ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
diff --git a/venv3/lib/python3.7/site-packages/autopep8-1.5.5.dist-info/METADATA b/venv3/lib/python3.7/site-packages/autopep8-1.5.5.dist-info/METADATA new file mode 100644 index 0000000..ad9ea39 --- /dev/null +++ b/venv3/lib/python3.7/site-packages/autopep8-1.5.5.dist-info/METADATA @@ -0,0 +1,453 @@ +Metadata-Version: 2.1 +Name: autopep8 +Version: 1.5.5 +Summary: A tool that automatically formats Python code to conform to the PEP 8 style guide +Home-page: https://github.com/hhatto/autopep8 +Author: Hideo Hattori +Author-email: hhatto.jp@gmail.com +License: Expat License +Keywords: automation,pep8,format,pycodestyle +Platform: UNKNOWN +Classifier: Development Status :: 5 - Production/Stable +Classifier: Environment :: Console +Classifier: Intended Audience :: Developers +Classifier: License :: OSI Approved :: MIT License +Classifier: Operating System :: OS Independent +Classifier: Programming Language :: Python +Classifier: Programming Language :: Python :: 2 +Classifier: Programming Language :: Python :: 2.7 +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.6 +Classifier: Programming Language :: Python :: 3.7 +Classifier: Programming Language :: Python :: 3.8 +Classifier: Topic :: Software Development :: Libraries :: Python Modules +Classifier: Topic :: Software Development :: Quality Assurance +Requires-Dist: pycodestyle (>=2.6.0) +Requires-Dist: toml + +======== +autopep8 +======== + +.. image:: https://img.shields.io/pypi/v/autopep8.svg + :target: https://pypi.org/project/autopep8/ + :alt: PyPI Version + +.. image:: https://github.com/hhatto/autopep8/workflows/Python%20package/badge.svg + :target: https://github.com/hhatto/autopep8/actions + :alt: Build status + +.. image:: https://codecov.io/gh/hhatto/autopep8/branch/master/graph/badge.svg + :target: https://codecov.io/gh/hhatto/autopep8 + :alt: Code Coverage + +autopep8 automatically formats Python code to conform to the `PEP 8`_ style +guide. It uses the pycodestyle_ utility to determine what parts of the code +needs to be formatted. autopep8 is capable of fixing most of the formatting +issues_ that can be reported by pycodestyle. + +.. _PEP 8: https://www.python.org/dev/peps/pep-0008/ +.. _issues: https://pycodestyle.readthedocs.org/en/latest/intro.html#error-codes + +.. contents:: + + +Installation +============ + +From pip:: + + $ pip install --upgrade autopep8 + +Consider using the ``--user`` option_. + +.. _option: https://pip.pypa.io/en/latest/user_guide/#user-installs + + +Requirements +============ + +autopep8 requires pycodestyle_. + +.. _pycodestyle: https://github.com/PyCQA/pycodestyle + + +Usage +===== + +To modify a file in place (with aggressive level 2):: + + $ autopep8 --in-place --aggressive --aggressive + +Before running autopep8. + +.. code-block:: python + + import math, sys; + + def example1(): + ####This is a long comment. This should be wrapped to fit within 72 characters. + some_tuple=( 1,2, 3,'a' ); + some_variable={'long':'Long code lines should be wrapped within 79 characters.', + 'other':[math.pi, 100,200,300,9876543210,'This is a long string that goes on'], + 'more':{'inner':'This whole logical line should be wrapped.',some_tuple:[1, + 20,300,40000,500000000,60000000000000000]}} + return (some_tuple, some_variable) + def example2(): return {'has_key() is deprecated':True}.has_key({'f':2}.has_key('')); + class Example3( object ): + def __init__ ( self, bar ): + #Comments should have a space after the hash. 
+ if bar : bar+=1; bar=bar* bar ; return bar + else: + some_string = """ + Indentation in multiline strings should not be touched. + Only actual code should be reindented. + """ + return (sys.path, some_string) + +After running autopep8. + +.. code-block:: python + + import math + import sys + + + def example1(): + # This is a long comment. This should be wrapped to fit within 72 + # characters. + some_tuple = (1, 2, 3, 'a') + some_variable = { + 'long': 'Long code lines should be wrapped within 79 characters.', + 'other': [ + math.pi, + 100, + 200, + 300, + 9876543210, + 'This is a long string that goes on'], + 'more': { + 'inner': 'This whole logical line should be wrapped.', + some_tuple: [ + 1, + 20, + 300, + 40000, + 500000000, + 60000000000000000]}} + return (some_tuple, some_variable) + + + def example2(): return ('' in {'f': 2}) in {'has_key() is deprecated': True} + + + class Example3(object): + def __init__(self, bar): + # Comments should have a space after the hash. + if bar: + bar += 1 + bar = bar * bar + return bar + else: + some_string = """ + Indentation in multiline strings should not be touched. + Only actual code should be reindented. + """ + return (sys.path, some_string) + +Options:: + + usage: autopep8 [-h] [--version] [-v] [-d] [-i] [--global-config filename] + [--ignore-local-config] [-r] [-j n] [-p n] [-a] + [--experimental] [--exclude globs] [--list-fixes] + [--ignore errors] [--select errors] [--max-line-length n] + [--line-range line line] [--hang-closing] [--exit-code] + [files [files ...]] + + Automatically formats Python code to conform to the PEP 8 style guide. + + positional arguments: + files files to format or '-' for standard in + + optional arguments: + -h, --help show this help message and exit + --version show program's version number and exit + -v, --verbose print verbose messages; multiple -v result in more + verbose messages + -d, --diff print the diff for the fixed source + -i, --in-place make changes to files in place + --global-config filename + path to a global pep8 config file; if this file does + not exist then this is ignored (default: + ~/.config/pep8) + --ignore-local-config + don't look for and apply local config files; if not + passed, defaults are updated with any config files in + the project's root directory + -r, --recursive run recursively over directories; must be used with + --in-place or --diff + -j n, --jobs n number of parallel jobs; match CPU count if value is + less than 1 + -p n, --pep8-passes n + maximum number of additional pep8 passes (default: + infinite) + -a, --aggressive enable non-whitespace changes; multiple -a result in + more aggressive changes + --experimental enable experimental fixes + --exclude globs exclude file/directory names that match these comma- + separated globs + --list-fixes list codes for fixes; used by --ignore and --select + --ignore errors do not fix these errors/warnings (default: + E226,E24,W50,W690) + --select errors fix only these errors/warnings (e.g. E4,W) + --max-line-length n set maximum allowed line length (default: 79) + --line-range line line, --range line line + only fix errors found within this inclusive range of + line numbers (e.g. 1 99); line numbers are indexed at + 1 + --hang-closing hang-closing option passed to pycodestyle + --exit-code change to behavior of exit code. default behavior of + return value, 0 is no differences, 1 is error exit. + return 2 when add this option. 2 is exists + differences. 
+ + +Features +======== + +autopep8 fixes the following issues_ reported by pycodestyle_:: + + E101 - Reindent all lines. + E11 - Fix indentation. + E121 - Fix indentation to be a multiple of four. + E122 - Add absent indentation for hanging indentation. + E123 - Align closing bracket to match opening bracket. + E124 - Align closing bracket to match visual indentation. + E125 - Indent to distinguish line from next logical line. + E126 - Fix over-indented hanging indentation. + E127 - Fix visual indentation. + E128 - Fix visual indentation. + E129 - Fix visual indentation. + E131 - Fix hanging indent for unaligned continuation line. + E133 - Fix missing indentation for closing bracket. + E20 - Remove extraneous whitespace. + E211 - Remove extraneous whitespace. + E22 - Fix extraneous whitespace around keywords. + E224 - Remove extraneous whitespace around operator. + E225 - Fix missing whitespace around operator. + E226 - Fix missing whitespace around arithmetic operator. + E227 - Fix missing whitespace around bitwise/shift operator. + E228 - Fix missing whitespace around modulo operator. + E231 - Add missing whitespace. + E241 - Fix extraneous whitespace around keywords. + E242 - Remove extraneous whitespace around operator. + E251 - Remove whitespace around parameter '=' sign. + E252 - Missing whitespace around parameter equals. + E26 - Fix spacing after comment hash for inline comments. + E265 - Fix spacing after comment hash for block comments. + E266 - Fix too many leading '#' for block comments. + E27 - Fix extraneous whitespace around keywords. + E301 - Add missing blank line. + E302 - Add missing 2 blank lines. + E303 - Remove extra blank lines. + E304 - Remove blank line following function decorator. + E305 - Expected 2 blank lines after end of function or class. + E306 - Expected 1 blank line before a nested definition. + E401 - Put imports on separate lines. + E402 - Fix module level import not at top of file + E501 - Try to make lines fit within --max-line-length characters. + E502 - Remove extraneous escape of newline. + E701 - Put colon-separated compound statement on separate lines. + E70 - Put semicolon-separated compound statement on separate lines. + E711 - Fix comparison with None. + E712 - Fix comparison with boolean. + E713 - Use 'not in' for test for membership. + E714 - Use 'is not' test for object identity. + E721 - Use "isinstance()" instead of comparing types directly. + E722 - Fix bare except. + E731 - Use a def when use do not assign a lambda expression. + W291 - Remove trailing whitespace. + W292 - Add a single newline at the end of the file. + W293 - Remove trailing whitespace on blank line. + W391 - Remove trailing blank lines. + W503 - Fix line break before binary operator. + W504 - Fix line break after binary operator. + W601 - Use "in" rather than "has_key()". + W602 - Fix deprecated form of raising exception. + W603 - Use "!=" instead of "<>" + W604 - Use "repr()" instead of backticks. + W605 - Fix invalid escape sequence 'x'. + W690 - Fix various deprecated code (via lib2to3). + +autopep8 also fixes some issues not found by pycodestyle_. + +- Correct deprecated or non-idiomatic Python code (via ``lib2to3``). Use this + for making Python 2.7 code more compatible with Python 3. (This is triggered + if ``W690`` is enabled.) +- Normalize files with mixed line endings. +- Put a blank line between a class docstring and its first method + declaration. (Enabled with ``E301``.) +- Remove blank lines between a function declaration and its docstring. 
(Enabled + with ``E303``.) + +autopep8 avoids fixing some issues found by pycodestyle_. + +- ``E112``/``E113`` for non comments are reports of bad indentation that break + syntax rules. These should not be modified at all. +- ``E265``, which refers to spacing after comment hash, is ignored if the + comment looks like code. autopep8 avoids modifying these since they are not + real comments. If you really want to get rid of the pycodestyle_ warning, + consider just removing the commented-out code. (This can be automated via + eradicate_.) + +.. _eradicate: https://github.com/myint/eradicate + + +More advanced usage +=================== + +By default autopep8 only makes whitespace changes. Thus, by default, it does +not fix ``E711`` and ``E712``. (Changing ``x == None`` to ``x is None`` may +change the meaning of the program if ``x`` has its ``__eq__`` method +overridden.) Nor does it correct deprecated code ``W6``. To enable these +more aggressive fixes, use the ``--aggressive`` option:: + + $ autopep8 --aggressive + +Use multiple ``--aggressive`` to increase the aggressiveness level. For +example, ``E712`` requires aggressiveness level 2 (since ``x == True`` could be +changed to either ``x`` or ``x is True``, but autopep8 chooses the former). + +``--aggressive`` will also shorten lines more aggressively. It will also remove +trailing whitespace more aggressively. (Usually, we don't touch trailing +whitespace in docstrings and other multiline strings. And to do even more +aggressive changes to docstrings, use docformatter_.) + +.. _docformatter: https://github.com/myint/docformatter + +To enable only a subset of the fixes, use the ``--select`` option. For example, +to fix various types of indentation issues:: + + $ autopep8 --select=E1,W1 + +Similarly, to just fix deprecated code:: + + $ autopep8 --aggressive --select=W6 + +The above is useful when trying to port a single code base to work with both +Python 2 and Python 3 at the same time. + +If the file being fixed is large, you may want to enable verbose progress +messages:: + + $ autopep8 -v + +Passing in ``--experimental`` enables the following functionality: + +- Shortens code lines by taking its length into account + +:: + +$ autopep8 --experimental + +Use as a module +=============== + +The simplest way of using autopep8 as a module is via the ``fix_code()`` +function: + + >>> import autopep8 + >>> autopep8.fix_code('x= 123\n') + 'x = 123\n' + +Or with options: + + >>> import autopep8 + >>> autopep8.fix_code('x.has_key(y)\n', + ... options={'aggressive': 1}) + 'y in x\n' + >>> autopep8.fix_code('print( 123 )\n', + ... options={'ignore': ['E']}) + 'print( 123 )\n' + + +Configuration +============= + +By default, if ``$HOME/.config/pycodestyle`` (``~\.pycodestyle`` in Windows +environment) exists, it will be used as global configuration file. +Alternatively, you can specify the global configuration file with the +``--global-config`` option. + +Also, if ``setup.cfg``, ``tox.ini``, ``.pep8`` and ``.flake8`` files exist +in the directory where the target file exists, it will be used as the +configuration file. + +``pep8``, ``pycodestyle``, and ``flake8`` can be used as a section. + +configuration file example:: + + [pycodestyle] + max_line_length = 120 + ignore = E501 + +pyproject.toml +-------------- + +autopep8 can also use ``pyproject.toml``. +section must use ``[tool.autopep8]``, and ``pyproject.toml`` takes precedence +over any other configuration files. 
+ +configuration file example:: + + [tool.autopep8] + max_line_length = 120 + ignore = "E501,W6" # or ["E501", "W6"] + + +Testing +======= + +Test cases are in ``test/test_autopep8.py``. They can be run directly via +``python test/test_autopep8.py`` or via tox_. The latter is useful for +testing against multiple Python interpreters. (We currently test against +CPython versions 2.7, 3.6 3.7 and 3.8. We also test against PyPy.) + +.. _`tox`: https://pypi.org/project/tox/ + +Broad spectrum testing is available via ``test/acid.py``. This script runs +autopep8 against Python code and checks for correctness and completeness of the +code fixes. It can check that the bytecode remains identical. +``test/acid_pypi.py`` makes use of ``acid.py`` to test against the latest +released packages on PyPI. + + +Troubleshooting +=============== + +``pkg_resources.DistributionNotFound`` +-------------------------------------- + +If you are using an ancient version of ``setuptools``, you might encounter +``pkg_resources.DistributionNotFound`` when trying to run ``autopep8``. Try +upgrading ``setuptools`` to workaround this ``setuptools`` problem:: + + $ pip install --upgrade setuptools + +Use ``sudo`` if you are installing to the system. + + +Links +===== + +* PyPI_ +* GitHub_ +* `Travis CI`_ +* Coveralls_ + +.. _PyPI: https://pypi.org/project/autopep8/ +.. _GitHub: https://github.com/hhatto/autopep8 +.. _`Travis CI`: https://travis-ci.org/hhatto/autopep8 +.. _`Coveralls`: https://coveralls.io/r/hhatto/autopep8 + + diff --git a/venv3/lib/python3.7/site-packages/autopep8-1.5.5.dist-info/RECORD b/venv3/lib/python3.7/site-packages/autopep8-1.5.5.dist-info/RECORD new file mode 100644 index 0000000..2bedd2c --- /dev/null +++ b/venv3/lib/python3.7/site-packages/autopep8-1.5.5.dist-info/RECORD @@ -0,0 +1,11 @@ +autopep8.py,sha256=xHkIfzd5IPgtTeyNq19o2SLHF28bregdzXEcx3G7ucQ,151970 +autopep8-1.5.5.dist-info/AUTHORS.rst,sha256=tiTPsbzGl9dtXCMEWXbWSV1zan1M-BoWtiixs46GIWk,2003 +autopep8-1.5.5.dist-info/LICENSE,sha256=jR0COOSFQ0QZFMqwdB1N4-Bwobg2f3h69fIJr7YLCWo,1181 +autopep8-1.5.5.dist-info/METADATA,sha256=3IbGpS9FlKP4rzVE6EXbN-0O4D2AZoZcgaXEyYpAq3c,16661 +autopep8-1.5.5.dist-info/WHEEL,sha256=kGT74LWyRUZrL4VgLh6_g12IeVl_9u9ZVhadrgXZUEY,110 +autopep8-1.5.5.dist-info/entry_points.txt,sha256=iHNa5_cSXw2ablVbRmfiFGMG1CNrpEPRCEjn3nspaJ8,44 +autopep8-1.5.5.dist-info/top_level.txt,sha256=s2x-di3QBwGxr7kd5xErt2pom8dsFRdINbmwsOEgLfU,9 +autopep8-1.5.5.dist-info/RECORD,, +../../../bin/autopep8,sha256=nvzzNDnv7luxQekE2qaJPtL_f3kgvxUAJNKivQVar48,253 +autopep8-1.5.5.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +__pycache__/autopep8.cpython-37.pyc,, diff --git a/venv3/lib/python3.7/site-packages/autopep8-1.5.5.dist-info/WHEEL b/venv3/lib/python3.7/site-packages/autopep8-1.5.5.dist-info/WHEEL new file mode 100644 index 0000000..ef99c6c --- /dev/null +++ b/venv3/lib/python3.7/site-packages/autopep8-1.5.5.dist-info/WHEEL @@ -0,0 +1,6 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.34.2) +Root-Is-Purelib: true +Tag: py2-none-any +Tag: py3-none-any + diff --git a/venv3/lib/python3.7/site-packages/autopep8-1.5.5.dist-info/entry_points.txt b/venv3/lib/python3.7/site-packages/autopep8-1.5.5.dist-info/entry_points.txt new file mode 100644 index 0000000..e3b2c4f --- /dev/null +++ b/venv3/lib/python3.7/site-packages/autopep8-1.5.5.dist-info/entry_points.txt @@ -0,0 +1,3 @@ +[console_scripts] +autopep8 = autopep8:main + diff --git a/venv3/lib/python3.7/site-packages/autopep8-1.5.5.dist-info/top_level.txt 
b/venv3/lib/python3.7/site-packages/autopep8-1.5.5.dist-info/top_level.txt new file mode 100644 index 0000000..d81c0c2 --- /dev/null +++ b/venv3/lib/python3.7/site-packages/autopep8-1.5.5.dist-info/top_level.txt @@ -0,0 +1 @@ +autopep8 diff --git a/venv3/lib/python3.7/site-packages/autopep8.py b/venv3/lib/python3.7/site-packages/autopep8.py new file mode 100644 index 0000000..8abc918 --- /dev/null +++ b/venv3/lib/python3.7/site-packages/autopep8.py @@ -0,0 +1,4469 @@ +#!/usr/bin/env python + +# Copyright (C) 2010-2011 Hideo Hattori +# Copyright (C) 2011-2013 Hideo Hattori, Steven Myint +# Copyright (C) 2013-2016 Hideo Hattori, Steven Myint, Bill Wendling +# +# Permission is hereby granted, free of charge, to any person obtaining +# a copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, +# distribute, sublicense, and/or sell copies of the Software, and to +# permit persons to whom the Software is furnished to do so, subject to +# the following conditions: +# +# The above copyright notice and this permission notice shall be +# included in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS +# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN +# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +"""Automatically formats Python code to conform to the PEP 8 style guide. + +Fixes that only need be done once can be added by adding a function of the form +"fix_(source)" to this module. They should return the fixed source code. +These fixes are picked up by apply_global_fixes(). + +Fixes that depend on pycodestyle should be added as methods to FixPEP8. See the +class documentation for more information. 
+ +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +from __future__ import unicode_literals + +import argparse +import codecs +import collections +import copy +import difflib +import fnmatch +import inspect +import io +import itertools +import keyword +import locale +import os +import re +import signal +import sys +import textwrap +import token +import tokenize +import warnings +import ast +try: + from configparser import ConfigParser as SafeConfigParser + from configparser import Error +except ImportError: + from ConfigParser import SafeConfigParser + from ConfigParser import Error + +import pycodestyle +from pycodestyle import STARTSWITH_INDENT_STATEMENT_REGEX + + +try: + unicode +except NameError: + unicode = str + + +__version__ = '1.5.5' + + +CR = '\r' +LF = '\n' +CRLF = '\r\n' + + +PYTHON_SHEBANG_REGEX = re.compile(r'^#!.*\bpython[23]?\b\s*$') +LAMBDA_REGEX = re.compile(r'([\w.]+)\s=\slambda\s*([\(\)=\w,\s.]*):') +COMPARE_NEGATIVE_REGEX = re.compile(r'\b(not)\s+([^][)(}{]+?)\s+(in|is)\s') +COMPARE_NEGATIVE_REGEX_THROUGH = re.compile(r'\b(not\s+in|is\s+not)\s') +BARE_EXCEPT_REGEX = re.compile(r'except\s*:') +STARTSWITH_DEF_REGEX = re.compile(r'^(async\s+def|def)\s.*\):') +DOCSTRING_START_REGEX = re.compile(r'^u?r?(?P["\']{3})') +ENABLE_REGEX = re.compile(r'# *(fmt|autopep8): *on') +DISABLE_REGEX = re.compile(r'# *(fmt|autopep8): *off') + +EXIT_CODE_OK = 0 +EXIT_CODE_ERROR = 1 +EXIT_CODE_EXISTS_DIFF = 2 + +# For generating line shortening candidates. +SHORTEN_OPERATOR_GROUPS = frozenset([ + frozenset([',']), + frozenset(['%']), + frozenset([',', '(', '[', '{']), + frozenset(['%', '(', '[', '{']), + frozenset([',', '(', '[', '{', '%', '+', '-', '*', '/', '//']), + frozenset(['%', '+', '-', '*', '/', '//']), +]) + + +DEFAULT_IGNORE = 'E226,E24,W50,W690' # TODO: use pycodestyle.DEFAULT_IGNORE +DEFAULT_INDENT_SIZE = 4 +# these fixes conflict with each other, if the `--ignore` setting causes both +# to be enabled, disable both of them +CONFLICTING_CODES = ('W503', 'W504') + +SELECTED_GLOBAL_FIXED_METHOD_CODES = ['W602', ] + +# W602 is handled separately due to the need to avoid "with_traceback". 
+CODE_TO_2TO3 = { + 'E231': ['ws_comma'], + 'E721': ['idioms'], + 'W601': ['has_key'], + 'W603': ['ne'], + 'W604': ['repr'], + 'W690': ['apply', + 'except', + 'exitfunc', + 'numliterals', + 'operator', + 'paren', + 'reduce', + 'renames', + 'standarderror', + 'sys_exc', + 'throw', + 'tuple_params', + 'xreadlines']} + + +if sys.platform == 'win32': # pragma: no cover + DEFAULT_CONFIG = os.path.expanduser(r'~\.pycodestyle') +else: + DEFAULT_CONFIG = os.path.join(os.getenv('XDG_CONFIG_HOME') or + os.path.expanduser('~/.config'), + 'pycodestyle') +# fallback, use .pep8 +if not os.path.exists(DEFAULT_CONFIG): # pragma: no cover + if sys.platform == 'win32': + DEFAULT_CONFIG = os.path.expanduser(r'~\.pep8') + else: + DEFAULT_CONFIG = os.path.join(os.path.expanduser('~/.config'), 'pep8') +PROJECT_CONFIG = ('setup.cfg', 'tox.ini', '.pep8', '.flake8') + + +MAX_PYTHON_FILE_DETECTION_BYTES = 1024 + + +def open_with_encoding(filename, mode='r', encoding=None, limit_byte_check=-1): + """Return opened file with a specific encoding.""" + if not encoding: + encoding = detect_encoding(filename, limit_byte_check=limit_byte_check) + + return io.open(filename, mode=mode, encoding=encoding, + newline='') # Preserve line endings + + +def detect_encoding(filename, limit_byte_check=-1): + """Return file encoding.""" + try: + with open(filename, 'rb') as input_file: + from lib2to3.pgen2 import tokenize as lib2to3_tokenize + encoding = lib2to3_tokenize.detect_encoding(input_file.readline)[0] + + with open_with_encoding(filename, encoding=encoding) as test_file: + test_file.read(limit_byte_check) + + return encoding + except (LookupError, SyntaxError, UnicodeDecodeError): + return 'latin-1' + + +def readlines_from_file(filename): + """Return contents of file.""" + with open_with_encoding(filename) as input_file: + return input_file.readlines() + + +def extended_blank_lines(logical_line, + blank_lines, + blank_before, + indent_level, + previous_logical): + """Check for missing blank lines after class declaration.""" + if previous_logical.startswith('def '): + if blank_lines and pycodestyle.DOCSTRING_REGEX.match(logical_line): + yield (0, 'E303 too many blank lines ({})'.format(blank_lines)) + elif pycodestyle.DOCSTRING_REGEX.match(previous_logical): + # Missing blank line between class docstring and method declaration. + if ( + indent_level and + not blank_lines and + not blank_before and + logical_line.startswith(('def ')) and + '(self' in logical_line + ): + yield (0, 'E301 expected 1 blank line, found 0') + + +pycodestyle.register_check(extended_blank_lines) + + +def continued_indentation(logical_line, tokens, indent_level, hang_closing, + indent_char, noqa): + """Override pycodestyle's function to provide indentation information.""" + first_row = tokens[0][2][0] + nrows = 1 + tokens[-1][2][0] - first_row + if noqa or nrows == 1: + return + + # indent_next tells us whether the next block is indented. Assuming + # that it is indented by 4 spaces, then we should not allow 4-space + # indents on the final continuation line. In turn, some other + # indents are allowed to have an extra 4 spaces. + indent_next = logical_line.endswith(':') + + row = depth = 0 + valid_hangs = ( + (DEFAULT_INDENT_SIZE,) + if indent_char != '\t' else (DEFAULT_INDENT_SIZE, + 2 * DEFAULT_INDENT_SIZE) + ) + + # Remember how many brackets were opened on each line. + parens = [0] * nrows + + # Relative indents of physical lines. + rel_indent = [0] * nrows + + # For each depth, collect a list of opening rows. 
+ open_rows = [[0]] + # For each depth, memorize the hanging indentation. + hangs = [None] + + # Visual indents. + indent_chances = {} + last_indent = tokens[0][2] + indent = [last_indent[1]] + + last_token_multiline = None + line = None + last_line = '' + last_line_begins_with_multiline = False + for token_type, text, start, end, line in tokens: + + newline = row < start[0] - first_row + if newline: + row = start[0] - first_row + newline = (not last_token_multiline and + token_type not in (tokenize.NL, tokenize.NEWLINE)) + last_line_begins_with_multiline = last_token_multiline + + if newline: + # This is the beginning of a continuation line. + last_indent = start + + # Record the initial indent. + rel_indent[row] = pycodestyle.expand_indent(line) - indent_level + + # Identify closing bracket. + close_bracket = (token_type == tokenize.OP and text in ']})') + + # Is the indent relative to an opening bracket line? + for open_row in reversed(open_rows[depth]): + hang = rel_indent[row] - rel_indent[open_row] + hanging_indent = hang in valid_hangs + if hanging_indent: + break + if hangs[depth]: + hanging_indent = (hang == hangs[depth]) + + visual_indent = (not close_bracket and hang > 0 and + indent_chances.get(start[1])) + + if close_bracket and indent[depth]: + # Closing bracket for visual indent. + if start[1] != indent[depth]: + yield (start, 'E124 {}'.format(indent[depth])) + elif close_bracket and not hang: + # closing bracket matches indentation of opening bracket's line + if hang_closing: + yield (start, 'E133 {}'.format(indent[depth])) + elif indent[depth] and start[1] < indent[depth]: + # Visual indent is broken. + yield (start, 'E128 {}'.format(indent[depth])) + elif (hanging_indent or + (indent_next and + rel_indent[row] == 2 * DEFAULT_INDENT_SIZE)): + # Hanging indent is verified. + if close_bracket and not hang_closing: + yield (start, 'E123 {}'.format(indent_level + + rel_indent[open_row])) + hangs[depth] = hang + elif visual_indent is True: + # Visual indent is verified. + indent[depth] = start[1] + elif visual_indent in (text, unicode): + # Ignore token lined up with matching one from a previous line. + pass + else: + one_indented = (indent_level + rel_indent[open_row] + + DEFAULT_INDENT_SIZE) + # Indent is broken. + if hang <= 0: + error = ('E122', one_indented) + elif indent[depth]: + error = ('E127', indent[depth]) + elif not close_bracket and hangs[depth]: + error = ('E131', one_indented) + elif hang > DEFAULT_INDENT_SIZE: + error = ('E126', one_indented) + else: + hangs[depth] = hang + error = ('E121', one_indented) + + yield (start, '{} {}'.format(*error)) + + # Look for visual indenting. + if ( + parens[row] and + token_type not in (tokenize.NL, tokenize.COMMENT) and + not indent[depth] + ): + indent[depth] = start[1] + indent_chances[start[1]] = True + # Deal with implicit string concatenation. + elif (token_type in (tokenize.STRING, tokenize.COMMENT) or + text in ('u', 'ur', 'b', 'br')): + indent_chances[start[1]] = unicode + # Special case for the "if" statement because len("if (") is equal to + # 4. + elif not indent_chances and not row and not depth and text == 'if': + indent_chances[end[1] + 1] = True + elif text == ':' and line[end[1]:].isspace(): + open_rows[depth].append(row) + + # Keep track of bracket depth. 
+ if token_type == tokenize.OP: + if text in '([{': + depth += 1 + indent.append(0) + hangs.append(None) + if len(open_rows) == depth: + open_rows.append([]) + open_rows[depth].append(row) + parens[row] += 1 + elif text in ')]}' and depth > 0: + # Parent indents should not be more than this one. + prev_indent = indent.pop() or last_indent[1] + hangs.pop() + for d in range(depth): + if indent[d] > prev_indent: + indent[d] = 0 + for ind in list(indent_chances): + if ind >= prev_indent: + del indent_chances[ind] + del open_rows[depth + 1:] + depth -= 1 + if depth: + indent_chances[indent[depth]] = True + for idx in range(row, -1, -1): + if parens[idx]: + parens[idx] -= 1 + break + assert len(indent) == depth + 1 + if ( + start[1] not in indent_chances and + # This is for purposes of speeding up E121 (GitHub #90). + not last_line.rstrip().endswith(',') + ): + # Allow to line up tokens. + indent_chances[start[1]] = text + + last_token_multiline = (start[0] != end[0]) + if last_token_multiline: + rel_indent[end[0] - first_row] = rel_indent[row] + + last_line = line + + if ( + indent_next and + not last_line_begins_with_multiline and + pycodestyle.expand_indent(line) == indent_level + DEFAULT_INDENT_SIZE + ): + pos = (start[0], indent[0] + 4) + desired_indent = indent_level + 2 * DEFAULT_INDENT_SIZE + if visual_indent: + yield (pos, 'E129 {}'.format(desired_indent)) + else: + yield (pos, 'E125 {}'.format(desired_indent)) + + +del pycodestyle._checks['logical_line'][pycodestyle.continued_indentation] +pycodestyle.register_check(continued_indentation) + + +class FixPEP8(object): + + """Fix invalid code. + + Fixer methods are prefixed "fix_". The _fix_source() method looks for these + automatically. + + The fixer method can take either one or two arguments (in addition to + self). The first argument is "result", which is the error information from + pycodestyle. The second argument, "logical", is required only for + logical-line fixes. + + The fixer method can return the list of modified lines or None. An empty + list would mean that no changes were made. None would mean that only the + line reported in the pycodestyle error was modified. Note that the modified + line numbers that are returned are indexed at 1. This typically would + correspond with the line number reported in the pycodestyle error + information. + + [fixed method list] + - e111,e114,e115,e116 + - e121,e122,e123,e124,e125,e126,e127,e128,e129 + - e201,e202,e203 + - e211 + - e221,e222,e223,e224,e225 + - e231 + - e251,e252 + - e261,e262 + - e271,e272,e273,e274 + - e301,e302,e303,e304,e305,e306 + - e401,e402 + - e502 + - e701,e702,e703,e704 + - e711,e712,e713,e714 + - e722 + - e731 + - w291 + - w503,504 + + """ + + def __init__(self, filename, + options, + contents=None, + long_line_ignore_cache=None): + self.filename = filename + if contents is None: + self.source = readlines_from_file(filename) + else: + sio = io.StringIO(contents) + self.source = sio.readlines() + self.options = options + self.indent_word = _get_indentword(''.join(self.source)) + + # collect imports line + self.imports = {} + for i, line in enumerate(self.source): + if (line.find("import ") == 0 or line.find("from ") == 0) and \ + line not in self.imports: + # collect only import statements that first appeared + self.imports[line] = i + + self.long_line_ignore_cache = ( + set() if long_line_ignore_cache is None + else long_line_ignore_cache) + + # Many fixers are the same even though pycodestyle categorizes them + # differently. 
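+ # Each alias below routes one pycodestyle code to an existing fixer,
+ # e.g. E202/E203/E211 all reduce to fix_e201 ("remove extraneous
+ # whitespace").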
+ self.fix_e115 = self.fix_e112 + self.fix_e121 = self._fix_reindent + self.fix_e122 = self._fix_reindent + self.fix_e123 = self._fix_reindent + self.fix_e124 = self._fix_reindent + self.fix_e126 = self._fix_reindent + self.fix_e127 = self._fix_reindent + self.fix_e128 = self._fix_reindent + self.fix_e129 = self._fix_reindent + self.fix_e133 = self.fix_e131 + self.fix_e202 = self.fix_e201 + self.fix_e203 = self.fix_e201 + self.fix_e211 = self.fix_e201 + self.fix_e221 = self.fix_e271 + self.fix_e222 = self.fix_e271 + self.fix_e223 = self.fix_e271 + self.fix_e226 = self.fix_e225 + self.fix_e227 = self.fix_e225 + self.fix_e228 = self.fix_e225 + self.fix_e241 = self.fix_e271 + self.fix_e242 = self.fix_e224 + self.fix_e252 = self.fix_e225 + self.fix_e261 = self.fix_e262 + self.fix_e272 = self.fix_e271 + self.fix_e273 = self.fix_e271 + self.fix_e274 = self.fix_e271 + self.fix_e306 = self.fix_e301 + self.fix_e501 = ( + self.fix_long_line_logically if + options and (options.aggressive >= 2 or options.experimental) else + self.fix_long_line_physically) + self.fix_e703 = self.fix_e702 + self.fix_w293 = self.fix_w291 + + def _fix_source(self, results): + try: + (logical_start, logical_end) = _find_logical(self.source) + logical_support = True + except (SyntaxError, tokenize.TokenError): # pragma: no cover + logical_support = False + + completed_lines = set() + for result in sorted(results, key=_priority_key): + if result['line'] in completed_lines: + continue + + fixed_methodname = 'fix_' + result['id'].lower() + if hasattr(self, fixed_methodname): + fix = getattr(self, fixed_methodname) + + line_index = result['line'] - 1 + original_line = self.source[line_index] + + is_logical_fix = len(_get_parameters(fix)) > 2 + if is_logical_fix: + logical = None + if logical_support: + logical = _get_logical(self.source, + result, + logical_start, + logical_end) + if logical and set(range( + logical[0][0] + 1, + logical[1][0] + 1)).intersection( + completed_lines): + continue + + modified_lines = fix(result, logical) + else: + modified_lines = fix(result) + + if modified_lines is None: + # Force logical fixes to report what they modified. + assert not is_logical_fix + + if self.source[line_index] == original_line: + modified_lines = [] + + if modified_lines: + completed_lines.update(modified_lines) + elif modified_lines == []: # Empty list means no fix + if self.options.verbose >= 2: + print( + '---> Not fixing {error} on line {line}'.format( + error=result['id'], line=result['line']), + file=sys.stderr) + else: # We assume one-line fix when None. 
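+ # A None return means the fixer changed only the line pycodestyle
+ # reported, so only that line is marked as completed.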
+ completed_lines.add(result['line']) + else: + if self.options.verbose >= 3: + print( + "---> '{}' is not defined.".format(fixed_methodname), + file=sys.stderr) + + info = result['info'].strip() + print('---> {}:{}:{}:{}'.format(self.filename, + result['line'], + result['column'], + info), + file=sys.stderr) + + def fix(self): + """Return a version of the source code with PEP 8 violations fixed.""" + pep8_options = { + 'ignore': self.options.ignore, + 'select': self.options.select, + 'max_line_length': self.options.max_line_length, + 'hang_closing': self.options.hang_closing, + } + results = _execute_pep8(pep8_options, self.source) + + if self.options.verbose: + progress = {} + for r in results: + if r['id'] not in progress: + progress[r['id']] = set() + progress[r['id']].add(r['line']) + print('---> {n} issue(s) to fix {progress}'.format( + n=len(results), progress=progress), file=sys.stderr) + + if self.options.line_range: + start, end = self.options.line_range + results = [r for r in results + if start <= r['line'] <= end] + + self._fix_source(filter_results(source=''.join(self.source), + results=results, + aggressive=self.options.aggressive)) + + if self.options.line_range: + # If number of lines has changed then change line_range. + count = sum(sline.count('\n') + for sline in self.source[start - 1:end]) + self.options.line_range[1] = start + count - 1 + + return ''.join(self.source) + + def _fix_reindent(self, result): + """Fix a badly indented line. + + This is done by adding or removing from its initial indent only. + + """ + num_indent_spaces = int(result['info'].split()[1]) + line_index = result['line'] - 1 + target = self.source[line_index] + + self.source[line_index] = ' ' * num_indent_spaces + target.lstrip() + + def fix_e112(self, result): + """Fix under-indented comments.""" + line_index = result['line'] - 1 + target = self.source[line_index] + + if not target.lstrip().startswith('#'): + # Don't screw with invalid syntax. + return [] + + self.source[line_index] = self.indent_word + target + + def fix_e113(self, result): + """Fix unexpected indentation.""" + line_index = result['line'] - 1 + target = self.source[line_index] + indent = _get_indentation(target) + stripped = target.lstrip() + self.source[line_index] = indent[1:] + stripped + + def fix_e116(self, result): + """Fix over-indented comments.""" + line_index = result['line'] - 1 + target = self.source[line_index] + + indent = _get_indentation(target) + stripped = target.lstrip() + + if not stripped.startswith('#'): + # Don't screw with invalid syntax. + return [] + + self.source[line_index] = indent[1:] + stripped + + def fix_e117(self, result): + """Fix over-indented.""" + line_index = result['line'] - 1 + target = self.source[line_index] + + indent = _get_indentation(target) + if indent == '\t': + return [] + + stripped = target.lstrip() + + self.source[line_index] = indent[1:] + stripped + + def fix_e125(self, result): + """Fix indentation undistinguish from the next logical line.""" + num_indent_spaces = int(result['info'].split()[1]) + line_index = result['line'] - 1 + target = self.source[line_index] + + spaces_to_add = num_indent_spaces - len(_get_indentation(target)) + indent = len(_get_indentation(target)) + modified_lines = [] + + while len(_get_indentation(self.source[line_index])) >= indent: + self.source[line_index] = (' ' * spaces_to_add + + self.source[line_index]) + modified_lines.append(1 + line_index) # Line indexed at 1. 
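+ # Walk upward: every line indented at least as far as the target
+ # belongs to the same over-indented block and gets the same shift.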
+ line_index -= 1
+
+ return modified_lines
+
+ def fix_e131(self, result):
+ """Fix indentation that is indistinguishable from the next logical line."""
+ num_indent_spaces = int(result['info'].split()[1])
+ line_index = result['line'] - 1
+ target = self.source[line_index]
+
+ spaces_to_add = num_indent_spaces - len(_get_indentation(target))
+
+ if spaces_to_add >= 0:
+ self.source[line_index] = (' ' * spaces_to_add +
+ self.source[line_index])
+ else:
+ offset = abs(spaces_to_add)
+ self.source[line_index] = self.source[line_index][offset:]
+
+ def fix_e201(self, result):
+ """Remove extraneous whitespace."""
+ line_index = result['line'] - 1
+ target = self.source[line_index]
+ offset = result['column'] - 1
+
+ fixed = fix_whitespace(target,
+ offset=offset,
+ replacement='')
+
+ self.source[line_index] = fixed
+
+ def fix_e224(self, result):
+ """Remove extraneous whitespace around operator."""
+ target = self.source[result['line'] - 1]
+ offset = result['column'] - 1
+ fixed = target[:offset] + target[offset:].replace('\t', ' ')
+ self.source[result['line'] - 1] = fixed
+
+ def fix_e225(self, result):
+ """Fix missing whitespace around operator."""
+ target = self.source[result['line'] - 1]
+ offset = result['column'] - 1
+ fixed = target[:offset] + ' ' + target[offset:]
+
+ # Only proceed if non-whitespace characters match.
+ # And make sure we don't break the indentation.
+ if (
+ fixed.replace(' ', '') == target.replace(' ', '') and
+ _get_indentation(fixed) == _get_indentation(target)
+ ):
+ self.source[result['line'] - 1] = fixed
+ error_code = result.get('id', 0)
+ try:
+ ts = generate_tokens(fixed)
+ except (SyntaxError, tokenize.TokenError):
+ return
+ if not check_syntax(fixed.lstrip()):
+ return
+ errors = list(
+ pycodestyle.missing_whitespace_around_operator(fixed, ts))
+ for e in reversed(errors):
+ if error_code != e[1].split()[0]:
+ continue
+ offset = e[0][1]
+ fixed = fixed[:offset] + ' ' + fixed[offset:]
+ self.source[result['line'] - 1] = fixed
+ else:
+ return []
+
+ def fix_e231(self, result):
+ """Add missing whitespace."""
+ line_index = result['line'] - 1
+ target = self.source[line_index]
+ offset = result['column']
+ fixed = target[:offset].rstrip() + ' ' + target[offset:].lstrip()
+ self.source[line_index] = fixed
+
+ def fix_e251(self, result):
+ """Remove whitespace around parameter '=' sign."""
+ line_index = result['line'] - 1
+ target = self.source[line_index]
+
+ # This is necessary since pycodestyle sometimes reports columns that
+ # go past the end of the physical line.
This happens in cases like, + # foo(bar\n=None) + c = min(result['column'] - 1, + len(target) - 1) + + if target[c].strip(): + fixed = target + else: + fixed = target[:c].rstrip() + target[c:].lstrip() + + # There could be an escaped newline + # + # def foo(a=\ + # 1) + if fixed.endswith(('=\\\n', '=\\\r\n', '=\\\r')): + self.source[line_index] = fixed.rstrip('\n\r \t\\') + self.source[line_index + 1] = self.source[line_index + 1].lstrip() + return [line_index + 1, line_index + 2] # Line indexed at 1 + + self.source[result['line'] - 1] = fixed + + def fix_e262(self, result): + """Fix spacing after comment hash.""" + target = self.source[result['line'] - 1] + offset = result['column'] + + code = target[:offset].rstrip(' \t#') + comment = target[offset:].lstrip(' \t#') + + fixed = code + (' # ' + comment if comment.strip() else '\n') + + self.source[result['line'] - 1] = fixed + + def fix_e271(self, result): + """Fix extraneous whitespace around keywords.""" + line_index = result['line'] - 1 + target = self.source[line_index] + offset = result['column'] - 1 + + fixed = fix_whitespace(target, + offset=offset, + replacement=' ') + + if fixed == target: + return [] + else: + self.source[line_index] = fixed + + def fix_e301(self, result): + """Add missing blank line.""" + cr = '\n' + self.source[result['line'] - 1] = cr + self.source[result['line'] - 1] + + def fix_e302(self, result): + """Add missing 2 blank lines.""" + add_linenum = 2 - int(result['info'].split()[-1]) + offset = 1 + if self.source[result['line'] - 2].strip() == "\\": + offset = 2 + cr = '\n' * add_linenum + self.source[result['line'] - offset] = ( + cr + self.source[result['line'] - offset] + ) + + def fix_e303(self, result): + """Remove extra blank lines.""" + delete_linenum = int(result['info'].split('(')[1].split(')')[0]) - 2 + delete_linenum = max(1, delete_linenum) + + # We need to count because pycodestyle reports an offset line number if + # there are comments. + cnt = 0 + line = result['line'] - 2 + modified_lines = [] + while cnt < delete_linenum and line >= 0: + if not self.source[line].strip(): + self.source[line] = '' + modified_lines.append(1 + line) # Line indexed at 1 + cnt += 1 + line -= 1 + + return modified_lines + + def fix_e304(self, result): + """Remove blank line following function decorator.""" + line = result['line'] - 2 + if not self.source[line].strip(): + self.source[line] = '' + + def fix_e305(self, result): + """Add missing 2 blank lines after end of function or class.""" + add_delete_linenum = 2 - int(result['info'].split()[-1]) + cnt = 0 + offset = result['line'] - 2 + modified_lines = [] + if add_delete_linenum < 0: + # delete cr + add_delete_linenum = abs(add_delete_linenum) + while cnt < add_delete_linenum and offset >= 0: + if not self.source[offset].strip(): + self.source[offset] = '' + modified_lines.append(1 + offset) # Line indexed at 1 + cnt += 1 + offset -= 1 + else: + # add cr + cr = '\n' + # check comment line + while True: + if offset < 0: + break + line = self.source[offset].lstrip() + if not line: + break + if line[0] != '#': + break + offset -= 1 + offset += 1 + self.source[offset] = cr + self.source[offset] + modified_lines.append(1 + offset) # Line indexed at 1. 
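+ # Both branches above report the 1-indexed lines they touched so the
+ # caller can mark them completed.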
+ return modified_lines + + def fix_e401(self, result): + """Put imports on separate lines.""" + line_index = result['line'] - 1 + target = self.source[line_index] + offset = result['column'] - 1 + + if not target.lstrip().startswith('import'): + return [] + + indentation = re.split(pattern=r'\bimport\b', + string=target, maxsplit=1)[0] + fixed = (target[:offset].rstrip('\t ,') + '\n' + + indentation + 'import ' + target[offset:].lstrip('\t ,')) + self.source[line_index] = fixed + + def fix_e402(self, result): + (line_index, offset, target) = get_index_offset_contents(result, + self.source) + for i in range(1, 100): + line = "".join(self.source[line_index:line_index+i]) + try: + generate_tokens("".join(line)) + except (SyntaxError, tokenize.TokenError): + continue + break + if not (target in self.imports and self.imports[target] != line_index): + mod_offset = get_module_imports_on_top_of_file(self.source, + line_index) + self.source[mod_offset] = line + self.source[mod_offset] + for offset in range(i): + self.source[line_index+offset] = '' + + def fix_long_line_logically(self, result, logical): + """Try to make lines fit within --max-line-length characters.""" + if ( + not logical or + len(logical[2]) == 1 or + self.source[result['line'] - 1].lstrip().startswith('#') + ): + return self.fix_long_line_physically(result) + + start_line_index = logical[0][0] + end_line_index = logical[1][0] + logical_lines = logical[2] + + previous_line = get_item(self.source, start_line_index - 1, default='') + next_line = get_item(self.source, end_line_index + 1, default='') + + single_line = join_logical_line(''.join(logical_lines)) + + try: + fixed = self.fix_long_line( + target=single_line, + previous_line=previous_line, + next_line=next_line, + original=''.join(logical_lines)) + except (SyntaxError, tokenize.TokenError): + return self.fix_long_line_physically(result) + + if fixed: + for line_index in range(start_line_index, end_line_index + 1): + self.source[line_index] = '' + self.source[start_line_index] = fixed + return range(start_line_index + 1, end_line_index + 1) + + return [] + + def fix_long_line_physically(self, result): + """Try to make lines fit within --max-line-length characters.""" + line_index = result['line'] - 1 + target = self.source[line_index] + + previous_line = get_item(self.source, line_index - 1, default='') + next_line = get_item(self.source, line_index + 1, default='') + + try: + fixed = self.fix_long_line( + target=target, + previous_line=previous_line, + next_line=next_line, + original=target) + except (SyntaxError, tokenize.TokenError): + return [] + + if fixed: + self.source[line_index] = fixed + return [line_index + 1] + + return [] + + def fix_long_line(self, target, previous_line, + next_line, original): + cache_entry = (target, previous_line, next_line) + if cache_entry in self.long_line_ignore_cache: + return [] + + if target.lstrip().startswith('#'): + if self.options.aggressive: + # Wrap commented lines. 
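+ # Comments are only rewrapped under --aggressive; otherwise the
+ # empty-list return below reports "no fix" for long comment lines.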
+ return shorten_comment( + line=target, + max_line_length=self.options.max_line_length, + last_comment=not next_line.lstrip().startswith('#')) + return [] + + fixed = get_fixed_long_line( + target=target, + previous_line=previous_line, + original=original, + indent_word=self.indent_word, + max_line_length=self.options.max_line_length, + aggressive=self.options.aggressive, + experimental=self.options.experimental, + verbose=self.options.verbose) + + if fixed and not code_almost_equal(original, fixed): + return fixed + + self.long_line_ignore_cache.add(cache_entry) + return None + + def fix_e502(self, result): + """Remove extraneous escape of newline.""" + (line_index, _, target) = get_index_offset_contents(result, + self.source) + self.source[line_index] = target.rstrip('\n\r \t\\') + '\n' + + def fix_e701(self, result): + """Put colon-separated compound statement on separate lines.""" + line_index = result['line'] - 1 + target = self.source[line_index] + c = result['column'] + + fixed_source = (target[:c] + '\n' + + _get_indentation(target) + self.indent_word + + target[c:].lstrip('\n\r \t\\')) + self.source[result['line'] - 1] = fixed_source + return [result['line'], result['line'] + 1] + + def fix_e702(self, result, logical): + """Put semicolon-separated compound statement on separate lines.""" + if not logical: + return [] # pragma: no cover + logical_lines = logical[2] + + # Avoid applying this when indented. + # https://docs.python.org/reference/compound_stmts.html + for line in logical_lines: + if (result['id'] == 'E702' and ':' in line + and STARTSWITH_INDENT_STATEMENT_REGEX.match(line)): + if self.options.verbose: + print( + '---> avoid fixing {error} with ' + 'other compound statements'.format(error=result['id']), + file=sys.stderr + ) + return [] + + line_index = result['line'] - 1 + target = self.source[line_index] + + if target.rstrip().endswith('\\'): + # Normalize '1; \\\n2' into '1; 2'. + self.source[line_index] = target.rstrip('\n \r\t\\') + self.source[line_index + 1] = self.source[line_index + 1].lstrip() + return [line_index + 1, line_index + 2] + + if target.rstrip().endswith(';'): + self.source[line_index] = target.rstrip('\n \r\t;') + '\n' + return [line_index + 1] + + offset = result['column'] - 1 + first = target[:offset].rstrip(';').rstrip() + second = (_get_indentation(logical_lines[0]) + + target[offset:].lstrip(';').lstrip()) + + # Find inline comment. 
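+ # If the semicolon is followed only by a comment, drop the semicolon
+ # and keep the comment on the same line rather than splitting.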
+ inline_comment = None + if target[offset:].lstrip(';').lstrip()[:2] == '# ': + inline_comment = target[offset:].lstrip(';') + + if inline_comment: + self.source[line_index] = first + inline_comment + else: + self.source[line_index] = first + '\n' + second + return [line_index + 1] + + def fix_e704(self, result): + """Fix multiple statements on one line def""" + (line_index, _, target) = get_index_offset_contents(result, + self.source) + match = STARTSWITH_DEF_REGEX.match(target) + if match: + self.source[line_index] = '{}\n{}{}'.format( + match.group(0), + _get_indentation(target) + self.indent_word, + target[match.end(0):].lstrip()) + + def fix_e711(self, result): + """Fix comparison with None.""" + (line_index, offset, target) = get_index_offset_contents(result, + self.source) + + right_offset = offset + 2 + if right_offset >= len(target): + return [] + + left = target[:offset].rstrip() + center = target[offset:right_offset] + right = target[right_offset:].lstrip() + + if center.strip() == '==': + new_center = 'is' + elif center.strip() == '!=': + new_center = 'is not' + else: + return [] + + self.source[line_index] = ' '.join([left, new_center, right]) + + def fix_e712(self, result): + """Fix (trivial case of) comparison with boolean.""" + (line_index, offset, target) = get_index_offset_contents(result, + self.source) + + # Handle very easy "not" special cases. + if re.match(r'^\s*if [\w."\'\[\]]+ == False:$', target): + self.source[line_index] = re.sub(r'if ([\w."\'\[\]]+) == False:', + r'if not \1:', target, count=1) + elif re.match(r'^\s*if [\w."\'\[\]]+ != True:$', target): + self.source[line_index] = re.sub(r'if ([\w."\'\[\]]+) != True:', + r'if not \1:', target, count=1) + else: + right_offset = offset + 2 + if right_offset >= len(target): + return [] + + left = target[:offset].rstrip() + center = target[offset:right_offset] + right = target[right_offset:].lstrip() + + # Handle simple cases only. 
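+ # e.g. 'x == True' becomes 'x' and 'x != False' becomes 'x';
+ # anything more involved is left untouched.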
+ new_right = None + if center.strip() == '==': + if re.match(r'\bTrue\b', right): + new_right = re.sub(r'\bTrue\b *', '', right, count=1) + elif center.strip() == '!=': + if re.match(r'\bFalse\b', right): + new_right = re.sub(r'\bFalse\b *', '', right, count=1) + + if new_right is None: + return [] + + if new_right[0].isalnum(): + new_right = ' ' + new_right + + self.source[line_index] = left + new_right + + def fix_e713(self, result): + """Fix (trivial case of) non-membership check.""" + (line_index, offset, target) = get_index_offset_contents(result, + self.source) + + # to convert once 'not in' -> 'in' + before_target = target[:offset] + target = target[offset:] + match_notin = COMPARE_NEGATIVE_REGEX_THROUGH.search(target) + notin_pos_start, notin_pos_end = 0, 0 + if match_notin: + notin_pos_start = match_notin.start(1) + notin_pos_end = match_notin.end() + target = '{}{} {}'.format( + target[:notin_pos_start], 'in', target[notin_pos_end:]) + + # fix 'not in' + match = COMPARE_NEGATIVE_REGEX.search(target) + if match: + if match.group(3) == 'in': + pos_start = match.start(1) + new_target = '{5}{0}{1} {2} {3} {4}'.format( + target[:pos_start], match.group(2), match.group(1), + match.group(3), target[match.end():], before_target) + if match_notin: + # revert 'in' -> 'not in' + pos_start = notin_pos_start + offset + pos_end = notin_pos_end + offset - 4 # len('not ') + new_target = '{}{} {}'.format( + new_target[:pos_start], 'not in', new_target[pos_end:]) + self.source[line_index] = new_target + + def fix_e714(self, result): + """Fix object identity should be 'is not' case.""" + (line_index, offset, target) = get_index_offset_contents(result, + self.source) + + # to convert once 'is not' -> 'is' + before_target = target[:offset] + target = target[offset:] + match_isnot = COMPARE_NEGATIVE_REGEX_THROUGH.search(target) + isnot_pos_start, isnot_pos_end = 0, 0 + if match_isnot: + isnot_pos_start = match_isnot.start(1) + isnot_pos_end = match_isnot.end() + target = '{}{} {}'.format( + target[:isnot_pos_start], 'in', target[isnot_pos_end:]) + + match = COMPARE_NEGATIVE_REGEX.search(target) + if match: + if match.group(3).startswith('is'): + pos_start = match.start(1) + new_target = '{5}{0}{1} {2} {3} {4}'.format( + target[:pos_start], match.group(2), match.group(3), + match.group(1), target[match.end():], before_target) + if match_isnot: + # revert 'is' -> 'is not' + pos_start = isnot_pos_start + offset + pos_end = isnot_pos_end + offset - 4 # len('not ') + new_target = '{}{} {}'.format( + new_target[:pos_start], 'is not', new_target[pos_end:]) + self.source[line_index] = new_target + + def fix_e722(self, result): + """fix bare except""" + (line_index, _, target) = get_index_offset_contents(result, + self.source) + match = BARE_EXCEPT_REGEX.search(target) + if match: + self.source[line_index] = '{}{}{}'.format( + target[:result['column'] - 1], "except BaseException:", + target[match.end():]) + + def fix_e731(self, result): + """Fix do not assign a lambda expression check.""" + (line_index, _, target) = get_index_offset_contents(result, + self.source) + match = LAMBDA_REGEX.search(target) + if match: + end = match.end() + self.source[line_index] = '{}def {}({}): return {}'.format( + target[:match.start(0)], match.group(1), match.group(2), + target[end:].lstrip()) + + def fix_w291(self, result): + """Remove trailing whitespace.""" + fixed_line = self.source[result['line'] - 1].rstrip() + self.source[result['line'] - 1] = fixed_line + '\n' + + def fix_w391(self, _): + """Remove trailing blank 
lines.""" + blank_count = 0 + for line in reversed(self.source): + line = line.rstrip() + if line: + break + else: + blank_count += 1 + + original_length = len(self.source) + self.source = self.source[:original_length - blank_count] + return range(1, 1 + original_length) + + def fix_w503(self, result): + (line_index, _, target) = get_index_offset_contents(result, + self.source) + one_string_token = target.split()[0] + try: + ts = generate_tokens(one_string_token) + except (SyntaxError, tokenize.TokenError): + return + if not _is_binary_operator(ts[0][0], one_string_token): + return + # find comment + comment_index = 0 + found_not_comment_only_line = False + comment_only_linenum = 0 + for i in range(5): + # NOTE: try to parse code in 5 times + if (line_index - i) < 0: + break + from_index = line_index - i - 1 + if from_index < 0 or len(self.source) <= from_index: + break + to_index = line_index + 1 + strip_line = self.source[from_index].lstrip() + if ( + not found_not_comment_only_line and + strip_line and strip_line[0] == '#' + ): + comment_only_linenum += 1 + continue + found_not_comment_only_line = True + try: + ts = generate_tokens("".join(self.source[from_index:to_index])) + except (SyntaxError, tokenize.TokenError): + continue + newline_count = 0 + newline_index = [] + for index, t in enumerate(ts): + if t[0] in (tokenize.NEWLINE, tokenize.NL): + newline_index.append(index) + newline_count += 1 + if newline_count > 2: + tts = ts[newline_index[-3]:] + else: + tts = ts + old = [] + for t in tts: + if t[0] in (tokenize.NEWLINE, tokenize.NL): + newline_count -= 1 + if newline_count <= 1: + break + if tokenize.COMMENT == t[0] and old and old[0] != tokenize.NL: + comment_index = old[3][1] + break + old = t + break + i = target.index(one_string_token) + fix_target_line = line_index - 1 - comment_only_linenum + self.source[line_index] = '{}{}'.format( + target[:i], target[i + len(one_string_token):].lstrip()) + nl = find_newline(self.source[fix_target_line:line_index]) + before_line = self.source[fix_target_line] + bl = before_line.index(nl) + if comment_index: + self.source[fix_target_line] = '{} {} {}'.format( + before_line[:comment_index], one_string_token, + before_line[comment_index + 1:]) + else: + if before_line[:bl].endswith("#"): + # special case + # see: https://github.com/hhatto/autopep8/issues/503 + self.source[fix_target_line] = '{}{} {}'.format( + before_line[:bl-2], one_string_token, before_line[bl-2:]) + else: + self.source[fix_target_line] = '{} {}{}'.format( + before_line[:bl], one_string_token, before_line[bl:]) + + def fix_w504(self, result): + (line_index, _, target) = get_index_offset_contents(result, + self.source) + # NOTE: is not collect pointed out in pycodestyle==2.4.0 + comment_index = 0 + operator_position = None # (start_position, end_position) + for i in range(1, 6): + to_index = line_index + i + try: + ts = generate_tokens("".join(self.source[line_index:to_index])) + except (SyntaxError, tokenize.TokenError): + continue + newline_count = 0 + newline_index = [] + for index, t in enumerate(ts): + if _is_binary_operator(t[0], t[1]): + if t[2][0] == 1 and t[3][0] == 1: + operator_position = (t[2][1], t[3][1]) + elif t[0] == tokenize.NAME and t[1] in ("and", "or"): + if t[2][0] == 1 and t[3][0] == 1: + operator_position = (t[2][1], t[3][1]) + elif t[0] in (tokenize.NEWLINE, tokenize.NL): + newline_index.append(index) + newline_count += 1 + if newline_count > 2: + tts = ts[:newline_index[-3]] + else: + tts = ts + old = [] + for t in tts: + if tokenize.COMMENT == t[0] 
and old: + comment_row, comment_index = old[3] + break + old = t + break + if not operator_position: + return + target_operator = target[operator_position[0]:operator_position[1]] + + if comment_index and comment_row == 1: + self.source[line_index] = '{}{}'.format( + target[:operator_position[0]].rstrip(), + target[comment_index:]) + else: + self.source[line_index] = '{}{}{}'.format( + target[:operator_position[0]].rstrip(), + target[operator_position[1]:].lstrip(), + target[operator_position[1]:]) + + next_line = self.source[line_index + 1] + next_line_indent = 0 + m = re.match(r'\s*', next_line) + if m: + next_line_indent = m.span()[1] + self.source[line_index + 1] = '{}{} {}'.format( + next_line[:next_line_indent], target_operator, + next_line[next_line_indent:]) + + def fix_w605(self, result): + (line_index, offset, target) = get_index_offset_contents(result, + self.source) + self.source[line_index] = '{}\\{}'.format( + target[:offset + 1], target[offset + 1:]) + + +def get_module_imports_on_top_of_file(source, import_line_index): + """return import or from keyword position + + example: + > 0: import sys + 1: import os + 2: + 3: def function(): + """ + def is_string_literal(line): + if line[0] in 'uUbB': + line = line[1:] + if line and line[0] in 'rR': + line = line[1:] + return line and (line[0] == '"' or line[0] == "'") + + def is_future_import(line): + nodes = ast.parse(line) + for n in nodes.body: + if isinstance(n, ast.ImportFrom) and n.module == '__future__': + return True + return False + + def has_future_import(source): + offset = 0 + line = '' + for _, next_line in source: + for line_part in next_line.strip().splitlines(True): + line = line + line_part + try: + return is_future_import(line), offset + except SyntaxError: + continue + offset += 1 + return False, offset + + allowed_try_keywords = ('try', 'except', 'else', 'finally') + in_docstring = False + docstring_kind = '"""' + source_stream = iter(enumerate(source)) + for cnt, line in source_stream: + if not in_docstring: + m = DOCSTRING_START_REGEX.match(line.lstrip()) + if m is not None: + in_docstring = True + docstring_kind = m.group('kind') + remain = line[m.end(): m.endpos].rstrip() + if remain[-3:] == docstring_kind: # one line doc + in_docstring = False + continue + if in_docstring: + if line.rstrip()[-3:] == docstring_kind: + in_docstring = False + continue + + if not line.rstrip(): + continue + elif line.startswith('#'): + continue + + if line.startswith('import '): + if cnt == import_line_index: + continue + return cnt + elif line.startswith('from '): + if cnt == import_line_index: + continue + hit, offset = has_future_import( + itertools.chain([(cnt, line)], source_stream) + ) + if hit: + # move to the back + return cnt + offset + 1 + return cnt + elif pycodestyle.DUNDER_REGEX.match(line): + return cnt + elif any(line.startswith(kw) for kw in allowed_try_keywords): + continue + elif is_string_literal(line): + return cnt + else: + return cnt + return 0 + + +def get_index_offset_contents(result, source): + """Return (line_index, column_offset, line_contents).""" + line_index = result['line'] - 1 + return (line_index, + result['column'] - 1, + source[line_index]) + + +def get_fixed_long_line(target, previous_line, original, + indent_word=' ', max_line_length=79, + aggressive=False, experimental=False, verbose=False): + """Break up long line and return result. + + Do this by generating multiple reformatted candidates and then + ranking the candidates to heuristically select the best option. 
+ + """ + indent = _get_indentation(target) + source = target[len(indent):] + assert source.lstrip() == source + assert not target.lstrip().startswith('#') + + # Check for partial multiline. + tokens = list(generate_tokens(source)) + + candidates = shorten_line( + tokens, source, indent, + indent_word, + max_line_length, + aggressive=aggressive, + experimental=experimental, + previous_line=previous_line) + + # Also sort alphabetically as a tie breaker (for determinism). + candidates = sorted( + sorted(set(candidates).union([target, original])), + key=lambda x: line_shortening_rank( + x, + indent_word, + max_line_length, + experimental=experimental)) + + if verbose >= 4: + print(('-' * 79 + '\n').join([''] + candidates + ['']), + file=wrap_output(sys.stderr, 'utf-8')) + + if candidates: + best_candidate = candidates[0] + + # Don't allow things to get longer. + if longest_line_length(best_candidate) > longest_line_length(original): + return None + + return best_candidate + + +def longest_line_length(code): + """Return length of longest line.""" + if len(code) == 0: + return 0 + return max(len(line) for line in code.splitlines()) + + +def join_logical_line(logical_line): + """Return single line based on logical line input.""" + indentation = _get_indentation(logical_line) + + return indentation + untokenize_without_newlines( + generate_tokens(logical_line.lstrip())) + '\n' + + +def untokenize_without_newlines(tokens): + """Return source code based on tokens.""" + text = '' + last_row = 0 + last_column = -1 + + for t in tokens: + token_string = t[1] + (start_row, start_column) = t[2] + (end_row, end_column) = t[3] + + if start_row > last_row: + last_column = 0 + if ( + (start_column > last_column or token_string == '\n') and + not text.endswith(' ') + ): + text += ' ' + + if token_string != '\n': + text += token_string + + last_row = end_row + last_column = end_column + + return text.rstrip() + + +def _find_logical(source_lines): + # Make a variable which is the index of all the starts of lines. + logical_start = [] + logical_end = [] + last_newline = True + parens = 0 + for t in generate_tokens(''.join(source_lines)): + if t[0] in [tokenize.COMMENT, tokenize.DEDENT, + tokenize.INDENT, tokenize.NL, + tokenize.ENDMARKER]: + continue + if not parens and t[0] in [tokenize.NEWLINE, tokenize.SEMI]: + last_newline = True + logical_end.append((t[3][0] - 1, t[2][1])) + continue + if last_newline and not parens: + logical_start.append((t[2][0] - 1, t[2][1])) + last_newline = False + if t[0] == tokenize.OP: + if t[1] in '([{': + parens += 1 + elif t[1] in '}])': + parens -= 1 + return (logical_start, logical_end) + + +def _get_logical(source_lines, result, logical_start, logical_end): + """Return the logical line corresponding to the result. + + Assumes input is already E702-clean. + + """ + row = result['line'] - 1 + col = result['column'] - 1 + ls = None + le = None + for i in range(0, len(logical_start), 1): + assert logical_end + x = logical_end[i] + if x[0] > row or (x[0] == row and x[1] > col): + le = x + ls = logical_start[i] + break + if ls is None: + return None + original = source_lines[ls[0]:le[0] + 1] + return ls, le, original + + +def get_item(items, index, default=None): + if 0 <= index < len(items): + return items[index] + + return default + + +def reindent(source, indent_size): + """Reindent all lines.""" + reindenter = Reindenter(source) + return reindenter.run(indent_size) + + +def code_almost_equal(a, b): + """Return True if code is similar. 
+ + Ignore whitespace when comparing specific line. + + """ + split_a = split_and_strip_non_empty_lines(a) + split_b = split_and_strip_non_empty_lines(b) + + if len(split_a) != len(split_b): + return False + + for (index, _) in enumerate(split_a): + if ''.join(split_a[index].split()) != ''.join(split_b[index].split()): + return False + + return True + + +def split_and_strip_non_empty_lines(text): + """Return lines split by newline. + + Ignore empty lines. + + """ + return [line.strip() for line in text.splitlines() if line.strip()] + + +def fix_e265(source, aggressive=False): # pylint: disable=unused-argument + """Format block comments.""" + if '#' not in source: + # Optimization. + return source + + ignored_line_numbers = multiline_string_lines( + source, + include_docstrings=True) | set(commented_out_code_lines(source)) + + fixed_lines = [] + sio = io.StringIO(source) + for (line_number, line) in enumerate(sio.readlines(), start=1): + if ( + line.lstrip().startswith('#') and + line_number not in ignored_line_numbers and + not pycodestyle.noqa(line) + ): + indentation = _get_indentation(line) + line = line.lstrip() + + # Normalize beginning if not a shebang. + if len(line) > 1: + pos = next((index for index, c in enumerate(line) + if c != '#')) + if ( + # Leave multiple spaces like '# ' alone. + (line[:pos].count('#') > 1 or line[1].isalnum() or + not line[1].isspace()) and + line[1] not in ':!' and + # Leave stylistic outlined blocks alone. + not line.rstrip().endswith('#') + ): + line = '# ' + line.lstrip('# \t') + + fixed_lines.append(indentation + line) + else: + fixed_lines.append(line) + + return ''.join(fixed_lines) + + +def refactor(source, fixer_names, ignore=None, filename=''): + """Return refactored code using lib2to3. + + Skip if ignore string is produced in the refactored code. + + """ + from lib2to3 import pgen2 + try: + new_text = refactor_with_2to3(source, + fixer_names=fixer_names, + filename=filename) + except (pgen2.parse.ParseError, + SyntaxError, + UnicodeDecodeError, + UnicodeEncodeError): + return source + + if ignore: + if ignore in new_text and ignore not in source: + return source + + return new_text + + +def code_to_2to3(select, ignore, where='', verbose=False): + fixes = set() + for code, fix in CODE_TO_2TO3.items(): + if code_match(code, select=select, ignore=ignore): + if verbose: + print('---> Applying {} fix for {}'.format(where, + code.upper()), + file=sys.stderr) + fixes |= set(fix) + return fixes + + +def fix_2to3(source, + aggressive=True, select=None, ignore=None, filename='', + where='global', verbose=False): + """Fix various deprecated code (via lib2to3).""" + if not aggressive: + return source + + select = select or [] + ignore = ignore or [] + + return refactor(source, + code_to_2to3(select=select, + ignore=ignore, + where=where, + verbose=verbose), + filename=filename) + + +def fix_w602(source, aggressive=True): + """Fix deprecated form of raising exception.""" + if not aggressive: + return source + + return refactor(source, ['raise'], ignore='with_traceback') + + +def find_newline(source): + """Return type of newline used in source. + + Input is a list of lines. 
+ + """ + assert not isinstance(source, unicode) + + counter = collections.defaultdict(int) + for line in source: + if line.endswith(CRLF): + counter[CRLF] += 1 + elif line.endswith(CR): + counter[CR] += 1 + elif line.endswith(LF): + counter[LF] += 1 + + return (sorted(counter, key=counter.get, reverse=True) or [LF])[0] + + +def _get_indentword(source): + """Return indentation type.""" + indent_word = ' ' # Default in case source has no indentation + try: + for t in generate_tokens(source): + if t[0] == token.INDENT: + indent_word = t[1] + break + except (SyntaxError, tokenize.TokenError): + pass + return indent_word + + +def _get_indentation(line): + """Return leading whitespace.""" + if line.strip(): + non_whitespace_index = len(line) - len(line.lstrip()) + return line[:non_whitespace_index] + + return '' + + +def get_diff_text(old, new, filename): + """Return text of unified diff between old and new.""" + newline = '\n' + diff = difflib.unified_diff( + old, new, + 'original/' + filename, + 'fixed/' + filename, + lineterm=newline) + + text = '' + for line in diff: + text += line + + # Work around missing newline (http://bugs.python.org/issue2142). + if text and not line.endswith(newline): + text += newline + r'\ No newline at end of file' + newline + + return text + + +def _priority_key(pep8_result): + """Key for sorting PEP8 results. + + Global fixes should be done first. This is important for things like + indentation. + + """ + priority = [ + # Fix multiline colon-based before semicolon based. + 'e701', + # Break multiline statements early. + 'e702', + # Things that make lines longer. + 'e225', 'e231', + # Remove extraneous whitespace before breaking lines. + 'e201', + # Shorten whitespace in comment before resorting to wrapping. + 'e262' + ] + middle_index = 10000 + lowest_priority = [ + # We need to shorten lines last since the logical fixer can get in a + # loop, which causes us to exit early. + 'e501', + ] + key = pep8_result['id'].lower() + try: + return priority.index(key) + except ValueError: + try: + return middle_index + lowest_priority.index(key) + 1 + except ValueError: + return middle_index + + +def shorten_line(tokens, source, indentation, indent_word, max_line_length, + aggressive=False, experimental=False, previous_line=''): + """Separate line at OPERATOR. + + Multiple candidates will be yielded. + + """ + for candidate in _shorten_line(tokens=tokens, + source=source, + indentation=indentation, + indent_word=indent_word, + aggressive=aggressive, + previous_line=previous_line): + yield candidate + + if aggressive: + for key_token_strings in SHORTEN_OPERATOR_GROUPS: + shortened = _shorten_line_at_tokens( + tokens=tokens, + source=source, + indentation=indentation, + indent_word=indent_word, + key_token_strings=key_token_strings, + aggressive=aggressive) + + if shortened is not None and shortened != source: + yield shortened + + if experimental: + for shortened in _shorten_line_at_tokens_new( + tokens=tokens, + source=source, + indentation=indentation, + max_line_length=max_line_length): + + yield shortened + + +def _shorten_line(tokens, source, indentation, indent_word, + aggressive=False, previous_line=''): + """Separate line at OPERATOR. + + The input is expected to be free of newlines except for inside multiline + strings and at the end. + + Multiple candidates will be yielded. 
+ + """ + for (token_type, + token_string, + start_offset, + end_offset) in token_offsets(tokens): + + if ( + token_type == tokenize.COMMENT and + not is_probably_part_of_multiline(previous_line) and + not is_probably_part_of_multiline(source) and + not source[start_offset + 1:].strip().lower().startswith( + ('noqa', 'pragma:', 'pylint:')) + ): + # Move inline comments to previous line. + first = source[:start_offset] + second = source[start_offset:] + yield (indentation + second.strip() + '\n' + + indentation + first.strip() + '\n') + elif token_type == token.OP and token_string != '=': + # Don't break on '=' after keyword as this violates PEP 8. + + assert token_type != token.INDENT + + first = source[:end_offset] + + second_indent = indentation + if (first.rstrip().endswith('(') and + source[end_offset:].lstrip().startswith(')')): + pass + elif first.rstrip().endswith('('): + second_indent += indent_word + elif '(' in first: + second_indent += ' ' * (1 + first.find('(')) + else: + second_indent += indent_word + + second = (second_indent + source[end_offset:].lstrip()) + if ( + not second.strip() or + second.lstrip().startswith('#') + ): + continue + + # Do not begin a line with a comma + if second.lstrip().startswith(','): + continue + # Do end a line with a dot + if first.rstrip().endswith('.'): + continue + if token_string in '+-*/': + fixed = first + ' \\' + '\n' + second + else: + fixed = first + '\n' + second + + # Only fix if syntax is okay. + if check_syntax(normalize_multiline(fixed) + if aggressive else fixed): + yield indentation + fixed + + +def _is_binary_operator(token_type, text): + return ((token_type == tokenize.OP or text in ['and', 'or']) and + text not in '()[]{},:.;@=%~') + + +# A convenient way to handle tokens. +Token = collections.namedtuple('Token', ['token_type', 'token_string', + 'spos', 'epos', 'line']) + + +class ReformattedLines(object): + + """The reflowed lines of atoms. + + Each part of the line is represented as an "atom." They can be moved + around when need be to get the optimal formatting. 
+ + """ + + ########################################################################### + # Private Classes + + class _Indent(object): + + """Represent an indentation in the atom stream.""" + + def __init__(self, indent_amt): + self._indent_amt = indent_amt + + def emit(self): + return ' ' * self._indent_amt + + @property + def size(self): + return self._indent_amt + + class _Space(object): + + """Represent a space in the atom stream.""" + + def emit(self): + return ' ' + + @property + def size(self): + return 1 + + class _LineBreak(object): + + """Represent a line break in the atom stream.""" + + def emit(self): + return '\n' + + @property + def size(self): + return 0 + + def __init__(self, max_line_length): + self._max_line_length = max_line_length + self._lines = [] + self._bracket_depth = 0 + self._prev_item = None + self._prev_prev_item = None + + def __repr__(self): + return self.emit() + + ########################################################################### + # Public Methods + + def add(self, obj, indent_amt, break_after_open_bracket): + if isinstance(obj, Atom): + self._add_item(obj, indent_amt) + return + + self._add_container(obj, indent_amt, break_after_open_bracket) + + def add_comment(self, item): + num_spaces = 2 + if len(self._lines) > 1: + if isinstance(self._lines[-1], self._Space): + num_spaces -= 1 + if len(self._lines) > 2: + if isinstance(self._lines[-2], self._Space): + num_spaces -= 1 + + while num_spaces > 0: + self._lines.append(self._Space()) + num_spaces -= 1 + self._lines.append(item) + + def add_indent(self, indent_amt): + self._lines.append(self._Indent(indent_amt)) + + def add_line_break(self, indent): + self._lines.append(self._LineBreak()) + self.add_indent(len(indent)) + + def add_line_break_at(self, index, indent_amt): + self._lines.insert(index, self._LineBreak()) + self._lines.insert(index + 1, self._Indent(indent_amt)) + + def add_space_if_needed(self, curr_text, equal=False): + if ( + not self._lines or isinstance( + self._lines[-1], (self._LineBreak, self._Indent, self._Space)) + ): + return + + prev_text = unicode(self._prev_item) + prev_prev_text = ( + unicode(self._prev_prev_item) if self._prev_prev_item else '') + + if ( + # The previous item was a keyword or identifier and the current + # item isn't an operator that doesn't require a space. + ((self._prev_item.is_keyword or self._prev_item.is_string or + self._prev_item.is_name or self._prev_item.is_number) and + (curr_text[0] not in '([{.,:}])' or + (curr_text[0] == '=' and equal))) or + + # Don't place spaces around a '.', unless it's in an 'import' + # statement. + ((prev_prev_text != 'from' and prev_text[-1] != '.' and + curr_text != 'import') and + + # Don't place a space before a colon. + curr_text[0] != ':' and + + # Don't split up ending brackets by spaces. + ((prev_text[-1] in '}])' and curr_text[0] not in '.,}])') or + + # Put a space after a colon or comma. + prev_text[-1] in ':,' or + + # Put space around '=' if asked to. + (equal and prev_text == '=') or + + # Put spaces around non-unary arithmetic operators. 
+ ((self._prev_prev_item and + (prev_text not in '+-' and + (self._prev_prev_item.is_name or + self._prev_prev_item.is_number or + self._prev_prev_item.is_string)) and + prev_text in ('+', '-', '%', '*', '/', '//', '**', 'in'))))) + ): + self._lines.append(self._Space()) + + def previous_item(self): + """Return the previous non-whitespace item.""" + return self._prev_item + + def fits_on_current_line(self, item_extent): + return self.current_size() + item_extent <= self._max_line_length + + def current_size(self): + """The size of the current line minus the indentation.""" + size = 0 + for item in reversed(self._lines): + size += item.size + if isinstance(item, self._LineBreak): + break + + return size + + def line_empty(self): + return (self._lines and + isinstance(self._lines[-1], + (self._LineBreak, self._Indent))) + + def emit(self): + string = '' + for item in self._lines: + if isinstance(item, self._LineBreak): + string = string.rstrip() + string += item.emit() + + return string.rstrip() + '\n' + + ########################################################################### + # Private Methods + + def _add_item(self, item, indent_amt): + """Add an item to the line. + + Reflow the line to get the best formatting after the item is + inserted. The bracket depth indicates if the item is being + inserted inside of a container or not. + + """ + if self._prev_item and self._prev_item.is_string and item.is_string: + # Place consecutive string literals on separate lines. + self._lines.append(self._LineBreak()) + self._lines.append(self._Indent(indent_amt)) + + item_text = unicode(item) + if self._lines and self._bracket_depth: + # Adding the item into a container. + self._prevent_default_initializer_splitting(item, indent_amt) + + if item_text in '.,)]}': + self._split_after_delimiter(item, indent_amt) + + elif self._lines and not self.line_empty(): + # Adding the item outside of a container. + if self.fits_on_current_line(len(item_text)): + self._enforce_space(item) + + else: + # Line break for the new item. + self._lines.append(self._LineBreak()) + self._lines.append(self._Indent(indent_amt)) + + self._lines.append(item) + self._prev_item, self._prev_prev_item = item, self._prev_item + + if item_text in '([{': + self._bracket_depth += 1 + + elif item_text in '}])': + self._bracket_depth -= 1 + assert self._bracket_depth >= 0 + + def _add_container(self, container, indent_amt, break_after_open_bracket): + actual_indent = indent_amt + 1 + + if ( + unicode(self._prev_item) != '=' and + not self.line_empty() and + not self.fits_on_current_line( + container.size + self._bracket_depth + 2) + ): + + if unicode(container)[0] == '(' and self._prev_item.is_name: + # Don't split before the opening bracket of a call. + break_after_open_bracket = True + actual_indent = indent_amt + 4 + elif ( + break_after_open_bracket or + unicode(self._prev_item) not in '([{' + ): + # If the container doesn't fit on the current line and the + # current line isn't empty, place the container on the next + # line. + self._lines.append(self._LineBreak()) + self._lines.append(self._Indent(indent_amt)) + break_after_open_bracket = False + else: + actual_indent = self.current_size() + 1 + break_after_open_bracket = False + + if isinstance(container, (ListComprehension, IfExpression)): + actual_indent = indent_amt + + # Increase the continued indentation only if recursing on a + # container. 
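+ # (reflow() recurses: items inside the container re-enter this class
+ # through ReformattedLines.add() with the larger indent chosen above.)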
+ container.reflow(self, ' ' * actual_indent, + break_after_open_bracket=break_after_open_bracket) + + def _prevent_default_initializer_splitting(self, item, indent_amt): + """Prevent splitting between a default initializer. + + When there is a default initializer, it's best to keep it all on + the same line. It's nicer and more readable, even if it goes + over the maximum allowable line length. This goes back along the + current line to determine if we have a default initializer, and, + if so, to remove extraneous whitespaces and add a line + break/indent before it if needed. + + """ + if unicode(item) == '=': + # This is the assignment in the initializer. Just remove spaces for + # now. + self._delete_whitespace() + return + + if (not self._prev_item or not self._prev_prev_item or + unicode(self._prev_item) != '='): + return + + self._delete_whitespace() + prev_prev_index = self._lines.index(self._prev_prev_item) + + if ( + isinstance(self._lines[prev_prev_index - 1], self._Indent) or + self.fits_on_current_line(item.size + 1) + ): + # The default initializer is already the only item on this line. + # Don't insert a newline here. + return + + # Replace the space with a newline/indent combo. + if isinstance(self._lines[prev_prev_index - 1], self._Space): + del self._lines[prev_prev_index - 1] + + self.add_line_break_at(self._lines.index(self._prev_prev_item), + indent_amt) + + def _split_after_delimiter(self, item, indent_amt): + """Split the line only after a delimiter.""" + self._delete_whitespace() + + if self.fits_on_current_line(item.size): + return + + last_space = None + for current_item in reversed(self._lines): + if ( + last_space and + (not isinstance(current_item, Atom) or + not current_item.is_colon) + ): + break + else: + last_space = None + if isinstance(current_item, self._Space): + last_space = current_item + if isinstance(current_item, (self._LineBreak, self._Indent)): + return + + if not last_space: + return + + self.add_line_break_at(self._lines.index(last_space), indent_amt) + + def _enforce_space(self, item): + """Enforce a space in certain situations. + + There are cases where we will want a space where normally we + wouldn't put one. This just enforces the addition of a space. + + """ + if isinstance(self._lines[-1], + (self._Space, self._LineBreak, self._Indent)): + return + + if not self._prev_item: + return + + item_text = unicode(item) + prev_text = unicode(self._prev_item) + + # Prefer a space around a '.' in an import statement, and between the + # 'import' and '('. + if ( + (item_text == '.' 
and prev_text == 'from') or + (item_text == 'import' and prev_text == '.') or + (item_text == '(' and prev_text == 'import') + ): + self._lines.append(self._Space()) + + def _delete_whitespace(self): + """Delete all whitespace from the end of the line.""" + while isinstance(self._lines[-1], (self._Space, self._LineBreak, + self._Indent)): + del self._lines[-1] + + +class Atom(object): + + """The smallest unbreakable unit that can be reflowed.""" + + def __init__(self, atom): + self._atom = atom + + def __repr__(self): + return self._atom.token_string + + def __len__(self): + return self.size + + def reflow( + self, reflowed_lines, continued_indent, extent, + break_after_open_bracket=False, + is_list_comp_or_if_expr=False, + next_is_dot=False + ): + if self._atom.token_type == tokenize.COMMENT: + reflowed_lines.add_comment(self) + return + + total_size = extent if extent else self.size + + if self._atom.token_string not in ',:([{}])': + # Some atoms will need an extra 1-sized space token after them. + total_size += 1 + + prev_item = reflowed_lines.previous_item() + if ( + not is_list_comp_or_if_expr and + not reflowed_lines.fits_on_current_line(total_size) and + not (next_is_dot and + reflowed_lines.fits_on_current_line(self.size + 1)) and + not reflowed_lines.line_empty() and + not self.is_colon and + not (prev_item and prev_item.is_name and + unicode(self) == '(') + ): + # Start a new line if there is already something on the line and + # adding this atom would make it go over the max line length. + reflowed_lines.add_line_break(continued_indent) + else: + reflowed_lines.add_space_if_needed(unicode(self)) + + reflowed_lines.add(self, len(continued_indent), + break_after_open_bracket) + + def emit(self): + return self.__repr__() + + @property + def is_keyword(self): + return keyword.iskeyword(self._atom.token_string) + + @property + def is_string(self): + return self._atom.token_type == tokenize.STRING + + @property + def is_name(self): + return self._atom.token_type == tokenize.NAME + + @property + def is_number(self): + return self._atom.token_type == tokenize.NUMBER + + @property + def is_comma(self): + return self._atom.token_string == ',' + + @property + def is_colon(self): + return self._atom.token_string == ':' + + @property + def size(self): + return len(self._atom.token_string) + + +class Container(object): + + """Base class for all container types.""" + + def __init__(self, items): + self._items = items + + def __repr__(self): + string = '' + last_was_keyword = False + + for item in self._items: + if item.is_comma: + string += ', ' + elif item.is_colon: + string += ': ' + else: + item_string = unicode(item) + if ( + string and + (last_was_keyword or + (not string.endswith(tuple('([{,.:}]) ')) and + not item_string.startswith(tuple('([{,.:}])')))) + ): + string += ' ' + string += item_string + + last_was_keyword = item.is_keyword + return string + + def __iter__(self): + for element in self._items: + yield element + + def __getitem__(self, idx): + return self._items[idx] + + def reflow(self, reflowed_lines, continued_indent, + break_after_open_bracket=False): + last_was_container = False + for (index, item) in enumerate(self._items): + next_item = get_item(self._items, index + 1) + + if isinstance(item, Atom): + is_list_comp_or_if_expr = ( + isinstance(self, (ListComprehension, IfExpression))) + item.reflow(reflowed_lines, continued_indent, + self._get_extent(index), + is_list_comp_or_if_expr=is_list_comp_or_if_expr, + next_is_dot=(next_item and + unicode(next_item) == '.')) + if 
last_was_container and item.is_comma: + reflowed_lines.add_line_break(continued_indent) + last_was_container = False + else: # isinstance(item, Container) + reflowed_lines.add(item, len(continued_indent), + break_after_open_bracket) + last_was_container = not isinstance(item, (ListComprehension, + IfExpression)) + + if ( + break_after_open_bracket and index == 0 and + # Prefer to keep empty containers together instead of + # separating them. + unicode(item) == self.open_bracket and + (not next_item or unicode(next_item) != self.close_bracket) and + (len(self._items) != 3 or not isinstance(next_item, Atom)) + ): + reflowed_lines.add_line_break(continued_indent) + break_after_open_bracket = False + else: + next_next_item = get_item(self._items, index + 2) + if ( + unicode(item) not in ['.', '%', 'in'] and + next_item and not isinstance(next_item, Container) and + unicode(next_item) != ':' and + next_next_item and (not isinstance(next_next_item, Atom) or + unicode(next_item) == 'not') and + not reflowed_lines.line_empty() and + not reflowed_lines.fits_on_current_line( + self._get_extent(index + 1) + 2) + ): + reflowed_lines.add_line_break(continued_indent) + + def _get_extent(self, index): + """The extent of the full element. + + E.g., the length of a function call or keyword. + + """ + extent = 0 + prev_item = get_item(self._items, index - 1) + seen_dot = prev_item and unicode(prev_item) == '.' + while index < len(self._items): + item = get_item(self._items, index) + index += 1 + + if isinstance(item, (ListComprehension, IfExpression)): + break + + if isinstance(item, Container): + if prev_item and prev_item.is_name: + if seen_dot: + extent += 1 + else: + extent += item.size + + prev_item = item + continue + elif (unicode(item) not in ['.', '=', ':', 'not'] and + not item.is_name and not item.is_string): + break + + if unicode(item) == '.': + seen_dot = True + + extent += item.size + prev_item = item + + return extent + + @property + def is_string(self): + return False + + @property + def size(self): + return len(self.__repr__()) + + @property + def is_keyword(self): + return False + + @property + def is_name(self): + return False + + @property + def is_comma(self): + return False + + @property + def is_colon(self): + return False + + @property + def open_bracket(self): + return None + + @property + def close_bracket(self): + return None + + +class Tuple(Container): + + """A high-level representation of a tuple.""" + + @property + def open_bracket(self): + return '(' + + @property + def close_bracket(self): + return ')' + + +class List(Container): + + """A high-level representation of a list.""" + + @property + def open_bracket(self): + return '[' + + @property + def close_bracket(self): + return ']' + + +class DictOrSet(Container): + + """A high-level representation of a dictionary or set.""" + + @property + def open_bracket(self): + return '{' + + @property + def close_bracket(self): + return '}' + + +class ListComprehension(Container): + + """A high-level representation of a list comprehension.""" + + @property + def size(self): + length = 0 + for item in self._items: + if isinstance(item, IfExpression): + break + length += item.size + return length + + +class IfExpression(Container): + + """A high-level representation of an if-expression.""" + + +def _parse_container(tokens, index, for_or_if=None): + """Parse a high-level container, such as a list, tuple, etc.""" + + # Store the opening bracket. 
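+    # The opening bracket becomes the first item; the loop below collects
+    # everything up to its matching closer, recursing into nested brackets
+    # and into comprehension 'for'/'if' clauses.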
+ items = [Atom(Token(*tokens[index]))] + index += 1 + + num_tokens = len(tokens) + while index < num_tokens: + tok = Token(*tokens[index]) + + if tok.token_string in ',)]}': + # First check if we're at the end of a list comprehension or + # if-expression. Don't add the ending token as part of the list + # comprehension or if-expression, because they aren't part of those + # constructs. + if for_or_if == 'for': + return (ListComprehension(items), index - 1) + + elif for_or_if == 'if': + return (IfExpression(items), index - 1) + + # We've reached the end of a container. + items.append(Atom(tok)) + + # If not, then we are at the end of a container. + if tok.token_string == ')': + # The end of a tuple. + return (Tuple(items), index) + + elif tok.token_string == ']': + # The end of a list. + return (List(items), index) + + elif tok.token_string == '}': + # The end of a dictionary or set. + return (DictOrSet(items), index) + + elif tok.token_string in '([{': + # A sub-container is being defined. + (container, index) = _parse_container(tokens, index) + items.append(container) + + elif tok.token_string == 'for': + (container, index) = _parse_container(tokens, index, 'for') + items.append(container) + + elif tok.token_string == 'if': + (container, index) = _parse_container(tokens, index, 'if') + items.append(container) + + else: + items.append(Atom(tok)) + + index += 1 + + return (None, None) + + +def _parse_tokens(tokens): + """Parse the tokens. + + This converts the tokens into a form where we can manipulate them + more easily. + + """ + + index = 0 + parsed_tokens = [] + + num_tokens = len(tokens) + while index < num_tokens: + tok = Token(*tokens[index]) + + assert tok.token_type != token.INDENT + if tok.token_type == tokenize.NEWLINE: + # There's only one newline and it's at the end. + break + + if tok.token_string in '([{': + (container, index) = _parse_container(tokens, index) + if not container: + return None + parsed_tokens.append(container) + else: + parsed_tokens.append(Atom(tok)) + + index += 1 + + return parsed_tokens + + +def _reflow_lines(parsed_tokens, indentation, max_line_length, + start_on_prefix_line): + """Reflow the lines so that it looks nice.""" + + if unicode(parsed_tokens[0]) == 'def': + # A function definition gets indented a bit more. + continued_indent = indentation + ' ' * 2 * DEFAULT_INDENT_SIZE + else: + continued_indent = indentation + ' ' * DEFAULT_INDENT_SIZE + + break_after_open_bracket = not start_on_prefix_line + + lines = ReformattedLines(max_line_length) + lines.add_indent(len(indentation.lstrip('\r\n'))) + + if not start_on_prefix_line: + # If splitting after the opening bracket will cause the first element + # to be aligned weirdly, don't try it. + first_token = get_item(parsed_tokens, 0) + second_token = get_item(parsed_tokens, 1) + + if ( + first_token and second_token and + unicode(second_token)[0] == '(' and + len(indentation) + len(first_token) + 1 == len(continued_indent) + ): + return None + + for item in parsed_tokens: + lines.add_space_if_needed(unicode(item), equal=True) + + save_continued_indent = continued_indent + if start_on_prefix_line and isinstance(item, Container): + start_on_prefix_line = False + continued_indent = ' ' * (lines.current_size() + 1) + + item.reflow(lines, continued_indent, break_after_open_bracket) + continued_indent = save_continued_indent + + return lines.emit() + + +def _shorten_line_at_tokens_new(tokens, source, indentation, + max_line_length): + """Shorten the line taking its length into account. 
+ + The input is expected to be free of newlines except for inside + multiline strings and at the end. + + """ + # Yield the original source so to see if it's a better choice than the + # shortened candidate lines we generate here. + yield indentation + source + + parsed_tokens = _parse_tokens(tokens) + + if parsed_tokens: + # Perform two reflows. The first one starts on the same line as the + # prefix. The second starts on the line after the prefix. + fixed = _reflow_lines(parsed_tokens, indentation, max_line_length, + start_on_prefix_line=True) + if fixed and check_syntax(normalize_multiline(fixed.lstrip())): + yield fixed + + fixed = _reflow_lines(parsed_tokens, indentation, max_line_length, + start_on_prefix_line=False) + if fixed and check_syntax(normalize_multiline(fixed.lstrip())): + yield fixed + + +def _shorten_line_at_tokens(tokens, source, indentation, indent_word, + key_token_strings, aggressive): + """Separate line by breaking at tokens in key_token_strings. + + The input is expected to be free of newlines except for inside + multiline strings and at the end. + + """ + offsets = [] + for (index, _t) in enumerate(token_offsets(tokens)): + (token_type, + token_string, + start_offset, + end_offset) = _t + + assert token_type != token.INDENT + + if token_string in key_token_strings: + # Do not break in containers with zero or one items. + unwanted_next_token = { + '(': ')', + '[': ']', + '{': '}'}.get(token_string) + if unwanted_next_token: + if ( + get_item(tokens, + index + 1, + default=[None, None])[1] == unwanted_next_token or + get_item(tokens, + index + 2, + default=[None, None])[1] == unwanted_next_token + ): + continue + + if ( + index > 2 and token_string == '(' and + tokens[index - 1][1] in ',(%[' + ): + # Don't split after a tuple start, or before a tuple start if + # the tuple is in a list. + continue + + if end_offset < len(source) - 1: + # Don't split right before newline. + offsets.append(end_offset) + else: + # Break at adjacent strings. These were probably meant to be on + # separate lines in the first place. + previous_token = get_item(tokens, index - 1) + if ( + token_type == tokenize.STRING and + previous_token and previous_token[0] == tokenize.STRING + ): + offsets.append(start_offset) + + current_indent = None + fixed = None + for line in split_at_offsets(source, offsets): + if fixed: + fixed += '\n' + current_indent + line + + for symbol in '([{': + if line.endswith(symbol): + current_indent += indent_word + else: + # First line. + fixed = line + assert not current_indent + current_indent = indent_word + + assert fixed is not None + + if check_syntax(normalize_multiline(fixed) + if aggressive > 1 else fixed): + return indentation + fixed + + return None + + +def token_offsets(tokens): + """Yield tokens and offsets.""" + end_offset = 0 + previous_end_row = 0 + previous_end_column = 0 + for t in tokens: + token_type = t[0] + token_string = t[1] + (start_row, start_column) = t[2] + (end_row, end_column) = t[3] + + # Account for the whitespace between tokens. + end_offset += start_column + if previous_end_row == start_row: + end_offset -= previous_end_column + + # Record the start offset of the token. + start_offset = end_offset + + # Account for the length of the token itself. + end_offset += len(token_string) + + yield (token_type, + token_string, + start_offset, + end_offset) + + previous_end_row = end_row + previous_end_column = end_column + + +def normalize_multiline(line): + """Normalize multiline-related code that will cause syntax error. 
+ + This is for purposes of checking syntax. + + """ + if line.startswith('def ') and line.rstrip().endswith(':'): + return line + ' pass' + elif line.startswith('return '): + return 'def _(): ' + line + elif line.startswith('@'): + return line + 'def _(): pass' + elif line.startswith('class '): + return line + ' pass' + elif line.startswith(('if ', 'elif ', 'for ', 'while ')): + return line + ' pass' + + return line + + +def fix_whitespace(line, offset, replacement): + """Replace whitespace at offset and return fixed line.""" + # Replace escaped newlines too + left = line[:offset].rstrip('\n\r \t\\') + right = line[offset:].lstrip('\n\r \t\\') + if right.startswith('#'): + return line + + return left + replacement + right + + +def _execute_pep8(pep8_options, source): + """Execute pycodestyle via python method calls.""" + class QuietReport(pycodestyle.BaseReport): + + """Version of checker that does not print.""" + + def __init__(self, options): + super(QuietReport, self).__init__(options) + self.__full_error_results = [] + + def error(self, line_number, offset, text, check): + """Collect errors.""" + code = super(QuietReport, self).error(line_number, + offset, + text, + check) + if code: + self.__full_error_results.append( + {'id': code, + 'line': line_number, + 'column': offset + 1, + 'info': text}) + + def full_error_results(self): + """Return error results in detail. + + Results are in the form of a list of dictionaries. Each + dictionary contains 'id', 'line', 'column', and 'info'. + + """ + return self.__full_error_results + + checker = pycodestyle.Checker('', lines=source, reporter=QuietReport, + **pep8_options) + checker.check_all() + return checker.report.full_error_results() + + +def _remove_leading_and_normalize(line): + # ignore FF in first lstrip() + return line.lstrip(' \t\v').rstrip(CR + LF) + '\n' + + +class Reindenter(object): + + """Reindents badly-indented code to uniformly use four-space indentation. + + Released to the public domain, by Tim Peters, 03 October 2000. + + """ + + def __init__(self, input_text): + sio = io.StringIO(input_text) + source_lines = sio.readlines() + + self.string_content_line_numbers = multiline_string_lines(input_text) + + # File lines, rstripped & tab-expanded. Dummy at start is so + # that we can use tokenize's 1-based line numbering easily. + # Note that a line is all-blank iff it is a newline. + self.lines = [] + for line_number, line in enumerate(source_lines, start=1): + # Do not modify if inside a multiline string. + if line_number in self.string_content_line_numbers: + self.lines.append(line) + else: + # Only expand leading tabs. + self.lines.append(_get_indentation(line).expandtabs() + + _remove_leading_and_normalize(line)) + + self.lines.insert(0, None) + self.index = 1 # index into self.lines of next line + self.input_text = input_text + + def run(self, indent_size=DEFAULT_INDENT_SIZE): + """Fix indentation and return modified line numbers. + + Line numbers are indexed at 1. + + """ + if indent_size < 1: + return self.input_text + + try: + stats = _reindent_stats(tokenize.generate_tokens(self.getline)) + except (SyntaxError, tokenize.TokenError): + return self.input_text + # Remove trailing empty lines. + lines = self.lines + # Sentinel. + stats.append((len(lines), 0)) + # Map count of leading spaces to # we want. + have2want = {} + # Program after transformation. + after = [] + # Copy over initial empty lines -- there's nothing to do until + # we see a line with *something* on it. 
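+        # stats[0][0] is the line number of the first statement or comment;
+        # lines before it need no reindenting and are copied through as-is.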
+ i = stats[0][0] + after.extend(lines[1:i]) + for i in range(len(stats) - 1): + thisstmt, thislevel = stats[i] + nextstmt = stats[i + 1][0] + have = _leading_space_count(lines[thisstmt]) + want = thislevel * indent_size + if want < 0: + # A comment line. + if have: + # An indented comment line. If we saw the same + # indentation before, reuse what it most recently + # mapped to. + want = have2want.get(have, -1) + if want < 0: + # Then it probably belongs to the next real stmt. + for j in range(i + 1, len(stats) - 1): + jline, jlevel = stats[j] + if jlevel >= 0: + if have == _leading_space_count(lines[jline]): + want = jlevel * indent_size + break + # Maybe it's a hanging comment like this one, + if want < 0: + # in which case we should shift it like its base + # line got shifted. + for j in range(i - 1, -1, -1): + jline, jlevel = stats[j] + if jlevel >= 0: + want = (have + _leading_space_count( + after[jline - 1]) - + _leading_space_count(lines[jline])) + break + if want < 0: + # Still no luck -- leave it alone. + want = have + else: + want = 0 + assert want >= 0 + have2want[have] = want + diff = want - have + if diff == 0 or have == 0: + after.extend(lines[thisstmt:nextstmt]) + else: + for line_number, line in enumerate(lines[thisstmt:nextstmt], + start=thisstmt): + if line_number in self.string_content_line_numbers: + after.append(line) + elif diff > 0: + if line == '\n': + after.append(line) + else: + after.append(' ' * diff + line) + else: + remove = min(_leading_space_count(line), -diff) + after.append(line[remove:]) + + return ''.join(after) + + def getline(self): + """Line-getter for tokenize.""" + if self.index >= len(self.lines): + line = '' + else: + line = self.lines[self.index] + self.index += 1 + return line + + +def _reindent_stats(tokens): + """Return list of (lineno, indentlevel) pairs. + + One for each stmt and comment line. indentlevel is -1 for comment + lines, as a signal that tokenize doesn't know what to do about them; + indeed, they're our headache! + + """ + find_stmt = 1 # Next token begins a fresh stmt? + level = 0 # Current indent level. + stats = [] + + for t in tokens: + token_type = t[0] + sline = t[2][0] + line = t[4] + + if token_type == tokenize.NEWLINE: + # A program statement, or ENDMARKER, will eventually follow, + # after some (possibly empty) run of tokens of the form + # (NL | COMMENT)* (INDENT | DEDENT+)? + find_stmt = 1 + + elif token_type == tokenize.INDENT: + find_stmt = 1 + level += 1 + + elif token_type == tokenize.DEDENT: + find_stmt = 1 + level -= 1 + + elif token_type == tokenize.COMMENT: + if find_stmt: + stats.append((sline, -1)) + # But we're still looking for a new stmt, so leave + # find_stmt alone. + + elif token_type == tokenize.NL: + pass + + elif find_stmt: + # This is the first "real token" following a NEWLINE, so it + # must be the first token of the next program statement, or an + # ENDMARKER. + find_stmt = 0 + if line: # Not endmarker. + stats.append((sline, level)) + + return stats + + +def _leading_space_count(line): + """Return number of leading spaces in line.""" + i = 0 + while i < len(line) and line[i] == ' ': + i += 1 + return i + + +def refactor_with_2to3(source_text, fixer_names, filename=''): + """Use lib2to3 to refactor the source. + + Return the refactored source code. 
+ + """ + from lib2to3.refactor import RefactoringTool + fixers = ['lib2to3.fixes.fix_' + name for name in fixer_names] + tool = RefactoringTool(fixer_names=fixers, explicit=fixers) + + from lib2to3.pgen2 import tokenize as lib2to3_tokenize + try: + # The name parameter is necessary particularly for the "import" fixer. + return unicode(tool.refactor_string(source_text, name=filename)) + except lib2to3_tokenize.TokenError: + return source_text + + +def check_syntax(code): + """Return True if syntax is okay.""" + try: + return compile(code, '', 'exec', dont_inherit=True) + except (SyntaxError, TypeError, ValueError): + return False + + +def find_with_line_numbers(pattern, contents): + """A wrapper around 're.finditer' to find line numbers. + + Returns a list of line numbers where pattern was found in contents. + """ + matches = list(re.finditer(pattern, contents)) + if not matches: + return [] + + end = matches[-1].start() + + # -1 so a failed `rfind` maps to the first line. + newline_offsets = { + -1: 0 + } + for line_num, m in enumerate(re.finditer(r'\n', contents), 1): + offset = m.start() + if offset > end: + break + newline_offsets[offset] = line_num + + def get_line_num(match, contents): + """Get the line number of string in a files contents. + + Failing to find the newline is OK, -1 maps to 0 + + """ + newline_offset = contents.rfind('\n', 0, match.start()) + return newline_offsets[newline_offset] + + return [get_line_num(match, contents) + 1 for match in matches] + + +def get_disabled_ranges(source): + """Returns a list of tuples representing the disabled ranges. + + If disabled and no re-enable will disable for rest of file. + + """ + enable_line_nums = find_with_line_numbers(ENABLE_REGEX, source) + disable_line_nums = find_with_line_numbers(DISABLE_REGEX, source) + total_lines = len(re.findall("\n", source)) + 1 + + enable_commands = {} + for num in enable_line_nums: + enable_commands[num] = True + for num in disable_line_nums: + enable_commands[num] = False + + disabled_ranges = [] + currently_enabled = True + disabled_start = None + + for line, commanded_enabled in sorted(enable_commands.items()): + if currently_enabled is True and commanded_enabled is False: + disabled_start = line + currently_enabled = False + elif currently_enabled is False and commanded_enabled is True: + disabled_ranges.append((disabled_start, line)) + currently_enabled = True + + if currently_enabled is False: + disabled_ranges.append((disabled_start, total_lines)) + + return disabled_ranges + + +def filter_results(source, results, aggressive): + """Filter out spurious reports from pycodestyle. + + If aggressive is True, we allow possibly unsafe fixes (E711, E712). 
+ + """ + non_docstring_string_line_numbers = multiline_string_lines( + source, include_docstrings=False) + all_string_line_numbers = multiline_string_lines( + source, include_docstrings=True) + + commented_out_code_line_numbers = commented_out_code_lines(source) + + # Filter out the disabled ranges + disabled_ranges = get_disabled_ranges(source) + if len(disabled_ranges) > 0: + results = [result for result in results + if any(result['line'] not in range(*disabled_range) + for disabled_range in disabled_ranges) + ] + + has_e901 = any(result['id'].lower() == 'e901' for result in results) + + for r in results: + issue_id = r['id'].lower() + + if r['line'] in non_docstring_string_line_numbers: + if issue_id.startswith(('e1', 'e501', 'w191')): + continue + + if r['line'] in all_string_line_numbers: + if issue_id in ['e501']: + continue + + # We must offset by 1 for lines that contain the trailing contents of + # multiline strings. + if not aggressive and (r['line'] + 1) in all_string_line_numbers: + # Do not modify multiline strings in non-aggressive mode. Remove + # trailing whitespace could break doctests. + if issue_id.startswith(('w29', 'w39')): + continue + + if aggressive <= 0: + if issue_id.startswith(('e711', 'e72', 'w6')): + continue + + if aggressive <= 1: + if issue_id.startswith(('e712', 'e713', 'e714')): + continue + + if aggressive <= 2: + if issue_id.startswith(('e704')): + continue + + if r['line'] in commented_out_code_line_numbers: + if issue_id.startswith(('e26', 'e501')): + continue + + # Do not touch indentation if there is a token error caused by + # incomplete multi-line statement. Otherwise, we risk screwing up the + # indentation. + if has_e901: + if issue_id.startswith(('e1', 'e7')): + continue + + yield r + + +def multiline_string_lines(source, include_docstrings=False): + """Return line numbers that are within multiline strings. + + The line numbers are indexed at 1. + + Docstrings are ignored. + + """ + line_numbers = set() + previous_token_type = '' + try: + for t in generate_tokens(source): + token_type = t[0] + start_row = t[2][0] + end_row = t[3][0] + + if token_type == tokenize.STRING and start_row != end_row: + if ( + include_docstrings or + previous_token_type != tokenize.INDENT + ): + # We increment by one since we want the contents of the + # string. + line_numbers |= set(range(1 + start_row, 1 + end_row)) + + previous_token_type = token_type + except (SyntaxError, tokenize.TokenError): + pass + + return line_numbers + + +def commented_out_code_lines(source): + """Return line numbers of comments that are likely code. + + Commented-out code is bad practice, but modifying it just adds even + more clutter. + + """ + line_numbers = [] + try: + for t in generate_tokens(source): + token_type = t[0] + token_string = t[1] + start_row = t[2][0] + line = t[4] + + # Ignore inline comments. 
+ if not line.lstrip().startswith('#'): + continue + + if token_type == tokenize.COMMENT: + stripped_line = token_string.lstrip('#').strip() + with warnings.catch_warnings(): + # ignore SyntaxWarning in Python3.8+ + # refs: + # https://bugs.python.org/issue15248 + # https://docs.python.org/3.8/whatsnew/3.8.html#other-language-changes + warnings.filterwarnings("ignore", category=SyntaxWarning) + if ( + ' ' in stripped_line and + '#' not in stripped_line and + check_syntax(stripped_line) + ): + line_numbers.append(start_row) + except (SyntaxError, tokenize.TokenError): + pass + + return line_numbers + + +def shorten_comment(line, max_line_length, last_comment=False): + """Return trimmed or split long comment line. + + If there are no comments immediately following it, do a text wrap. + Doing this wrapping on all comments in general would lead to jagged + comment text. + + """ + assert len(line) > max_line_length + line = line.rstrip() + + # PEP 8 recommends 72 characters for comment text. + indentation = _get_indentation(line) + '# ' + max_line_length = min(max_line_length, + len(indentation) + 72) + + MIN_CHARACTER_REPEAT = 5 + if ( + len(line) - len(line.rstrip(line[-1])) >= MIN_CHARACTER_REPEAT and + not line[-1].isalnum() + ): + # Trim comments that end with things like --------- + return line[:max_line_length] + '\n' + elif last_comment and re.match(r'\s*#+\s*\w+', line): + split_lines = textwrap.wrap(line.lstrip(' \t#'), + initial_indent=indentation, + subsequent_indent=indentation, + width=max_line_length, + break_long_words=False, + break_on_hyphens=False) + return '\n'.join(split_lines) + '\n' + + return line + '\n' + + +def normalize_line_endings(lines, newline): + """Return fixed line endings. + + All lines will be modified to use the most common line ending. + + """ + return [line.rstrip('\n\r') + newline for line in lines] + + +def mutual_startswith(a, b): + return b.startswith(a) or a.startswith(b) + + +def code_match(code, select, ignore): + if ignore: + assert not isinstance(ignore, unicode) + for ignored_code in [c.strip() for c in ignore]: + if mutual_startswith(code.lower(), ignored_code.lower()): + return False + + if select: + assert not isinstance(select, unicode) + for selected_code in [c.strip() for c in select]: + if mutual_startswith(code.lower(), selected_code.lower()): + return True + return False + + return True + + +def fix_code(source, options=None, encoding=None, apply_config=False): + """Return fixed source code. + + "encoding" will be used to decode "source" if it is a byte string. + + """ + options = _get_options(options, apply_config) + + if not isinstance(source, unicode): + source = source.decode(encoding or get_encoding()) + + sio = io.StringIO(source) + return fix_lines(sio.readlines(), options=options) + + +def _get_options(raw_options, apply_config): + """Return parsed options.""" + if not raw_options: + return parse_args([''], apply_config=apply_config) + + if isinstance(raw_options, dict): + options = parse_args([''], apply_config=apply_config) + for name, value in raw_options.items(): + if not hasattr(options, name): + raise ValueError("No such option '{}'".format(name)) + + # Check for very basic type errors. 
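+            # Reject raw string values: an unparsed '79' (instead of 79)
+            # would otherwise silently misconfigure the option.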
+ expected_type = type(getattr(options, name)) + if not isinstance(expected_type, (str, unicode)): + if isinstance(value, (str, unicode)): + raise ValueError( + "Option '{}' should not be a string".format(name)) + setattr(options, name, value) + else: + options = raw_options + + return options + + +def fix_lines(source_lines, options, filename=''): + """Return fixed source code.""" + # Transform everything to line feed. Then change them back to original + # before returning fixed source code. + original_newline = find_newline(source_lines) + tmp_source = ''.join(normalize_line_endings(source_lines, '\n')) + + # Keep a history to break out of cycles. + previous_hashes = set() + + if options.line_range: + # Disable "apply_local_fixes()" for now due to issue #175. + fixed_source = tmp_source + else: + pep8_options = { + 'ignore': options.ignore, + 'select': options.select, + 'max_line_length': options.max_line_length, + 'hang_closing': options.hang_closing, + } + sio = io.StringIO(tmp_source) + contents = sio.readlines() + results = _execute_pep8(pep8_options, contents) + codes = {result['id'] for result in results + if result['id'] in SELECTED_GLOBAL_FIXED_METHOD_CODES} + # Apply global fixes only once (for efficiency). + fixed_source = apply_global_fixes(tmp_source, + options, + filename=filename, + codes=codes) + + passes = 0 + long_line_ignore_cache = set() + while hash(fixed_source) not in previous_hashes: + if options.pep8_passes >= 0 and passes > options.pep8_passes: + break + passes += 1 + + previous_hashes.add(hash(fixed_source)) + + tmp_source = copy.copy(fixed_source) + + fix = FixPEP8( + filename, + options, + contents=tmp_source, + long_line_ignore_cache=long_line_ignore_cache) + + fixed_source = fix.fix() + + sio = io.StringIO(fixed_source) + return ''.join(normalize_line_endings(sio.readlines(), original_newline)) + + +def fix_file(filename, options=None, output=None, apply_config=False): + if not options: + options = parse_args([filename], apply_config=apply_config) + + original_source = readlines_from_file(filename) + + fixed_source = original_source + + if options.in_place or options.diff or output: + encoding = detect_encoding(filename) + + if output: + output = LineEndingWrapper(wrap_output(output, encoding=encoding)) + + fixed_source = fix_lines(fixed_source, options, filename=filename) + + if options.diff: + new = io.StringIO(fixed_source) + new = new.readlines() + diff = get_diff_text(original_source, new, filename) + if output: + output.write(diff) + output.flush() + elif options.jobs > 1: + diff = diff.encode(encoding) + return diff + elif options.in_place: + original = "".join(original_source).splitlines() + fixed = fixed_source.splitlines() + original_source_last_line = ( + original_source[-1].split("\n")[-1] if original_source else "" + ) + fixed_source_last_line = fixed_source.split("\n")[-1] + if original != fixed or ( + original_source_last_line != fixed_source_last_line + ): + with open_with_encoding(filename, 'w', encoding=encoding) as fp: + fp.write(fixed_source) + return fixed_source + return None + else: + if output: + output.write(fixed_source) + output.flush() + return fixed_source + + +def global_fixes(): + """Yield multiple (code, function) tuples.""" + for function in list(globals().values()): + if inspect.isfunction(function): + arguments = _get_parameters(function) + if arguments[:1] != ['source']: + continue + + code = extract_code_from_function(function) + if code: + yield (code, function) + + +def _get_parameters(function): + # pylint: 
disable=deprecated-method + if sys.version_info.major >= 3: + # We need to match "getargspec()", which includes "self" as the first + # value for methods. + # https://bugs.python.org/issue17481#msg209469 + if inspect.ismethod(function): + function = function.__func__ + + return list(inspect.signature(function).parameters) + else: + return inspect.getargspec(function)[0] + + +def apply_global_fixes(source, options, where='global', filename='', + codes=None): + """Run global fixes on source code. + + These are fixes that only need be done once (unlike those in + FixPEP8, which are dependent on pycodestyle). + + """ + if codes is None: + codes = [] + if any(code_match(code, select=options.select, ignore=options.ignore) + for code in ['E101', 'E111']): + source = reindent(source, + indent_size=options.indent_size) + + for (code, function) in global_fixes(): + if code.upper() in SELECTED_GLOBAL_FIXED_METHOD_CODES \ + and code.upper() not in codes: + continue + if code_match(code, select=options.select, ignore=options.ignore): + if options.verbose: + print('---> Applying {} fix for {}'.format(where, + code.upper()), + file=sys.stderr) + source = function(source, + aggressive=options.aggressive) + + source = fix_2to3(source, + aggressive=options.aggressive, + select=options.select, + ignore=options.ignore, + filename=filename, + where=where, + verbose=options.verbose) + + return source + + +def extract_code_from_function(function): + """Return code handled by function.""" + if not function.__name__.startswith('fix_'): + return None + + code = re.sub('^fix_', '', function.__name__) + if not code: + return None + + try: + int(code[1:]) + except ValueError: + return None + + return code + + +def _get_package_version(): + packages = ["pycodestyle: {}".format(pycodestyle.__version__)] + return ", ".join(packages) + + +def create_parser(): + """Return command-line parser.""" + parser = argparse.ArgumentParser(description=docstring_summary(__doc__), + prog='autopep8') + parser.add_argument('--version', action='version', + version='%(prog)s {} ({})'.format( + __version__, _get_package_version())) + parser.add_argument('-v', '--verbose', action='count', + default=0, + help='print verbose messages; ' + 'multiple -v result in more verbose messages') + parser.add_argument('-d', '--diff', action='store_true', + help='print the diff for the fixed source') + parser.add_argument('-i', '--in-place', action='store_true', + help='make changes to files in place') + parser.add_argument('--global-config', metavar='filename', + default=DEFAULT_CONFIG, + help='path to a global pep8 config file; if this file ' + 'does not exist then this is ignored ' + '(default: {})'.format(DEFAULT_CONFIG)) + parser.add_argument('--ignore-local-config', action='store_true', + help="don't look for and apply local config files; " + 'if not passed, defaults are updated with any ' + "config files in the project's root directory") + parser.add_argument('-r', '--recursive', action='store_true', + help='run recursively over directories; ' + 'must be used with --in-place or --diff') + parser.add_argument('-j', '--jobs', type=int, metavar='n', default=1, + help='number of parallel jobs; ' + 'match CPU count if value is less than 1') + parser.add_argument('-p', '--pep8-passes', metavar='n', + default=-1, type=int, + help='maximum number of additional pep8 passes ' + '(default: infinite)') + parser.add_argument('-a', '--aggressive', action='count', default=0, + help='enable non-whitespace changes; ' + 'multiple -a result in more aggressive 
changes') + parser.add_argument('--experimental', action='store_true', + help='enable experimental fixes') + parser.add_argument('--exclude', metavar='globs', + help='exclude file/directory names that match these ' + 'comma-separated globs') + parser.add_argument('--list-fixes', action='store_true', + help='list codes for fixes; ' + 'used by --ignore and --select') + parser.add_argument('--ignore', metavar='errors', default='', + help='do not fix these errors/warnings ' + '(default: {})'.format(DEFAULT_IGNORE)) + parser.add_argument('--select', metavar='errors', default='', + help='fix only these errors/warnings (e.g. E4,W)') + parser.add_argument('--max-line-length', metavar='n', default=79, type=int, + help='set maximum allowed line length ' + '(default: %(default)s)') + parser.add_argument('--line-range', '--range', metavar='line', + default=None, type=int, nargs=2, + help='only fix errors found within this inclusive ' + 'range of line numbers (e.g. 1 99); ' + 'line numbers are indexed at 1') + parser.add_argument('--indent-size', default=DEFAULT_INDENT_SIZE, + type=int, help=argparse.SUPPRESS) + parser.add_argument('--hang-closing', action='store_true', + help='hang-closing option passed to pycodestyle') + parser.add_argument('--exit-code', action='store_true', + help='change to behavior of exit code.' + ' default behavior of return value, 0 is no ' + 'differences, 1 is error exit. return 2 when' + ' add this option. 2 is exists differences.') + parser.add_argument('files', nargs='*', + help="files to format or '-' for standard in") + + return parser + + +def _expand_codes(codes, ignore_codes): + """expand to individual E/W codes""" + ret = set() + + is_conflict = False + if all( + any( + conflicting_code.startswith(code) + for code in codes + ) + for conflicting_code in CONFLICTING_CODES + ): + is_conflict = True + + is_ignore_w503 = "W503" in ignore_codes + is_ignore_w504 = "W504" in ignore_codes + + for code in codes: + if code == "W": + if is_ignore_w503 and is_ignore_w504: + ret.update({"W1", "W2", "W3", "W505", "W6"}) + elif is_ignore_w503: + ret.update({"W1", "W2", "W3", "W504", "W505", "W6"}) + else: + ret.update({"W1", "W2", "W3", "W503", "W505", "W6"}) + elif code in ("W5", "W50"): + if is_ignore_w503 and is_ignore_w504: + ret.update({"W505"}) + elif is_ignore_w503: + ret.update({"W504", "W505"}) + else: + ret.update({"W503", "W505"}) + elif not (code in ("W503", "W504") and is_conflict): + ret.add(code) + + return ret + + +def parse_args(arguments, apply_config=False): + """Parse command-line options.""" + parser = create_parser() + args = parser.parse_args(arguments) + + if not args.files and not args.list_fixes: + parser.error('incorrect number of arguments') + + args.files = [decode_filename(name) for name in args.files] + + if apply_config: + parser = read_config(args, parser) + # prioritize settings when exist pyproject.toml's tool.autopep8 section + try: + parser_with_pyproject_toml = read_pyproject_toml(args, parser) + except Exception: + parser_with_pyproject_toml = None + if parser_with_pyproject_toml: + parser = parser_with_pyproject_toml + args = parser.parse_args(arguments) + args.files = [decode_filename(name) for name in args.files] + + if '-' in args.files: + if len(args.files) > 1: + parser.error('cannot mix stdin and regular files') + + if args.diff: + parser.error('--diff cannot be used with standard input') + + if args.in_place: + parser.error('--in-place cannot be used with standard input') + + if args.recursive: + parser.error('--recursive cannot be used 
with standard input') + + if len(args.files) > 1 and not (args.in_place or args.diff): + parser.error('autopep8 only takes one filename as argument ' + 'unless the "--in-place" or "--diff" args are ' + 'used') + + if args.recursive and not (args.in_place or args.diff): + parser.error('--recursive must be used with --in-place or --diff') + + if args.in_place and args.diff: + parser.error('--in-place and --diff are mutually exclusive') + + if args.max_line_length <= 0: + parser.error('--max-line-length must be greater than 0') + + if args.select: + args.select = _expand_codes( + _split_comma_separated(args.select), + (_split_comma_separated(args.ignore) if args.ignore else []) + ) + + if args.ignore: + args.ignore = _split_comma_separated(args.ignore) + if all( + not any( + conflicting_code.startswith(ignore_code) + for ignore_code in args.ignore + ) + for conflicting_code in CONFLICTING_CODES + ): + args.ignore.update(CONFLICTING_CODES) + elif not args.select: + if args.aggressive: + # Enable everything by default if aggressive. + args.select = {'E', 'W1', 'W2', 'W3', 'W6'} + else: + args.ignore = _split_comma_separated(DEFAULT_IGNORE) + + if args.exclude: + args.exclude = _split_comma_separated(args.exclude) + else: + args.exclude = {} + + if args.jobs < 1: + # Do not import multiprocessing globally in case it is not supported + # on the platform. + import multiprocessing + args.jobs = multiprocessing.cpu_count() + + if args.jobs > 1 and not (args.in_place or args.diff): + parser.error('parallel jobs requires --in-place') + + if args.line_range: + if args.line_range[0] <= 0: + parser.error('--range must be positive numbers') + if args.line_range[0] > args.line_range[1]: + parser.error('First value of --range should be less than or equal ' + 'to the second') + + return args + + +def _get_normalize_options(config, section, option_list): + for (k, _) in config.items(section): + norm_opt = k.lstrip('-').replace('-', '_') + if not option_list.get(norm_opt): + continue + opt_type = option_list[norm_opt] + if opt_type is int: + value = config.getint(section, k) + elif opt_type is bool: + value = config.getboolean(section, k) + else: + value = config.get(section, k) + yield norm_opt, k, value + + +def read_config(args, parser): + """Read both user configuration and local configuration.""" + config = SafeConfigParser() + + try: + config.read(args.global_config) + + if not args.ignore_local_config: + parent = tail = args.files and os.path.abspath( + os.path.commonprefix(args.files)) + while tail: + if config.read([os.path.join(parent, fn) + for fn in PROJECT_CONFIG]): + break + (parent, tail) = os.path.split(parent) + + defaults = {} + option_list = {o.dest: o.type or type(o.default) + for o in parser._actions} + + for section in ['pep8', 'pycodestyle', 'flake8']: + if not config.has_section(section): + continue + for norm_opt, k, value in _get_normalize_options(config, section, + option_list): + if args.verbose: + print("enable config: section={}, key={}, value={}".format( + section, k, value)) + defaults[norm_opt] = value + + parser.set_defaults(**defaults) + except Error: + # Ignore for now. 
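+        # A malformed config file is skipped rather than aborting the run.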
+ pass + + return parser + + +def read_pyproject_toml(args, parser): + """Read pyproject.toml and load configuration.""" + import toml + + config = None + + if os.path.exists(args.global_config): + with open(args.global_config) as fp: + config = toml.load(fp) + + if not args.ignore_local_config: + parent = tail = args.files and os.path.abspath( + os.path.commonprefix(args.files)) + while tail: + pyproject_toml = os.path.join(parent, "pyproject.toml") + if os.path.exists(pyproject_toml): + with open(pyproject_toml) as fp: + config = toml.load(fp) + break + (parent, tail) = os.path.split(parent) + + if not config: + return None + + if config.get("tool", {}).get("autopep8") is None: + return None + + config = config.get("tool").get("autopep8") + + defaults = {} + option_list = {o.dest: o.type or type(o.default) + for o in parser._actions} + + TUPLED_OPTIONS = ("ignore", "select") + for (k, v) in config.items(): + norm_opt = k.lstrip('-').replace('-', '_') + if not option_list.get(norm_opt): + continue + if type(v) in (list, tuple) and norm_opt in TUPLED_OPTIONS: + value = ",".join(v) + else: + value = v + if args.verbose: + print("enable pyproject.toml config: " + "key={}, value={}".format(k, value)) + defaults[norm_opt] = value + + if defaults: + # set value when exists key-value in defaults dict + parser.set_defaults(**defaults) + + return parser + + +def _split_comma_separated(string): + """Return a set of strings.""" + return {text.strip() for text in string.split(',') if text.strip()} + + +def decode_filename(filename): + """Return Unicode filename.""" + if isinstance(filename, unicode): + return filename + + return filename.decode(sys.getfilesystemencoding()) + + +def supported_fixes(): + """Yield pep8 error codes that autopep8 fixes. + + Each item we yield is a tuple of the code followed by its + description. + + """ + yield ('E101', docstring_summary(reindent.__doc__)) + + instance = FixPEP8(filename=None, options=None, contents='') + for attribute in dir(instance): + code = re.match('fix_([ew][0-9][0-9][0-9])', attribute) + if code: + yield ( + code.group(1).upper(), + re.sub(r'\s+', ' ', + docstring_summary(getattr(instance, attribute).__doc__)) + ) + + for (code, function) in sorted(global_fixes()): + yield (code.upper() + (4 - len(code)) * ' ', + re.sub(r'\s+', ' ', docstring_summary(function.__doc__))) + + for code in sorted(CODE_TO_2TO3): + yield (code.upper() + (4 - len(code)) * ' ', + re.sub(r'\s+', ' ', docstring_summary(fix_2to3.__doc__))) + + +def docstring_summary(docstring): + """Return summary of docstring.""" + return docstring.split('\n')[0] if docstring else '' + + +def line_shortening_rank(candidate, indent_word, max_line_length, + experimental=False): + """Return rank of candidate. + + This is for sorting candidates. + + """ + if not candidate.strip(): + return 0 + + rank = 0 + lines = candidate.rstrip().split('\n') + + offset = 0 + if ( + not lines[0].lstrip().startswith('#') and + lines[0].rstrip()[-1] not in '([{' + ): + for (opening, closing) in ('()', '[]', '{}'): + # Don't penalize empty containers that aren't split up. Things like + # this "foo(\n )" aren't particularly good. 
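+            # Measure candidate-line lengths from just past the first
+            # opening bracket whose closer is not immediately adjacent.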
+ opening_loc = lines[0].find(opening) + closing_loc = lines[0].find(closing) + if opening_loc >= 0: + if closing_loc < 0 or closing_loc != opening_loc + 1: + offset = max(offset, 1 + opening_loc) + + current_longest = max(offset + len(x.strip()) for x in lines) + + rank += 4 * max(0, current_longest - max_line_length) + + rank += len(lines) + + # Too much variation in line length is ugly. + rank += 2 * standard_deviation(len(line) for line in lines) + + bad_staring_symbol = { + '(': ')', + '[': ']', + '{': '}'}.get(lines[0][-1]) + + if len(lines) > 1: + if ( + bad_staring_symbol and + lines[1].lstrip().startswith(bad_staring_symbol) + ): + rank += 20 + + for lineno, current_line in enumerate(lines): + current_line = current_line.strip() + + if current_line.startswith('#'): + continue + + for bad_start in ['.', '%', '+', '-', '/']: + if current_line.startswith(bad_start): + rank += 100 + + # Do not tolerate operators on their own line. + if current_line == bad_start: + rank += 1000 + + if ( + current_line.endswith(('.', '%', '+', '-', '/')) and + "': " in current_line + ): + rank += 1000 + + if current_line.endswith(('(', '[', '{', '.')): + # Avoid lonely opening. They result in longer lines. + if len(current_line) <= len(indent_word): + rank += 100 + + # Avoid the ugliness of ", (\n". + if ( + current_line.endswith('(') and + current_line[:-1].rstrip().endswith(',') + ): + rank += 100 + + # Avoid the ugliness of "something[\n" and something[index][\n. + if ( + current_line.endswith('[') and + len(current_line) > 1 and + (current_line[-2].isalnum() or current_line[-2] in ']') + ): + rank += 300 + + # Also avoid the ugliness of "foo.\nbar" + if current_line.endswith('.'): + rank += 100 + + if has_arithmetic_operator(current_line): + rank += 100 + + # Avoid breaking at unary operators. + if re.match(r'.*[(\[{]\s*[\-\+~]$', current_line.rstrip('\\ ')): + rank += 1000 + + if re.match(r'.*lambda\s*\*$', current_line.rstrip('\\ ')): + rank += 1000 + + if current_line.endswith(('%', '(', '[', '{')): + rank -= 20 + + # Try to break list comprehensions at the "for". + if current_line.startswith('for '): + rank -= 50 + + if current_line.endswith('\\'): + # If a line ends in \-newline, it may be part of a + # multiline string. In that case, we would like to know + # how long that line is without the \-newline. If it's + # longer than the maximum, or has comments, then we assume + # that the \-newline is an okay candidate and only + # penalize it a bit. + total_len = len(current_line) + lineno += 1 + while lineno < len(lines): + total_len += len(lines[lineno]) + + if lines[lineno].lstrip().startswith('#'): + total_len = max_line_length + break + + if not lines[lineno].endswith('\\'): + break + + lineno += 1 + + if total_len < max_line_length: + rank += 10 + else: + rank += 100 if experimental else 1 + + # Prefer breaking at commas rather than colon. + if ',' in current_line and current_line.endswith(':'): + rank += 10 + + # Avoid splitting dictionaries between key and value. 
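+        # A candidate line ending in ':' would strand a dict key (or a
+        # slice) at the end of the line, so penalize it heavily.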
+ if current_line.endswith(':'): + rank += 100 + + rank += 10 * count_unbalanced_brackets(current_line) + + return max(0, rank) + + +def standard_deviation(numbers): + """Return standard deviation.""" + numbers = list(numbers) + if not numbers: + return 0 + mean = sum(numbers) / len(numbers) + return (sum((n - mean) ** 2 for n in numbers) / + len(numbers)) ** .5 + + +def has_arithmetic_operator(line): + """Return True if line contains any arithmetic operators.""" + for operator in pycodestyle.ARITHMETIC_OP: + if operator in line: + return True + + return False + + +def count_unbalanced_brackets(line): + """Return number of unmatched open/close brackets.""" + count = 0 + for opening, closing in ['()', '[]', '{}']: + count += abs(line.count(opening) - line.count(closing)) + + return count + + +def split_at_offsets(line, offsets): + """Split line at offsets. + + Return list of strings. + + """ + result = [] + + previous_offset = 0 + current_offset = 0 + for current_offset in sorted(offsets): + if current_offset < len(line) and previous_offset != current_offset: + result.append(line[previous_offset:current_offset].strip()) + previous_offset = current_offset + + result.append(line[current_offset:]) + + return result + + +class LineEndingWrapper(object): + + r"""Replace line endings to work with sys.stdout. + + It seems that sys.stdout expects only '\n' as the line ending, no matter + the platform. Otherwise, we get repeated line endings. + + """ + + def __init__(self, output): + self.__output = output + + def write(self, s): + self.__output.write(s.replace('\r\n', '\n').replace('\r', '\n')) + + def flush(self): + self.__output.flush() + + +def match_file(filename, exclude): + """Return True if file is okay for modifying/recursing.""" + base_name = os.path.basename(filename) + + if base_name.startswith('.'): + return False + + for pattern in exclude: + if fnmatch.fnmatch(base_name, pattern): + return False + if fnmatch.fnmatch(filename, pattern): + return False + + if not os.path.isdir(filename) and not is_python_file(filename): + return False + + return True + + +def find_files(filenames, recursive, exclude): + """Yield filenames.""" + while filenames: + name = filenames.pop(0) + if recursive and os.path.isdir(name): + for root, directories, children in os.walk(name): + filenames += [os.path.join(root, f) for f in children + if match_file(os.path.join(root, f), + exclude)] + directories[:] = [d for d in directories + if match_file(os.path.join(root, d), + exclude)] + else: + is_exclude_match = False + for pattern in exclude: + if fnmatch.fnmatch(name, pattern): + is_exclude_match = True + break + if not is_exclude_match: + yield name + + +def _fix_file(parameters): + """Helper function for optionally running fix_file() in parallel.""" + if parameters[1].verbose: + print('[file:{}]'.format(parameters[0]), file=sys.stderr) + try: + return fix_file(*parameters) + except IOError as error: + print(unicode(error), file=sys.stderr) + + +def fix_multiple_files(filenames, options, output=None): + """Fix list of files. + + Optionally fix files recursively. 
+ + """ + results = [] + filenames = find_files(filenames, options.recursive, options.exclude) + if options.jobs > 1: + import multiprocessing + pool = multiprocessing.Pool(options.jobs) + ret = pool.map(_fix_file, [(name, options) for name in filenames]) + if options.diff: + for r in ret: + sys.stdout.write(r.decode()) + sys.stdout.flush() + results.extend([x for x in ret if x is not None]) + else: + for name in filenames: + ret = _fix_file((name, options, output)) + if ret is None: + continue + if options.diff: + if ret != '': + results.append(ret) + elif options.in_place: + results.append(ret) + else: + original_source = readlines_from_file(name) + if "".join(original_source).splitlines() != ret.splitlines(): + results.append(ret) + return results + + +def is_python_file(filename): + """Return True if filename is Python file.""" + if filename.endswith('.py'): + return True + + try: + with open_with_encoding( + filename, + limit_byte_check=MAX_PYTHON_FILE_DETECTION_BYTES) as f: + text = f.read(MAX_PYTHON_FILE_DETECTION_BYTES) + if not text: + return False + first_line = text.splitlines()[0] + except (IOError, IndexError): + return False + + if not PYTHON_SHEBANG_REGEX.match(first_line): + return False + + return True + + +def is_probably_part_of_multiline(line): + """Return True if line is likely part of a multiline string. + + When multiline strings are involved, pep8 reports the error as being + at the start of the multiline string, which doesn't work for us. + + """ + return ( + '"""' in line or + "'''" in line or + line.rstrip().endswith('\\') + ) + + +def wrap_output(output, encoding): + """Return output with specified encoding.""" + return codecs.getwriter(encoding)(output.buffer + if hasattr(output, 'buffer') + else output) + + +def get_encoding(): + """Return preferred encoding.""" + return locale.getpreferredencoding() or sys.getdefaultencoding() + + +def main(argv=None, apply_config=True): + """Command-line entry.""" + if argv is None: + argv = sys.argv + + try: + # Exit on broken pipe. + signal.signal(signal.SIGPIPE, signal.SIG_DFL) + except AttributeError: # pragma: no cover + # SIGPIPE is not available on Windows. + pass + + try: + args = parse_args(argv[1:], apply_config=apply_config) + + if args.list_fixes: + for code, description in sorted(supported_fixes()): + print('{code} - {description}'.format( + code=code, description=description)) + return EXIT_CODE_OK + + if args.files == ['-']: + assert not args.in_place + + encoding = sys.stdin.encoding or get_encoding() + read_stdin = sys.stdin.read() + fixed_stdin = fix_code(read_stdin, args, encoding=encoding) + + # LineEndingWrapper is unnecessary here due to the symmetry between + # standard in and standard out. + wrap_output(sys.stdout, encoding=encoding).write(fixed_stdin) + + if hash(read_stdin) != hash(fixed_stdin): + if args.exit_code: + return EXIT_CODE_EXISTS_DIFF + else: + if args.in_place or args.diff: + args.files = list(set(args.files)) + else: + assert len(args.files) == 1 + assert not args.recursive + + results = fix_multiple_files(args.files, args, sys.stdout) + if args.diff: + ret = any([len(ret) != 0 for ret in results]) + else: + # with in-place option + ret = any([ret is not None for ret in results]) + if args.exit_code and ret: + return EXIT_CODE_EXISTS_DIFF + except KeyboardInterrupt: + return EXIT_CODE_ERROR # pragma: no cover + + +class CachedTokenizer(object): + + """A one-element cache around tokenize.generate_tokens(). + + Original code written by Ned Batchelder, in coverage.py. 
+ + """ + + def __init__(self): + self.last_text = None + self.last_tokens = None + + def generate_tokens(self, text): + """A stand-in for tokenize.generate_tokens().""" + if text != self.last_text: + string_io = io.StringIO(text) + self.last_tokens = list( + tokenize.generate_tokens(string_io.readline) + ) + self.last_text = text + return self.last_tokens + + +_cached_tokenizer = CachedTokenizer() +generate_tokens = _cached_tokenizer.generate_tokens + + +if __name__ == '__main__': + sys.exit(main()) diff --git a/venv3/lib/python3.7/site-packages/pycodestyle-2.6.0.dist-info/INSTALLER b/venv3/lib/python3.7/site-packages/pycodestyle-2.6.0.dist-info/INSTALLER new file mode 100644 index 0000000..a1b589e --- /dev/null +++ b/venv3/lib/python3.7/site-packages/pycodestyle-2.6.0.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/venv3/lib/python3.7/site-packages/pycodestyle-2.6.0.dist-info/LICENSE b/venv3/lib/python3.7/site-packages/pycodestyle-2.6.0.dist-info/LICENSE new file mode 100644 index 0000000..72d9921 --- /dev/null +++ b/venv3/lib/python3.7/site-packages/pycodestyle-2.6.0.dist-info/LICENSE @@ -0,0 +1,25 @@ +Copyright © 2006-2009 Johann C. Rocholl +Copyright © 2009-2014 Florent Xicluna +Copyright © 2014-2020 Ian Lee + +Licensed under the terms of the Expat License + +Permission is hereby granted, free of charge, to any person +obtaining a copy of this software and associated documentation files +(the "Software"), to deal in the Software without restriction, +including without limitation the rights to use, copy, modify, merge, +publish, distribute, sublicense, and/or sell copies of the Software, +and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS +BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN +ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/venv3/lib/python3.7/site-packages/pycodestyle-2.6.0.dist-info/METADATA b/venv3/lib/python3.7/site-packages/pycodestyle-2.6.0.dist-info/METADATA new file mode 100644 index 0000000..7287b4e --- /dev/null +++ b/venv3/lib/python3.7/site-packages/pycodestyle-2.6.0.dist-info/METADATA @@ -0,0 +1,1035 @@ +Metadata-Version: 2.1 +Name: pycodestyle +Version: 2.6.0 +Summary: Python style guide checker +Home-page: https://pycodestyle.readthedocs.io/ +Author: Johann C. 
Rocholl +Author-email: johann@rocholl.net +Maintainer: Ian Lee +Maintainer-email: IanLee1521@gmail.com +License: Expat license +Keywords: pycodestyle,pep8,PEP 8,PEP-8,PEP8 +Platform: UNKNOWN +Classifier: Development Status :: 5 - Production/Stable +Classifier: Environment :: Console +Classifier: Intended Audience :: Developers +Classifier: License :: OSI Approved :: MIT License +Classifier: Operating System :: OS Independent +Classifier: Programming Language :: Python +Classifier: Programming Language :: Python :: 2 +Classifier: Programming Language :: Python :: 2.7 +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.4 +Classifier: Programming Language :: Python :: 3.5 +Classifier: Programming Language :: Python :: 3.6 +Classifier: Programming Language :: Python :: 3.7 +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: Implementation :: CPython +Classifier: Programming Language :: Python :: Implementation :: PyPy +Classifier: Topic :: Software Development :: Libraries :: Python Modules +Requires-Python: >=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.* + +pycodestyle (formerly called pep8) - Python style guide checker +=============================================================== + +.. image:: https://img.shields.io/travis/PyCQA/pycodestyle.svg + :target: https://travis-ci.org/PyCQA/pycodestyle + :alt: Build status + +.. image:: https://readthedocs.org/projects/pycodestyle/badge/?version=latest + :target: https://pycodestyle.readthedocs.io + :alt: Documentation Status + +.. image:: https://img.shields.io/pypi/wheel/pycodestyle.svg + :target: https://pypi.org/project/pycodestyle/ + :alt: Wheel Status + +.. image:: https://badges.gitter.im/PyCQA/pycodestyle.svg + :alt: Join the chat at https://gitter.im/PyCQA/pycodestyle + :target: https://gitter.im/PyCQA/pycodestyle?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge + +pycodestyle is a tool to check your Python code against some of the style +conventions in `PEP 8`_. + +.. _PEP 8: http://www.python.org/dev/peps/pep-0008/ + +.. note:: + + This package used to be called ``pep8`` but was renamed to ``pycodestyle`` + to reduce confusion. Further discussion can be found `in the issue where + Guido requested this + change `_, or in the + lightning talk at PyCon 2016 by @IanLee1521: + `slides `_ + `video `_. + +Features +-------- + +* Plugin architecture: Adding new checks is easy. + +* Parseable output: Jump to error location in your editor. + +* Small: Just one Python file, requires only stdlib. You can use just + the ``pycodestyle.py`` file for this purpose. + +* Comes with a comprehensive test suite. + +Installation +------------ + +You can install, upgrade, and uninstall ``pycodestyle.py`` with these commands:: + + $ pip install pycodestyle + $ pip install --upgrade pycodestyle + $ pip uninstall pycodestyle + +There's also a package for Debian/Ubuntu, but it's not always the +latest version. 
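+
+The checker can also be driven from Python code. A minimal sketch, assuming
+pycodestyle is installed as above (``pycodestyle.StyleGuide`` is the entry
+point also referenced in the changelog below; ``optparse.py`` stands in for
+any file you want to check)::
+
+    import pycodestyle
+
+    # Check one file and report the total number of style problems.
+    style = pycodestyle.StyleGuide(max_line_length=79)
+    result = style.check_files(['optparse.py'])
+    print('%d problem(s) found' % result.total_errors)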
+
+Example usage and output
+------------------------
+
+::
+
+    $ pycodestyle --first optparse.py
+    optparse.py:69:11: E401 multiple imports on one line
+    optparse.py:77:1: E302 expected 2 blank lines, found 1
+    optparse.py:88:5: E301 expected 1 blank line, found 0
+    optparse.py:222:34: W602 deprecated form of raising exception
+    optparse.py:347:31: E211 whitespace before '('
+    optparse.py:357:17: E201 whitespace after '{'
+    optparse.py:472:29: E221 multiple spaces before operator
+    optparse.py:544:21: W601 .has_key() is deprecated, use 'in'
+
+You can also make ``pycodestyle.py`` show the source code for each error, and
+even the relevant text from PEP 8::
+
+    $ pycodestyle --show-source --show-pep8 testsuite/E40.py
+    testsuite/E40.py:2:10: E401 multiple imports on one line
+    import os, sys
+             ^
+    Imports should usually be on separate lines.
+
+    Okay: import os\nimport sys
+    E401: import sys, os
+
+
+Or you can display how often each error was found::
+
+    $ pycodestyle --statistics -qq Python-2.5/Lib
+    232     E201 whitespace after '['
+    599     E202 whitespace before ')'
+    631     E203 whitespace before ','
+    842     E211 whitespace before '('
+    2531    E221 multiple spaces before operator
+    4473    E301 expected 1 blank line, found 0
+    4006    E302 expected 2 blank lines, found 1
+    165     E303 too many blank lines (4)
+    325     E401 multiple imports on one line
+    3615    E501 line too long (82 characters)
+    612     W601 .has_key() is deprecated, use 'in'
+    1188    W602 deprecated form of raising exception
+
+Links
+-----
+
+* `Read the documentation <https://pycodestyle.readthedocs.io/>`_
+
+* `Fork me on GitHub <https://github.com/PyCQA/pycodestyle>`_
+
+
+Changelog
+=========
+
+2.6.0 (2020-05-11)
+------------------
+
+Announcements:
+
+* Anthony Sottile (@asottile) joined the team as a core developer. :tada:
+
+Changes:
+
+* E306: fix detection inside ``async def``. PR #929.
+* E301: fix regression disallowing decorated one-liners. PR #927.
+* E714: fix false positive with chained ``is not``. PR #931.
+
+
+2.6.0a1 (2020-04-23)
+--------------------
+
+New checks:
+
+* E225: require whitespace around ``and`` ``in`` ``is`` and ``or``. PR #847.
+
+Changes:
+
+* E117: fix indentation using tabs by treating as 8-space indents. PR #837.
+* E721: fix false positive with names containg ``istype``. PR #850.
+* E741: allow ``l`` as a named argument in a function call. PR #853.
+* E302: fix false-negative with decorated functions. PR #859.
+* W504: ellipsis (``...``) is no longer treated as a binary operator. PR #875.
+* E402: allow ``with``, ``if``, ``elif``, ``else`` to guard imports. PR #834.
+* Add support for assignment expressions ``:=`` (PEP 572). PR #879.
+* Add support for positional-only arguments ``/`` (PEP 570). PR #872, #918.
+* Add support for python 3.8.
+* Add support for matrix multiplication operator ``@`` (PEP 465). PR #897.
+* Support visual indent for continuation lines for ``with`` / ``assert`` /
+  ``raise``. PR #912.
+* E302: allow two blank lines after a block of one-liners. PR #913.
+* E302: allow two-and-fewer newlines at the top of the file. PR #919.
+
+
+2.5.0 (2019-01-29)
+------------------
+
+New checks:
+
+* E117: Over-indented code blocks
+* W505: Maximum doc-string length only when configured with --max-doc-length
+
+Changes:
+
+* Remove support for EOL Python 2.6 and 3.3. PR #720.
+* Add E117 error for over-indented code blocks.
+* Allow W605 to be silenced by `# noqa` and fix the position reported by W605 +* Allow users to omit blank lines around one-liner definitions of classes and + functions +* Include the function return annotation (``->``) as requiring surrounding + whitespace only on Python 3 +* Verify that only names can follow ``await``. Previously we allowed numbers + and strings. +* Add support for Python 3.7 +* Fix detection of annotated argument defaults for E252 +* Correct the position reported by W504 + + +2.4.0 (2018-04-10) +------------------ + +New checks: + +* Add W504 warning for checking that a break doesn't happen after a binary + operator. This check is ignored by default. PR #502. +* Add W605 warning for invalid escape sequences in string literals. PR #676. +* Add W606 warning for 'async' and 'await' reserved keywords being introduced + in Python 3.7. PR #684. +* Add E252 error for missing whitespace around equal sign in type annotated + function arguments with defaults values. PR #717. + +Changes: + +* An internal bisect search has replaced a linear search in order to improve + efficiency. PR #648. +* pycodestyle now uses PyPI trove classifiers in order to document supported + python versions on PyPI. PR #654. +* 'setup.cfg' '[wheel]' section has been renamed to '[bdist_wheel]', as + the former is legacy. PR #653. +* pycodestyle now handles very long lines much more efficiently for python + 3.2+. Fixes #643. PR #644. +* You can now write 'pycodestyle.StyleGuide(verbose=True)' instead of + 'pycodestyle.StyleGuide(verbose=True, paths=['-v'])' in order to achieve + verbosity. PR #663. +* The distribution of pycodestyle now includes the license text in order to + comply with open source licenses which require this. PR #694. +* 'maximum_line_length' now ignores shebang ('#!') lines. PR #736. +* Add configuration option for the allowed number of blank lines. It is + implemented as a top level dictionary which can be easily overwritten. Fixes + #732. PR #733. + +Bugs: + +* Prevent a 'DeprecationWarning', and a 'SyntaxError' in future python, caused + by an invalid escape sequence. PR #625. +* Correctly report E501 when the first line of a docstring is too long. + Resolves #622. PR #630. +* Support variable annotation when variable start by a keyword, such as class + variable type annotations in python 3.6. PR #640. +* pycodestyle internals have been changed in order to allow 'python3 -m + cProfile' to report correct metrics. PR #647. +* Fix a spelling mistake in the description of E722. PR #697. +* 'pycodestyle --diff' now does not break if your 'gitconfig' enables + 'mnemonicprefix'. PR #706. + +2.3.1 (2017-01-31) +------------------ + +Bugs: + +* Fix regression in detection of E302 and E306; #618, #620 + +2.3.0 (2017-01-30) +------------------ + +New Checks: + +* Add E722 warning for bare ``except`` clauses +* Report E704 for async function definitions (``async def``) + +Bugs: + +* Fix another E305 false positive for variables beginning with "class" or + "def" +* Fix detection of multiple spaces between ``async`` and ``def`` +* Fix handling of variable annotations. Stop reporting E701 on Python 3.6 for + variable annotations. 
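
To make the E722 check added in 2.3.0 above concrete: a bare ``except`` clause
also swallows ``SystemExit`` and ``KeyboardInterrupt``, which is why naming the
exception class is preferred (illustrative snippet, not taken from the
upstream test suite)::

    try:
        process()
    except:            # E722 do not use bare 'except'
        rollback()

    try:
        process()
    except Exception:  # Okay: errors are caught, SystemExit still propagates
        rollback()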
+ +2.2.0 (2016-11-14) +------------------ + +Announcements: + +* Added Make target to obtain proper tarball file permissions; #599 + +Bugs: + +* Fixed E305 regression caused by #400; #593 + +2.1.0 (2016-11-04) +------------------ + +Announcements: + +* Change all references to the pep8 project to say pycodestyle; #530 + +Changes: + +* Report E302 for blank lines before an "async def"; #556 +* Update our list of tested and supported Python versions which are 2.6, 2.7, + 3.2, 3.3, 3.4 and 3.5 as well as the nightly Python build and PyPy. +* Report E742 and E743 for functions and classes badly named 'l', 'O', or 'I'. +* Report E741 on 'global' and 'nonlocal' statements, as well as prohibited + single-letter variables. +* Deprecated use of `[pep8]` section name in favor of `[pycodestyle]`; #591 +* Report E722 when bare except clause is used; #579 + +Bugs: + +* Fix opt_type AssertionError when using Flake8 2.6.2 and pycodestyle; #561 +* Require two blank lines after toplevel def, class; #536 +* Remove accidentally quadratic computation based on the number of colons. This + will make pycodestyle faster in some cases; #314 + +2.0.0 (2016-05-31) +------------------ + +Announcements: + +* Repository renamed to `pycodestyle`; Issue #466 / #481. +* Added joint Code of Conduct as member of PyCQA; #483 + +Changes: + +* Added tox test support for Python 3.5 and pypy3 +* Added check E275 for whitespace on `from ... import ...` lines; #489 / #491 +* Added W503 to the list of codes ignored by default ignore list; #498 +* Removed use of project level `.pep8` configuration file; #364 + +Bugs: + +* Fixed bug with treating `~` operator as binary; #383 / #384 +* Identify binary operators as unary; #484 / #485 + +1.7.0 (2016-01-12) +------------------ + +Announcements: + +* Repository moved to PyCQA Organization on GitHub: + https://github.com/pycqa/pep8 + +Changes: + +* Reverted the fix in #368, "options passed on command line are only ones + accepted" feature. This has many unintended consequences in pep8 and flake8 + and needs to be reworked when I have more time. +* Added support for Python 3.5. (Issue #420 & #459) +* Added support for multi-line config_file option parsing. (Issue #429) +* Improved parameter parsing. (Issues #420 & #456) + +Bugs: + +* Fixed BytesWarning on Python 3. (Issue #459) + +1.6.2 (2015-02-15) +------------------ + +Changes: + +* Added check for breaking around a binary operator. (Issue #197, Pull #305) + +Bugs: + +* Restored config_file parameter in process_options(). (Issue #380) + + +1.6.1 (2015-02-08) +------------------ + +Changes: + +* Assign variables before referenced. (Issue #287) + +Bugs: + +* Exception thrown due to unassigned ``local_dir`` variable. (Issue #377) + + +1.6.0 (2015-02-06) +------------------ + +News: + +* Ian Lee joined the project as a maintainer. + +Changes: + +* Report E731 for lambda assignment. (Issue #277) + +* Report E704 for one-liner def instead of E701. + Do not report this error in the default configuration. (Issue #277) + +* Replace codes E111, E112 and E113 with codes E114, E115 and E116 + for bad indentation of comments. (Issue #274) + +* Report E266 instead of E265 when the block comment starts with + multiple ``#``. (Issue #270) + +* Report E402 for import statements not at the top of the file. (Issue #264) + +* Do not enforce whitespaces around ``**`` operator. (Issue #292) + +* Strip whitespace from around paths during normalization. (Issue #339 / #343) + +* Update ``--format`` documentation. 
(Issue #198 / Pull Request #310) + +* Add ``.tox/`` to default excludes. (Issue #335) + +* Do not report E121 or E126 in the default configuration. (Issues #256 / #316) + +* Allow spaces around the equals sign in an annotated function. (Issue #357) + +* Allow trailing backslash if in an inline comment. (Issue #374) + +* If ``--config`` is used, only that configuration is processed. Otherwise, + merge the user and local configurations are merged. (Issue #368 / #369) + +Bug fixes: + +* Don't crash if Checker.build_tokens_line() returns None. (Issue #306) + +* Don't crash if os.path.expanduser() throws an ImportError. (Issue #297) + +* Missing space around keyword parameter equal not always reported, E251. + (Issue #323) + +* Fix false positive E711/E712/E713. (Issues #330 and #336) + +* Do not skip physical checks if the newline is escaped. (Issue #319) + +* Flush sys.stdout to avoid race conditions with printing. See flake8 bug: + https://gitlab.com/pycqa/flake8/issues/17 for more details. (Issue #363) + + +1.5.7 (2014-05-29) +------------------ + +Bug fixes: + +* Skip the traceback on "Broken pipe" signal. (Issue #275) + +* Do not exit when an option in ``setup.cfg`` or ``tox.ini`` + is not recognized. + +* Check the last line even if it does not end with a newline. (Issue #286) + +* Always open files in universal newlines mode in Python 2. (Issue #288) + + +1.5.6 (2014-04-14) +------------------ + +Bug fixes: + +* Check the last line even if it has no end-of-line. (Issue #273) + + +1.5.5 (2014-04-10) +------------------ + +Bug fixes: + +* Fix regression with E22 checks and inline comments. (Issue #271) + + +1.5.4 (2014-04-07) +------------------ + +Bug fixes: + +* Fix negative offset with E303 before a multi-line docstring. + (Issue #269) + + +1.5.3 (2014-04-04) +------------------ + +Bug fixes: + +* Fix wrong offset computation when error is on the last char + of a physical line. (Issue #268) + + +1.5.2 (2014-04-04) +------------------ + +Changes: + +* Distribute a universal wheel file. + +Bug fixes: + +* Report correct line number for E303 with comments. (Issue #60) + +* Do not allow newline after parameter equal. (Issue #252) + +* Fix line number reported for multi-line strings. (Issue #220) + +* Fix false positive E121/E126 with multi-line strings. (Issue #265) + +* Fix E501 not detected in comments with Python 2.5. + +* Fix caret position with ``--show-source`` when line contains tabs. + + +1.5.1 (2014-03-27) +------------------ + +Bug fixes: + +* Fix a crash with E125 on multi-line strings. (Issue #263) + + +1.5 (2014-03-26) +---------------- + +Changes: + +* Report E129 instead of E125 for visually indented line with same + indent as next logical line. (Issue #126) + +* Report E265 for space before block comment. (Issue #190) + +* Report E713 and E714 when operators ``not in`` and ``is not`` are + recommended. (Issue #236) + +* Allow long lines in multiline strings and comments if they cannot + be wrapped. (Issue #224). + +* Optionally disable physical line checks inside multiline strings, + using ``# noqa``. (Issue #242) + +* Change text for E121 to report "continuation line under-indented + for hanging indent" instead of indentation not being a + multiple of 4. + +* Report E131 instead of E121 / E126 if the hanging indent is not + consistent within the same continuation block. It helps when + error E121 or E126 is in the ``ignore`` list. + +* Report E126 instead of E121 when the continuation line is hanging + with extra indentation, even if indentation is not a multiple of 4. 
+ +Bug fixes: + +* Allow the checkers to report errors on empty files. (Issue #240) + +* Fix ignoring too many checks when ``--select`` is used with codes + declared in a flake8 extension. (Issue #216) + +* Fix regression with multiple brackets. (Issue #214) + +* Fix ``StyleGuide`` to parse the local configuration if the + keyword argument ``paths`` is specified. (Issue #246) + +* Fix a false positive E124 for hanging indent. (Issue #254) + +* Fix a false positive E126 with embedded colon. (Issue #144) + +* Fix a false positive E126 when indenting with tabs. (Issue #204) + +* Fix behaviour when ``exclude`` is in the configuration file and + the current directory is not the project directory. (Issue #247) + +* The logical checks can return ``None`` instead of an empty iterator. + (Issue #250) + +* Do not report multiple E101 if only the first indentation starts + with a tab. (Issue #237) + +* Fix a rare false positive W602. (Issue #34) + + +1.4.6 (2013-07-02) +------------------ + +Changes: + +* Honor ``# noqa`` for errors E711 and E712. (Issue #180) + +* When both a ``tox.ini`` and a ``setup.cfg`` are present in the project + directory, merge their contents. The ``tox.ini`` file takes + precedence (same as before). (Issue #182) + +* Give priority to ``--select`` over ``--ignore``. (Issue #188) + +* Compare full path when excluding a file. (Issue #186) + +* New option ``--hang-closing`` to switch to the alternative style of + closing bracket indentation for hanging indent. Add error E133 for + closing bracket which is missing indentation. (Issue #103) + +* Accept both styles of closing bracket indentation for hanging indent. + Do not report error E123 in the default configuration. (Issue #103) + +Bug fixes: + +* Do not crash when running AST checks and the document contains null bytes. + (Issue #184) + +* Correctly report other E12 errors when E123 is ignored. (Issue #103) + +* Fix false positive E261/E262 when the file contains a BOM. (Issue #193) + +* Fix E701, E702 and E703 not detected sometimes. (Issue #196) + +* Fix E122 not detected in some cases. (Issue #201 and #208) + +* Fix false positive E121 with multiple brackets. (Issue #203) + + +1.4.5 (2013-03-06) +------------------ + +* When no path is specified, do not try to read from stdin. The feature + was added in 1.4.3, but it is not supported on Windows. Use ``-`` + filename argument to read from stdin. This usage is supported + since 1.3.4. (Issue #170) + +* Do not require ``setuptools`` in setup.py. It works around an issue + with ``pip`` and Python 3. (Issue #172) + +* Add ``__pycache__`` to the ignore list. + +* Change misleading message for E251. (Issue #171) + +* Do not report false E302 when the source file has a coding cookie or a + comment on the first line. (Issue #174) + +* Reorganize the tests and add tests for the API and for the command line + usage and options. (Issues #161 and #162) + +* Ignore all checks which are not explicitly selected when ``select`` is + passed to the ``StyleGuide`` constructor. + + +1.4.4 (2013-02-24) +------------------ + +* Report E227 or E228 instead of E225 for whitespace around bitwise, shift + or modulo operators. (Issue #166) + +* Change the message for E226 to make clear that it is about arithmetic + operators. + +* Fix a false positive E128 for continuation line indentation with tabs. + +* Fix regression with the ``--diff`` option. (Issue #169) + +* Fix the ``TestReport`` class to print the unexpected warnings and + errors. 
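
The 1.4.4 entry above splits the old catch-all E225 by operator family, so the
arithmetic, bitwise/shift and modulo cases now get their own codes (examples
adapted from the check docstrings that appear later in this patch)::

    i=i+1                     # E225 missing whitespace around operator
    c = a|b                   # E227 missing whitespace around bitwise or shift operator
    msg = fmt%(errno, errmsg) # E228 missing whitespace around modulo operator
    hypot2 = x*x + y*y        # E226 arithmetic operator, ignored by default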
+ + +1.4.3 (2013-02-22) +------------------ + +* Hide the ``--doctest`` and ``--testsuite`` options when installed. + +* Fix crash with AST checkers when the syntax is invalid. (Issue #160) + +* Read from standard input if no path is specified. + +* Initiate a graceful shutdown on ``Control+C``. + +* Allow changing the ``checker_class`` for the ``StyleGuide``. + + +1.4.2 (2013-02-10) +------------------ + +* Support AST checkers provided by third-party applications. + +* Register new checkers with ``register_check(func_or_cls, codes)``. + +* Allow constructing a ``StyleGuide`` with a custom parser. + +* Accept visual indentation without parenthesis after the ``if`` + statement. (Issue #151) + +* Fix UnboundLocalError when using ``# noqa`` with continued lines. + (Issue #158) + +* Re-order the lines for the ``StandardReport``. + +* Expand tabs when checking E12 continuation lines. (Issue #155) + +* Refactor the testing class ``TestReport`` and the specific test + functions into a separate test module. + + +1.4.1 (2013-01-18) +------------------ + +* Allow sphinx.ext.autodoc syntax for comments. (Issue #110) + +* Report E703 instead of E702 for the trailing semicolon. (Issue #117) + +* Honor ``# noqa`` in addition to ``# nopep8``. (Issue #149) + +* Expose the ``OptionParser`` factory for better extensibility. + + +1.4 (2012-12-22) +---------------- + +* Report E226 instead of E225 for optional whitespace around common + operators (``*``, ``**``, ``/``, ``+`` and ``-``). This new error + code is ignored in the default configuration because PEP 8 recommends + to "use your own judgement". (Issue #96) + +* Lines with a ``# nopep8`` at the end will not issue errors on line + length E501 or continuation line indentation E12*. (Issue #27) + +* Fix AssertionError when the source file contains an invalid line + ending ``"\r\r\n"``. (Issue #119) + +* Read the ``[pep8]`` section of ``tox.ini`` or ``setup.cfg`` if present. + (Issue #93 and #141) + +* Add the Sphinx-based documentation, and publish it + on https://pycodestyle.readthedocs.io/. (Issue #105) + + +1.3.4 (2012-12-18) +------------------ + +* Fix false positive E124 and E128 with comments. (Issue #100) + +* Fix error on stdin when running with bpython. (Issue #101) + +* Fix false positive E401. (Issue #104) + +* Report E231 for nested dictionary in list. (Issue #142) + +* Catch E271 at the beginning of the line. (Issue #133) + +* Fix false positive E126 for multi-line comments. (Issue #138) + +* Fix false positive E221 when operator is preceded by a comma. (Issue #135) + +* Fix ``--diff`` failing on one-line hunk. (Issue #137) + +* Fix the ``--exclude`` switch for directory paths. (Issue #111) + +* Use ``-`` filename to read from standard input. (Issue #128) + + +1.3.3 (2012-06-27) +------------------ + +* Fix regression with continuation line checker. (Issue #98) + + +1.3.2 (2012-06-26) +------------------ + +* Revert to the previous behaviour for ``--show-pep8``: + do not imply ``--first``. (Issue #89) + +* Add E902 for IO errors. (Issue #87) + +* Fix false positive for E121, and missed E124. (Issue #92) + +* Set a sensible default path for config file on Windows. (Issue #95) + +* Allow ``verbose`` in the configuration file. (Issue #91) + +* Show the enforced ``max-line-length`` in the error message. (Issue #86) + + +1.3.1 (2012-06-18) +------------------ + +* Explain which configuration options are expected. Accept and recommend + the options names with hyphen instead of underscore. 
(Issue #82) + +* Do not read the user configuration when used as a module + (except if ``config_file=True`` is passed to the ``StyleGuide`` constructor). + +* Fix wrong or missing cases for the E12 series. + +* Fix cases where E122 was missed. (Issue #81) + + +1.3 (2012-06-15) +---------------- + +.. warning:: + The internal API is backwards incompatible. + +* Remove global configuration and refactor the library around + a ``StyleGuide`` class; add the ability to configure various + reporters. (Issue #35 and #66) + +* Read user configuration from ``~/.config/pep8`` + and local configuration from ``./.pep8``. (Issue #22) + +* Fix E502 for backslash embedded in multi-line string. (Issue #68) + +* Fix E225 for Python 3 iterable unpacking (PEP 3132). (Issue #72) + +* Enable the new checkers from the E12 series in the default + configuration. + +* Suggest less error-prone alternatives for E712 errors. + +* Rewrite checkers to run faster (E22, E251, E27). + +* Fixed a crash when parsed code is invalid (too many + closing brackets). + +* Fix E127 and E128 for continuation line indentation. (Issue #74) + +* New option ``--format`` to customize the error format. (Issue #23) + +* New option ``--diff`` to check only modified code. The unified + diff is read from STDIN. Example: ``hg diff | pep8 --diff`` + (Issue #39) + +* Correctly report the count of failures and set the exit code to 1 + when the ``--doctest`` or the ``--testsuite`` fails. + +* Correctly detect the encoding in Python 3. (Issue #69) + +* Drop support for Python 2.3, 2.4 and 3.0. (Issue #78) + + +1.2 (2012-06-01) +---------------- + +* Add E121 through E128 for continuation line indentation. These + checks are disabled by default. If you want to force all checks, + use switch ``--select=E,W``. Patch by Sam Vilain. (Issue #64) + +* Add E721 for direct type comparisons. (Issue #47) + +* Add E711 and E712 for comparisons to singletons. (Issue #46) + +* Fix spurious E225 and E701 for function annotations. (Issue #29) + +* Add E502 for explicit line join between brackets. + +* Fix E901 when printing source with ``--show-source``. + +* Report all errors for each checker, instead of reporting only the + first occurrence for each line. + +* Option ``--show-pep8`` implies ``--first``. + + +1.1 (2012-05-24) +---------------- + +* Add E901 for syntax errors. (Issues #63 and #30) + +* Add E271, E272, E273 and E274 for extraneous whitespace around + keywords. (Issue #57) + +* Add ``tox.ini`` configuration file for tests. (Issue #61) + +* Add ``.travis.yml`` configuration file for continuous integration. + (Issue #62) + + +1.0.1 (2012-04-06) +------------------ + +* Fix inconsistent version numbers. + + +1.0 (2012-04-04) +---------------- + +* Fix W602 ``raise`` to handle multi-char names. (Issue #53) + + +0.7.0 (2012-03-26) +------------------ + +* Now ``--first`` prints only the first occurrence of each error. + The ``--repeat`` flag becomes obsolete because it is the default + behaviour. (Issue #6) + +* Allow specifying ``--max-line-length``. (Issue #36) + +* Make the shebang more flexible. (Issue #26) + +* Add testsuite to the bundle. (Issue #25) + +* Fixes for Jython. (Issue #49) + +* Add PyPI classifiers. (Issue #43) + +* Fix the ``--exclude`` option. (Issue #48) + +* Fix W602, accept ``raise`` with 3 arguments. (Issue #34) + +* Correctly select all tests if ``DEFAULT_IGNORE == ''``. + + +0.6.1 (2010-10-03) +------------------ + +* Fix inconsistent version numbers. 
(Issue #21) + + +0.6.0 (2010-09-19) +------------------ + +* Test suite reorganized and enhanced in order to check more failures + with fewer test files. Read the ``run_tests`` docstring for details + about the syntax. + +* Fix E225: accept ``print >>sys.stderr, "..."`` syntax. + +* Fix E501 for lines containing multibyte encoded characters. (Issue #7) + +* Fix E221, E222, E223, E224 not detected in some cases. (Issue #16) + +* Fix E211 to reject ``v = dic['a'] ['b']``. (Issue #17) + +* Exit code is always 1 if any error or warning is found. (Issue #10) + +* ``--ignore`` checks are now really ignored, especially in + conjunction with ``--count``. (Issue #8) + +* Blank lines with spaces yield W293 instead of W291: some developers + want to ignore this warning and indent the blank lines to paste their + code easily in the Python interpreter. + +* Fix E301: do not require a blank line before an indented block. (Issue #14) + +* Fix E203 to accept NumPy slice notation ``a[0, :]``. (Issue #13) + +* Performance improvements. + +* Fix decoding and checking non-UTF8 files in Python 3. + +* Fix E225: reject ``True+False`` when running on Python 3. + +* Fix an exception when the line starts with an operator. + +* Allow a new line before closing ``)``, ``}`` or ``]``. (Issue #5) + + +0.5.0 (2010-02-17) +------------------ + +* Changed the ``--count`` switch to print to sys.stderr and set + exit code to 1 if any error or warning is found. + +* E241 and E242 are removed from the standard checks. If you want to + include these checks, use switch ``--select=E,W``. (Issue #4) + +* Blank line is not mandatory before the first class method or nested + function definition, even if there's a docstring. (Issue #1) + +* Add the switch ``--version``. + +* Fix decoding errors with Python 3. (Issue #13 [1]_) + +* Add ``--select`` option which is mirror of ``--ignore``. + +* Add checks E261 and E262 for spaces before inline comments. + +* New check W604 warns about deprecated usage of backticks. + +* New check W603 warns about the deprecated operator ``<>``. + +* Performance improvement, due to rewriting of E225. + +* E225 now accepts: + + - no whitespace after unary operator or similar. (Issue #9 [1]_) + + - lambda function with argument unpacking or keyword defaults. + +* Reserve "2 blank lines" for module-level logical blocks. (E303) + +* Allow multi-line comments. (E302, issue #10 [1]_) + + +0.4.2 (2009-10-22) +------------------ + +* Decorators on classes and class methods are OK now. + + +0.4 (2009-10-20) +---------------- + +* Support for all versions of Python from 2.3 to 3.1. + +* New and greatly expanded self tests. + +* Added ``--count`` option to print the total number of errors and warnings. + +* Further improvements to the handling of comments and blank lines. + (Issue #1 [1]_ and others changes.) + +* Check all py files in directory when passed a directory (Issue + #2 [1]_). This also prevents an exception when traversing directories + with non ``*.py`` files. + +* E231 should allow commas to be followed by ``)``. (Issue #3 [1]_) + +* Spaces are no longer required around the equals sign for keyword + arguments or default parameter values. + + +.. [1] These issues refer to the `previous issue tracker`__. +.. __: http://github.com/cburroughs/pep8.py/issues + + +0.3.1 (2009-09-14) +------------------ + +* Fixes for comments: do not count them when checking for blank lines between + items. + +* Added setup.py for pypi upload and easy_installability. 
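
The inline-comment checks E261 and E262 introduced in 0.5.0 above can be
summarised in the Okay/error notation that the check docstrings later in this
patch use::

    Okay: x = x + 1  # Increment x
    E261: x = x + 1 # Increment x
    E262: x = x + 1  ## Increment x
    E262: x = x + 1  #Increment x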
+ + +0.2 (2007-10-16) +---------------- + +* Loads of fixes and improvements. + + +0.1 (2006-10-01) +---------------- + +* First release. + + diff --git a/venv3/lib/python3.7/site-packages/pycodestyle-2.6.0.dist-info/RECORD b/venv3/lib/python3.7/site-packages/pycodestyle-2.6.0.dist-info/RECORD new file mode 100644 index 0000000..56fa41b --- /dev/null +++ b/venv3/lib/python3.7/site-packages/pycodestyle-2.6.0.dist-info/RECORD @@ -0,0 +1,11 @@ +pycodestyle.py,sha256=xYcAkNSMHMAlz6cmdkfhwK7QC5RsRwK2pfG2_uPI2xM,103376 +pycodestyle-2.6.0.dist-info/LICENSE,sha256=93IpXoGvNHjTTojlLQdiACMOx91qOeEjvFyzWqZqva4,1254 +pycodestyle-2.6.0.dist-info/METADATA,sha256=WI4-bMnR66kT7MKGLVFW7xqmuotPGP0uLJProv7nhD4,30287 +pycodestyle-2.6.0.dist-info/WHEEL,sha256=kGT74LWyRUZrL4VgLh6_g12IeVl_9u9ZVhadrgXZUEY,110 +pycodestyle-2.6.0.dist-info/entry_points.txt,sha256=6JU_7SAppC93MBSQi1_QxDwEQUyg6cgK71ab9q_Hxco,51 +pycodestyle-2.6.0.dist-info/namespace_packages.txt,sha256=AbpHGcgLb-kRsJGnwFEktk7uzpZOCcBY74-YBdrKVGs,1 +pycodestyle-2.6.0.dist-info/top_level.txt,sha256=rHbIEiXmvsJ016mFcLVcF_d-dKgP3VdfOB6CWbivZug,12 +pycodestyle-2.6.0.dist-info/RECORD,, +../../../bin/pycodestyle,sha256=pIp6u2iVgkZ61DS1jKVvK_T_RtnyEky3j3Hlf_fhhN8,258 +pycodestyle-2.6.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +__pycache__/pycodestyle.cpython-37.pyc,, diff --git a/venv3/lib/python3.7/site-packages/pycodestyle-2.6.0.dist-info/WHEEL b/venv3/lib/python3.7/site-packages/pycodestyle-2.6.0.dist-info/WHEEL new file mode 100644 index 0000000..ef99c6c --- /dev/null +++ b/venv3/lib/python3.7/site-packages/pycodestyle-2.6.0.dist-info/WHEEL @@ -0,0 +1,6 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.34.2) +Root-Is-Purelib: true +Tag: py2-none-any +Tag: py3-none-any + diff --git a/venv3/lib/python3.7/site-packages/pycodestyle-2.6.0.dist-info/entry_points.txt b/venv3/lib/python3.7/site-packages/pycodestyle-2.6.0.dist-info/entry_points.txt new file mode 100644 index 0000000..71bd885 --- /dev/null +++ b/venv3/lib/python3.7/site-packages/pycodestyle-2.6.0.dist-info/entry_points.txt @@ -0,0 +1,3 @@ +[console_scripts] +pycodestyle = pycodestyle:_main + diff --git a/venv3/lib/python3.7/site-packages/pycodestyle-2.6.0.dist-info/namespace_packages.txt b/venv3/lib/python3.7/site-packages/pycodestyle-2.6.0.dist-info/namespace_packages.txt new file mode 100644 index 0000000..8b13789 --- /dev/null +++ b/venv3/lib/python3.7/site-packages/pycodestyle-2.6.0.dist-info/namespace_packages.txt @@ -0,0 +1 @@ + diff --git a/venv3/lib/python3.7/site-packages/pycodestyle-2.6.0.dist-info/top_level.txt b/venv3/lib/python3.7/site-packages/pycodestyle-2.6.0.dist-info/top_level.txt new file mode 100644 index 0000000..282a93f --- /dev/null +++ b/venv3/lib/python3.7/site-packages/pycodestyle-2.6.0.dist-info/top_level.txt @@ -0,0 +1 @@ +pycodestyle diff --git a/venv3/lib/python3.7/site-packages/pycodestyle.py b/venv3/lib/python3.7/site-packages/pycodestyle.py new file mode 100644 index 0000000..deb4539 --- /dev/null +++ b/venv3/lib/python3.7/site-packages/pycodestyle.py @@ -0,0 +1,2763 @@ +#!/usr/bin/env python +# pycodestyle.py - Check Python source code formatting, according to +# PEP 8 +# +# Copyright (C) 2006-2009 Johann C. 
Rocholl +# Copyright (C) 2009-2014 Florent Xicluna +# Copyright (C) 2014-2016 Ian Lee +# +# Permission is hereby granted, free of charge, to any person +# obtaining a copy of this software and associated documentation files +# (the "Software"), to deal in the Software without restriction, +# including without limitation the rights to use, copy, modify, merge, +# publish, distribute, sublicense, and/or sell copies of the Software, +# and to permit persons to whom the Software is furnished to do so, +# subject to the following conditions: +# +# The above copyright notice and this permission notice shall be +# included in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS +# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN +# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +r""" +Check Python source code formatting, according to PEP 8. + +For usage and a list of options, try this: +$ python pycodestyle.py -h + +This program and its regression test suite live here: +https://github.com/pycqa/pycodestyle + +Groups of errors and warnings: +E errors +W warnings +100 indentation +200 whitespace +300 blank lines +400 imports +500 line length +600 deprecation +700 statements +900 syntax error +""" +from __future__ import with_statement + +import inspect +import keyword +import os +import re +import sys +import time +import tokenize +import warnings +import bisect + +try: + from functools import lru_cache +except ImportError: + def lru_cache(maxsize=128): # noqa as it's a fake implementation. + """Does not really need a real a lru_cache, it's just + optimization, so let's just do nothing here. Python 3.2+ will + just get better performances, time to upgrade? + """ + return lambda function: function + +from fnmatch import fnmatch +from optparse import OptionParser + +try: + from configparser import RawConfigParser + from io import TextIOWrapper +except ImportError: + from ConfigParser import RawConfigParser + +__version__ = '2.6.0' + +DEFAULT_EXCLUDE = '.svn,CVS,.bzr,.hg,.git,__pycache__,.tox' +DEFAULT_IGNORE = 'E121,E123,E126,E226,E24,E704,W503,W504' +try: + if sys.platform == 'win32': + USER_CONFIG = os.path.expanduser(r'~\.pycodestyle') + else: + USER_CONFIG = os.path.join( + os.getenv('XDG_CONFIG_HOME') or os.path.expanduser('~/.config'), + 'pycodestyle' + ) +except ImportError: + USER_CONFIG = None + +PROJECT_CONFIG = ('setup.cfg', 'tox.ini') +TESTSUITE_PATH = os.path.join(os.path.dirname(__file__), 'testsuite') +MAX_LINE_LENGTH = 79 +# Number of blank lines between various code parts. +BLANK_LINES_CONFIG = { + # Top level class and function. + 'top_level': 2, + # Methods and nested class and function. 
+    'method': 1,
+}
+MAX_DOC_LENGTH = 72
+REPORT_FORMAT = {
+    'default': '%(path)s:%(row)d:%(col)d: %(code)s %(text)s',
+    'pylint': '%(path)s:%(row)d: [%(code)s] %(text)s',
+}
+
+PyCF_ONLY_AST = 1024
+SINGLETONS = frozenset(['False', 'None', 'True'])
+KEYWORDS = frozenset(keyword.kwlist + ['print', 'async']) - SINGLETONS
+UNARY_OPERATORS = frozenset(['>>', '**', '*', '+', '-'])
+ARITHMETIC_OP = frozenset(['**', '*', '/', '//', '+', '-', '@'])
+WS_OPTIONAL_OPERATORS = ARITHMETIC_OP.union(['^', '&', '|', '<<', '>>', '%'])
+# Warn for -> function annotation operator in py3.5+ (issue 803)
+FUNCTION_RETURN_ANNOTATION_OP = ['->'] if sys.version_info >= (3, 5) else []
+ASSIGNMENT_EXPRESSION_OP = [':='] if sys.version_info >= (3, 8) else []
+WS_NEEDED_OPERATORS = frozenset([
+    '**=', '*=', '/=', '//=', '+=', '-=', '!=', '<>', '<', '>',
+    '%=', '^=', '&=', '|=', '==', '<=', '>=', '<<=', '>>=', '=',
+    'and', 'in', 'is', 'or'] +
+    FUNCTION_RETURN_ANNOTATION_OP +
+    ASSIGNMENT_EXPRESSION_OP)
+WHITESPACE = frozenset(' \t')
+NEWLINE = frozenset([tokenize.NL, tokenize.NEWLINE])
+SKIP_TOKENS = NEWLINE.union([tokenize.INDENT, tokenize.DEDENT])
+# ERRORTOKEN is triggered by backticks in Python 3
+SKIP_COMMENTS = SKIP_TOKENS.union([tokenize.COMMENT, tokenize.ERRORTOKEN])
+BENCHMARK_KEYS = ['directories', 'files', 'logical lines', 'physical lines']
+
+INDENT_REGEX = re.compile(r'([ \t]*)')
+RAISE_COMMA_REGEX = re.compile(r'raise\s+\w+\s*,')
+RERAISE_COMMA_REGEX = re.compile(r'raise\s+\w+\s*,.*,\s*\w+\s*$')
+ERRORCODE_REGEX = re.compile(r'\b[A-Z]\d{3}\b')
+DOCSTRING_REGEX = re.compile(r'u?r?["\']')
+EXTRANEOUS_WHITESPACE_REGEX = re.compile(r'[\[({] | [\]}),;]| :(?!=)')
+WHITESPACE_AFTER_COMMA_REGEX = re.compile(r'[,;:]\s*(?:  |\t)')
+COMPARE_SINGLETON_REGEX = re.compile(r'(\bNone|\bFalse|\bTrue)?\s*([=!]=)'
+                                     r'\s*(?(1)|(None|False|True))\b')
+COMPARE_NEGATIVE_REGEX = re.compile(r'\b(?<!is\s)(not)\s+[^][)(}{ ]+\s+(in|is)\s')
+COMPARE_TYPE_REGEX = re.compile(r'(?:[=!]=|is(?:\s+not)?)\s+type(?:s.\w+Type'
+                                r'|\s*\(\s*([^)]*[^ )])\s*\))')
+KEYWORD_REGEX = re.compile(r'(\s*)\b(?:False|None|True|and|in|is|or|not)\b(\s*)')
+OPERATOR_REGEX = re.compile(r'(?:[^,\s])(\s*)(?:[-+*/|!<=>%&^]+)(\s*)')
+LAMBDA_REGEX = re.compile(r'\blambda\b')
+HUNK_REGEX = re.compile(r'^@@ -\d+(?:,\d+)? \+(\d+)(?:,(\d+))? 
@@.*$') +STARTSWITH_DEF_REGEX = re.compile(r'^(async\s+def|def)\b') +STARTSWITH_TOP_LEVEL_REGEX = re.compile(r'^(async\s+def\s+|def\s+|class\s+|@)') +STARTSWITH_INDENT_STATEMENT_REGEX = re.compile( + r'^\s*({0})\b'.format('|'.join(s.replace(' ', r'\s+') for s in ( + 'def', 'async def', + 'for', 'async for', + 'if', 'elif', 'else', + 'try', 'except', 'finally', + 'with', 'async with', + 'class', + 'while', + ))) +) +DUNDER_REGEX = re.compile(r'^__([^\s]+)__ = ') + +_checks = {'physical_line': {}, 'logical_line': {}, 'tree': {}} + + +def _get_parameters(function): + if sys.version_info >= (3, 3): + return [parameter.name + for parameter + in inspect.signature(function).parameters.values() + if parameter.kind == parameter.POSITIONAL_OR_KEYWORD] + else: + return inspect.getargspec(function)[0] + + +def register_check(check, codes=None): + """Register a new check object.""" + def _add_check(check, kind, codes, args): + if check in _checks[kind]: + _checks[kind][check][0].extend(codes or []) + else: + _checks[kind][check] = (codes or [''], args) + if inspect.isfunction(check): + args = _get_parameters(check) + if args and args[0] in ('physical_line', 'logical_line'): + if codes is None: + codes = ERRORCODE_REGEX.findall(check.__doc__ or '') + _add_check(check, args[0], codes, args) + elif inspect.isclass(check): + if _get_parameters(check.__init__)[:2] == ['self', 'tree']: + _add_check(check, 'tree', codes, None) + return check + + +######################################################################## +# Plugins (check functions) for physical lines +######################################################################## + +@register_check +def tabs_or_spaces(physical_line, indent_char): + r"""Never mix tabs and spaces. + + The most popular way of indenting Python is with spaces only. The + second-most popular way is with tabs only. Code indented with a + mixture of tabs and spaces should be converted to using spaces + exclusively. When invoking the Python command line interpreter with + the -t option, it issues warnings about code that illegally mixes + tabs and spaces. When using -tt these warnings become errors. + These options are highly recommended! + + Okay: if a == 0:\n a = 1\n b = 1 + E101: if a == 0:\n a = 1\n\tb = 1 + """ + indent = INDENT_REGEX.match(physical_line).group(1) + for offset, char in enumerate(indent): + if char != indent_char: + return offset, "E101 indentation contains mixed spaces and tabs" + + +@register_check +def tabs_obsolete(physical_line): + r"""On new projects, spaces-only are strongly recommended over tabs. + + Okay: if True:\n return + W191: if True:\n\treturn + """ + indent = INDENT_REGEX.match(physical_line).group(1) + if '\t' in indent: + return indent.index('\t'), "W191 indentation contains tabs" + + +@register_check +def trailing_whitespace(physical_line): + r"""Trailing whitespace is superfluous. + + The warning returned varies on whether the line itself is blank, + for easier filtering for those who want to indent their blank lines. 
+ + Okay: spam(1)\n# + W291: spam(1) \n# + W293: class Foo(object):\n \n bang = 12 + """ + physical_line = physical_line.rstrip('\n') # chr(10), newline + physical_line = physical_line.rstrip('\r') # chr(13), carriage return + physical_line = physical_line.rstrip('\x0c') # chr(12), form feed, ^L + stripped = physical_line.rstrip(' \t\v') + if physical_line != stripped: + if stripped: + return len(stripped), "W291 trailing whitespace" + else: + return 0, "W293 blank line contains whitespace" + + +@register_check +def trailing_blank_lines(physical_line, lines, line_number, total_lines): + r"""Trailing blank lines are superfluous. + + Okay: spam(1) + W391: spam(1)\n + + However the last line should end with a new line (warning W292). + """ + if line_number == total_lines: + stripped_last_line = physical_line.rstrip() + if physical_line and not stripped_last_line: + return 0, "W391 blank line at end of file" + if stripped_last_line == physical_line: + return len(lines[-1]), "W292 no newline at end of file" + + +@register_check +def maximum_line_length(physical_line, max_line_length, multiline, + line_number, noqa): + r"""Limit all lines to a maximum of 79 characters. + + There are still many devices around that are limited to 80 character + lines; plus, limiting windows to 80 characters makes it possible to + have several windows side-by-side. The default wrapping on such + devices looks ugly. Therefore, please limit all lines to a maximum + of 79 characters. For flowing long blocks of text (docstrings or + comments), limiting the length to 72 characters is recommended. + + Reports error E501. + """ + line = physical_line.rstrip() + length = len(line) + if length > max_line_length and not noqa: + # Special case: ignore long shebang lines. + if line_number == 1 and line.startswith('#!'): + return + # Special case for long URLs in multi-line docstrings or + # comments, but still report the error when the 72 first chars + # are whitespaces. 
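+        # 'chunks' below splits the line on whitespace: a line whose only
+        # chunk sits in a multiline string, or a comment made of '#' plus
+        # one long word (e.g. a URL), is tolerated as long as everything
+        # before that final word still ends well short of the limit.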
+ chunks = line.split() + if ((len(chunks) == 1 and multiline) or + (len(chunks) == 2 and chunks[0] == '#')) and \ + len(line) - len(chunks[-1]) < max_line_length - 7: + return + if hasattr(line, 'decode'): # Python 2 + # The line could contain multi-byte characters + try: + length = len(line.decode('utf-8')) + except UnicodeError: + pass + if length > max_line_length: + return (max_line_length, "E501 line too long " + "(%d > %d characters)" % (length, max_line_length)) + + +######################################################################## +# Plugins (check functions) for logical lines +######################################################################## + + +def _is_one_liner(logical_line, indent_level, lines, line_number): + if not STARTSWITH_TOP_LEVEL_REGEX.match(logical_line): + return False + + line_idx = line_number - 1 + + if line_idx < 1: + prev_indent = 0 + else: + prev_indent = expand_indent(lines[line_idx - 1]) + + if prev_indent > indent_level: + return False + + while line_idx < len(lines): + line = lines[line_idx].strip() + if not line.startswith('@') and STARTSWITH_TOP_LEVEL_REGEX.match(line): + break + else: + line_idx += 1 + else: + return False # invalid syntax: EOF while searching for def/class + + next_idx = line_idx + 1 + while next_idx < len(lines): + if lines[next_idx].strip(): + break + else: + next_idx += 1 + else: + return True # line is last in the file + + return expand_indent(lines[next_idx]) <= indent_level + + +@register_check +def blank_lines(logical_line, blank_lines, indent_level, line_number, + blank_before, previous_logical, + previous_unindented_logical_line, previous_indent_level, + lines): + r"""Separate top-level function and class definitions with two blank + lines. + + Method definitions inside a class are separated by a single blank + line. + + Extra blank lines may be used (sparingly) to separate groups of + related functions. Blank lines may be omitted between a bunch of + related one-liners (e.g. a set of dummy implementations). + + Use blank lines in functions, sparingly, to indicate logical + sections. 
+ + Okay: def a():\n pass\n\n\ndef b():\n pass + Okay: def a():\n pass\n\n\nasync def b():\n pass + Okay: def a():\n pass\n\n\n# Foo\n# Bar\n\ndef b():\n pass + Okay: default = 1\nfoo = 1 + Okay: classify = 1\nfoo = 1 + + E301: class Foo:\n b = 0\n def bar():\n pass + E302: def a():\n pass\n\ndef b(n):\n pass + E302: def a():\n pass\n\nasync def b(n):\n pass + E303: def a():\n pass\n\n\n\ndef b(n):\n pass + E303: def a():\n\n\n\n pass + E304: @decorator\n\ndef a():\n pass + E305: def a():\n pass\na() + E306: def a():\n def b():\n pass\n def c():\n pass + """ # noqa + top_level_lines = BLANK_LINES_CONFIG['top_level'] + method_lines = BLANK_LINES_CONFIG['method'] + + if not previous_logical and blank_before < top_level_lines: + return # Don't expect blank lines before the first line + if previous_logical.startswith('@'): + if blank_lines: + yield 0, "E304 blank lines found after function decorator" + elif (blank_lines > top_level_lines or + (indent_level and blank_lines == method_lines + 1) + ): + yield 0, "E303 too many blank lines (%d)" % blank_lines + elif STARTSWITH_TOP_LEVEL_REGEX.match(logical_line): + # allow a group of one-liners + if ( + _is_one_liner(logical_line, indent_level, lines, line_number) and + blank_before == 0 + ): + return + if indent_level: + if not (blank_before == method_lines or + previous_indent_level < indent_level or + DOCSTRING_REGEX.match(previous_logical) + ): + ancestor_level = indent_level + nested = False + # Search backwards for a def ancestor or tree root + # (top level). + for line in lines[line_number - top_level_lines::-1]: + if line.strip() and expand_indent(line) < ancestor_level: + ancestor_level = expand_indent(line) + nested = STARTSWITH_DEF_REGEX.match(line.lstrip()) + if nested or ancestor_level == 0: + break + if nested: + yield 0, "E306 expected %s blank line before a " \ + "nested definition, found 0" % (method_lines,) + else: + yield 0, "E301 expected %s blank line, found 0" % ( + method_lines,) + elif blank_before != top_level_lines: + yield 0, "E302 expected %s blank lines, found %d" % ( + top_level_lines, blank_before) + elif (logical_line and + not indent_level and + blank_before != top_level_lines and + previous_unindented_logical_line.startswith(('def ', 'class ')) + ): + yield 0, "E305 expected %s blank lines after " \ + "class or function definition, found %d" % ( + top_level_lines, blank_before) + + +@register_check +def extraneous_whitespace(logical_line): + r"""Avoid extraneous whitespace. + + Avoid extraneous whitespace in these situations: + - Immediately inside parentheses, brackets or braces. + - Immediately before a comma, semicolon, or colon. 
+ + Okay: spam(ham[1], {eggs: 2}) + E201: spam( ham[1], {eggs: 2}) + E201: spam(ham[ 1], {eggs: 2}) + E201: spam(ham[1], { eggs: 2}) + E202: spam(ham[1], {eggs: 2} ) + E202: spam(ham[1 ], {eggs: 2}) + E202: spam(ham[1], {eggs: 2 }) + + E203: if x == 4: print x, y; x, y = y , x + E203: if x == 4: print x, y ; x, y = y, x + E203: if x == 4 : print x, y; x, y = y, x + """ + line = logical_line + for match in EXTRANEOUS_WHITESPACE_REGEX.finditer(line): + text = match.group() + char = text.strip() + found = match.start() + if text == char + ' ': + # assert char in '([{' + yield found + 1, "E201 whitespace after '%s'" % char + elif line[found - 1] != ',': + code = ('E202' if char in '}])' else 'E203') # if char in ',;:' + yield found, "%s whitespace before '%s'" % (code, char) + + +@register_check +def whitespace_around_keywords(logical_line): + r"""Avoid extraneous whitespace around keywords. + + Okay: True and False + E271: True and False + E272: True and False + E273: True and\tFalse + E274: True\tand False + """ + for match in KEYWORD_REGEX.finditer(logical_line): + before, after = match.groups() + + if '\t' in before: + yield match.start(1), "E274 tab before keyword" + elif len(before) > 1: + yield match.start(1), "E272 multiple spaces before keyword" + + if '\t' in after: + yield match.start(2), "E273 tab after keyword" + elif len(after) > 1: + yield match.start(2), "E271 multiple spaces after keyword" + + +@register_check +def missing_whitespace_after_import_keyword(logical_line): + r"""Multiple imports in form from x import (a, b, c) should have + space between import statement and parenthesised name list. + + Okay: from foo import (bar, baz) + E275: from foo import(bar, baz) + E275: from importable.module import(bar, baz) + """ + line = logical_line + indicator = ' import(' + if line.startswith('from '): + found = line.find(indicator) + if -1 < found: + pos = found + len(indicator) - 1 + yield pos, "E275 missing whitespace after keyword" + + +@register_check +def missing_whitespace(logical_line): + r"""Each comma, semicolon or colon should be followed by whitespace. + + Okay: [a, b] + Okay: (3,) + Okay: a[1:4] + Okay: a[:4] + Okay: a[1:] + Okay: a[1:4:2] + E231: ['a','b'] + E231: foo(bar,baz) + E231: [{'a':'b'}] + """ + line = logical_line + for index in range(len(line) - 1): + char = line[index] + next_char = line[index + 1] + if char in ',;:' and next_char not in WHITESPACE: + before = line[:index] + if char == ':' and before.count('[') > before.count(']') and \ + before.rfind('{') < before.rfind('['): + continue # Slice syntax, no space required + if char == ',' and next_char == ')': + continue # Allow tuple with only one element: (3,) + if char == ':' and next_char == '=' and sys.version_info >= (3, 8): + continue # Allow assignment expression + yield index, "E231 missing whitespace after '%s'" % char + + +@register_check +def indentation(logical_line, previous_logical, indent_char, + indent_level, previous_indent_level): + r"""Use 4 spaces per indentation level. + + For really old code that you don't want to mess up, you can continue + to use 8-space tabs. 
+ + Okay: a = 1 + Okay: if a == 0:\n a = 1 + E111: a = 1 + E114: # a = 1 + + Okay: for item in items:\n pass + E112: for item in items:\npass + E115: for item in items:\n# Hi\n pass + + Okay: a = 1\nb = 2 + E113: a = 1\n b = 2 + E116: a = 1\n # b = 2 + """ + c = 0 if logical_line else 3 + tmpl = "E11%d %s" if logical_line else "E11%d %s (comment)" + if indent_level % 4: + yield 0, tmpl % (1 + c, "indentation is not a multiple of four") + indent_expect = previous_logical.endswith(':') + if indent_expect and indent_level <= previous_indent_level: + yield 0, tmpl % (2 + c, "expected an indented block") + elif not indent_expect and indent_level > previous_indent_level: + yield 0, tmpl % (3 + c, "unexpected indentation") + + if indent_expect: + expected_indent_amount = 8 if indent_char == '\t' else 4 + expected_indent_level = previous_indent_level + expected_indent_amount + if indent_level > expected_indent_level: + yield 0, tmpl % (7, 'over-indented') + + +@register_check +def continued_indentation(logical_line, tokens, indent_level, hang_closing, + indent_char, noqa, verbose): + r"""Continuation lines indentation. + + Continuation lines should align wrapped elements either vertically + using Python's implicit line joining inside parentheses, brackets + and braces, or using a hanging indent. + + When using a hanging indent these considerations should be applied: + - there should be no arguments on the first line, and + - further indentation should be used to clearly distinguish itself + as a continuation line. + + Okay: a = (\n) + E123: a = (\n ) + + Okay: a = (\n 42) + E121: a = (\n 42) + E122: a = (\n42) + E123: a = (\n 42\n ) + E124: a = (24,\n 42\n) + E125: if (\n b):\n pass + E126: a = (\n 42) + E127: a = (24,\n 42) + E128: a = (24,\n 42) + E129: if (a or\n b):\n pass + E131: a = (\n 42\n 24) + """ + first_row = tokens[0][2][0] + nrows = 1 + tokens[-1][2][0] - first_row + if noqa or nrows == 1: + return + + # indent_next tells us whether the next block is indented; assuming + # that it is indented by 4 spaces, then we should not allow 4-space + # indents on the final continuation line; in turn, some other + # indents are allowed to have an extra 4 spaces. + indent_next = logical_line.endswith(':') + + row = depth = 0 + valid_hangs = (4,) if indent_char != '\t' else (4, 8) + # remember how many brackets were opened on each line + parens = [0] * nrows + # relative indents of physical lines + rel_indent = [0] * nrows + # for each depth, collect a list of opening rows + open_rows = [[0]] + # for each depth, memorize the hanging indentation + hangs = [None] + # visual indents + indent_chances = {} + last_indent = tokens[0][2] + visual_indent = None + last_token_multiline = False + # for each depth, memorize the visual indent column + indent = [last_indent[1]] + if verbose >= 3: + print(">>> " + tokens[0][4].rstrip()) + + for token_type, text, start, end, line in tokens: + + newline = row < start[0] - first_row + if newline: + row = start[0] - first_row + newline = not last_token_multiline and token_type not in NEWLINE + + if newline: + # this is the beginning of a continuation line. + last_indent = start + if verbose >= 3: + print("... " + line.rstrip()) + + # record the initial indent. + rel_indent[row] = expand_indent(line) - indent_level + + # identify closing bracket + close_bracket = (token_type == tokenize.OP and text in ']})') + + # is the indent relative to an opening bracket line? 
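+            # 'hang' measures this row's indent relative to each row that
+            # opened a bracket at the current depth; a hang listed in
+            # valid_hangs (4 spaces, or 4/8 when indenting with tabs)
+            # counts as a hanging indent, and once a depth has committed
+            # to a particular hang, later rows must reuse it.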
+ for open_row in reversed(open_rows[depth]): + hang = rel_indent[row] - rel_indent[open_row] + hanging_indent = hang in valid_hangs + if hanging_indent: + break + if hangs[depth]: + hanging_indent = (hang == hangs[depth]) + # is there any chance of visual indent? + visual_indent = (not close_bracket and hang > 0 and + indent_chances.get(start[1])) + + if close_bracket and indent[depth]: + # closing bracket for visual indent + if start[1] != indent[depth]: + yield (start, "E124 closing bracket does not match " + "visual indentation") + elif close_bracket and not hang: + # closing bracket matches indentation of opening + # bracket's line + if hang_closing: + yield start, "E133 closing bracket is missing indentation" + elif indent[depth] and start[1] < indent[depth]: + if visual_indent is not True: + # visual indent is broken + yield (start, "E128 continuation line " + "under-indented for visual indent") + elif hanging_indent or (indent_next and rel_indent[row] == 8): + # hanging indent is verified + if close_bracket and not hang_closing: + yield (start, "E123 closing bracket does not match " + "indentation of opening bracket's line") + hangs[depth] = hang + elif visual_indent is True: + # visual indent is verified + indent[depth] = start[1] + elif visual_indent in (text, str): + # ignore token lined up with matching one from a + # previous line + pass + else: + # indent is broken + if hang <= 0: + error = "E122", "missing indentation or outdented" + elif indent[depth]: + error = "E127", "over-indented for visual indent" + elif not close_bracket and hangs[depth]: + error = "E131", "unaligned for hanging indent" + else: + hangs[depth] = hang + if hang > 4: + error = "E126", "over-indented for hanging indent" + else: + error = "E121", "under-indented for hanging indent" + yield start, "%s continuation line %s" % error + + # look for visual indenting + if (parens[row] and + token_type not in (tokenize.NL, tokenize.COMMENT) and + not indent[depth]): + indent[depth] = start[1] + indent_chances[start[1]] = True + if verbose >= 4: + print("bracket depth %s indent to %s" % (depth, start[1])) + # deal with implicit string concatenation + elif (token_type in (tokenize.STRING, tokenize.COMMENT) or + text in ('u', 'ur', 'b', 'br')): + indent_chances[start[1]] = str + # visual indent after assert/raise/with + elif not row and not depth and text in ["assert", "raise", "with"]: + indent_chances[end[1] + 1] = True + # special case for the "if" statement because len("if (") == 4 + elif not indent_chances and not row and not depth and text == 'if': + indent_chances[end[1] + 1] = True + elif text == ':' and line[end[1]:].isspace(): + open_rows[depth].append(row) + + # keep track of bracket depth + if token_type == tokenize.OP: + if text in '([{': + depth += 1 + indent.append(0) + hangs.append(None) + if len(open_rows) == depth: + open_rows.append([]) + open_rows[depth].append(row) + parens[row] += 1 + if verbose >= 4: + print("bracket depth %s seen, col %s, visual min = %s" % + (depth, start[1], indent[depth])) + elif text in ')]}' and depth > 0: + # parent indents should not be more than this one + prev_indent = indent.pop() or last_indent[1] + hangs.pop() + for d in range(depth): + if indent[d] > prev_indent: + indent[d] = 0 + for ind in list(indent_chances): + if ind >= prev_indent: + del indent_chances[ind] + del open_rows[depth + 1:] + depth -= 1 + if depth: + indent_chances[indent[depth]] = True + for idx in range(row, -1, -1): + if parens[idx]: + parens[idx] -= 1 + break + assert len(indent) == depth + 
1 + if start[1] not in indent_chances: + # allow lining up tokens + indent_chances[start[1]] = text + + last_token_multiline = (start[0] != end[0]) + if last_token_multiline: + rel_indent[end[0] - first_row] = rel_indent[row] + + if indent_next and expand_indent(line) == indent_level + 4: + pos = (start[0], indent[0] + 4) + if visual_indent: + code = "E129 visually indented line" + else: + code = "E125 continuation line" + yield pos, "%s with same indent as next logical line" % code + + +@register_check +def whitespace_before_parameters(logical_line, tokens): + r"""Avoid extraneous whitespace. + + Avoid extraneous whitespace in the following situations: + - before the open parenthesis that starts the argument list of a + function call. + - before the open parenthesis that starts an indexing or slicing. + + Okay: spam(1) + E211: spam (1) + + Okay: dict['key'] = list[index] + E211: dict ['key'] = list[index] + E211: dict['key'] = list [index] + """ + prev_type, prev_text, __, prev_end, __ = tokens[0] + for index in range(1, len(tokens)): + token_type, text, start, end, __ = tokens[index] + if (token_type == tokenize.OP and + text in '([' and + start != prev_end and + (prev_type == tokenize.NAME or prev_text in '}])') and + # Syntax "class A (B):" is allowed, but avoid it + (index < 2 or tokens[index - 2][1] != 'class') and + # Allow "return (a.foo for a in range(5))" + not keyword.iskeyword(prev_text)): + yield prev_end, "E211 whitespace before '%s'" % text + prev_type = token_type + prev_text = text + prev_end = end + + +@register_check +def whitespace_around_operator(logical_line): + r"""Avoid extraneous whitespace around an operator. + + Okay: a = 12 + 3 + E221: a = 4 + 5 + E222: a = 4 + 5 + E223: a = 4\t+ 5 + E224: a = 4 +\t5 + """ + for match in OPERATOR_REGEX.finditer(logical_line): + before, after = match.groups() + + if '\t' in before: + yield match.start(1), "E223 tab before operator" + elif len(before) > 1: + yield match.start(1), "E221 multiple spaces before operator" + + if '\t' in after: + yield match.start(2), "E224 tab after operator" + elif len(after) > 1: + yield match.start(2), "E222 multiple spaces after operator" + + +@register_check +def missing_whitespace_around_operator(logical_line, tokens): + r"""Surround operators with a single space on either side. + + - Always surround these binary operators with a single space on + either side: assignment (=), augmented assignment (+=, -= etc.), + comparisons (==, <, >, !=, <=, >=, in, not in, is, is not), + Booleans (and, or, not). + + - If operators with different priorities are used, consider adding + whitespace around the operators with the lowest priorities. 
+ + Okay: i = i + 1 + Okay: submitted += 1 + Okay: x = x * 2 - 1 + Okay: hypot2 = x * x + y * y + Okay: c = (a + b) * (a - b) + Okay: foo(bar, key='word', *args, **kwargs) + Okay: alpha[:-i] + + E225: i=i+1 + E225: submitted +=1 + E225: x = x /2 - 1 + E225: z = x **y + E225: z = 1and 1 + E226: c = (a+b) * (a-b) + E226: hypot2 = x*x + y*y + E227: c = a|b + E228: msg = fmt%(errno, errmsg) + """ + parens = 0 + need_space = False + prev_type = tokenize.OP + prev_text = prev_end = None + operator_types = (tokenize.OP, tokenize.NAME) + for token_type, text, start, end, line in tokens: + if token_type in SKIP_COMMENTS: + continue + if text in ('(', 'lambda'): + parens += 1 + elif text == ')': + parens -= 1 + if need_space: + if start != prev_end: + # Found a (probably) needed space + if need_space is not True and not need_space[1]: + yield (need_space[0], + "E225 missing whitespace around operator") + need_space = False + elif text == '>' and prev_text in ('<', '-'): + # Tolerate the "<>" operator, even if running Python 3 + # Deal with Python 3's annotated return value "->" + pass + elif ( + # def f(a, /, b): + # ^ + # def f(a, b, /): + # ^ + prev_text == '/' and text in {',', ')'} or + # def f(a, b, /): + # ^ + prev_text == ')' and text == ':' + ): + # Tolerate the "/" operator in function definition + # For more info see PEP570 + pass + else: + if need_space is True or need_space[1]: + # A needed trailing space was not found + yield prev_end, "E225 missing whitespace around operator" + elif prev_text != '**': + code, optype = 'E226', 'arithmetic' + if prev_text == '%': + code, optype = 'E228', 'modulo' + elif prev_text not in ARITHMETIC_OP: + code, optype = 'E227', 'bitwise or shift' + yield (need_space[0], "%s missing whitespace " + "around %s operator" % (code, optype)) + need_space = False + elif token_type in operator_types and prev_end is not None: + if text == '=' and parens: + # Allow keyword args or defaults: foo(bar=None). + pass + elif text in WS_NEEDED_OPERATORS: + need_space = True + elif text in UNARY_OPERATORS: + # Check if the operator is used as a binary operator + # Allow unary operators: -123, -x, +1. + # Allow argument unpacking: foo(*args, **kwargs). + if (prev_text in '}])' if prev_type == tokenize.OP + else prev_text not in KEYWORDS): + need_space = None + elif text in WS_OPTIONAL_OPERATORS: + need_space = None + + if need_space is None: + # Surrounding space is optional, but ensure that + # trailing space matches opening space + need_space = (prev_end, start != prev_end) + elif need_space and start == prev_end: + # A needed opening space was not found + yield prev_end, "E225 missing whitespace around operator" + need_space = False + prev_type = token_type + prev_text = text + prev_end = end + + +@register_check +def whitespace_around_comma(logical_line): + r"""Avoid extraneous whitespace after a comma or a colon. + + Note: these checks are disabled by default + + Okay: a = (1, 2) + E241: a = (1, 2) + E242: a = (1,\t2) + """ + line = logical_line + for m in WHITESPACE_AFTER_COMMA_REGEX.finditer(line): + found = m.start() + 1 + if '\t' in m.group(): + yield found, "E242 tab after '%s'" % m.group()[0] + else: + yield found, "E241 multiple spaces after '%s'" % m.group()[0] + + +@register_check +def whitespace_around_named_parameter_equals(logical_line, tokens): + r"""Don't use spaces around the '=' sign in function arguments. 
+ + Don't use spaces around the '=' sign when used to indicate a + keyword argument or a default parameter value, except when + using a type annotation. + + Okay: def complex(real, imag=0.0): + Okay: return magic(r=real, i=imag) + Okay: boolean(a == b) + Okay: boolean(a != b) + Okay: boolean(a <= b) + Okay: boolean(a >= b) + Okay: def foo(arg: int = 42): + Okay: async def foo(arg: int = 42): + + E251: def complex(real, imag = 0.0): + E251: return magic(r = real, i = imag) + E252: def complex(real, image: float=0.0): + """ + parens = 0 + no_space = False + require_space = False + prev_end = None + annotated_func_arg = False + in_def = bool(STARTSWITH_DEF_REGEX.match(logical_line)) + + message = "E251 unexpected spaces around keyword / parameter equals" + missing_message = "E252 missing whitespace around parameter equals" + + for token_type, text, start, end, line in tokens: + if token_type == tokenize.NL: + continue + if no_space: + no_space = False + if start != prev_end: + yield (prev_end, message) + if require_space: + require_space = False + if start == prev_end: + yield (prev_end, missing_message) + if token_type == tokenize.OP: + if text in '([': + parens += 1 + elif text in ')]': + parens -= 1 + elif in_def and text == ':' and parens == 1: + annotated_func_arg = True + elif parens == 1 and text == ',': + annotated_func_arg = False + elif parens and text == '=': + if annotated_func_arg and parens == 1: + require_space = True + if start == prev_end: + yield (prev_end, missing_message) + else: + no_space = True + if start != prev_end: + yield (prev_end, message) + if not parens: + annotated_func_arg = False + + prev_end = end + + +@register_check +def whitespace_before_comment(logical_line, tokens): + r"""Separate inline comments by at least two spaces. + + An inline comment is a comment on the same line as a statement. + Inline comments should be separated by at least two spaces from the + statement. They should start with a # and a single space. + + Each line of a block comment starts with a # and a single space + (unless it is indented text inside the comment). + + Okay: x = x + 1 # Increment x + Okay: x = x + 1 # Increment x + Okay: # Block comment + E261: x = x + 1 # Increment x + E262: x = x + 1 #Increment x + E262: x = x + 1 # Increment x + E265: #Block comment + E266: ### Block comment + """ + prev_end = (0, 0) + for token_type, text, start, end, line in tokens: + if token_type == tokenize.COMMENT: + inline_comment = line[:start[1]].strip() + if inline_comment: + if prev_end[0] == start[0] and start[1] < prev_end[1] + 2: + yield (prev_end, + "E261 at least two spaces before inline comment") + symbol, sp, comment = text.partition(' ') + bad_prefix = symbol not in '#:' and (symbol.lstrip('#')[:1] or '#') + if inline_comment: + if bad_prefix or comment[:1] in WHITESPACE: + yield start, "E262 inline comment should start with '# '" + elif bad_prefix and (bad_prefix != '!' or start[0] > 1): + if bad_prefix != '#': + yield start, "E265 block comment should start with '# '" + elif comment: + yield start, "E266 too many leading '#' for block comment" + elif token_type != tokenize.NL: + prev_end = end + + +@register_check +def imports_on_separate_lines(logical_line): + r"""Place imports on separate lines. 
+ + Okay: import os\nimport sys + E401: import sys, os + + Okay: from subprocess import Popen, PIPE + Okay: from myclas import MyClass + Okay: from foo.bar.yourclass import YourClass + Okay: import myclass + Okay: import foo.bar.yourclass + """ + line = logical_line + if line.startswith('import '): + found = line.find(',') + if -1 < found and ';' not in line[:found]: + yield found, "E401 multiple imports on one line" + + +@register_check +def module_imports_on_top_of_file( + logical_line, indent_level, checker_state, noqa): + r"""Place imports at the top of the file. + + Always put imports at the top of the file, just after any module + comments and docstrings, and before module globals and constants. + + Okay: import os + Okay: # this is a comment\nimport os + Okay: '''this is a module docstring'''\nimport os + Okay: r'''this is a module docstring'''\nimport os + Okay: + try:\n\timport x\nexcept ImportError:\n\tpass\nelse:\n\tpass\nimport y + Okay: + try:\n\timport x\nexcept ImportError:\n\tpass\nfinally:\n\tpass\nimport y + E402: a=1\nimport os + E402: 'One string'\n"Two string"\nimport os + E402: a=1\nfrom sys import x + + Okay: if x:\n import os + """ # noqa + def is_string_literal(line): + if line[0] in 'uUbB': + line = line[1:] + if line and line[0] in 'rR': + line = line[1:] + return line and (line[0] == '"' or line[0] == "'") + + allowed_keywords = ( + 'try', 'except', 'else', 'finally', 'with', 'if', 'elif') + + if indent_level: # Allow imports in conditional statement/function + return + if not logical_line: # Allow empty lines or comments + return + if noqa: + return + line = logical_line + if line.startswith('import ') or line.startswith('from '): + if checker_state.get('seen_non_imports', False): + yield 0, "E402 module level import not at top of file" + elif re.match(DUNDER_REGEX, line): + return + elif any(line.startswith(kw) for kw in allowed_keywords): + # Allow certain keywords intermixed with imports in order to + # support conditional or filtered importing + return + elif is_string_literal(line): + # The first literal is a docstring, allow it. Otherwise, report + # error. + if checker_state.get('seen_docstring', False): + checker_state['seen_non_imports'] = True + else: + checker_state['seen_docstring'] = True + else: + checker_state['seen_non_imports'] = True + + +@register_check +def compound_statements(logical_line): + r"""Compound statements (on the same line) are generally + discouraged. + + While sometimes it's okay to put an if/for/while with a small body + on the same line, never do this for multi-clause statements. + Also avoid folding such long lines! + + Always use a def statement instead of an assignment statement that + binds a lambda expression directly to a name. 
+ + Okay: if foo == 'blah':\n do_blah_thing() + Okay: do_one() + Okay: do_two() + Okay: do_three() + + E701: if foo == 'blah': do_blah_thing() + E701: for x in lst: total += x + E701: while t < 10: t = delay() + E701: if foo == 'blah': do_blah_thing() + E701: else: do_non_blah_thing() + E701: try: something() + E701: finally: cleanup() + E701: if foo == 'blah': one(); two(); three() + E702: do_one(); do_two(); do_three() + E703: do_four(); # useless semicolon + E704: def f(x): return 2*x + E731: f = lambda x: 2*x + """ + line = logical_line + last_char = len(line) - 1 + found = line.find(':') + prev_found = 0 + counts = {char: 0 for char in '{}[]()'} + while -1 < found < last_char: + update_counts(line[prev_found:found], counts) + if ((counts['{'] <= counts['}'] and # {'a': 1} (dict) + counts['['] <= counts[']'] and # [1:2] (slice) + counts['('] <= counts[')']) and # (annotation) + not (sys.version_info >= (3, 8) and + line[found + 1] == '=')): # assignment expression + lambda_kw = LAMBDA_REGEX.search(line, 0, found) + if lambda_kw: + before = line[:lambda_kw.start()].rstrip() + if before[-1:] == '=' and isidentifier(before[:-1].strip()): + yield 0, ("E731 do not assign a lambda expression, use a " + "def") + break + if STARTSWITH_DEF_REGEX.match(line): + yield 0, "E704 multiple statements on one line (def)" + elif STARTSWITH_INDENT_STATEMENT_REGEX.match(line): + yield found, "E701 multiple statements on one line (colon)" + prev_found = found + found = line.find(':', found + 1) + found = line.find(';') + while -1 < found: + if found < last_char: + yield found, "E702 multiple statements on one line (semicolon)" + else: + yield found, "E703 statement ends with a semicolon" + found = line.find(';', found + 1) + + +@register_check +def explicit_line_join(logical_line, tokens): + r"""Avoid explicit line join between brackets. + + The preferred way of wrapping long lines is by using Python's + implied line continuation inside parentheses, brackets and braces. + Long lines can be broken over multiple lines by wrapping expressions + in parentheses. These should be used in preference to using a + backslash for line continuation. + + E502: aaa = [123, \\n 123] + E502: aaa = ("bbb " \\n "ccc") + + Okay: aaa = [123,\n 123] + Okay: aaa = ("bbb "\n "ccc") + Okay: aaa = "bbb " \\n "ccc" + Okay: aaa = 123 # \\ + """ + prev_start = prev_end = parens = 0 + comment = False + backslash = None + for token_type, text, start, end, line in tokens: + if token_type == tokenize.COMMENT: + comment = True + if start[0] != prev_start and parens and backslash and not comment: + yield backslash, "E502 the backslash is redundant between brackets" + if end[0] != prev_end: + if line.rstrip('\r\n').endswith('\\'): + backslash = (end[0], len(line.splitlines()[-1]) - 1) + else: + backslash = None + prev_start = prev_end = end[0] + else: + prev_start = start[0] + if token_type == tokenize.OP: + if text in '([{': + parens += 1 + elif text in ')]}': + parens -= 1 + + +_SYMBOLIC_OPS = frozenset("()[]{},:.;@=%~") | frozenset(("...",)) + + +def _is_binary_operator(token_type, text): + is_op_token = token_type == tokenize.OP + is_conjunction = text in ['and', 'or'] + # NOTE(sigmavirus24): Previously the not_a_symbol check was executed + # conditionally. Since it is now *always* executed, text may be + # None. In that case we get a TypeError for `text not in str`. 
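+    # (Editorial sketch, not part of the original source -- a few expected
+    # results given the definitions above and the `text and ...` guard below:
+    #     _is_binary_operator(tokenize.OP, '+')      -> True   arithmetic op
+    #     _is_binary_operator(tokenize.NAME, 'and')  -> True   conjunction
+    #     _is_binary_operator(tokenize.OP, ',')      -> False  ',' is symbolic
+    #     _is_binary_operator(tokenize.OP, None)     -> False  guard handles None)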
+ not_a_symbol = text and text not in _SYMBOLIC_OPS + # The % character is strictly speaking a binary operator, but the + # common usage seems to be to put it next to the format parameters, + # after a line break. + return ((is_op_token or is_conjunction) and not_a_symbol) + + +def _break_around_binary_operators(tokens): + """Private function to reduce duplication. + + This factors out the shared details between + :func:`break_before_binary_operator` and + :func:`break_after_binary_operator`. + """ + line_break = False + unary_context = True + # Previous non-newline token types and text + previous_token_type = None + previous_text = None + for token_type, text, start, end, line in tokens: + if token_type == tokenize.COMMENT: + continue + if ('\n' in text or '\r' in text) and token_type != tokenize.STRING: + line_break = True + else: + yield (token_type, text, previous_token_type, previous_text, + line_break, unary_context, start) + unary_context = text in '([{,;' + line_break = False + previous_token_type = token_type + previous_text = text + + +@register_check +def break_before_binary_operator(logical_line, tokens): + r""" + Avoid breaks before binary operators. + + The preferred place to break around a binary operator is after the + operator, not before it. + + W503: (width == 0\n + height == 0) + W503: (width == 0\n and height == 0) + W503: var = (1\n & ~2) + W503: var = (1\n / -2) + W503: var = (1\n + -1\n + -2) + + Okay: foo(\n -x) + Okay: foo(x\n []) + Okay: x = '''\n''' + '' + Okay: foo(x,\n -y) + Okay: foo(x, # comment\n -y) + """ + for context in _break_around_binary_operators(tokens): + (token_type, text, previous_token_type, previous_text, + line_break, unary_context, start) = context + if (_is_binary_operator(token_type, text) and line_break and + not unary_context and + not _is_binary_operator(previous_token_type, + previous_text)): + yield start, "W503 line break before binary operator" + + +@register_check +def break_after_binary_operator(logical_line, tokens): + r""" + Avoid breaks after binary operators. + + The preferred place to break around a binary operator is before the + operator, not after it. + + W504: (width == 0 +\n height == 0) + W504: (width == 0 and\n height == 0) + W504: var = (1 &\n ~2) + + Okay: foo(\n -x) + Okay: foo(x\n []) + Okay: x = '''\n''' + '' + Okay: x = '' + '''\n''' + Okay: foo(x,\n -y) + Okay: foo(x, # comment\n -y) + + The following should be W504 but unary_context is tricky with these + Okay: var = (1 /\n -2) + Okay: var = (1 +\n -1 +\n -2) + """ + prev_start = None + for context in _break_around_binary_operators(tokens): + (token_type, text, previous_token_type, previous_text, + line_break, unary_context, start) = context + if (_is_binary_operator(previous_token_type, previous_text) and + line_break and + not unary_context and + not _is_binary_operator(token_type, text)): + yield prev_start, "W504 line break after binary operator" + prev_start = start + + +@register_check +def comparison_to_singleton(logical_line, noqa): + r"""Comparison to singletons should use "is" or "is not". + + Comparisons to singletons like None should always be done + with "is" or "is not", never the equality operators. + + Okay: if arg is not None: + E711: if arg != None: + E711: if None == arg: + E712: if arg == True: + E712: if False == arg: + + Also, beware of writing if x when you really mean if x is not None + -- e.g. when testing whether a variable or argument that defaults to + None was set to some other value. 
The other value might have a type + (such as a container) that could be false in a boolean context! + """ + match = not noqa and COMPARE_SINGLETON_REGEX.search(logical_line) + if match: + singleton = match.group(1) or match.group(3) + same = (match.group(2) == '==') + + msg = "'if cond is %s:'" % (('' if same else 'not ') + singleton) + if singleton in ('None',): + code = 'E711' + else: + code = 'E712' + nonzero = ((singleton == 'True' and same) or + (singleton == 'False' and not same)) + msg += " or 'if %scond:'" % ('' if nonzero else 'not ') + yield match.start(2), ("%s comparison to %s should be %s" % + (code, singleton, msg)) + + +@register_check +def comparison_negative(logical_line): + r"""Negative comparison should be done using "not in" and "is not". + + Okay: if x not in y:\n pass + Okay: assert (X in Y or X is Z) + Okay: if not (X in Y):\n pass + Okay: zz = x is not y + E713: Z = not X in Y + E713: if not X.B in Y:\n pass + E714: if not X is Y:\n pass + E714: Z = not X.B is Y + """ + match = COMPARE_NEGATIVE_REGEX.search(logical_line) + if match: + pos = match.start(1) + if match.group(2) == 'in': + yield pos, "E713 test for membership should be 'not in'" + else: + yield pos, "E714 test for object identity should be 'is not'" + + +@register_check +def comparison_type(logical_line, noqa): + r"""Object type comparisons should always use isinstance(). + + Do not compare types directly. + + Okay: if isinstance(obj, int): + E721: if type(obj) is type(1): + + When checking if an object is a string, keep in mind that it might + be a unicode string too! In Python 2.3, str and unicode have a + common base class, basestring, so you can do: + + Okay: if isinstance(obj, basestring): + Okay: if type(a1) is type(b1): + """ + match = COMPARE_TYPE_REGEX.search(logical_line) + if match and not noqa: + inst = match.group(1) + if inst and isidentifier(inst) and inst not in SINGLETONS: + return # Allow comparison for types which are not obvious + yield match.start(), "E721 do not compare types, use 'isinstance()'" + + +@register_check +def bare_except(logical_line, noqa): + r"""When catching exceptions, mention specific exceptions when + possible. + + Okay: except Exception: + Okay: except BaseException: + E722: except: + """ + if noqa: + return + + regex = re.compile(r"except\s*:") + match = regex.match(logical_line) + if match: + yield match.start(), "E722 do not use bare 'except'" + + +@register_check +def ambiguous_identifier(logical_line, tokens): + r"""Never use the characters 'l', 'O', or 'I' as variable names. + + In some fonts, these characters are indistinguishable from the + numerals one and zero. When tempted to use 'l', use 'L' instead. + + Okay: L = 0 + Okay: o = 123 + Okay: i = 42 + E741: l = 0 + E741: O = 123 + E741: I = 42 + + Variables can be bound in several other contexts, including class + and function definitions, 'global' and 'nonlocal' statements, + exception handlers, and 'with' and 'for' statements. + In addition, we have a special handling for function parameters. 
+ + Okay: except AttributeError as o: + Okay: with lock as L: + Okay: foo(l=12) + Okay: for a in foo(l=12): + E741: except AttributeError as O: + E741: with lock as l: + E741: global I + E741: nonlocal l + E741: def foo(l): + E741: def foo(l=12): + E741: l = foo(l=12) + E741: for l in range(10): + E742: class I(object): + E743: def l(x): + """ + is_func_def = False # Set to true if 'def' is found + parameter_parentheses_level = 0 + idents_to_avoid = ('l', 'O', 'I') + prev_type, prev_text, prev_start, prev_end, __ = tokens[0] + for token_type, text, start, end, line in tokens[1:]: + ident = pos = None + # find function definitions + if prev_text == 'def': + is_func_def = True + # update parameter parentheses level + if parameter_parentheses_level == 0 and \ + prev_type == tokenize.NAME and \ + token_type == tokenize.OP and text == '(': + parameter_parentheses_level = 1 + elif parameter_parentheses_level > 0 and \ + token_type == tokenize.OP: + if text == '(': + parameter_parentheses_level += 1 + elif text == ')': + parameter_parentheses_level -= 1 + # identifiers on the lhs of an assignment operator + if token_type == tokenize.OP and '=' in text and \ + parameter_parentheses_level == 0: + if prev_text in idents_to_avoid: + ident = prev_text + pos = prev_start + # identifiers bound to values with 'as', 'for', + # 'global', or 'nonlocal' + if prev_text in ('as', 'for', 'global', 'nonlocal'): + if text in idents_to_avoid: + ident = text + pos = start + # function parameter definitions + if is_func_def: + if text in idents_to_avoid: + ident = text + pos = start + if prev_text == 'class': + if text in idents_to_avoid: + yield start, "E742 ambiguous class definition '%s'" % text + if prev_text == 'def': + if text in idents_to_avoid: + yield start, "E743 ambiguous function definition '%s'" % text + if ident: + yield pos, "E741 ambiguous variable name '%s'" % ident + prev_type = token_type + prev_text = text + prev_start = start + + +@register_check +def python_3000_has_key(logical_line, noqa): + r"""The {}.has_key() method is removed in Python 3: use the 'in' + operator. + + Okay: if "alph" in d:\n print d["alph"] + W601: assert d.has_key('alph') + """ + pos = logical_line.find('.has_key(') + if pos > -1 and not noqa: + yield pos, "W601 .has_key() is deprecated, use 'in'" + + +@register_check +def python_3000_raise_comma(logical_line): + r"""When raising an exception, use "raise ValueError('message')". + + The older form is removed in Python 3. + + Okay: raise DummyError("Message") + W602: raise DummyError, "Message" + """ + match = RAISE_COMMA_REGEX.match(logical_line) + if match and not RERAISE_COMMA_REGEX.match(logical_line): + yield match.end() - 1, "W602 deprecated form of raising exception" + + +@register_check +def python_3000_not_equal(logical_line): + r"""New code should always use != instead of <>. + + The older syntax is removed in Python 3. + + Okay: if a != 'no': + W603: if a <> 'no': + """ + pos = logical_line.find('<>') + if pos > -1: + yield pos, "W603 '<>' is deprecated, use '!='" + + +@register_check +def python_3000_backticks(logical_line): + r"""Use repr() instead of backticks in Python 3. + + Okay: val = repr(1 + 2) + W604: val = `1 + 2` + """ + pos = logical_line.find('`') + if pos > -1: + yield pos, "W604 backticks are deprecated, use 'repr()'" + + +@register_check +def python_3000_invalid_escape_sequence(logical_line, tokens, noqa): + r"""Invalid escape sequences are deprecated in Python 3.6. 
+ + Okay: regex = r'\.png$' + W605: regex = '\.png$' + """ + if noqa: + return + + # https://docs.python.org/3/reference/lexical_analysis.html#string-and-bytes-literals + valid = [ + '\n', + '\\', + '\'', + '"', + 'a', + 'b', + 'f', + 'n', + 'r', + 't', + 'v', + '0', '1', '2', '3', '4', '5', '6', '7', + 'x', + + # Escape sequences only recognized in string literals + 'N', + 'u', + 'U', + ] + + for token_type, text, start, end, line in tokens: + if token_type == tokenize.STRING: + start_line, start_col = start + quote = text[-3:] if text[-3:] in ('"""', "'''") else text[-1] + # Extract string modifiers (e.g. u or r) + quote_pos = text.index(quote) + prefix = text[:quote_pos].lower() + start = quote_pos + len(quote) + string = text[start:-len(quote)] + + if 'r' not in prefix: + pos = string.find('\\') + while pos >= 0: + pos += 1 + if string[pos] not in valid: + line = start_line + string.count('\n', 0, pos) + if line == start_line: + col = start_col + len(prefix) + len(quote) + pos + else: + col = pos - string.rfind('\n', 0, pos) - 1 + yield ( + (line, col - 1), + "W605 invalid escape sequence '\\%s'" % + string[pos], + ) + pos = string.find('\\', pos + 1) + + +@register_check +def python_3000_async_await_keywords(logical_line, tokens): + """'async' and 'await' are reserved keywords starting at Python 3.7. + + W606: async = 42 + W606: await = 42 + Okay: async def read(db):\n data = await db.fetch('SELECT ...') + """ + # The Python tokenize library before Python 3.5 recognizes + # async/await as a NAME token. Therefore, use a state machine to + # look for the possible async/await constructs as defined by the + # Python grammar: + # https://docs.python.org/3/reference/grammar.html + + state = None + for token_type, text, start, end, line in tokens: + error = False + + if token_type == tokenize.NL: + continue + + if state is None: + if token_type == tokenize.NAME: + if text == 'async': + state = ('async_stmt', start) + elif text == 'await': + state = ('await', start) + elif (token_type == tokenize.NAME and + text in ('def', 'for')): + state = ('define', start) + + elif state[0] == 'async_stmt': + if token_type == tokenize.NAME and text in ('def', 'with', 'for'): + # One of funcdef, with_stmt, or for_stmt. Return to + # looking for async/await names. + state = None + else: + error = True + elif state[0] == 'await': + if token_type == tokenize.NAME: + # An await expression. Return to looking for async/await + # names. + state = None + elif token_type == tokenize.OP and text == '(': + state = None + else: + error = True + elif state[0] == 'define': + if token_type == tokenize.NAME and text in ('async', 'await'): + error = True + else: + state = None + + if error: + yield ( + state[1], + "W606 'async' and 'await' are reserved keywords starting with " + "Python 3.7", + ) + state = None + + # Last token + if state is not None: + yield ( + state[1], + "W606 'async' and 'await' are reserved keywords starting with " + "Python 3.7", + ) + + +######################################################################## +@register_check +def maximum_doc_length(logical_line, max_doc_length, noqa, tokens): + r"""Limit all doc lines to a maximum of 72 characters. + + For flowing long blocks of text (docstrings or comments), limiting + the length to 72 characters is recommended. 
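+
+    (Editorial example, not in the original docstring: with
+    ``--max-doc-length=72``, an 80-character comment or docstring line is
+    reported as "W505 doc line too long (80 > 72 characters)".)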
+ + Reports warning W505 + """ + if max_doc_length is None or noqa: + return + + prev_token = None + skip_lines = set() + # Skip lines that + for token_type, text, start, end, line in tokens: + if token_type not in SKIP_COMMENTS.union([tokenize.STRING]): + skip_lines.add(line) + + for token_type, text, start, end, line in tokens: + # Skip lines that aren't pure strings + if token_type == tokenize.STRING and skip_lines: + continue + if token_type in (tokenize.STRING, tokenize.COMMENT): + # Only check comment-only lines + if prev_token is None or prev_token in SKIP_TOKENS: + lines = line.splitlines() + for line_num, physical_line in enumerate(lines): + if hasattr(physical_line, 'decode'): # Python 2 + # The line could contain multi-byte characters + try: + physical_line = physical_line.decode('utf-8') + except UnicodeError: + pass + if start[0] + line_num == 1 and line.startswith('#!'): + return + length = len(physical_line) + chunks = physical_line.split() + if token_type == tokenize.COMMENT: + if (len(chunks) == 2 and + length - len(chunks[-1]) < MAX_DOC_LENGTH): + continue + if len(chunks) == 1 and line_num + 1 < len(lines): + if (len(chunks) == 1 and + length - len(chunks[-1]) < MAX_DOC_LENGTH): + continue + if length > max_doc_length: + doc_error = (start[0] + line_num, max_doc_length) + yield (doc_error, "W505 doc line too long " + "(%d > %d characters)" + % (length, max_doc_length)) + prev_token = token_type + + +######################################################################## +# Helper functions +######################################################################## + + +if sys.version_info < (3,): + # Python 2: implicit encoding. + def readlines(filename): + """Read the source code.""" + with open(filename, 'rU') as f: + return f.readlines() + isidentifier = re.compile(r'[a-zA-Z_]\w*$').match + stdin_get_value = sys.stdin.read +else: + # Python 3 + def readlines(filename): + """Read the source code.""" + try: + with open(filename, 'rb') as f: + (coding, lines) = tokenize.detect_encoding(f.readline) + f = TextIOWrapper(f, coding, line_buffering=True) + return [line.decode(coding) for line in lines] + f.readlines() + except (LookupError, SyntaxError, UnicodeError): + # Fall back if file encoding is improperly declared + with open(filename, encoding='latin-1') as f: + return f.readlines() + isidentifier = str.isidentifier + + def stdin_get_value(): + """Read the value from stdin.""" + return TextIOWrapper(sys.stdin.buffer, errors='ignore').read() + +noqa = lru_cache(512)(re.compile(r'# no(?:qa|pep8)\b', re.I).search) + + +def expand_indent(line): + r"""Return the amount of indentation. + + Tabs are expanded to the next multiple of 8. + + >>> expand_indent(' ') + 4 + >>> expand_indent('\t') + 8 + >>> expand_indent(' \t') + 8 + >>> expand_indent(' \t') + 16 + """ + line = line.rstrip('\n\r') + if '\t' not in line: + return len(line) - len(line.lstrip()) + result = 0 + for char in line: + if char == '\t': + result = result // 8 * 8 + 8 + elif char == ' ': + result += 1 + else: + break + return result + + +def mute_string(text): + """Replace contents with 'xxx' to prevent syntax matching. + + >>> mute_string('"abc"') + '"xxx"' + >>> mute_string("'''abc'''") + "'''xxx'''" + >>> mute_string("r'abc'") + "r'xxx'" + """ + # String modifiers (e.g. 
u or r) + start = text.index(text[-1]) + 1 + end = len(text) - 1 + # Triple quotes + if text[-3:] in ('"""', "'''"): + start += 2 + end -= 2 + return text[:start] + 'x' * (end - start) + text[end:] + + +def parse_udiff(diff, patterns=None, parent='.'): + """Return a dictionary of matching lines.""" + # For each file of the diff, the entry key is the filename, + # and the value is a set of row numbers to consider. + rv = {} + path = nrows = None + for line in diff.splitlines(): + if nrows: + if line[:1] != '-': + nrows -= 1 + continue + if line[:3] == '@@ ': + hunk_match = HUNK_REGEX.match(line) + (row, nrows) = [int(g or '1') for g in hunk_match.groups()] + rv[path].update(range(row, row + nrows)) + elif line[:3] == '+++': + path = line[4:].split('\t', 1)[0] + # Git diff will use (i)ndex, (w)ork tree, (c)ommit and + # (o)bject instead of a/b/c/d as prefixes for patches + if path[:2] in ('b/', 'w/', 'i/'): + path = path[2:] + rv[path] = set() + return { + os.path.join(parent, filepath): rows + for (filepath, rows) in rv.items() + if rows and filename_match(filepath, patterns) + } + + +def normalize_paths(value, parent=os.curdir): + """Parse a comma-separated list of paths. + + Return a list of absolute paths. + """ + if not value: + return [] + if isinstance(value, list): + return value + paths = [] + for path in value.split(','): + path = path.strip() + if '/' in path: + path = os.path.abspath(os.path.join(parent, path)) + paths.append(path.rstrip('/')) + return paths + + +def filename_match(filename, patterns, default=True): + """Check if patterns contains a pattern that matches filename. + + If patterns is unspecified, this always returns True. + """ + if not patterns: + return default + return any(fnmatch(filename, pattern) for pattern in patterns) + + +def update_counts(s, counts): + r"""Adds one to the counts of each appearance of characters in s, + for characters in counts""" + for char in s: + if char in counts: + counts[char] += 1 + + +def _is_eol_token(token): + return token[0] in NEWLINE or token[4][token[3][1]:].lstrip() == '\\\n' + + +######################################################################## +# Framework to run all checks +######################################################################## + + +class Checker(object): + """Load a Python source file, tokenize it, check coding style.""" + + def __init__(self, filename=None, lines=None, + options=None, report=None, **kwargs): + if options is None: + options = StyleGuide(kwargs).options + else: + assert not kwargs + self._io_error = None + self._physical_checks = options.physical_checks + self._logical_checks = options.logical_checks + self._ast_checks = options.ast_checks + self.max_line_length = options.max_line_length + self.max_doc_length = options.max_doc_length + self.multiline = False # in a multiline string? + self.hang_closing = options.hang_closing + self.verbose = options.verbose + self.filename = filename + # Dictionary where a checker can store its custom state. 
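+        # (Editorial, illustrative: module_imports_on_top_of_file, for
+        # example, records flags such as 'seen_docstring' and
+        # 'seen_non_imports' here via its `checker_state` argument.)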
+ self._checker_states = {} + if filename is None: + self.filename = 'stdin' + self.lines = lines or [] + elif filename == '-': + self.filename = 'stdin' + self.lines = stdin_get_value().splitlines(True) + elif lines is None: + try: + self.lines = readlines(filename) + except IOError: + (exc_type, exc) = sys.exc_info()[:2] + self._io_error = '%s: %s' % (exc_type.__name__, exc) + self.lines = [] + else: + self.lines = lines + if self.lines: + ord0 = ord(self.lines[0][0]) + if ord0 in (0xef, 0xfeff): # Strip the UTF-8 BOM + if ord0 == 0xfeff: + self.lines[0] = self.lines[0][1:] + elif self.lines[0][:3] == '\xef\xbb\xbf': + self.lines[0] = self.lines[0][3:] + self.report = report or options.report + self.report_error = self.report.error + self.noqa = False + + def report_invalid_syntax(self): + """Check if the syntax is valid.""" + (exc_type, exc) = sys.exc_info()[:2] + if len(exc.args) > 1: + offset = exc.args[1] + if len(offset) > 2: + offset = offset[1:3] + else: + offset = (1, 0) + self.report_error(offset[0], offset[1] or 0, + 'E901 %s: %s' % (exc_type.__name__, exc.args[0]), + self.report_invalid_syntax) + + def readline(self): + """Get the next line from the input buffer.""" + if self.line_number >= self.total_lines: + return '' + line = self.lines[self.line_number] + self.line_number += 1 + if self.indent_char is None and line[:1] in WHITESPACE: + self.indent_char = line[0] + return line + + def run_check(self, check, argument_names): + """Run a check plugin.""" + arguments = [] + for name in argument_names: + arguments.append(getattr(self, name)) + return check(*arguments) + + def init_checker_state(self, name, argument_names): + """Prepare custom state for the specific checker plugin.""" + if 'checker_state' in argument_names: + self.checker_state = self._checker_states.setdefault(name, {}) + + def check_physical(self, line): + """Run all physical checks on a raw input line.""" + self.physical_line = line + for name, check, argument_names in self._physical_checks: + self.init_checker_state(name, argument_names) + result = self.run_check(check, argument_names) + if result is not None: + (offset, text) = result + self.report_error(self.line_number, offset, text, check) + if text[:4] == 'E101': + self.indent_char = line[0] + + def build_tokens_line(self): + """Build a logical line from tokens.""" + logical = [] + comments = [] + length = 0 + prev_row = prev_col = mapping = None + for token_type, text, start, end, line in self.tokens: + if token_type in SKIP_TOKENS: + continue + if not mapping: + mapping = [(0, start)] + if token_type == tokenize.COMMENT: + comments.append(text) + continue + if token_type == tokenize.STRING: + text = mute_string(text) + if prev_row: + (start_row, start_col) = start + if prev_row != start_row: # different row + prev_text = self.lines[prev_row - 1][prev_col - 1] + if prev_text == ',' or (prev_text not in '{[(' and + text not in '}])'): + text = ' ' + text + elif prev_col != start_col: # different column + text = line[prev_col:start_col] + text + logical.append(text) + length += len(text) + mapping.append((length, end)) + (prev_row, prev_col) = end + self.logical_line = ''.join(logical) + self.noqa = comments and noqa(''.join(comments)) + return mapping + + def check_logical(self): + """Build a line from tokens and run all logical checks on it.""" + self.report.increment_logical_line() + mapping = self.build_tokens_line() + if not mapping: + return + + mapping_offsets = [offset for offset, _ in mapping] + (start_row, start_col) = mapping[0][1] + start_line 
= self.lines[start_row - 1] + self.indent_level = expand_indent(start_line[:start_col]) + if self.blank_before < self.blank_lines: + self.blank_before = self.blank_lines + if self.verbose >= 2: + print(self.logical_line[:80].rstrip()) + for name, check, argument_names in self._logical_checks: + if self.verbose >= 4: + print(' ' + name) + self.init_checker_state(name, argument_names) + for offset, text in self.run_check(check, argument_names) or (): + if not isinstance(offset, tuple): + # As mappings are ordered, bisecting is a fast way + # to find a given offset in them. + token_offset, pos = mapping[bisect.bisect_left( + mapping_offsets, offset)] + offset = (pos[0], pos[1] + offset - token_offset) + self.report_error(offset[0], offset[1], text, check) + if self.logical_line: + self.previous_indent_level = self.indent_level + self.previous_logical = self.logical_line + if not self.indent_level: + self.previous_unindented_logical_line = self.logical_line + self.blank_lines = 0 + self.tokens = [] + + def check_ast(self): + """Build the file's AST and run all AST checks.""" + try: + tree = compile(''.join(self.lines), '', 'exec', PyCF_ONLY_AST) + except (ValueError, SyntaxError, TypeError): + return self.report_invalid_syntax() + for name, cls, __ in self._ast_checks: + checker = cls(tree, self.filename) + for lineno, offset, text, check in checker.run(): + if not self.lines or not noqa(self.lines[lineno - 1]): + self.report_error(lineno, offset, text, check) + + def generate_tokens(self): + """Tokenize file, run physical line checks and yield tokens.""" + if self._io_error: + self.report_error(1, 0, 'E902 %s' % self._io_error, readlines) + tokengen = tokenize.generate_tokens(self.readline) + try: + for token in tokengen: + if token[2][0] > self.total_lines: + return + self.noqa = token[4] and noqa(token[4]) + self.maybe_check_physical(token) + yield token + except (SyntaxError, tokenize.TokenError): + self.report_invalid_syntax() + + def maybe_check_physical(self, token): + """If appropriate for token, check current physical line(s).""" + # Called after every token, but act only on end of line. + if _is_eol_token(token): + # Obviously, a newline token ends a single physical line. + self.check_physical(token[4]) + elif token[0] == tokenize.STRING and '\n' in token[1]: + # Less obviously, a string that contains newlines is a + # multiline string, either triple-quoted or with internal + # newlines backslash-escaped. Check every physical line in + # the string *except* for the last one: its newline is + # outside of the multiline string, so we consider it a + # regular physical line, and will check it like any other + # physical line. 
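+            #
+            # (Editorial, an illustrative sketch: for a triple-quoted token
+            # spanning three physical lines, the first two lines are checked
+            # by the loop below; the closing line falls through to the
+            # regular physical-line pass.)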
+ # + # Subtleties: + # - we don't *completely* ignore the last line; if it + # contains the magical "# noqa" comment, we disable all + # physical checks for the entire multiline string + # - have to wind self.line_number back because initially it + # points to the last line of the string, and we want + # check_physical() to give accurate feedback + if noqa(token[4]): + return + self.multiline = True + self.line_number = token[2][0] + _, src, (_, offset), _, _ = token + src = self.lines[self.line_number - 1][:offset] + src + for line in src.split('\n')[:-1]: + self.check_physical(line + '\n') + self.line_number += 1 + self.multiline = False + + def check_all(self, expected=None, line_offset=0): + """Run all checks on the input file.""" + self.report.init_file(self.filename, self.lines, expected, line_offset) + self.total_lines = len(self.lines) + if self._ast_checks: + self.check_ast() + self.line_number = 0 + self.indent_char = None + self.indent_level = self.previous_indent_level = 0 + self.previous_logical = '' + self.previous_unindented_logical_line = '' + self.tokens = [] + self.blank_lines = self.blank_before = 0 + parens = 0 + for token in self.generate_tokens(): + self.tokens.append(token) + token_type, text = token[0:2] + if self.verbose >= 3: + if token[2][0] == token[3][0]: + pos = '[%s:%s]' % (token[2][1] or '', token[3][1]) + else: + pos = 'l.%s' % token[3][0] + print('l.%s\t%s\t%s\t%r' % + (token[2][0], pos, tokenize.tok_name[token[0]], text)) + if token_type == tokenize.OP: + if text in '([{': + parens += 1 + elif text in '}])': + parens -= 1 + elif not parens: + if token_type in NEWLINE: + if token_type == tokenize.NEWLINE: + self.check_logical() + self.blank_before = 0 + elif len(self.tokens) == 1: + # The physical line contains only this token. 
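+                        # (Editorial, illustrative: an entirely blank source
+                        # line tokenizes to a single NL token, so it is
+                        # counted as a blank line here and dropped from the
+                        # token buffer.)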
+ self.blank_lines += 1 + del self.tokens[0] + else: + self.check_logical() + if self.tokens: + self.check_physical(self.lines[-1]) + self.check_logical() + return self.report.get_file_results() + + +class BaseReport(object): + """Collect the results of the checks.""" + + print_filename = False + + def __init__(self, options): + self._benchmark_keys = options.benchmark_keys + self._ignore_code = options.ignore_code + # Results + self.elapsed = 0 + self.total_errors = 0 + self.counters = dict.fromkeys(self._benchmark_keys, 0) + self.messages = {} + + def start(self): + """Start the timer.""" + self._start_time = time.time() + + def stop(self): + """Stop the timer.""" + self.elapsed = time.time() - self._start_time + + def init_file(self, filename, lines, expected, line_offset): + """Signal a new file.""" + self.filename = filename + self.lines = lines + self.expected = expected or () + self.line_offset = line_offset + self.file_errors = 0 + self.counters['files'] += 1 + self.counters['physical lines'] += len(lines) + + def increment_logical_line(self): + """Signal a new logical line.""" + self.counters['logical lines'] += 1 + + def error(self, line_number, offset, text, check): + """Report an error, according to options.""" + code = text[:4] + if self._ignore_code(code): + return + if code in self.counters: + self.counters[code] += 1 + else: + self.counters[code] = 1 + self.messages[code] = text[5:] + # Don't care about expected errors or warnings + if code in self.expected: + return + if self.print_filename and not self.file_errors: + print(self.filename) + self.file_errors += 1 + self.total_errors += 1 + return code + + def get_file_results(self): + """Return the count of errors and warnings for this file.""" + return self.file_errors + + def get_count(self, prefix=''): + """Return the total count of errors and warnings.""" + return sum(self.counters[key] + for key in self.messages if key.startswith(prefix)) + + def get_statistics(self, prefix=''): + """Get statistics for message codes that start with the prefix. 
+ + prefix='' matches all errors and warnings + prefix='E' matches all errors + prefix='W' matches all warnings + prefix='E4' matches all errors that have to do with imports + """ + return ['%-7s %s %s' % (self.counters[key], key, self.messages[key]) + for key in sorted(self.messages) if key.startswith(prefix)] + + def print_statistics(self, prefix=''): + """Print overall statistics (number of errors and warnings).""" + for line in self.get_statistics(prefix): + print(line) + + def print_benchmark(self): + """Print benchmark numbers.""" + print('%-7.2f %s' % (self.elapsed, 'seconds elapsed')) + if self.elapsed: + for key in self._benchmark_keys: + print('%-7d %s per second (%d total)' % + (self.counters[key] / self.elapsed, key, + self.counters[key])) + + +class FileReport(BaseReport): + """Collect the results of the checks and print the filenames.""" + + print_filename = True + + +class StandardReport(BaseReport): + """Collect and print the results of the checks.""" + + def __init__(self, options): + super(StandardReport, self).__init__(options) + self._fmt = REPORT_FORMAT.get(options.format.lower(), + options.format) + self._repeat = options.repeat + self._show_source = options.show_source + self._show_pep8 = options.show_pep8 + + def init_file(self, filename, lines, expected, line_offset): + """Signal a new file.""" + self._deferred_print = [] + return super(StandardReport, self).init_file( + filename, lines, expected, line_offset) + + def error(self, line_number, offset, text, check): + """Report an error, according to options.""" + code = super(StandardReport, self).error(line_number, offset, + text, check) + if code and (self.counters[code] == 1 or self._repeat): + self._deferred_print.append( + (line_number, offset, code, text[5:], check.__doc__)) + return code + + def get_file_results(self): + """Print results and return the overall count for this file.""" + self._deferred_print.sort() + for line_number, offset, code, text, doc in self._deferred_print: + print(self._fmt % { + 'path': self.filename, + 'row': self.line_offset + line_number, 'col': offset + 1, + 'code': code, 'text': text, + }) + if self._show_source: + if line_number > len(self.lines): + line = '' + else: + line = self.lines[line_number - 1] + print(line.rstrip()) + print(re.sub(r'\S', ' ', line[:offset]) + '^') + if self._show_pep8 and doc: + print(' ' + doc.strip()) + + # stdout is block buffered when not stdout.isatty(). + # line can be broken where buffer boundary since other + # processes write to same file. + # flush() after print() to avoid buffer boundary. + # Typical buffer size is 8192. line written safely when + # len(line) < 8192. 
+ sys.stdout.flush() + return self.file_errors + + +class DiffReport(StandardReport): + """Collect and print the results for the changed lines only.""" + + def __init__(self, options): + super(DiffReport, self).__init__(options) + self._selected = options.selected_lines + + def error(self, line_number, offset, text, check): + if line_number not in self._selected[self.filename]: + return + return super(DiffReport, self).error(line_number, offset, text, check) + + +class StyleGuide(object): + """Initialize a PEP-8 instance with few options.""" + + def __init__(self, *args, **kwargs): + # build options from the command line + self.checker_class = kwargs.pop('checker_class', Checker) + parse_argv = kwargs.pop('parse_argv', False) + config_file = kwargs.pop('config_file', False) + parser = kwargs.pop('parser', None) + # build options from dict + options_dict = dict(*args, **kwargs) + arglist = None if parse_argv else options_dict.get('paths', None) + verbose = options_dict.get('verbose', None) + options, self.paths = process_options( + arglist, parse_argv, config_file, parser, verbose) + if options_dict: + options.__dict__.update(options_dict) + if 'paths' in options_dict: + self.paths = options_dict['paths'] + + self.runner = self.input_file + self.options = options + + if not options.reporter: + options.reporter = BaseReport if options.quiet else StandardReport + + options.select = tuple(options.select or ()) + if not (options.select or options.ignore or + options.testsuite or options.doctest) and DEFAULT_IGNORE: + # The default choice: ignore controversial checks + options.ignore = tuple(DEFAULT_IGNORE.split(',')) + else: + # Ignore all checks which are not explicitly selected + options.ignore = ('',) if options.select else tuple(options.ignore) + options.benchmark_keys = BENCHMARK_KEYS[:] + options.ignore_code = self.ignore_code + options.physical_checks = self.get_checks('physical_line') + options.logical_checks = self.get_checks('logical_line') + options.ast_checks = self.get_checks('tree') + self.init_report() + + def init_report(self, reporter=None): + """Initialize the report instance.""" + self.options.report = (reporter or self.options.reporter)(self.options) + return self.options.report + + def check_files(self, paths=None): + """Run all checks on the paths.""" + if paths is None: + paths = self.paths + report = self.options.report + runner = self.runner + report.start() + try: + for path in paths: + if os.path.isdir(path): + self.input_dir(path) + elif not self.excluded(path): + runner(path) + except KeyboardInterrupt: + print('... stopped') + report.stop() + return report + + def input_file(self, filename, lines=None, expected=None, line_offset=0): + """Run all checks on a Python source file.""" + if self.options.verbose: + print('checking %s' % filename) + fchecker = self.checker_class( + filename, lines=lines, options=self.options) + return fchecker.check_all(expected=expected, line_offset=line_offset) + + def input_dir(self, dirname): + """Check all files in this directory and all subdirectories.""" + dirname = dirname.rstrip('/') + if self.excluded(dirname): + return 0 + counters = self.options.report.counters + verbose = self.options.verbose + filepatterns = self.options.filename + runner = self.runner + for root, dirs, files in os.walk(dirname): + if verbose: + print('directory ' + root) + counters['directories'] += 1 + for subdir in sorted(dirs): + if self.excluded(subdir, root): + dirs.remove(subdir) + for filename in sorted(files): + # contain a pattern that matches? 
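+                # (Editorial, illustrative: with the default --filename
+                # pattern '*.py', a file named 'brew.py' is checked while
+                # 'notes.txt' is skipped; each match is still subject to
+                # the exclude patterns.)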
+ if ((filename_match(filename, filepatterns) and + not self.excluded(filename, root))): + runner(os.path.join(root, filename)) + + def excluded(self, filename, parent=None): + """Check if the file should be excluded. + + Check if 'options.exclude' contains a pattern matching filename. + """ + if not self.options.exclude: + return False + basename = os.path.basename(filename) + if filename_match(basename, self.options.exclude): + return True + if parent: + filename = os.path.join(parent, filename) + filename = os.path.abspath(filename) + return filename_match(filename, self.options.exclude) + + def ignore_code(self, code): + """Check if the error code should be ignored. + + If 'options.select' contains a prefix of the error code, + return False. Else, if 'options.ignore' contains a prefix of + the error code, return True. + """ + if len(code) < 4 and any(s.startswith(code) + for s in self.options.select): + return False + return (code.startswith(self.options.ignore) and + not code.startswith(self.options.select)) + + def get_checks(self, argument_name): + """Get all the checks for this category. + + Find all globally visible functions where the first argument + name starts with argument_name and which contain selected tests. + """ + checks = [] + for check, attrs in _checks[argument_name].items(): + (codes, args) = attrs + if any(not (code and self.ignore_code(code)) for code in codes): + checks.append((check.__name__, check, args)) + return sorted(checks) + + +def get_parser(prog='pycodestyle', version=__version__): + """Create the parser for the program.""" + parser = OptionParser(prog=prog, version=version, + usage="%prog [options] input ...") + parser.config_options = [ + 'exclude', 'filename', 'select', 'ignore', 'max-line-length', + 'max-doc-length', 'hang-closing', 'count', 'format', 'quiet', + 'show-pep8', 'show-source', 'statistics', 'verbose'] + parser.add_option('-v', '--verbose', default=0, action='count', + help="print status messages, or debug with -vv") + parser.add_option('-q', '--quiet', default=0, action='count', + help="report only file names, or nothing with -qq") + parser.add_option('-r', '--repeat', default=True, action='store_true', + help="(obsolete) show all occurrences of the same error") + parser.add_option('--first', action='store_false', dest='repeat', + help="show first occurrence of each error") + parser.add_option('--exclude', metavar='patterns', default=DEFAULT_EXCLUDE, + help="exclude files or directories which match these " + "comma separated patterns (default: %default)") + parser.add_option('--filename', metavar='patterns', default='*.py', + help="when parsing directories, only check filenames " + "matching these comma separated patterns " + "(default: %default)") + parser.add_option('--select', metavar='errors', default='', + help="select errors and warnings (e.g. E,W6)") + parser.add_option('--ignore', metavar='errors', default='', + help="skip errors and warnings (e.g. 
E4,W) " + "(default: %s)" % DEFAULT_IGNORE) + parser.add_option('--show-source', action='store_true', + help="show source code for each error") + parser.add_option('--show-pep8', action='store_true', + help="show text of PEP 8 for each error " + "(implies --first)") + parser.add_option('--statistics', action='store_true', + help="count errors and warnings") + parser.add_option('--count', action='store_true', + help="print total number of errors and warnings " + "to standard error and set exit code to 1 if " + "total is not null") + parser.add_option('--max-line-length', type='int', metavar='n', + default=MAX_LINE_LENGTH, + help="set maximum allowed line length " + "(default: %default)") + parser.add_option('--max-doc-length', type='int', metavar='n', + default=None, + help="set maximum allowed doc line length and perform " + "these checks (unchecked if not set)") + parser.add_option('--hang-closing', action='store_true', + help="hang closing bracket instead of matching " + "indentation of opening bracket's line") + parser.add_option('--format', metavar='format', default='default', + help="set the error format [default|pylint|]") + parser.add_option('--diff', action='store_true', + help="report changes only within line number ranges in " + "the unified diff received on STDIN") + group = parser.add_option_group("Testing Options") + if os.path.exists(TESTSUITE_PATH): + group.add_option('--testsuite', metavar='dir', + help="run regression tests from dir") + group.add_option('--doctest', action='store_true', + help="run doctest on myself") + group.add_option('--benchmark', action='store_true', + help="measure processing speed") + return parser + + +def read_config(options, args, arglist, parser): + """Read and parse configurations. + + If a config file is specified on the command line with the + "--config" option, then only it is used for configuration. + + Otherwise, the user configuration (~/.config/pycodestyle) and any + local configurations in the current directory or above will be + merged together (in that order) using the read method of + ConfigParser. + """ + config = RawConfigParser() + + cli_conf = options.config + + local_dir = os.curdir + + if USER_CONFIG and os.path.isfile(USER_CONFIG): + if options.verbose: + print('user configuration: %s' % USER_CONFIG) + config.read(USER_CONFIG) + + parent = tail = args and os.path.abspath(os.path.commonprefix(args)) + while tail: + if config.read(os.path.join(parent, fn) for fn in PROJECT_CONFIG): + local_dir = parent + if options.verbose: + print('local configuration: in %s' % parent) + break + (parent, tail) = os.path.split(parent) + + if cli_conf and os.path.isfile(cli_conf): + if options.verbose: + print('cli configuration: %s' % cli_conf) + config.read(cli_conf) + + pycodestyle_section = None + if config.has_section(parser.prog): + pycodestyle_section = parser.prog + elif config.has_section('pep8'): + pycodestyle_section = 'pep8' # Deprecated + warnings.warn('[pep8] section is deprecated. 
Use [pycodestyle].') + + if pycodestyle_section: + option_list = {o.dest: o.type or o.action for o in parser.option_list} + + # First, read the default values + (new_options, __) = parser.parse_args([]) + + # Second, parse the configuration + for opt in config.options(pycodestyle_section): + if opt.replace('_', '-') not in parser.config_options: + print(" unknown option '%s' ignored" % opt) + continue + if options.verbose > 1: + print(" %s = %s" % (opt, + config.get(pycodestyle_section, opt))) + normalized_opt = opt.replace('-', '_') + opt_type = option_list[normalized_opt] + if opt_type in ('int', 'count'): + value = config.getint(pycodestyle_section, opt) + elif opt_type in ('store_true', 'store_false'): + value = config.getboolean(pycodestyle_section, opt) + else: + value = config.get(pycodestyle_section, opt) + if normalized_opt == 'exclude': + value = normalize_paths(value, local_dir) + setattr(new_options, normalized_opt, value) + + # Third, overwrite with the command-line options + (options, __) = parser.parse_args(arglist, values=new_options) + options.doctest = options.testsuite = False + return options + + +def process_options(arglist=None, parse_argv=False, config_file=None, + parser=None, verbose=None): + """Process options passed either via arglist or command line args. + + Passing in the ``config_file`` parameter allows other tools, such as + flake8 to specify their own options to be processed in pycodestyle. + """ + if not parser: + parser = get_parser() + if not parser.has_option('--config'): + group = parser.add_option_group("Configuration", description=( + "The project options are read from the [%s] section of the " + "tox.ini file or the setup.cfg file located in any parent folder " + "of the path(s) being processed. Allowed options are: %s." % + (parser.prog, ', '.join(parser.config_options)))) + group.add_option('--config', metavar='path', default=config_file, + help="user config file location") + # Don't read the command line if the module is used as a library. + if not arglist and not parse_argv: + arglist = [] + # If parse_argv is True and arglist is None, arguments are + # parsed from the command line (sys.argv) + (options, args) = parser.parse_args(arglist) + options.reporter = None + + # If explicitly specified verbosity, override any `-v` CLI flag + if verbose is not None: + options.verbose = verbose + + if options.ensure_value('testsuite', False): + args.append(options.testsuite) + elif not options.ensure_value('doctest', False): + if parse_argv and not args: + if options.diff or any(os.path.exists(name) + for name in PROJECT_CONFIG): + args = ['.'] + else: + parser.error('input not specified') + options = read_config(options, args, arglist, parser) + options.reporter = parse_argv and options.quiet == 1 and FileReport + + options.filename = _parse_multi_options(options.filename) + options.exclude = normalize_paths(options.exclude) + options.select = _parse_multi_options(options.select) + options.ignore = _parse_multi_options(options.ignore) + + if options.diff: + options.reporter = DiffReport + stdin = stdin_get_value() + options.selected_lines = parse_udiff(stdin, options.filename, args[0]) + args = sorted(options.selected_lines) + + return options, args + + +def _parse_multi_options(options, split_token=','): + r"""Split and strip and discard empties. 
+ + Turns the following: + + A, + B, + + into ["A", "B"] + """ + if options: + return [o.strip() for o in options.split(split_token) if o.strip()] + else: + return options + + +def _main(): + """Parse options and run checks on Python source.""" + import signal + + # Handle "Broken pipe" gracefully + try: + signal.signal(signal.SIGPIPE, lambda signum, frame: sys.exit(1)) + except AttributeError: + pass # not supported on Windows + + style_guide = StyleGuide(parse_argv=True) + options = style_guide.options + + if options.doctest or options.testsuite: + from testsuite.support import run_tests + report = run_tests(style_guide) + else: + report = style_guide.check_files() + + if options.statistics: + report.print_statistics() + + if options.benchmark: + report.print_benchmark() + + if options.testsuite and not options.quiet: + report.print_results() + + if report.total_errors: + if options.count: + sys.stderr.write(str(report.total_errors) + '\n') + sys.exit(1) + + +if __name__ == '__main__': + _main() diff --git a/venv3/lib/python3.7/site-packages/toml-0.10.2.dist-info/INSTALLER b/venv3/lib/python3.7/site-packages/toml-0.10.2.dist-info/INSTALLER new file mode 100644 index 0000000..a1b589e --- /dev/null +++ b/venv3/lib/python3.7/site-packages/toml-0.10.2.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/venv3/lib/python3.7/site-packages/toml-0.10.2.dist-info/LICENSE b/venv3/lib/python3.7/site-packages/toml-0.10.2.dist-info/LICENSE new file mode 100644 index 0000000..5010e30 --- /dev/null +++ b/venv3/lib/python3.7/site-packages/toml-0.10.2.dist-info/LICENSE @@ -0,0 +1,27 @@ +The MIT License + +Copyright 2013-2019 William Pearson +Copyright 2015-2016 Julien Enselme +Copyright 2016 Google Inc. +Copyright 2017 Samuel Vasko +Copyright 2017 Nate Prewitt +Copyright 2017 Jack Evans +Copyright 2019 Filippo Broggini + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
\ No newline at end of file diff --git a/venv3/lib/python3.7/site-packages/toml-0.10.2.dist-info/METADATA b/venv3/lib/python3.7/site-packages/toml-0.10.2.dist-info/METADATA new file mode 100644 index 0000000..6f2635c --- /dev/null +++ b/venv3/lib/python3.7/site-packages/toml-0.10.2.dist-info/METADATA @@ -0,0 +1,255 @@ +Metadata-Version: 2.1 +Name: toml +Version: 0.10.2 +Summary: Python Library for Tom's Obvious, Minimal Language +Home-page: https://github.com/uiri/toml +Author: William Pearson +Author-email: uiri@xqz.ca +License: MIT +Platform: UNKNOWN +Classifier: Development Status :: 5 - Production/Stable +Classifier: Intended Audience :: Developers +Classifier: License :: OSI Approved :: MIT License +Classifier: Operating System :: OS Independent +Classifier: Programming Language :: Python +Classifier: Programming Language :: Python :: 2 +Classifier: Programming Language :: Python :: 2.6 +Classifier: Programming Language :: Python :: 2.7 +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.3 +Classifier: Programming Language :: Python :: 3.4 +Classifier: Programming Language :: Python :: 3.5 +Classifier: Programming Language :: Python :: 3.6 +Classifier: Programming Language :: Python :: 3.7 +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: Implementation :: CPython +Classifier: Programming Language :: Python :: Implementation :: PyPy +Requires-Python: >=2.6, !=3.0.*, !=3.1.*, !=3.2.* + +**** +TOML +**** + +.. image:: https://img.shields.io/pypi/v/toml + :target: https://pypi.org/project/toml/ + +.. image:: https://travis-ci.org/uiri/toml.svg?branch=master + :target: https://travis-ci.org/uiri/toml + +.. image:: https://img.shields.io/pypi/pyversions/toml.svg + :target: https://pypi.org/project/toml/ + + +A Python library for parsing and creating `TOML `_. + +The module passes `the TOML test suite `_. + +See also: + +* `The TOML Standard `_ +* `The currently supported TOML specification `_ + +Installation +============ + +To install the latest release on `PyPI `_, +simply run: + +:: + + pip install toml + +Or to install the latest development version, run: + +:: + + git clone https://github.com/uiri/toml.git + cd toml + python setup.py install + +Quick Tutorial +============== + +*toml.loads* takes in a string containing standard TOML-formatted data and +returns a dictionary containing the parsed data. + +.. code:: pycon + + >>> import toml + >>> toml_string = """ + ... # This is a TOML document. + ... + ... title = "TOML Example" + ... + ... [owner] + ... name = "Tom Preston-Werner" + ... dob = 1979-05-27T07:32:00-08:00 # First class dates + ... + ... [database] + ... server = "192.168.1.1" + ... ports = [ 8001, 8001, 8002 ] + ... connection_max = 5000 + ... enabled = true + ... + ... [servers] + ... + ... # Indentation (tabs and/or spaces) is allowed but not required + ... [servers.alpha] + ... ip = "10.0.0.1" + ... dc = "eqdc10" + ... + ... [servers.beta] + ... ip = "10.0.0.2" + ... dc = "eqdc10" + ... + ... [clients] + ... data = [ ["gamma", "delta"], [1, 2] ] + ... + ... # Line breaks are OK when inside arrays + ... hosts = [ + ... "alpha", + ... "omega" + ... ] + ... """ + >>> parsed_toml = toml.loads(toml_string) + + +*toml.dumps* takes a dictionary and returns a string containing the +corresponding TOML-formatted data. + +.. 
code:: pycon + + >>> new_toml_string = toml.dumps(parsed_toml) + >>> print(new_toml_string) + title = "TOML Example" + [owner] + name = "Tom Preston-Werner" + dob = 1979-05-27T07:32:00Z + [database] + server = "192.168.1.1" + ports = [ 8001, 8001, 8002,] + connection_max = 5000 + enabled = true + [clients] + data = [ [ "gamma", "delta",], [ 1, 2,],] + hosts = [ "alpha", "omega",] + [servers.alpha] + ip = "10.0.0.1" + dc = "eqdc10" + [servers.beta] + ip = "10.0.0.2" + dc = "eqdc10" + +*toml.dump* takes a dictionary and a file descriptor and returns a string containing the +corresponding TOML-formatted data. + +.. code:: pycon + + >>> with open('new_toml_file.toml', 'w') as f: + ... new_toml_string = toml.dump(parsed_toml, f) + >>> print(new_toml_string) + title = "TOML Example" + [owner] + name = "Tom Preston-Werner" + dob = 1979-05-27T07:32:00Z + [database] + server = "192.168.1.1" + ports = [ 8001, 8001, 8002,] + connection_max = 5000 + enabled = true + [clients] + data = [ [ "gamma", "delta",], [ 1, 2,],] + hosts = [ "alpha", "omega",] + [servers.alpha] + ip = "10.0.0.1" + dc = "eqdc10" + [servers.beta] + ip = "10.0.0.2" + dc = "eqdc10" + +For more functions, view the API Reference below. + +Note +---- + +For Numpy users, by default the data types ``np.floatX`` will not be translated to floats by toml, but will instead be encoded as strings. To get around this, specify the ``TomlNumpyEncoder`` when saving your data. + +.. code:: pycon + + >>> import toml + >>> import numpy as np + >>> a = np.arange(0, 10, dtype=np.double) + >>> output = {'a': a} + >>> toml.dumps(output) + 'a = [ "0.0", "1.0", "2.0", "3.0", "4.0", "5.0", "6.0", "7.0", "8.0", "9.0",]\n' + >>> toml.dumps(output, encoder=toml.TomlNumpyEncoder()) + 'a = [ 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0,]\n' + +API Reference +============= + +``toml.load(f, _dict=dict)`` + Parse a file or a list of files as TOML and return a dictionary. + + :Args: + * ``f``: A path to a file, list of filepaths (to be read into single + object) or a file descriptor + * ``_dict``: The class of the dictionary object to be returned + + :Returns: + A dictionary (or object ``_dict``) containing parsed TOML data + + :Raises: + * ``TypeError``: When ``f`` is an invalid type or is a list containing + invalid types + * ``TomlDecodeError``: When an error occurs while decoding the file(s) + +``toml.loads(s, _dict=dict)`` + Parse a TOML-formatted string to a dictionary. + + :Args: + * ``s``: The TOML-formatted string to be parsed + * ``_dict``: Specifies the class of the returned toml dictionary + + :Returns: + A dictionary (or object ``_dict``) containing parsed TOML data + + :Raises: + * ``TypeError``: When a non-string object is passed + * ``TomlDecodeError``: When an error occurs while decoding the + TOML-formatted string + +``toml.dump(o, f, encoder=None)`` + Write a dictionary to a file containing TOML-formatted data + + :Args: + * ``o``: An object to be converted into TOML + * ``f``: A File descriptor where the TOML-formatted output should be stored + * ``encoder``: An instance of ``TomlEncoder`` (or subclass) for encoding the object. 
If ``None``, will default to ``TomlEncoder`` + + :Returns: + A string containing the TOML-formatted data corresponding to object ``o`` + + :Raises: + * ``TypeError``: When anything other than file descriptor is passed + +``toml.dumps(o, encoder=None)`` + Create a TOML-formatted string from an input object + + :Args: + * ``o``: An object to be converted into TOML + * ``encoder``: An instance of ``TomlEncoder`` (or subclass) for encoding the object. If ``None``, will default to ``TomlEncoder`` + + :Returns: + A string containing the TOML-formatted data corresponding to object ``o`` + + + +Licensing +========= + +This project is released under the terms of the MIT Open Source License. View +*LICENSE.txt* for more information. + + diff --git a/venv3/lib/python3.7/site-packages/toml-0.10.2.dist-info/RECORD b/venv3/lib/python3.7/site-packages/toml-0.10.2.dist-info/RECORD new file mode 100644 index 0000000..973329b --- /dev/null +++ b/venv3/lib/python3.7/site-packages/toml-0.10.2.dist-info/RECORD @@ -0,0 +1,16 @@ +toml/__init__.py,sha256=Au3kqCwKD0cjbf4yJGOpUFwpsY0WHsC1ZRGvWgIKmpc,723 +toml/decoder.py,sha256=hSGTLf-2WBDZ_ddoCHWFy6N647XyMSh1o3rN2o4dEFg,38942 +toml/encoder.py,sha256=XjBc8ayvvlsLyd_qDA4tMWDNmMFRS4DpwtuDSWBq7zo,9940 +toml/ordered.py,sha256=mz03lZmV0bmc9lsYRIUOuj7Dsu5Ptwq-UtGVq5FdVZ4,354 +toml/tz.py,sha256=-5vg8wkg_atnVi2TnEveexIVE7T_FxBVr_-2WVfO1oA,701 +toml-0.10.2.dist-info/LICENSE,sha256=LZKUgj32yJNXyL5JJ_znk2HWVh5e51MtWSbmOTmqpTY,1252 +toml-0.10.2.dist-info/METADATA,sha256=n_YkspvEihd_QXLIZZ50WVSFz3rZ_k7jQP-OU1WUpWY,7142 +toml-0.10.2.dist-info/WHEEL,sha256=ADKeyaGyKF5DwBNE0sRE5pvW-bSkFMJfBuhzZ3rceP4,110 +toml-0.10.2.dist-info/top_level.txt,sha256=2BO8ZRNnvJWgXyiQv66LBb_v87qBzcoUtEBefA75Ouk,5 +toml-0.10.2.dist-info/RECORD,, +toml-0.10.2.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +toml/__pycache__/ordered.cpython-37.pyc,, +toml/__pycache__/tz.cpython-37.pyc,, +toml/__pycache__/decoder.cpython-37.pyc,, +toml/__pycache__/encoder.cpython-37.pyc,, +toml/__pycache__/__init__.cpython-37.pyc,, diff --git a/venv3/lib/python3.7/site-packages/toml-0.10.2.dist-info/WHEEL b/venv3/lib/python3.7/site-packages/toml-0.10.2.dist-info/WHEEL new file mode 100644 index 0000000..6d38aa0 --- /dev/null +++ b/venv3/lib/python3.7/site-packages/toml-0.10.2.dist-info/WHEEL @@ -0,0 +1,6 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.35.1) +Root-Is-Purelib: true +Tag: py2-none-any +Tag: py3-none-any + diff --git a/venv3/lib/python3.7/site-packages/toml-0.10.2.dist-info/top_level.txt b/venv3/lib/python3.7/site-packages/toml-0.10.2.dist-info/top_level.txt new file mode 100644 index 0000000..bd79a65 --- /dev/null +++ b/venv3/lib/python3.7/site-packages/toml-0.10.2.dist-info/top_level.txt @@ -0,0 +1 @@ +toml diff --git a/venv3/lib/python3.7/site-packages/toml/__init__.py b/venv3/lib/python3.7/site-packages/toml/__init__.py new file mode 100644 index 0000000..7719ac2 --- /dev/null +++ b/venv3/lib/python3.7/site-packages/toml/__init__.py @@ -0,0 +1,25 @@ +"""Python module which parses and emits TOML. + +Released under the MIT license. 
+""" + +from toml import encoder +from toml import decoder + +__version__ = "0.10.2" +_spec_ = "0.5.0" + +load = decoder.load +loads = decoder.loads +TomlDecoder = decoder.TomlDecoder +TomlDecodeError = decoder.TomlDecodeError +TomlPreserveCommentDecoder = decoder.TomlPreserveCommentDecoder + +dump = encoder.dump +dumps = encoder.dumps +TomlEncoder = encoder.TomlEncoder +TomlArraySeparatorEncoder = encoder.TomlArraySeparatorEncoder +TomlPreserveInlineDictEncoder = encoder.TomlPreserveInlineDictEncoder +TomlNumpyEncoder = encoder.TomlNumpyEncoder +TomlPreserveCommentEncoder = encoder.TomlPreserveCommentEncoder +TomlPathlibEncoder = encoder.TomlPathlibEncoder diff --git a/venv3/lib/python3.7/site-packages/toml/decoder.py b/venv3/lib/python3.7/site-packages/toml/decoder.py new file mode 100644 index 0000000..bf400e9 --- /dev/null +++ b/venv3/lib/python3.7/site-packages/toml/decoder.py @@ -0,0 +1,1057 @@ +import datetime +import io +from os import linesep +import re +import sys + +from toml.tz import TomlTz + +if sys.version_info < (3,): + _range = xrange # noqa: F821 +else: + unicode = str + _range = range + basestring = str + unichr = chr + + +def _detect_pathlib_path(p): + if (3, 4) <= sys.version_info: + import pathlib + if isinstance(p, pathlib.PurePath): + return True + return False + + +def _ispath(p): + if isinstance(p, (bytes, basestring)): + return True + return _detect_pathlib_path(p) + + +def _getpath(p): + if (3, 6) <= sys.version_info: + import os + return os.fspath(p) + if _detect_pathlib_path(p): + return str(p) + return p + + +try: + FNFError = FileNotFoundError +except NameError: + FNFError = IOError + + +TIME_RE = re.compile(r"([0-9]{2}):([0-9]{2}):([0-9]{2})(\.([0-9]{3,6}))?") + + +class TomlDecodeError(ValueError): + """Base toml Exception / Error.""" + + def __init__(self, msg, doc, pos): + lineno = doc.count('\n', 0, pos) + 1 + colno = pos - doc.rfind('\n', 0, pos) + emsg = '{} (line {} column {} char {})'.format(msg, lineno, colno, pos) + ValueError.__init__(self, emsg) + self.msg = msg + self.doc = doc + self.pos = pos + self.lineno = lineno + self.colno = colno + + +# Matches a TOML number, which allows underscores for readability +_number_with_underscores = re.compile('([0-9])(_([0-9]))*') + + +class CommentValue(object): + def __init__(self, val, comment, beginline, _dict): + self.val = val + separator = "\n" if beginline else " " + self.comment = separator + comment + self._dict = _dict + + def __getitem__(self, key): + return self.val[key] + + def __setitem__(self, key, value): + self.val[key] = value + + def dump(self, dump_value_func): + retstr = dump_value_func(self.val) + if isinstance(self.val, self._dict): + return self.comment + "\n" + unicode(retstr) + else: + return unicode(retstr) + self.comment + + +def _strictly_valid_num(n): + n = n.strip() + if not n: + return False + if n[0] == '_': + return False + if n[-1] == '_': + return False + if "_." 
in n or "._" in n: + return False + if len(n) == 1: + return True + if n[0] == '0' and n[1] not in ['.', 'o', 'b', 'x']: + return False + if n[0] == '+' or n[0] == '-': + n = n[1:] + if len(n) > 1 and n[0] == '0' and n[1] != '.': + return False + if '__' in n: + return False + return True + + +def load(f, _dict=dict, decoder=None): + """Parses named file or files as toml and returns a dictionary + + Args: + f: Path to the file to open, array of files to read into single dict + or a file descriptor + _dict: (optional) Specifies the class of the returned toml dictionary + decoder: The decoder to use + + Returns: + Parsed toml file represented as a dictionary + + Raises: + TypeError -- When f is invalid type + TomlDecodeError: Error while decoding toml + IOError / FileNotFoundError -- When an array with no valid (existing) + (Python 2 / Python 3) file paths is passed + """ + + if _ispath(f): + with io.open(_getpath(f), encoding='utf-8') as ffile: + return loads(ffile.read(), _dict, decoder) + elif isinstance(f, list): + from os import path as op + from warnings import warn + if not [path for path in f if op.exists(path)]: + error_msg = "Load expects a list to contain filenames only." + error_msg += linesep + error_msg += ("The list needs to contain the path of at least one " + "existing file.") + raise FNFError(error_msg) + if decoder is None: + decoder = TomlDecoder(_dict) + d = decoder.get_empty_table() + for l in f: # noqa: E741 + if op.exists(l): + d.update(load(l, _dict, decoder)) + else: + warn("Non-existent filename in list with at least one valid " + "filename") + return d + else: + try: + return loads(f.read(), _dict, decoder) + except AttributeError: + raise TypeError("You can only load a file descriptor, filename or " + "list") + + +_groupname_re = re.compile(r'^[A-Za-z0-9_-]+$') + + +def loads(s, _dict=dict, decoder=None): + """Parses string as toml + + Args: + s: String to be parsed + _dict: (optional) Specifies the class of the returned toml dictionary + + Returns: + Parsed toml file represented as a dictionary + + Raises: + TypeError: When a non-string is passed + TomlDecodeError: Error while decoding toml + """ + + implicitgroups = [] + if decoder is None: + decoder = TomlDecoder(_dict) + retval = decoder.get_empty_table() + currentlevel = retval + if not isinstance(s, basestring): + raise TypeError("Expecting something like a string") + + if not isinstance(s, unicode): + s = s.decode('utf8') + + original = s + sl = list(s) + openarr = 0 + openstring = False + openstrchar = "" + multilinestr = False + arrayoftables = False + beginline = True + keygroup = False + dottedkey = False + keyname = 0 + key = '' + prev_key = '' + line_no = 1 + + for i, item in enumerate(sl): + if item == '\r' and sl[i + 1] == '\n': + sl[i] = ' ' + continue + if keyname: + key += item + if item == '\n': + raise TomlDecodeError("Key name found without value." + " Reached end of line.", original, i) + if openstring: + if item == openstrchar: + oddbackslash = False + k = 1 + while i >= k and sl[i - k] == '\\': + oddbackslash = not oddbackslash + k += 1 + if not oddbackslash: + keyname = 2 + openstring = False + openstrchar = "" + continue + elif keyname == 1: + if item.isspace(): + keyname = 2 + continue + elif item == '.': + dottedkey = True + continue + elif item.isalnum() or item == '_' or item == '-': + continue + elif (dottedkey and sl[i - 1] == '.' 
and + (item == '"' or item == "'")): + openstring = True + openstrchar = item + continue + elif keyname == 2: + if item.isspace(): + if dottedkey: + nextitem = sl[i + 1] + if not nextitem.isspace() and nextitem != '.': + keyname = 1 + continue + if item == '.': + dottedkey = True + nextitem = sl[i + 1] + if not nextitem.isspace() and nextitem != '.': + keyname = 1 + continue + if item == '=': + keyname = 0 + prev_key = key[:-1].rstrip() + key = '' + dottedkey = False + else: + raise TomlDecodeError("Found invalid character in key name: '" + + item + "'. Try quoting the key name.", + original, i) + if item == "'" and openstrchar != '"': + k = 1 + try: + while sl[i - k] == "'": + k += 1 + if k == 3: + break + except IndexError: + pass + if k == 3: + multilinestr = not multilinestr + openstring = multilinestr + else: + openstring = not openstring + if openstring: + openstrchar = "'" + else: + openstrchar = "" + if item == '"' and openstrchar != "'": + oddbackslash = False + k = 1 + tripquote = False + try: + while sl[i - k] == '"': + k += 1 + if k == 3: + tripquote = True + break + if k == 1 or (k == 3 and tripquote): + while sl[i - k] == '\\': + oddbackslash = not oddbackslash + k += 1 + except IndexError: + pass + if not oddbackslash: + if tripquote: + multilinestr = not multilinestr + openstring = multilinestr + else: + openstring = not openstring + if openstring: + openstrchar = '"' + else: + openstrchar = "" + if item == '#' and (not openstring and not keygroup and + not arrayoftables): + j = i + comment = "" + try: + while sl[j] != '\n': + comment += s[j] + sl[j] = ' ' + j += 1 + except IndexError: + break + if not openarr: + decoder.preserve_comment(line_no, prev_key, comment, beginline) + if item == '[' and (not openstring and not keygroup and + not arrayoftables): + if beginline: + if len(sl) > i + 1 and sl[i + 1] == '[': + arrayoftables = True + else: + keygroup = True + else: + openarr += 1 + if item == ']' and not openstring: + if keygroup: + keygroup = False + elif arrayoftables: + if sl[i - 1] == ']': + arrayoftables = False + else: + openarr -= 1 + if item == '\n': + if openstring or multilinestr: + if not multilinestr: + raise TomlDecodeError("Unbalanced quotes", original, i) + if ((sl[i - 1] == "'" or sl[i - 1] == '"') and ( + sl[i - 2] == sl[i - 1])): + sl[i] = sl[i - 1] + if sl[i - 3] == sl[i - 1]: + sl[i - 3] = ' ' + elif openarr: + sl[i] = ' ' + else: + beginline = True + line_no += 1 + elif beginline and sl[i] != ' ' and sl[i] != '\t': + beginline = False + if not keygroup and not arrayoftables: + if sl[i] == '=': + raise TomlDecodeError("Found empty keyname. ", original, i) + keyname = 1 + key += item + if keyname: + raise TomlDecodeError("Key name found without value." + " Reached end of file.", original, len(s)) + if openstring: # reached EOF and have an unterminated string + raise TomlDecodeError("Unterminated string found." 
+ " Reached end of file.", original, len(s)) + s = ''.join(sl) + s = s.split('\n') + multikey = None + multilinestr = "" + multibackslash = False + pos = 0 + for idx, line in enumerate(s): + if idx > 0: + pos += len(s[idx - 1]) + 1 + + decoder.embed_comments(idx, currentlevel) + + if not multilinestr or multibackslash or '\n' not in multilinestr: + line = line.strip() + if line == "" and (not multikey or multibackslash): + continue + if multikey: + if multibackslash: + multilinestr += line + else: + multilinestr += line + multibackslash = False + closed = False + if multilinestr[0] == '[': + closed = line[-1] == ']' + elif len(line) > 2: + closed = (line[-1] == multilinestr[0] and + line[-2] == multilinestr[0] and + line[-3] == multilinestr[0]) + if closed: + try: + value, vtype = decoder.load_value(multilinestr) + except ValueError as err: + raise TomlDecodeError(str(err), original, pos) + currentlevel[multikey] = value + multikey = None + multilinestr = "" + else: + k = len(multilinestr) - 1 + while k > -1 and multilinestr[k] == '\\': + multibackslash = not multibackslash + k -= 1 + if multibackslash: + multilinestr = multilinestr[:-1] + else: + multilinestr += "\n" + continue + if line[0] == '[': + arrayoftables = False + if len(line) == 1: + raise TomlDecodeError("Opening key group bracket on line by " + "itself.", original, pos) + if line[1] == '[': + arrayoftables = True + line = line[2:] + splitstr = ']]' + else: + line = line[1:] + splitstr = ']' + i = 1 + quotesplits = decoder._get_split_on_quotes(line) + quoted = False + for quotesplit in quotesplits: + if not quoted and splitstr in quotesplit: + break + i += quotesplit.count(splitstr) + quoted = not quoted + line = line.split(splitstr, i) + if len(line) < i + 1 or line[-1].strip() != "": + raise TomlDecodeError("Key group not on a line by itself.", + original, pos) + groups = splitstr.join(line[:-1]).split('.') + i = 0 + while i < len(groups): + groups[i] = groups[i].strip() + if len(groups[i]) > 0 and (groups[i][0] == '"' or + groups[i][0] == "'"): + groupstr = groups[i] + j = i + 1 + while ((not groupstr[0] == groupstr[-1]) or + len(groupstr) == 1): + j += 1 + if j > len(groups) + 2: + raise TomlDecodeError("Invalid group name '" + + groupstr + "' Something " + + "went wrong.", original, pos) + groupstr = '.'.join(groups[i:j]).strip() + groups[i] = groupstr[1:-1] + groups[i + 1:j] = [] + else: + if not _groupname_re.match(groups[i]): + raise TomlDecodeError("Invalid group name '" + + groups[i] + "'. Try quoting it.", + original, pos) + i += 1 + currentlevel = retval + for i in _range(len(groups)): + group = groups[i] + if group == "": + raise TomlDecodeError("Can't have a keygroup with an empty " + "name", original, pos) + try: + currentlevel[group] + if i == len(groups) - 1: + if group in implicitgroups: + implicitgroups.remove(group) + if arrayoftables: + raise TomlDecodeError("An implicitly defined " + "table can't be an array", + original, pos) + elif arrayoftables: + currentlevel[group].append(decoder.get_empty_table() + ) + else: + raise TomlDecodeError("What? " + group + + " already exists?" 
+ + str(currentlevel), + original, pos) + except TypeError: + currentlevel = currentlevel[-1] + if group not in currentlevel: + currentlevel[group] = decoder.get_empty_table() + if i == len(groups) - 1 and arrayoftables: + currentlevel[group] = [decoder.get_empty_table()] + except KeyError: + if i != len(groups) - 1: + implicitgroups.append(group) + currentlevel[group] = decoder.get_empty_table() + if i == len(groups) - 1 and arrayoftables: + currentlevel[group] = [decoder.get_empty_table()] + currentlevel = currentlevel[group] + if arrayoftables: + try: + currentlevel = currentlevel[-1] + except KeyError: + pass + elif line[0] == "{": + if line[-1] != "}": + raise TomlDecodeError("Line breaks are not allowed in inline" + "objects", original, pos) + try: + decoder.load_inline_object(line, currentlevel, multikey, + multibackslash) + except ValueError as err: + raise TomlDecodeError(str(err), original, pos) + elif "=" in line: + try: + ret = decoder.load_line(line, currentlevel, multikey, + multibackslash) + except ValueError as err: + raise TomlDecodeError(str(err), original, pos) + if ret is not None: + multikey, multilinestr, multibackslash = ret + return retval + + +def _load_date(val): + microsecond = 0 + tz = None + try: + if len(val) > 19: + if val[19] == '.': + if val[-1].upper() == 'Z': + subsecondval = val[20:-1] + tzval = "Z" + else: + subsecondvalandtz = val[20:] + if '+' in subsecondvalandtz: + splitpoint = subsecondvalandtz.index('+') + subsecondval = subsecondvalandtz[:splitpoint] + tzval = subsecondvalandtz[splitpoint:] + elif '-' in subsecondvalandtz: + splitpoint = subsecondvalandtz.index('-') + subsecondval = subsecondvalandtz[:splitpoint] + tzval = subsecondvalandtz[splitpoint:] + else: + tzval = None + subsecondval = subsecondvalandtz + if tzval is not None: + tz = TomlTz(tzval) + microsecond = int(int(subsecondval) * + (10 ** (6 - len(subsecondval)))) + else: + tz = TomlTz(val[19:]) + except ValueError: + tz = None + if "-" not in val[1:]: + return None + try: + if len(val) == 10: + d = datetime.date( + int(val[:4]), int(val[5:7]), + int(val[8:10])) + else: + d = datetime.datetime( + int(val[:4]), int(val[5:7]), + int(val[8:10]), int(val[11:13]), + int(val[14:16]), int(val[17:19]), microsecond, tz) + except ValueError: + return None + return d + + +def _load_unicode_escapes(v, hexbytes, prefix): + skip = False + i = len(v) - 1 + while i > -1 and v[i] == '\\': + skip = not skip + i -= 1 + for hx in hexbytes: + if skip: + skip = False + i = len(hx) - 1 + while i > -1 and hx[i] == '\\': + skip = not skip + i -= 1 + v += prefix + v += hx + continue + hxb = "" + i = 0 + hxblen = 4 + if prefix == "\\U": + hxblen = 8 + hxb = ''.join(hx[i:i + hxblen]).lower() + if hxb.strip('0123456789abcdef'): + raise ValueError("Invalid escape sequence: " + hxb) + if hxb[0] == "d" and hxb[1].strip('01234567'): + raise ValueError("Invalid escape sequence: " + hxb + + ". Only scalar unicode points are allowed.") + v += unichr(int(hxb, 16)) + v += unicode(hx[len(hxb):]) + return v + + +# Unescape TOML string values. 
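[Editor's aside, not part of the vendored decoder.py] The escape tables defined just below pair each escape letter with the character it stands for, so _unescape can resolve a sequence with a plain dict lookup. A minimal round-trip, assuming the vendored toml 0.10.2 is importable, shows the effect:

    import toml

    # The decoder maps the two-character sequence \t inside a basic string
    # to a real tab via _escape_to_escapedchars.
    doc = 's = "col1\\tcol2"'
    assert toml.loads(doc)['s'] == 'col1\tcol2'
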
+ +# content after the \ +_escapes = ['0', 'b', 'f', 'n', 'r', 't', '"'] +# What it should be replaced by +_escapedchars = ['\0', '\b', '\f', '\n', '\r', '\t', '\"'] +# Used for substitution +_escape_to_escapedchars = dict(zip(_escapes, _escapedchars)) + + +def _unescape(v): + """Unescape characters in a TOML string.""" + i = 0 + backslash = False + while i < len(v): + if backslash: + backslash = False + if v[i] in _escapes: + v = v[:i - 1] + _escape_to_escapedchars[v[i]] + v[i + 1:] + elif v[i] == '\\': + v = v[:i - 1] + v[i:] + elif v[i] == 'u' or v[i] == 'U': + i += 1 + else: + raise ValueError("Reserved escape sequence used") + continue + elif v[i] == '\\': + backslash = True + i += 1 + return v + + +class InlineTableDict(object): + """Sentinel subclass of dict for inline tables.""" + + +class TomlDecoder(object): + + def __init__(self, _dict=dict): + self._dict = _dict + + def get_empty_table(self): + return self._dict() + + def get_empty_inline_table(self): + class DynamicInlineTableDict(self._dict, InlineTableDict): + """Concrete sentinel subclass for inline tables. + It is a subclass of _dict which is passed in dynamically at load + time + + It is also a subclass of InlineTableDict + """ + + return DynamicInlineTableDict() + + def load_inline_object(self, line, currentlevel, multikey=False, + multibackslash=False): + candidate_groups = line[1:-1].split(",") + groups = [] + if len(candidate_groups) == 1 and not candidate_groups[0].strip(): + candidate_groups.pop() + while len(candidate_groups) > 0: + candidate_group = candidate_groups.pop(0) + try: + _, value = candidate_group.split('=', 1) + except ValueError: + raise ValueError("Invalid inline table encountered") + value = value.strip() + if ((value[0] == value[-1] and value[0] in ('"', "'")) or ( + value[0] in '-0123456789' or + value in ('true', 'false') or + (value[0] == "[" and value[-1] == "]") or + (value[0] == '{' and value[-1] == '}'))): + groups.append(candidate_group) + elif len(candidate_groups) > 0: + candidate_groups[0] = (candidate_group + "," + + candidate_groups[0]) + else: + raise ValueError("Invalid inline table value encountered") + for group in groups: + status = self.load_line(group, currentlevel, multikey, + multibackslash) + if status is not None: + break + + def _get_split_on_quotes(self, line): + doublequotesplits = line.split('"') + quoted = False + quotesplits = [] + if len(doublequotesplits) > 1 and "'" in doublequotesplits[0]: + singlequotesplits = doublequotesplits[0].split("'") + doublequotesplits = doublequotesplits[1:] + while len(singlequotesplits) % 2 == 0 and len(doublequotesplits): + singlequotesplits[-1] += '"' + doublequotesplits[0] + doublequotesplits = doublequotesplits[1:] + if "'" in singlequotesplits[-1]: + singlequotesplits = (singlequotesplits[:-1] + + singlequotesplits[-1].split("'")) + quotesplits += singlequotesplits + for doublequotesplit in doublequotesplits: + if quoted: + quotesplits.append(doublequotesplit) + else: + quotesplits += doublequotesplit.split("'") + quoted = not quoted + return quotesplits + + def load_line(self, line, currentlevel, multikey, multibackslash): + i = 1 + quotesplits = self._get_split_on_quotes(line) + quoted = False + for quotesplit in quotesplits: + if not quoted and '=' in quotesplit: + break + i += quotesplit.count('=') + quoted = not quoted + pair = line.split('=', i) + strictly_valid = _strictly_valid_num(pair[-1]) + if _number_with_underscores.match(pair[-1]): + pair[-1] = pair[-1].replace('_', '') + while len(pair[-1]) and (pair[-1][0] != ' ' 
and pair[-1][0] != '\t' and + pair[-1][0] != "'" and pair[-1][0] != '"' and + pair[-1][0] != '[' and pair[-1][0] != '{' and + pair[-1].strip() != 'true' and + pair[-1].strip() != 'false'): + try: + float(pair[-1]) + break + except ValueError: + pass + if _load_date(pair[-1]) is not None: + break + if TIME_RE.match(pair[-1]): + break + i += 1 + prev_val = pair[-1] + pair = line.split('=', i) + if prev_val == pair[-1]: + raise ValueError("Invalid date or number") + if strictly_valid: + strictly_valid = _strictly_valid_num(pair[-1]) + pair = ['='.join(pair[:-1]).strip(), pair[-1].strip()] + if '.' in pair[0]: + if '"' in pair[0] or "'" in pair[0]: + quotesplits = self._get_split_on_quotes(pair[0]) + quoted = False + levels = [] + for quotesplit in quotesplits: + if quoted: + levels.append(quotesplit) + else: + levels += [level.strip() for level in + quotesplit.split('.')] + quoted = not quoted + else: + levels = pair[0].split('.') + while levels[-1] == "": + levels = levels[:-1] + for level in levels[:-1]: + if level == "": + continue + if level not in currentlevel: + currentlevel[level] = self.get_empty_table() + currentlevel = currentlevel[level] + pair[0] = levels[-1].strip() + elif (pair[0][0] == '"' or pair[0][0] == "'") and \ + (pair[0][-1] == pair[0][0]): + pair[0] = _unescape(pair[0][1:-1]) + k, koffset = self._load_line_multiline_str(pair[1]) + if k > -1: + while k > -1 and pair[1][k + koffset] == '\\': + multibackslash = not multibackslash + k -= 1 + if multibackslash: + multilinestr = pair[1][:-1] + else: + multilinestr = pair[1] + "\n" + multikey = pair[0] + else: + value, vtype = self.load_value(pair[1], strictly_valid) + try: + currentlevel[pair[0]] + raise ValueError("Duplicate keys!") + except TypeError: + raise ValueError("Duplicate keys!") + except KeyError: + if multikey: + return multikey, multilinestr, multibackslash + else: + currentlevel[pair[0]] = value + + def _load_line_multiline_str(self, p): + poffset = 0 + if len(p) < 3: + return -1, poffset + if p[0] == '[' and (p.strip()[-1] != ']' and + self._load_array_isstrarray(p)): + newp = p[1:].strip().split(',') + while len(newp) > 1 and newp[-1][0] != '"' and newp[-1][0] != "'": + newp = newp[:-2] + [newp[-2] + ',' + newp[-1]] + newp = newp[-1] + poffset = len(p) - len(newp) + p = newp + if p[0] != '"' and p[0] != "'": + return -1, poffset + if p[1] != p[0] or p[2] != p[0]: + return -1, poffset + if len(p) > 5 and p[-1] == p[0] and p[-2] == p[0] and p[-3] == p[0]: + return -1, poffset + return len(p) - 1, poffset + + def load_value(self, v, strictly_valid=True): + if not v: + raise ValueError("Empty value is invalid") + if v == 'true': + return (True, "bool") + elif v.lower() == 'true': + raise ValueError("Only all lowercase booleans allowed") + elif v == 'false': + return (False, "bool") + elif v.lower() == 'false': + raise ValueError("Only all lowercase booleans allowed") + elif v[0] == '"' or v[0] == "'": + quotechar = v[0] + testv = v[1:].split(quotechar) + triplequote = False + triplequotecount = 0 + if len(testv) > 1 and testv[0] == '' and testv[1] == '': + testv = testv[2:] + triplequote = True + closed = False + for tv in testv: + if tv == '': + if triplequote: + triplequotecount += 1 + else: + closed = True + else: + oddbackslash = False + try: + i = -1 + j = tv[i] + while j == '\\': + oddbackslash = not oddbackslash + i -= 1 + j = tv[i] + except IndexError: + pass + if not oddbackslash: + if closed: + raise ValueError("Found tokens after a closed " + + "string. 
Invalid TOML.") + else: + if not triplequote or triplequotecount > 1: + closed = True + else: + triplequotecount = 0 + if quotechar == '"': + escapeseqs = v.split('\\')[1:] + backslash = False + for i in escapeseqs: + if i == '': + backslash = not backslash + else: + if i[0] not in _escapes and (i[0] != 'u' and + i[0] != 'U' and + not backslash): + raise ValueError("Reserved escape sequence used") + if backslash: + backslash = False + for prefix in ["\\u", "\\U"]: + if prefix in v: + hexbytes = v.split(prefix) + v = _load_unicode_escapes(hexbytes[0], hexbytes[1:], + prefix) + v = _unescape(v) + if len(v) > 1 and v[1] == quotechar and (len(v) < 3 or + v[1] == v[2]): + v = v[2:-2] + return (v[1:-1], "str") + elif v[0] == '[': + return (self.load_array(v), "array") + elif v[0] == '{': + inline_object = self.get_empty_inline_table() + self.load_inline_object(v, inline_object) + return (inline_object, "inline_object") + elif TIME_RE.match(v): + h, m, s, _, ms = TIME_RE.match(v).groups() + time = datetime.time(int(h), int(m), int(s), int(ms) if ms else 0) + return (time, "time") + else: + parsed_date = _load_date(v) + if parsed_date is not None: + return (parsed_date, "date") + if not strictly_valid: + raise ValueError("Weirdness with leading zeroes or " + "underscores in your number.") + itype = "int" + neg = False + if v[0] == '-': + neg = True + v = v[1:] + elif v[0] == '+': + v = v[1:] + v = v.replace('_', '') + lowerv = v.lower() + if '.' in v or ('x' not in v and ('e' in v or 'E' in v)): + if '.' in v and v.split('.', 1)[1] == '': + raise ValueError("This float is missing digits after " + "the point") + if v[0] not in '0123456789': + raise ValueError("This float doesn't have a leading " + "digit") + v = float(v) + itype = "float" + elif len(lowerv) == 3 and (lowerv == 'inf' or lowerv == 'nan'): + v = float(v) + itype = "float" + if itype == "int": + v = int(v, 0) + if neg: + return (0 - v, itype) + return (v, itype) + + def bounded_string(self, s): + if len(s) == 0: + return True + if s[-1] != s[0]: + return False + i = -2 + backslash = False + while len(s) + i > 0: + if s[i] == "\\": + backslash = not backslash + i -= 1 + else: + break + return not backslash + + def _load_array_isstrarray(self, a): + a = a[1:-1].strip() + if a != '' and (a[0] == '"' or a[0] == "'"): + return True + return False + + def load_array(self, a): + atype = None + retval = [] + a = a.strip() + if '[' not in a[1:-1] or "" != a[1:-1].split('[')[0].strip(): + strarray = self._load_array_isstrarray(a) + if not a[1:-1].strip().startswith('{'): + a = a[1:-1].split(',') + else: + # a is an inline object, we must find the matching parenthesis + # to define groups + new_a = [] + start_group_index = 1 + end_group_index = 2 + open_bracket_count = 1 if a[start_group_index] == '{' else 0 + in_str = False + while end_group_index < len(a[1:]): + if a[end_group_index] == '"' or a[end_group_index] == "'": + if in_str: + backslash_index = end_group_index - 1 + while (backslash_index > -1 and + a[backslash_index] == '\\'): + in_str = not in_str + backslash_index -= 1 + in_str = not in_str + if not in_str and a[end_group_index] == '{': + open_bracket_count += 1 + if in_str or a[end_group_index] != '}': + end_group_index += 1 + continue + elif a[end_group_index] == '}' and open_bracket_count > 1: + open_bracket_count -= 1 + end_group_index += 1 + continue + + # Increase end_group_index by 1 to get the closing bracket + end_group_index += 1 + + new_a.append(a[start_group_index:end_group_index]) + + # The next start index is at least 
after the closing + # bracket, a closing bracket can be followed by a comma + # since we are in an array. + start_group_index = end_group_index + 1 + while (start_group_index < len(a[1:]) and + a[start_group_index] != '{'): + start_group_index += 1 + end_group_index = start_group_index + 1 + a = new_a + b = 0 + if strarray: + while b < len(a) - 1: + ab = a[b].strip() + while (not self.bounded_string(ab) or + (len(ab) > 2 and + ab[0] == ab[1] == ab[2] and + ab[-2] != ab[0] and + ab[-3] != ab[0])): + a[b] = a[b] + ',' + a[b + 1] + ab = a[b].strip() + if b < len(a) - 2: + a = a[:b + 1] + a[b + 2:] + else: + a = a[:b + 1] + b += 1 + else: + al = list(a[1:-1]) + a = [] + openarr = 0 + j = 0 + for i in _range(len(al)): + if al[i] == '[': + openarr += 1 + elif al[i] == ']': + openarr -= 1 + elif al[i] == ',' and not openarr: + a.append(''.join(al[j:i])) + j = i + 1 + a.append(''.join(al[j:])) + for i in _range(len(a)): + a[i] = a[i].strip() + if a[i] != '': + nval, ntype = self.load_value(a[i]) + if atype: + if ntype != atype: + raise ValueError("Not a homogeneous array") + else: + atype = ntype + retval.append(nval) + return retval + + def preserve_comment(self, line_no, key, comment, beginline): + pass + + def embed_comments(self, idx, currentlevel): + pass + + +class TomlPreserveCommentDecoder(TomlDecoder): + + def __init__(self, _dict=dict): + self.saved_comments = {} + super(TomlPreserveCommentDecoder, self).__init__(_dict) + + def preserve_comment(self, line_no, key, comment, beginline): + self.saved_comments[line_no] = (key, comment, beginline) + + def embed_comments(self, idx, currentlevel): + if idx not in self.saved_comments: + return + + key, comment, beginline = self.saved_comments[idx] + currentlevel[key] = CommentValue(currentlevel[key], comment, beginline, + self._dict) diff --git a/venv3/lib/python3.7/site-packages/toml/encoder.py b/venv3/lib/python3.7/site-packages/toml/encoder.py new file mode 100644 index 0000000..bf17a72 --- /dev/null +++ b/venv3/lib/python3.7/site-packages/toml/encoder.py @@ -0,0 +1,304 @@ +import datetime +import re +import sys +from decimal import Decimal + +from toml.decoder import InlineTableDict + +if sys.version_info >= (3,): + unicode = str + + +def dump(o, f, encoder=None): + """Writes out dict as toml to a file + + Args: + o: Object to dump into toml + f: File descriptor where the toml should be stored + encoder: The ``TomlEncoder`` to use for constructing the output string + + Returns: + String containing the toml corresponding to dictionary + + Raises: + TypeError: When anything other than file descriptor is passed + """ + + if not f.write: + raise TypeError("You can only dump an object to a file descriptor") + d = dumps(o, encoder=encoder) + f.write(d) + return d + + +def dumps(o, encoder=None): + """Stringifies input dict as toml + + Args: + o: Object to dump into toml + encoder: The ``TomlEncoder`` to use for constructing the output string + + Returns: + String containing the toml corresponding to dict + + Examples: + ```python + >>> import toml + >>> output = { + ... 'a': "I'm a string", + ... 'b': ["I'm", "a", "list"], + ... 'c': 2400 + ... 
} + >>> toml.dumps(output) + 'a = "I\'m a string"\nb = [ "I\'m", "a", "list",]\nc = 2400\n' + ``` + """ + + retval = "" + if encoder is None: + encoder = TomlEncoder(o.__class__) + addtoretval, sections = encoder.dump_sections(o, "") + retval += addtoretval + outer_objs = [id(o)] + while sections: + section_ids = [id(section) for section in sections.values()] + for outer_obj in outer_objs: + if outer_obj in section_ids: + raise ValueError("Circular reference detected") + outer_objs += section_ids + newsections = encoder.get_empty_table() + for section in sections: + addtoretval, addtosections = encoder.dump_sections( + sections[section], section) + + if addtoretval or (not addtoretval and not addtosections): + if retval and retval[-2:] != "\n\n": + retval += "\n" + retval += "[" + section + "]\n" + if addtoretval: + retval += addtoretval + for s in addtosections: + newsections[section + "." + s] = addtosections[s] + sections = newsections + return retval + + +def _dump_str(v): + if sys.version_info < (3,) and hasattr(v, 'decode') and isinstance(v, str): + v = v.decode('utf-8') + v = "%r" % v + if v[0] == 'u': + v = v[1:] + singlequote = v.startswith("'") + if singlequote or v.startswith('"'): + v = v[1:-1] + if singlequote: + v = v.replace("\\'", "'") + v = v.replace('"', '\\"') + v = v.split("\\x") + while len(v) > 1: + i = -1 + if not v[0]: + v = v[1:] + v[0] = v[0].replace("\\\\", "\\") + # No, I don't know why != works and == breaks + joinx = v[0][i] != "\\" + while v[0][:i] and v[0][i] == "\\": + joinx = not joinx + i -= 1 + if joinx: + joiner = "x" + else: + joiner = "u00" + v = [v[0] + joiner + v[1]] + v[2:] + return unicode('"' + v[0] + '"') + + +def _dump_float(v): + return "{}".format(v).replace("e+0", "e+").replace("e-0", "e-") + + +def _dump_time(v): + utcoffset = v.utcoffset() + if utcoffset is None: + return v.isoformat() + # The TOML norm specifies that it's local time thus we drop the offset + return v.isoformat()[:-6] + + +class TomlEncoder(object): + + def __init__(self, _dict=dict, preserve=False): + self._dict = _dict + self.preserve = preserve + self.dump_funcs = { + str: _dump_str, + unicode: _dump_str, + list: self.dump_list, + bool: lambda v: unicode(v).lower(), + int: lambda v: v, + float: _dump_float, + Decimal: _dump_float, + datetime.datetime: lambda v: v.isoformat().replace('+00:00', 'Z'), + datetime.time: _dump_time, + datetime.date: lambda v: v.isoformat() + } + + def get_empty_table(self): + return self._dict() + + def dump_list(self, v): + retval = "[" + for u in v: + retval += " " + unicode(self.dump_value(u)) + "," + retval += "]" + return retval + + def dump_inline_table(self, section): + """Preserve inline table in its compact syntax instead of expanding + into subsection. + + https://github.com/toml-lang/toml#user-content-inline-table + """ + retval = "" + if isinstance(section, dict): + val_list = [] + for k, v in section.items(): + val = self.dump_inline_table(v) + val_list.append(k + " = " + val) + retval += "{ " + ", ".join(val_list) + " }\n" + return retval + else: + return unicode(self.dump_value(section)) + + def dump_value(self, v): + # Lookup function corresponding to v's type + dump_fn = self.dump_funcs.get(type(v)) + if dump_fn is None and hasattr(v, '__iter__'): + dump_fn = self.dump_funcs[list] + # Evaluate function (if it exists) else return v + return dump_fn(v) if dump_fn is not None else self.dump_funcs[str](v) + + def dump_sections(self, o, sup): + retstr = "" + if sup != "" and sup[-1] != ".": + sup += '.' 
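[Editor's aside, not part of the vendored encoder.py] dump_sections, which continues below, recurses through nested dicts and defers sub-dicts into retdict so that each level is emitted later under a dotted [parent.child] header. A small sketch of the observable behaviour, assuming toml is importable:

    import toml

    # Intermediate tables with no scalar keys of their own are skipped,
    # so only the leaf header [server.alpha] is printed.
    data = {'server': {'alpha': {'ip': '10.0.0.1'}}}
    print(toml.dumps(data))
    # [server.alpha]
    # ip = "10.0.0.1"
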
+ retdict = self._dict() + arraystr = "" + for section in o: + section = unicode(section) + qsection = section + if not re.match(r'^[A-Za-z0-9_-]+$', section): + qsection = _dump_str(section) + if not isinstance(o[section], dict): + arrayoftables = False + if isinstance(o[section], list): + for a in o[section]: + if isinstance(a, dict): + arrayoftables = True + if arrayoftables: + for a in o[section]: + arraytabstr = "\n" + arraystr += "[[" + sup + qsection + "]]\n" + s, d = self.dump_sections(a, sup + qsection) + if s: + if s[0] == "[": + arraytabstr += s + else: + arraystr += s + while d: + newd = self._dict() + for dsec in d: + s1, d1 = self.dump_sections(d[dsec], sup + + qsection + "." + + dsec) + if s1: + arraytabstr += ("[" + sup + qsection + + "." + dsec + "]\n") + arraytabstr += s1 + for s1 in d1: + newd[dsec + "." + s1] = d1[s1] + d = newd + arraystr += arraytabstr + else: + if o[section] is not None: + retstr += (qsection + " = " + + unicode(self.dump_value(o[section])) + '\n') + elif self.preserve and isinstance(o[section], InlineTableDict): + retstr += (qsection + " = " + + self.dump_inline_table(o[section])) + else: + retdict[qsection] = o[section] + retstr += arraystr + return (retstr, retdict) + + +class TomlPreserveInlineDictEncoder(TomlEncoder): + + def __init__(self, _dict=dict): + super(TomlPreserveInlineDictEncoder, self).__init__(_dict, True) + + +class TomlArraySeparatorEncoder(TomlEncoder): + + def __init__(self, _dict=dict, preserve=False, separator=","): + super(TomlArraySeparatorEncoder, self).__init__(_dict, preserve) + if separator.strip() == "": + separator = "," + separator + elif separator.strip(' \t\n\r,'): + raise ValueError("Invalid separator for arrays") + self.separator = separator + + def dump_list(self, v): + t = [] + retval = "[" + for u in v: + t.append(self.dump_value(u)) + while t != []: + s = [] + for u in t: + if isinstance(u, list): + for r in u: + s.append(r) + else: + retval += " " + unicode(u) + self.separator + t = s + retval += "]" + return retval + + +class TomlNumpyEncoder(TomlEncoder): + + def __init__(self, _dict=dict, preserve=False): + import numpy as np + super(TomlNumpyEncoder, self).__init__(_dict, preserve) + self.dump_funcs[np.float16] = _dump_float + self.dump_funcs[np.float32] = _dump_float + self.dump_funcs[np.float64] = _dump_float + self.dump_funcs[np.int16] = self._dump_int + self.dump_funcs[np.int32] = self._dump_int + self.dump_funcs[np.int64] = self._dump_int + + def _dump_int(self, v): + return "{}".format(int(v)) + + +class TomlPreserveCommentEncoder(TomlEncoder): + + def __init__(self, _dict=dict, preserve=False): + from toml.decoder import CommentValue + super(TomlPreserveCommentEncoder, self).__init__(_dict, preserve) + self.dump_funcs[CommentValue] = lambda v: v.dump(self.dump_value) + + +class TomlPathlibEncoder(TomlEncoder): + + def _dump_pathlib_path(self, v): + return _dump_str(str(v)) + + def dump_value(self, v): + if (3, 4) <= sys.version_info: + import pathlib + if isinstance(v, pathlib.PurePath): + v = str(v) + return super(TomlPathlibEncoder, self).dump_value(v) diff --git a/venv3/lib/python3.7/site-packages/toml/ordered.py b/venv3/lib/python3.7/site-packages/toml/ordered.py new file mode 100644 index 0000000..9c20c41 --- /dev/null +++ b/venv3/lib/python3.7/site-packages/toml/ordered.py @@ -0,0 +1,15 @@ +from collections import OrderedDict +from toml import TomlEncoder +from toml import TomlDecoder + + +class TomlOrderedDecoder(TomlDecoder): + + def __init__(self): + super(self.__class__, 
self).__init__(_dict=OrderedDict) + + +class TomlOrderedEncoder(TomlEncoder): + + def __init__(self): + super(self.__class__, self).__init__(_dict=OrderedDict) diff --git a/venv3/lib/python3.7/site-packages/toml/tz.py b/venv3/lib/python3.7/site-packages/toml/tz.py new file mode 100644 index 0000000..bf20593 --- /dev/null +++ b/venv3/lib/python3.7/site-packages/toml/tz.py @@ -0,0 +1,24 @@ +from datetime import tzinfo, timedelta + + +class TomlTz(tzinfo): + def __init__(self, toml_offset): + if toml_offset == "Z": + self._raw_offset = "+00:00" + else: + self._raw_offset = toml_offset + self._sign = -1 if self._raw_offset[0] == '-' else 1 + self._hours = int(self._raw_offset[1:3]) + self._minutes = int(self._raw_offset[4:6]) + + def __deepcopy__(self, memo): + return self.__class__(self._raw_offset) + + def tzname(self, dt): + return "UTC" + self._raw_offset + + def utcoffset(self, dt): + return self._sign * timedelta(hours=self._hours, minutes=self._minutes) + + def dst(self, dt): + return timedelta(0) diff --git a/venv3/pip-selfcheck.json b/venv3/pip-selfcheck.json index 447f950..4f6b8e7 100644 --- a/venv3/pip-selfcheck.json +++ b/venv3/pip-selfcheck.json @@ -1 +1 @@ -{"last_check":"2021-03-03T22:25:46Z","pypi_version":"21.0.1"} \ No newline at end of file +{"last_check":"2021-03-14T08:43:16Z","pypi_version":"21.0.1"} \ No newline at end of file
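
[Editor's aside, not part of the patch] Tying together the toml package files added above: decoder.loads parses an offset datetime through TomlTz, and encoder.dumps writes it back out in RFC 3339 form. A minimal sketch, assuming the vendored toml 0.10.2 is on the path:

    import toml

    doc = 'dob = 1979-05-27T07:32:00-08:00'
    dob = toml.loads(doc)['dob']

    # TomlTz turns the '-08:00' suffix into a fixed-offset tzinfo.
    print(dob.tzname())     # UTC-08:00
    print(dob.utcoffset())  # -1 day, 16:00:00, i.e. minus eight hours

    # dumps() serialises the aware datetime back to its TOML form.
    print(toml.dumps({'dob': dob}))  # dob = 1979-05-27T07:32:00-08:00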
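
[Editor's aside, not part of the patch] The pycodestyle.py file vendored earlier in this patch is usually driven through its public StyleGuide API rather than _main(); process_options() and read_config() then merge defaults, user and local config files, and keyword overrides, in that order. A hedged sketch, where 'example.py' is a hypothetical target file:

    import pycodestyle

    # Keyword arguments take precedence over the merged config files.
    style = pycodestyle.StyleGuide(ignore=['E24', 'W503'], max_line_length=100)
    report = style.check_files(['example.py'])
    print(report.total_errors)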