diff --git a/admin.py b/admin.py
index 0c438da7..8be3d978 100644
--- a/admin.py
+++ b/admin.py
@@ -1,6 +1,6 @@
"""Renders admin pages for ops and other management tasks.
-Currently just /admin/responses, which shows active responses with tasks that
+Currently just ``/admin/responses``, which shows active responses with tasks that
haven't completed yet.
"""
import datetime
diff --git a/blog_webmention.py b/blog_webmention.py
index cd7d9bd5..7b0bd48b 100644
--- a/blog_webmention.py
+++ b/blog_webmention.py
@@ -1,5 +1,4 @@
-"""Converts webmentions to comments on Blogger, Tumblr, and WP.com.
-"""
+"""Converts webmentions to comments on Blogger, Tumblr, and WP.com."""
import logging
import urllib.parse
@@ -158,10 +157,10 @@ def find_mention_item(self, items):
content.value.
Args:
- items: sequence of mf2 item dicts
+ items (list of dict): mf2 items
Returns:
- mf2 item dict or None
+ dict: mf2 item, or None
"""
# find target URL in source
for item in items:
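
For context, the scan ``find_mention_item`` performs can be sketched roughly
like this; the ``target`` parameter and the exact properties checked are
illustrative assumptions, not Bridgy's precise logic::

    def find_mention_item(items, target):
        """Sketch: return the first mf2 item that links to target."""
        for item in items:
            props = item.get('properties', {})
            for field in ('in-reply-to', 'like-of', 'repost-of', 'url'):
                for val in props.get(field, []):
                    url = val.get('value') if isinstance(val, dict) else val
                    if url == target:
                        return item
        return None
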
diff --git a/blogger.py b/blogger.py
index 41dd49d1..253d652f 100644
--- a/blogger.py
+++ b/blogger.py
@@ -7,18 +7,17 @@
http://gdata-python-client.googlecode.com/hg/pydocs/gdata.blogger.data.html
To use, go to your Blogger blog's dashboard, click Template, Edit HTML, then
-put this in the head section:
+put this in the head section::
-<link rel="webmention" href="https://brid.gy/webmention/blogger">
+  <link rel="webmention" href="https://brid.gy/webmention/blogger">
-https://developers.google.com/blogger/docs/2.0/developers_guide_protocol
-https://support.google.com/blogger/answer/42064?hl=en
-create comment:
-https://developers.google.com/blogger/docs/2.0/developers_guide_protocol#CreatingComments
+* https://developers.google.com/blogger/docs/2.0/developers_guide_protocol
+* https://support.google.com/blogger/answer/42064?hl=en
+* https://developers.google.com/blogger/docs/2.0/developers_guide_protocol#CreatingComments
+
+Test command line::
-test command line:
-curl localhost:8080/webmention/blogger \
- -d 'source=http://localhost/response.html&target=http://freedom-io-2.blogspot.com/2014/04/blog-post.html'
+ curl localhost:8080/webmention/blogger -d 'source=http://localhost/response.html&target=http://freedom-io-2.blogspot.com/2014/04/blog-post.html'
"""
import collections
import logging
@@ -70,8 +69,9 @@ def new(auth_entity=None, blog_id=None, **kwargs):
"""Creates and returns a Blogger for the logged in user.
Args:
- auth_entity: :class:`oauth_dropins.blogger.BloggerV2Auth`
- blog_id: which blog. optional. if not provided, uses the first available.
+ auth_entity (oauth_dropins.blogger.BloggerV2Auth):
+ blog_id (str): which blog, optional; if not provided, uses the first
+ available
"""
urls, domains = Blogger.urls_and_domains(auth_entity, blog_id=blog_id)
if not urls or not domains:
@@ -100,11 +100,11 @@ def urls_and_domains(auth_entity, blog_id=None):
"""Returns an auth entity's URL and domain.
Args:
- auth_entity: oauth_dropins.blogger.BloggerV2Auth
+ auth_entity (oauth_dropins.blogger.BloggerV2Auth):
blog_id: which blog. optional. if not provided, uses the first available.
Returns:
- ([string url], [string domain])
+ ([str url], [str domain])
"""
for id, host in zip(auth_entity.blog_ids, auth_entity.blog_hostnames):
if blog_id == id or (not blog_id and host):
@@ -118,16 +118,16 @@ def create_comment(self, post_url, author_name, author_url, content, client=None
Must be implemented by subclasses.
Args:
- post_url: string
- author_name: string
- author_url: string
- content: string
- client: :class:`gdata.blogger.client.BloggerClient`. If None, one will be
+ post_url (str)
+ author_name (str)
+ author_url (str)
+ content (str)
+ client (gdata.blogger.client.BloggerClient): If None, one will be
created from auth_entity. Used for dependency injection in the unit
test.
Returns:
- JSON response dict with 'id' and other fields
+ dict: JSON response with ``id`` and other fields
"""
if client is None:
client = self.auth_entity.get().api()
diff --git a/browser.py b/browser.py
index 2b87b021..b55ebc92 100644
--- a/browser.py
+++ b/browser.py
@@ -1,5 +1,4 @@
-"""Browser extension views.
-"""
+"""Browser extension views."""
import copy
import logging
from operator import itemgetter
@@ -31,10 +30,11 @@ def merge_by_id(existing, updates):
with the same id. Requires all objects to have ids.
Args:
- existing: sequence of AS1 dicts
- updates: sequence of AS1 dicts
+ existing (list of dict): AS1 objects
+ updates (list of dict): AS1 objects
- Returns: merged list of AS1 dicts
+ Returns:
+ list of dict: merged objects
"""
objs = {o['id']: o for o in existing}
objs.update({o['id']: o for o in updates})
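
A quick illustration of the merge semantics, with hypothetical objects;
updates win on id collisions::

    existing = [{'id': '1', 'content': 'old'}, {'id': '2', 'content': 'keep'}]
    updates = [{'id': '1', 'content': 'new'}]
    merged = merge_by_id(existing, updates)
    assert {o['id']: o['content'] for o in merged} == {'1': 'new', '2': 'keep'}
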
@@ -62,9 +62,10 @@ def key_id_from_actor(cls, actor):
To be implemented by subclasses.
Args:
- actor: dict AS1 actor
+ actor (dict): AS1 actor
- Returns: str, key id to use for the corresponding datastore entity
+ Returns:
+ str: key id to use for the corresponding datastore entity
"""
raise NotImplementedError()
@@ -74,7 +75,7 @@ def new(cls, auth_entity=None, actor=None, **kwargs):
Args:
auth_entity: unused
- actor: dict AS1 actor
+ actor (dict): AS1 actor
"""
assert not auth_entity
assert actor
@@ -151,10 +152,9 @@ def gr_source(self):
def check_token(self, load_source=True):
"""Loads the token and checks that it has at least one domain registered.
- Expects token in the `token` query param.
+ Expects token in the ``token`` query param.
- Raises: :class:`HTTPException` with HTTP 403 if the token is missing or
- invalid
+    Raises:
+      HTTPException: HTTP 403 if the token is missing or invalid
"""
token = request.values['token']
domains = Domain.query(Domain.tokens == token).fetch()
@@ -165,7 +165,7 @@ def check_token(self, load_source=True):
def auth(self):
"""Checks token and loads and returns the source.
- Raises: :class:`HTTPException` with HTTP 400 or 403
+    Raises:
+      HTTPException: HTTP 400 or 403
"""
self.check_token()
return util.load_source(error_fn=self.error)
@@ -181,8 +181,8 @@ class Status(BrowserView):
"""Runs preflight checks for a source and returns status and config info.
Response body is a JSON map with these fields:
- status: string, 'enabled' or 'disabled'
- poll-seconds: integer, current poll frequency for this source in seconds
+ status (str): ``enabled`` or ``disabled``
+ poll-seconds (int): current poll frequency for this source in seconds
"""
def dispatch_request(self):
source = self.auth()
@@ -323,13 +323,13 @@ def update_activity():
class Extras(BrowserView):
"""Merges extras (comments, reactions) from silo HTML into an existing Activity.
- Requires the request parameter `id` with the silo post's id (not shortcode!).
+ Requires the request parameter ``id`` with the silo post's id (not shortcode!).
Response body is the translated ActivityStreams JSON for the extras.
- Subclasses must populate the MERGE_METHOD constant with the string name of the
- granary source class's method that parses extras from silo HTML and merges
- them into an activity.
+ Subclasses must populate the :attr:`MERGE_METHOD` constant with the string
+ name of the granary source class's method that parses extras from silo HTML
+ and merges them into an activity.
"""
MERGE_METHOD = None
diff --git a/cron.py b/cron.py
index ae888ac7..aee817af 100644
--- a/cron.py
+++ b/cron.py
@@ -29,7 +29,7 @@
class LastUpdatedPicture(StringIdModel):
"""Stores the last user in a given silo that we updated profile picture for.
- Key id is the silo's SHORT_NAME.
+ Key id is the silo's ``SHORT_NAME``.
"""
last = ndb.KeyProperty()
created = ndb.DateTimeProperty(auto_now_add=True, required=True, tzinfo=timezone.utc)
diff --git a/docs/conf.py b/docs/conf.py
index 11a5f8ce..5822e64c 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -1,5 +1,3 @@
-# -*- coding: utf-8 -*-
-#
# Sphinx build configuration file.
#
# This file is execfile()d with the current directory set to its
@@ -80,7 +78,7 @@
# General information about the project.
project = 'Bridgy'
-copyright = '2011-2022, Ryan Barrett'
+copyright = '2011-2023, Ryan Barrett'
author = 'Ryan Barrett'
# The version info for the project you're documenting, acts as replacement for
diff --git a/docs/index.rst b/docs/index.rst
index 2ba6aa97..47000d78 100644
--- a/docs/index.rst
+++ b/docs/index.rst
@@ -434,7 +434,9 @@
3. `Import it into
BigQuery `__:
@@ -456,13 +458,14 @@
Open the existing ``Response`` table in BigQuery:
https://console.cloud.google.com/bigquery?project=brid-gy&ws=%211m10%211m4%214m3%211sbrid-gy%212sdatastore%213sResponse%211m4%211m3%211sbrid-gy%212sbquxjob_371f97c8_18131ff6e69%213sUS
-Query for the same first few rows sorted by ``updated`` ascending, check
-that they’re the same:
+Update the year in the queries below to two years before today. Query
+for the same first few rows sorted by ``updated`` ascending, check that
+they’re the same:
::
SELECT * FROM `brid-gy.datastore.Response`
- WHERE updated >= TIMESTAMP('2020-11-01T00:00:00Z')
+ WHERE updated >= TIMESTAMP('202X-11-01T00:00:00Z')
ORDER BY updated ASC
LIMIT 10
@@ -471,7 +474,7 @@ Delete those rows:
::
DELETE FROM `brid-gy.datastore.Response`
- WHERE updated >= TIMESTAMP('2020-11-01T00:00:00Z')
+ WHERE updated >= TIMESTAMP('202X-11-01T00:00:00Z')
Load the new ``Response`` entities into a temporary table:
@@ -538,7 +541,7 @@ nullable.
Import the CSV, replacing the *data* sheet.
4. Change the underscores in column headings to spaces.
5. Open each sheet, edit the chart, and extend the data range to include
- all of thee new rows.
+ all of the new rows.
6. Check out the graphs! Save full size images with OS or browser
screenshots, thumbnails with the *Download Chart* button. Then post
them!
@@ -557,7 +560,8 @@ for long term storage.
I use the `Datastore Bulk Delete Dataflow
template `__
-with a GQL query like this:
+with a GQL query like this. (Update the years below to two years before
+today.)
.. code:: sql
@@ -573,7 +577,7 @@ command line:
--gcs-location gs://dataflow-templates-us-central1/latest/Datastore_to_Datastore_Delete
--region us-central1
--staging-location gs://brid-gy.appspot.com/tmp-datastore-delete
- --parameters datastoreReadGqlQuery="SELECT * FROM `Response` WHERE updated < DATETIME('2020-11-01T00:00:00Z'),datastoreReadProjectId=brid-gy,datastoreDeleteProjectId=brid-gy"
+ --parameters datastoreReadGqlQuery="SELECT * FROM `Response` WHERE updated < DATETIME('202X-11-01T00:00:00Z'),datastoreReadProjectId=brid-gy,datastoreDeleteProjectId=brid-gy"
Expect this to take at least a day or so.
diff --git a/docs/source/modules.rst b/docs/source/modules.rst
index f0b82e6c..8b4b8f25 100644
--- a/docs/source/modules.rst
+++ b/docs/source/modules.rst
@@ -8,100 +8,124 @@ Reference documentation.
admin
-----
.. automodule:: admin
+ :exclude-members: __eq__, __getnewargs__, __getstate__, __hash__, __new__, __repr__, __str__, __weakref__
blog_webmention
---------------
.. automodule:: blog_webmention
+ :exclude-members: __eq__, __getnewargs__, __getstate__, __hash__, __new__, __repr__, __str__, __weakref__
blogger
-------
.. automodule:: blogger
+ :exclude-members: __eq__, __getnewargs__, __getstate__, __hash__, __new__, __repr__, __str__, __weakref__
browser
-------
.. automodule:: browser
+ :exclude-members: __eq__, __getnewargs__, __getstate__, __hash__, __new__, __repr__, __str__, __weakref__
cron
----
.. automodule:: cron
+ :exclude-members: __eq__, __getnewargs__, __getstate__, __hash__, __new__, __repr__, __str__, __weakref__
facebook
--------
.. automodule:: facebook
+ :exclude-members: __eq__, __getnewargs__, __getstate__, __hash__, __new__, __repr__, __str__, __weakref__
flickr
------
.. automodule:: flickr
+ :exclude-members: __eq__, __getnewargs__, __getstate__, __hash__, __new__, __repr__, __str__, __weakref__
github
------
.. automodule:: github
+ :exclude-members: __eq__, __getnewargs__, __getstate__, __hash__, __new__, __repr__, __str__, __weakref__
handlers
--------
.. automodule:: handlers
+ :exclude-members: __eq__, __getnewargs__, __getstate__, __hash__, __new__, __repr__, __str__, __weakref__
indieauth
---------
.. automodule:: indieauth
+ :exclude-members: __eq__, __getnewargs__, __getstate__, __hash__, __new__, __repr__, __str__, __weakref__
instagram
---------
.. automodule:: instagram
+ :exclude-members: __eq__, __getnewargs__, __getstate__, __hash__, __new__, __repr__, __str__, __weakref__
mastodon
--------
.. automodule:: mastodon
+ :exclude-members: __eq__, __getnewargs__, __getstate__, __hash__, __new__, __repr__, __str__, __weakref__
medium
------
.. automodule:: medium
+ :exclude-members: __eq__, __getnewargs__, __getstate__, __hash__, __new__, __repr__, __str__, __weakref__
models
------
.. automodule:: models
+ :exclude-members: __eq__, __getnewargs__, __getstate__, __hash__, __new__, __repr__, __str__, __weakref__
original_post_discovery
-----------------------
.. automodule:: original_post_discovery
+ :exclude-members: __eq__, __getnewargs__, __getstate__, __hash__, __new__, __repr__, __str__, __weakref__
pages
-----
.. automodule:: pages
+ :exclude-members: __eq__, __getnewargs__, __getstate__, __hash__, __new__, __repr__, __str__, __weakref__
publish
-------
.. automodule:: publish
+ :exclude-members: __eq__, __getnewargs__, __getstate__, __hash__, __new__, __repr__, __str__, __weakref__
reddit
------
.. automodule:: reddit
+ :exclude-members: __eq__, __getnewargs__, __getstate__, __hash__, __new__, __repr__, __str__, __weakref__
superfeedr
----------
.. automodule:: superfeedr
+ :exclude-members: __eq__, __getnewargs__, __getstate__, __hash__, __new__, __repr__, __str__, __weakref__
tasks
-----
.. automodule:: tasks
+ :exclude-members: __eq__, __getnewargs__, __getstate__, __hash__, __new__, __repr__, __str__, __weakref__
tumblr
------
.. automodule:: tumblr
+ :exclude-members: __eq__, __getnewargs__, __getstate__, __hash__, __new__, __repr__, __str__, __weakref__
twitter
-------
.. automodule:: twitter
+ :exclude-members: __eq__, __getnewargs__, __getstate__, __hash__, __new__, __repr__, __str__, __weakref__
util
----
.. automodule:: util
- :exclude-members: __getnewargs__, __getstate__, __new__, __repr__
+ :exclude-members: __eq__, __getnewargs__, __getstate__, __hash__, __new__, __repr__, __str__, __weakref__
webmention
----------
.. automodule:: webmention
+ :exclude-members: __eq__, __getnewargs__, __getstate__, __hash__, __new__, __repr__, __str__, __weakref__
wordpress_rest
--------------
.. automodule:: wordpress_rest
+ :exclude-members: __eq__, __getnewargs__, __getstate__, __hash__, __new__, __repr__, __str__, __weakref__
diff --git a/facebook.py b/facebook.py
index 271058bd..d0172812 100644
--- a/facebook.py
+++ b/facebook.py
@@ -1,5 +1,4 @@
-"""Facebook API code and datastore model classes.
-"""
+"""Facebook API code and datastore model classes."""
import urllib.parse
from google.cloud import ndb
@@ -69,14 +68,14 @@ def button_html(cls, feature, **kwargs):
def canonicalize_url(self, url, **kwargs):
"""Facebook-specific standardization of syndicated urls.
- Canonical form is https://www.facebook.com/USERID/posts/POSTID
+ Canonical form is ``https://www.facebook.com/USERID/posts/POSTID``
Args:
- url: a string, the url of the syndicated content
+ url (str): the url of the syndicated content
kwargs: unused
Return:
- a string, the canonical form of the syndication url
+ str: the canonical form of the syndication url
"""
if util.domain_from_link(url) != self.gr_source.DOMAIN:
return None
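
A minimal sketch of that canonical form, using only the domain guard shown
here; the real method handles many more URL variants, and the path parsing
below is a simplifying assumption::

    from urllib.parse import urlparse

    def canonicalize(url, user_id):
        # mirror the guard above: non-Facebook URLs canonicalize to None
        parsed = urlparse(url)
        host = parsed.netloc.removeprefix('www.').removeprefix('m.')
        if host != 'facebook.com':
            return None
        # hypothetical: treat the trailing path segment as the post id
        post_id = parsed.path.rstrip('/').split('/')[-1]
        return f'https://www.facebook.com/{user_id}/posts/{post_id}'
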
diff --git a/flickr.py b/flickr.py
index 0bf7f369..72326854 100644
--- a/flickr.py
+++ b/flickr.py
@@ -19,7 +19,7 @@
class Flickr(models.Source):
"""A Flickr account.
- The key name is the nsid.
+ The key name is the ``nsid``.
"""
# Fetching comments and likes is extremely request-intensive, so let's dial
# back the frequency for now.
@@ -46,7 +46,7 @@ def new(auth_entity=None, **kwargs):
"""Creates and returns a :class:`Flickr` for the logged in user.
Args:
- auth_entity: :class:`oauth_dropins.flickr.FlickrAuth`
+ auth_entity (oauth_dropins.flickr.FlickrAuth)
"""
assert 'username' not in kwargs
assert 'id' not in kwargs
diff --git a/github.py b/github.py
index 91b496f5..f5a2618c 100644
--- a/github.py
+++ b/github.py
@@ -1,5 +1,4 @@
-"""GitHub API code and datastore model classes.
-"""
+"""GitHub API code and datastore model classes."""
import logging
from flask import request
@@ -53,7 +52,7 @@ def new(auth_entity=None, **kwargs):
"""Creates and returns a :class:`GitHub` for the logged in user.
Args:
- auth_entity: :class:`oauth_dropins.github.GitHubAuth`
+ auth_entity (oauth_dropins.github.GitHubAuth):
kwargs: property values
"""
assert 'username' not in kwargs
diff --git a/handlers.py b/handlers.py
index 087c6933..8b7fbc96 100644
--- a/handlers.py
+++ b/handlers.py
@@ -4,19 +4,15 @@
URL paths are:
-/post/SITE/USER_ID/POST_ID
+* ``/post/SITE/USER_ID/POST_ID``
e.g. /post/flickr/212038/10100823411094363
-
-/comment/SITE/USER_ID/POST_ID/COMMENT_ID
+* ``/comment/SITE/USER_ID/POST_ID/COMMENT_ID``
e.g. /comment/twitter/snarfed_org/10100823411094363/999999
-
-/like/SITE/USER_ID/POST_ID/LIKED_BY_USER_ID
+* ``/like/SITE/USER_ID/POST_ID/LIKED_BY_USER_ID``
e.g. /like/twitter/snarfed_org/10100823411094363/999999
-
-/repost/SITE/USER_ID/POST_ID/REPOSTED_BY_USER_ID
+* ``/repost/SITE/USER_ID/POST_ID/REPOSTED_BY_USER_ID``
e.g. /repost/twitter/snarfed_org/10100823411094363/999999
-
-/rsvp/SITE/USER_ID/EVENT_ID/RSVP_USER_ID
+* ``/rsvp/SITE/USER_ID/EVENT_ID/RSVP_USER_ID``
e.g. /rsvp/facebook/212038/12345/67890
"""
import datetime
@@ -85,7 +81,7 @@ def get_item(self, **kwargs):
Args:
source: :class:`models.Source` subclass
- id: string
+ id: str
Returns:
ActivityStreams object dict
@@ -96,7 +92,7 @@ def get_post(self, id, **kwargs):
"""Fetch a post.
Args:
- id: string, site-specific post id
+ id: str, site-specific post id
is_event: bool
kwargs: passed through to :meth:`get_activities`
@@ -198,16 +194,16 @@ def dispatch_request(self, site, key_id, **kwargs):
def merge_urls(self, obj, property, urls, object_type='article'):
"""Updates an object's ActivityStreams URL objects in place.
- Adds all URLs in urls that don't already exist in obj[property].
+    Adds all URLs in ``urls`` that don't already exist in ``obj[property]``\.
ActivityStreams schema details:
http://activitystrea.ms/specs/json/1.0/#id-comparison
Args:
- obj: ActivityStreams object to merge URLs into
- property: string property to merge URLs into
- urls: sequence of string URLs to add
- object_type: stored as the objectType alongside each URL
+ obj (dict): ActivityStreams object to merge URLs into
+ property (str): property to merge URLs into
+ urls (sequence of str): URLs to add
+ object_type (str): stored as the objectType alongside each URL
"""
if obj:
obj[property] = util.get_list(obj, property)
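
A simplified sketch of the merge the docstring above describes, assuming AS1
URL objects keyed on ``value``; the real code normalizes through
``util.get_list`` and related helpers::

    def merge_urls(obj, property, urls, object_type='article'):
        if not obj:
            return
        existing = obj.setdefault(property, [])
        seen = {u.get('value') if isinstance(u, dict) else u for u in existing}
        for url in urls:
            if url not in seen:
                existing.append({'value': url, 'objectType': object_type})
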
diff --git a/instagram.py b/instagram.py
index 521fa77e..40eb739b 100644
--- a/instagram.py
+++ b/instagram.py
@@ -1,5 +1,4 @@
-"""Instagram browser extension source class and views.
-"""
+"""Instagram browser extension source class and views."""
from granary import instagram as gr_instagram
from oauth_dropins import instagram as oauth_instagram
diff --git a/mastodon.py b/mastodon.py
index d451607b..96542e74 100644
--- a/mastodon.py
+++ b/mastodon.py
@@ -56,7 +56,8 @@ def app_url(self):
class Mastodon(models.Source):
"""A Mastodon account.
- The key name is the fully qualified address, eg '@snarfed@mastodon.technology'.
+ The key name is the fully qualified address, eg
+ ``@snarfed@mastodon.technology``.
"""
GR_CLASS = gr_mastodon.Mastodon
OAUTH_START = StartBase
@@ -76,7 +77,7 @@ class Mastodon(models.Source):
@property
def URL_CANONICALIZER(self):
- """Generate URL_CANONICALIZER dynamically to use the instance's domain."""
+ """Generated dynamically to use the instance's domain."""
return util.UrlCanonicalizer(domain=self.gr_source.DOMAIN)
@staticmethod
@@ -84,7 +85,7 @@ def new(auth_entity=None, **kwargs):
"""Creates and returns a :class:`Mastodon` entity.
Args:
- auth_entity: :class:`oauth_mastodon.MastodonAuth`
+      auth_entity (oauth_dropins.mastodon.MastodonAuth):
kwargs: property values
"""
assert 'username' not in kwargs
@@ -98,7 +99,7 @@ def new(auth_entity=None, **kwargs):
**kwargs)
def instance(self):
- """Returns the Mastodon instance domain, e.g. 'foo.com' ."""
+ """Returns the Mastodon instance domain, e.g. ``foo.com`` ."""
return self._split_address()[1]
def _split_address(self):
@@ -107,7 +108,7 @@ def _split_address(self):
return split[1], split[2]
def user_tag_id(self):
- """Returns the tag URI for this source, e.g. 'tag:foo.com:alice'."""
+ """Returns the tag URI for this source, e.g. ``tag:foo.com:alice``."""
username = self._split_address()[0]
return self.gr_source.tag_uri(username)
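
Based on the ``split[1], split[2]`` return above, the address parsing amounts
to roughly this::

    import re

    def split_address(addr):
        split = re.split('@', addr)  # ['', 'snarfed', 'mastodon.technology']
        return split[1], split[2]    # (username, instance domain)

    assert split_address('@snarfed@mastodon.technology') == \
        ('snarfed', 'mastodon.technology')
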
@@ -121,7 +122,7 @@ def label_name(self):
@classmethod
def button_html(cls, feature, **kwargs):
- """Override oauth-dropins's button_html() to not show the instance text box."""
+ """Override oauth-dropins's to not show the instance text box."""
source = kwargs.get('source')
instance = source.instance() if source else ''
scopes = SCOPE_SEPARATOR.join(
diff --git a/medium.py b/medium.py
index bb7b31f8..b6db0797 100644
--- a/medium.py
+++ b/medium.py
@@ -1,13 +1,17 @@
"""Medium hosted blog implementation.
+Medium's API is unsupported and degrading. We should probably sunset this.
+
Only supports outbound webmentions right now, not inbound, since Medium's API
doesn't support creating responses or recommendations yet.
-https://github.com/Medium/medium-api-docs/issues/71
-https://github.com/Medium/medium-api-docs/issues/72
+
+* https://github.com/Medium/medium-api-docs/issues/71
+* https://github.com/Medium/medium-api-docs/issues/72
API docs:
-https://github.com/Medium/medium-api-docs#contents
-https://medium.com/developers/welcome-to-the-medium-api-3418f956552
+
+* https://github.com/Medium/medium-api-docs#contents
+* https://medium.com/developers/welcome-to-the-medium-api-3418f956552
"""
import collections
import logging
@@ -28,7 +32,7 @@
class Medium(models.Source):
"""A Medium publication or user blog.
- The key name is the username (with @ prefix) or publication name.
+ The key name is the username (with ``@`` prefix) or publication name.
"""
GR_CLASS = collections.namedtuple('FakeGrClass', ('NAME',))(NAME='Medium')
OAUTH_START = oauth_medium.Start
@@ -50,8 +54,8 @@ def new(auth_entity=None, username=None, **kwargs):
"""Creates and returns a Medium for the logged in user.
Args:
- auth_entity: :class:`oauth_dropins.medium.MediumAuth`
- username: string, either username (starting with @) or publication id
+ auth_entity (oauth_dropins.medium.MediumAuth):
+ username (str): either username (starting with ``@``) or publication id
"""
assert username
assert 'id' not in kwargs
@@ -82,6 +86,7 @@ def _data(self, auth_entity):
https://github.com/Medium/medium-api-docs/#user-content-getting-the-authenticated-users-details
Example user::
+
{
'imageUrl': 'https://cdn-images-1.medium.com/fit/c/200/200/0*4dsrv3pwIJfFraSz.jpeg',
'url': 'https://medium.com/@snarfed',
@@ -91,6 +96,7 @@ def _data(self, auth_entity):
}
Example publication::
+
{
'id': 'b45573563f5a',
'name': 'Developers',
diff --git a/models.py b/models.py
index 67583f3f..59764b12 100644
--- a/models.py
+++ b/models.py
@@ -1,5 +1,4 @@
-"""Datastore model classes.
-"""
+"""Datastore model classes."""
from datetime import datetime, timedelta, timezone
import logging
import os
@@ -63,7 +62,7 @@ class DisableSource(Exception):
class SourceMeta(ndb.MetaModel):
- """:class:`Source` metaclass. Registers all subclasses in the sources global."""
+ """:class:`Source` metaclass. Registers all subclasses in the ``sources`` global."""
def __new__(meta, name, bases, class_dict):
cls = ndb.MetaModel.__new__(meta, name, bases, class_dict)
if cls.SHORT_NAME:
@@ -76,7 +75,6 @@ class Source(StringIdModel, metaclass=SourceMeta):
Each concrete silo class should subclass this class.
"""
-
STATUSES = ('enabled', 'disabled')
POLL_STATUSES = ('ok', 'error', 'polling')
FEATURES = ('listen', 'publish', 'webmention', 'email')
@@ -192,7 +190,7 @@ class Source(StringIdModel, metaclass=SourceMeta):
# for __getattr__ to run when it's accessed.
def __init__(self, *args, id=None, **kwargs):
- """Constructor. Escapes the key string id if it starts with `__`."""
+ """Constructor. Escapes the key string id if it starts with ``__``."""
username = kwargs.get('username')
if self.USERNAME_KEY_ID and username and not id:
id = username.lower()
@@ -270,17 +268,16 @@ def lookup(cls, id):
By default, interprets id as just the key id. Subclasses may extend this to
support usernames, etc.
- Ideally, if USERNAME_KEY_ID, normalize to lower case before looking up. We'd
- need to backfill all existing entities with upper case key ids, though,
- which we're not planning to do.
- https://github.com/snarfed/bridgy/issues/884
+ Ideally, if ``USERNAME_KEY_ID``, normalize to lower case before looking up.
+ We'd need to backfill all existing entities with upper case key ids, though,
+ which we're not planning to do. https://github.com/snarfed/bridgy/issues/884
"""
if id and id.startswith('__'):
id = '\\' + id
return ndb.Key(cls, id).get()
def user_tag_id(self):
- """Returns the tag URI for this source, e.g. 'tag:plus.google.com:123456'."""
+ """Returns the tag URI for this source, e.g. ``tag:plus.google.com:123456``."""
return self.gr_source.tag_uri(self.key_id())
def bridgy_path(self):
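
The ``__`` escaping that ``__init__`` and ``lookup`` cooperate on reduces to a
backslash prefix; a tiny illustration (helper name hypothetical)::

    def escape_key_id(id):
        # key ids starting with __ are reserved, so prefix a backslash
        return '\\' + id if id.startswith('__') else id

    assert escape_key_id('__bad') == '\\__bad'
    assert escape_key_id('snarfed') == 'snarfed'
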
@@ -306,13 +303,13 @@ def label_name(self):
@classmethod
@ndb.transactional()
def put_updates(cls, source):
- """Writes source.updates to the datastore transactionally.
+ """Writes ``source.updates`` to the datastore transactionally.
-    Returns:
-      source: :class:`Source`
+    Args:
+      source (Source)
Returns:
- the updated :class:`Source`
+ ``source``, updated
"""
if not source.updates:
return source
@@ -380,18 +377,18 @@ def get_author_urls(self):
In debug mode, replace test domains with localhost.
Return:
- a list of string URLs, possibly empty
+ list of str: URLs, possibly empty
"""
return [util.replace_test_domains_with_localhost(u) for u in self.domain_urls]
def search_for_links(self):
"""Searches for activities with links to any of this source's web sites.
- https://github.com/snarfed/bridgy/issues/456
- https://github.com/snarfed/bridgy/issues/565
+ * https://github.com/snarfed/bridgy/issues/456
+ * https://github.com/snarfed/bridgy/issues/565
Returns:
- sequence of ActivityStreams activity dicts
+ list of dict: ActivityStreams activities
"""
return []
@@ -415,11 +412,11 @@ def get_comment(self, comment_id, **kwargs):
Passes through to granary by default. May be overridden by subclasses.
Args:
- comment_id: string, site-specific comment id
+ comment_id (str): site-specific comment id
kwargs: passed to :meth:`granary.source.Source.get_comment`
Returns:
- dict, decoded ActivityStreams comment object, or None
+ dict: decoded ActivityStreams comment object, or None
"""
comment = self.gr_source.get_comment(comment_id, **kwargs)
if comment:
@@ -427,16 +424,15 @@ def get_comment(self, comment_id, **kwargs):
return comment
def get_like(self, activity_user_id, activity_id, like_user_id, **kwargs):
- """Returns an ActivityStreams 'like' activity object.
+ """Returns an ActivityStreams ``like`` activity object.
- Passes through to granary by default. May be overridden
- by subclasses.
+ Passes through to granary by default. May be overridden by subclasses.
Args:
- activity_user_id: string id of the user who posted the original activity
- activity_id: string activity id
- like_user_id: string id of the user who liked the activity
- kwargs: passed to granary.Source.get_comment
+ activity_user_id (str): id of the user who posted the original activity
+ activity_id (str): activity id
+ like_user_id (str): id of the user who liked the activity
+ kwargs: passed to :meth:`granary.source.Source.get_comment`
"""
return self.gr_source.get_like(activity_user_id, activity_id, like_user_id,
**kwargs)
@@ -455,13 +451,13 @@ def create_comment(self, post_url, author_name, author_url, content):
Must be implemented by subclasses.
Args:
- post_url: string
- author_name: string
- author_url: string
- content: string
+ post_url (str)
+ author_name (str)
+ author_url (str)
+ content (str)
Returns:
- response dict with at least 'id' field
+ dict: response with at least ``id`` field
"""
raise NotImplementedError()
@@ -472,7 +468,7 @@ def feed_url(self):
:mod:`blogger`, :mod:`medium`, :mod:`tumblr`, and :mod:`wordpress_rest`.
Returns:
- string URL
+ str: URL
"""
raise NotImplementedError()
@@ -483,7 +479,7 @@ def edit_template_url(self):
:mod:`blogger`, :mod:`medium`, :mod:`tumblr`, and :mod:`wordpress_rest`.
Returns:
- string URL
+ str: URL
"""
raise NotImplementedError()
@@ -494,7 +490,8 @@ def button_html(cls, feature, **kwargs):
Mostly just passes through to
:meth:`oauth_dropins.handlers.Start.button_html`.
- Returns: string, HTML
+ Returns:
+ str: HTML
"""
assert set(feature.split(',')) <= set(cls.FEATURES)
form_extra = (kwargs.pop('form_extra', '') +
@@ -519,11 +516,12 @@ def create_new(cls, user_url=None, **kwargs):
"""Creates and saves a new :class:`Source` and adds a poll task for it.
Args:
- user_url: a string, optional. if provided, supersedes other urls when
- determining the author_url
- **kwargs: passed to :meth:`new()`
+ user_url (str): if provided, supersedes other urls when determining the
+ ``author_url``
+ kwargs: passed to :meth:`new()`
- Returns: newly created :class:`Source`
+ Returns:
+ Source: newly created entity
"""
source = cls.new(**kwargs)
if source is None:
@@ -584,7 +582,7 @@ def create_new(cls, user_url=None, **kwargs):
return source
def verified(self):
- """Returns True if this source is ready to be used, false otherwise.
+ """Returns True if this source is ready to be used, False otherwise.
See :meth:`verify()` for details. May be overridden by subclasses, e.g.
:class:`tumblr.Tumblr`.
@@ -608,7 +606,7 @@ def verify(self, force=False):
May be overridden by subclasses, e.g. :class:`tumblr.Tumblr`.
Args:
- force: if True, fully verifies (e.g. re-fetches the blog's HTML and
+ force (bool): if True, fully verifies (e.g. re-fetches the blog's HTML and
-      performs webmention discovery) even we already think this source is
+      performs webmention discovery) even if we already think this source is
verified.
"""
@@ -633,19 +631,19 @@ def urls_and_domains(self, auth_entity, user_url, actor=None,
resolve_source_domain=True):
"""Returns this user's valid (not webmention-blocklisted) URLs and domains.
- Converts the auth entity's user_json to an ActivityStreams actor and uses
- its 'urls' and 'url' fields. May be overridden by subclasses.
+ Converts the auth entity's ``user_json`` to an ActivityStreams actor and
+ uses its ``urls`` and ``url`` fields. May be overridden by subclasses.
Args:
- auth_entity: :class:`oauth_dropins.models.BaseAuth`
- user_url: string, optional URL passed in when authorizing
- actor: dict, optional AS actor for the user. If provided, overrides
+ auth_entity (oauth_dropins.models.BaseAuth)
+ user_url (str): optional URL passed in when authorizing
+ actor (dict): optional AS actor for the user. If provided, overrides
auth_entity
- resolve_source_domain: boolean, whether to follow redirects on URLs on
+ resolve_source_domain (bool): whether to follow redirects on URLs on
this source's domain
Returns:
- ([string url, ...], [string domain, ...])
+ ([str url, ...], [str domain, ...]) tuple:
"""
if not actor:
actor = self.gr_source.user_to_actor(json_loads(auth_entity.user_json))
@@ -683,10 +681,11 @@ def resolve_profile_url(url, resolve=True):
"""Resolves a profile URL to be added to a source.
Args:
- url: string
- resolve: boolean, whether to make HTTP requests to follow redirects, etc.
+ url (str)
+ resolve (bool): whether to make HTTP requests to follow redirects, etc.
- Returns: string, resolved URL, or None
+ Returns:
+ str: resolved URL, or None
"""
final, _, ok = util.get_webmention_target(url, resolve=resolve)
if not ok:
@@ -719,17 +718,17 @@ def canonicalize_url(self, url, activity=None, **kwargs):
return self.URL_CANONICALIZER(url, **kwargs) if self.URL_CANONICALIZER else url
def infer_profile_url(self, url):
- """Given an arbitrary URL representing a person, try to find their
- profile URL for *this* service.
+ """Given a silo profile, tries to find the matching Bridgy user URL.
Queries Bridgy's registered accounts for users with a particular
domain in their silo profile.
Args:
- url: string, a person's URL
+ url (str): a person's URL
Return:
- a string URL for their profile on this service (or None)
+ str: URL for their profile on this service, or None
+
"""
domain = util.domain_from_link(url)
if domain == self.gr_source.DOMAIN:
@@ -742,13 +741,13 @@ def preprocess_for_publish(self, obj):
"""Preprocess an object before trying to publish it.
By default this tries to massage person tags so that the tag's
- "url" points to the person's profile on this service (as opposed
+ ``url`` points to the person's profile on this service (as opposed
to a person's homepage).
The object is modified in place.
Args:
- obj: ActivityStreams activity or object dict
+ obj (dict): ActivityStreams activity or object
"""
if isinstance(obj, str):
return obj
@@ -771,7 +770,7 @@ def on_new_syndicated_post(self, syndpost):
"""Called when a new :class:`SyndicatedPost` is stored for this source.
Args:
- syndpost: :class:`SyndicatedPost`
+ syndpost (SyndicatedPost)
"""
pass
@@ -785,7 +784,7 @@ def is_private(self):
def is_beta_user(self):
"""Returns True if this is a "beta" user opted into new features.
- Beta users come from beta_users.txt.
+ Beta users come from ``beta_users.txt``.
"""
return self.bridgy_path() in util.BETA_USER_PATHS
@@ -1049,10 +1048,11 @@ def type_label(self):
class BlogWebmention(Publish, StringIdModel):
"""Datastore entity for webmentions for hosted blog providers.
- Key id is the source URL and target URL concated with a space, ie 'SOURCE
- TARGET'. The source URL is *always* the URL given in the webmention HTTP
- request. If the source page has a u-url, that's stored in the u_url property.
- The target URL is always the final URL, after any redirects.
+  Key id is the source URL and target URL concatenated with a space, ie ``SOURCE
+ TARGET``. The source URL is *always* the URL given in the webmention HTTP
+ request. If the source page has a ``u-url``, that's stored in the
+ :attr:`u_url` property. The target URL is always the final URL, after any
+ redirects.
Reuses :class:`Publish`'s fields, but otherwise unrelated.
"""
@@ -1079,7 +1079,7 @@ class SyndicatedPost(ndb.Model):
See :mod:`original_post_discovery`.
When a :class:`SyndicatedPost` entity is about to be stored,
- :meth:`source.Source.on_new_syndicated_post()` is called before it's stored.
+ :meth:`source.Source.on_new_syndicated_post` is called before it's stored.
"""
syndication = ndb.StringProperty()
@@ -1095,8 +1095,8 @@ def insert_original_blank(cls, source, original):
there is, nothing will be added.
Args:
- source: :class:`Source` subclass
- original: string
+ source (Source)
+ original (str)
"""
if cls.query(cls.original == original, ancestor=source.key).get():
return
@@ -1110,8 +1110,8 @@ def insert_syndication_blank(cls, source, syndication):
syndication. If there is, nothing will be added.
Args:
- source: :class:`Source` subclass
- original: string
+ source (Source)
+      syndication (str)
"""
if cls.query(cls.syndication == syndication, ancestor=source.key).get():
@@ -1131,9 +1131,9 @@ def insert(cls, source, syndication, original):
removed. If non-blank relationships exist, they will be retained.
Args:
- source: :class:`Source` subclass
- syndication: string (not None)
- original: string (not None)
+ source (Source)
+ syndication (str)
+ original (str)
Returns:
SyndicatedPost: newly created or preexisting entity
@@ -1163,7 +1163,7 @@ class Domain(StringIdModel):
domain. Clients can include a token with requests that operate on a given
domain, eg sending posts and responses from the browser extension.
- Key id is the string domain, eg 'example.com'.
+ Key id is the string domain, eg ``example.com``.
"""
tokens = ndb.StringProperty(repeated=True)
auth = ndb.KeyProperty(IndieAuth)
diff --git a/original_post_discovery.py b/original_post_discovery.py
index 5214a2d2..b5fcac40 100644
--- a/original_post_discovery.py
+++ b/original_post_discovery.py
@@ -1,7 +1,7 @@
"""Augments the standard original_post_discovery algorithm with a
reverse lookup that supports posts without a backlink or citation.
-Performs a reverse-lookup that scans the activity's author's h-feed
+Performs a reverse-lookup that scans the activity's author's ``h-feed``
for posts with rel=syndication links. As we find syndicated copies,
save the relationship. If we find the original post for the activity
in question, return the original's URL.
@@ -11,18 +11,16 @@
This feature adds costs in terms of HTTP requests and database
lookups in the following primary cases:
-- Author's domain is known to be invalid or blocklisted, there will
+* If the author's domain is known to be invalid or blocklisted, there will
be 0 requests and 0 DB lookups.
-
-- For a syndicated post has been seen previously (regardless of
+* If a syndicated post has been seen previously (regardless of
whether discovery was successful), there will be 0 requests and 1
DB lookup.
-
-- The first time a syndicated post has been seen:
- - 1 to 2 HTTP requests to get and parse the h-feed plus 1 additional
- request for *each* post permalink that has not been seen before.
- - 1 DB query for the initial check plus 1 additional DB query for
- *each* post permalink.
+* The first time a syndicated post has been seen:
+ * 1 to 2 HTTP requests to get and parse the ``h-feed`` plus 1 additional
+ request for *each* post permalink that has not been seen before.
+ * 1 DB query for the initial check plus 1 additional DB query for
+ *each* post permalink.
"""
import collections
import itertools
@@ -53,26 +51,26 @@
def discover(source, activity, fetch_hfeed=True, include_redirect_sources=True,
already_fetched_hfeeds=None):
- """Augments the standard original_post_discovery algorithm with a
+ """Augments the standard original post discovery algorithm with a
reverse lookup that supports posts without a backlink or citation.
- If fetch_hfeed is False, then we will check the db for previously found
- :class:`models.SyndicatedPost`\ s but will not do posse-post-discovery to find
+ If ``fetch_hfeed`` is False, then we will check the db for previously found
+ :class:`models.SyndicatedPost`\s but will not do posse-post-discovery to find
new ones.
Args:
- source: :class:`models.Source` subclass. Changes to property values (e.g.
- domains, domain_urls, last_syndication_url) are stored in source.updates;
- they should be updated transactionally later.
- activity: activity dict
- fetch_hfeed: boolean
- include_redirect_sources: boolean, whether to include URLs that redirect as
+    source (models.Source): Changes to property values (e.g.
+      ``domains``, ``domain_urls``, ``last_syndication_url``) are stored in
+ ``source.updates``\; they should be updated transactionally later.
+ activity (dict)
+ fetch_hfeed (bool)
+ include_redirect_sources (bool): whether to include URLs that redirect as
well as their final destination URLs
- already_fetched_hfeeds: set, URLs that we have already fetched and run
- posse-post-discovery on, so we can avoid running it multiple times
+ already_fetched_hfeeds (set of str): URLs that we have already fetched and
+ run posse-post-discovery on, so we can avoid running it multiple times
Returns:
- (set(string original post URLs), set(string mention URLs)) tuple
+ (set of str, set of str) tuple: (original post URLs, mention URLs)
"""
label = activity.get('url') or activity.get('id')
logger.debug(f'discovering original posts for: {label}')
@@ -182,12 +180,12 @@ def refetch(source):
links that might not have been there the first time we looked.
Args:
- source: :class:`models.Source` subclass. Changes to property values (e.g.
- domains, domain_urls, last_syndication_url) are stored in source.updates;
+ source (models.Source): Changes to property values (e.g. ``domains``,
+      ``domain_urls``, ``last_syndication_url``) are stored in ``source.updates``;
they should be updated transactionally later.
Returns:
- dict: mapping syndicated_url to a list of new :class:`models.SyndicatedPost`\ s
+ dict: mapping syndicated_url to a list of new :class:`models.SyndicatedPost`\s
"""
logger.debug(f'attempting to refetch h-feed for {source.label()}')
@@ -208,11 +206,11 @@ def targets_for_response(resp, originals, mentions):
but only posts and comments get sent to mentioned URLs.
Args:
- resp: ActivityStreams response object
- originals, mentions: sequence of string URLs
+ resp (dict): ActivityStreams response object
+    originals, mentions (sequence of str): URLs
Returns:
- set of string URLs
+ set of str: URLs
"""
type = models.Response.get_type(resp)
targets = set()
@@ -228,18 +226,18 @@ def _posse_post_discovery(source, activity, syndication_url, fetch_hfeed,
"""Performs the actual meat of the posse-post-discover.
Args:
- source: :class:`models.Source` subclass
- activity: activity dict
- syndication_url: url of the syndicated copy for which we are
+ source (models.Source)
+ activity (dict)
+ syndication_url (str): url of the syndicated copy for which we are
trying to find an original
- fetch_hfeed: boolean, whether or not to fetch and parse the
+ fetch_hfeed (bool): whether or not to fetch and parse the
author's feed if we don't have a previously stored
relationship
- already_fetched_hfeeds: set, URLs we've already fetched in a
+ already_fetched_hfeeds (set of str): URLs we've already fetched in a
previous iteration
Return:
- sequence of string original post urls, possibly empty
+ list of str: original post urls, possibly empty
"""
logger.info(f'starting posse post discovery with syndicated {syndication_url}')
@@ -287,14 +285,14 @@ def _process_author(source, author_url, refetch=False, store_blanks=True):
"""Fetch the author's domain URL, and look for syndicated posts.
Args:
- source: a subclass of :class:`models.Source`
- author_url: the author's homepage URL
- refetch: boolean, whether to refetch and process entries we've seen before
- store_blanks: boolean, whether we should store blank
- :class:`models.SyndicatedPost`\ s when we don't find a relationship
+ source (models.Source)
+ author_url (str): the author's homepage URL
+ refetch (bool): whether to refetch and process entries we've seen before
+ store_blanks (bool): whether we should store blank
+ :class:`models.SyndicatedPost`\s when we don't find a relationship
Return:
- a dict of syndicated_url to a list of new :class:`models.SyndicatedPost`\ s
+ dict: maps syndicated_url to a list of new :class:`models.SyndicatedPost`\s
"""
# for now use whether the url is a valid webmention target
# as a proxy for whether it's worth searching it.
@@ -411,15 +409,17 @@ def updated_or_published(item):
def _merge_hfeeds(feed1, feed2):
- """Merge items from two h-feeds into a composite feed. Skips items in
- feed2 that are already represented in feed1, based on the "url" property.
+ """Merge items from two ``h-feeds`` into a composite feed.
+
+ Skips items in ``feed2`` that are already represented in ``feed1``\, based on
+ the ``url`` property.
Args:
- feed1: a list of dicts
- feed2: a list of dicts
+ feed1 (list of dict)
+ feed2 (list of dict)
Returns:
- a list of dicts
+ list of dict:
"""
seen = set()
for item in feed1:
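
A sketch of the full merge described above, assuming each mf2 item carries its
URLs in ``properties.url``::

    def merge_hfeeds(feed1, feed2):
        seen = set()
        for item in feed1:
            seen.update(item.get('properties', {}).get('url', []))
        return list(feed1) + [
            item for item in feed2
            if not seen.intersection(item.get('properties', {}).get('url', []))]
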
@@ -434,13 +434,14 @@ def _merge_hfeeds(feed1, feed2):
def _find_feed_items(mf2):
"""Extract feed items from given microformats2 data.
- If the top-level h-* item is an h-feed, return its children. Otherwise,
+ If the top-level ``h-*`` item is an h-feed, return its children. Otherwise,
returns the top-level items.
Args:
- mf2: dict, parsed mf2 data
+ mf2 (dict): parsed mf2 data
- Returns: list of dicts, each one representing an mf2 h-* item
+ Returns:
+ list of dict: each one representing an mf2 ``h-*`` item
"""
feeditems = mf2['items']
hfeeds = mf2util.find_all_entries(mf2, ('h-feed',))
@@ -462,18 +463,18 @@ def process_entry(source, permalink, feed_entry, refetch, preexisting,
"""Fetch and process an h-entry and save a new :class:`models.SyndicatedPost`.
Args:
- source:
- permalink: url of the unprocessed post
- feed_entry: the h-feed version of the h-entry dict, often contains
- a partial version of the h-entry at the permalink
- refetch: boolean, whether to refetch and process entries we've seen before
- preexisting: list of previously discovered :class:`models.SyndicatedPost`\ s
+ source (models.Source)
+ permalink (str): url of the unprocessed post
+ feed_entry (dict): the ``h-feed`` version of the ``h-entry``\, often contains
+ a partial version of the ``h-entry`` at the permalink
+ refetch (bool): whether to refetch and process entries we've seen before
+      preexisting (list of models.SyndicatedPost): previously discovered entries
for this permalink
- store_blanks: boolean, whether we should store blank
- :class:`models.SyndicatedPost`\ s when we don't find a relationship
+ store_blanks (bool): whether we should store blank
+ :class:`models.SyndicatedPost`\s when we don't find a relationship
Returns:
- a dict from syndicated url to a list of new :class:`models.SyndicatedPost`\ s
+ dict: maps syndicated url to a list of new :class:`models.SyndicatedPost`\s
"""
# if the post has already been processed, do not add to the results
# since this method only returns *newly* discovered relationships.
@@ -569,14 +570,13 @@ def _process_syndication_urls(source, permalink, syndication_urls,
in the db.
Args:
- source: a :class:`models.Source` subclass
- permalink: a string. the current h-entry permalink
- syndication_urls: a collection of strings. the unfitered list
- of syndication urls
- preexisting: a list of previously discovered :class:`models.SyndicatedPost`\ s
+ source (models.Source)
+ permalink (str): the current ``h-entry`` permalink
+      syndication_urls (sequence of str): the unfiltered list of syndication urls
+      preexisting (list of models.SyndicatedPost): previously discovered
Returns:
- dict mapping string syndication url to list of :class:`models.SyndicatedPost`\ s
+ dict: maps str syndication url to list of :class:`models.SyndicatedPost`\s
"""
results = {}
# save the results (or lack thereof) to the db, and put them in a
diff --git a/pages.py b/pages.py
index 7664fa1d..497df806 100644
--- a/pages.py
+++ b/pages.py
@@ -60,11 +60,11 @@ def about():
@app.route('/users')
@flask_util.cached(cache, datetime.timedelta(hours=1))
def users():
- """View for /users.
+ """View for ``/users``.
Semi-optimized. Pages by source name. Queries each source type for results
with name greater than the start_name query param, then merge sorts the
- results and truncates at PAGE_SIZE.
+ results and truncates at ``PAGE_SIZE``\.
The start_name param is expected to be capitalized because capital letters
sort lexicographically before lower case letters. An alternative would be to
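
The merge-sort-and-truncate step that docstring describes could look roughly
like this; the helper name, ``name`` attribute, and PAGE_SIZE value are
assumptions::

    import heapq, itertools

    PAGE_SIZE = 50  # illustrative

    def merge_page(per_source_results, start_name):
        # each source type's results arrive sorted by name; merge the sorted
        # streams, then truncate at PAGE_SIZE
        streams = [(s for s in results if s.name > start_name)
                   for results in per_source_results]
        merged = heapq.merge(*streams, key=lambda s: s.name)
        return list(itertools.islice(merged, PAGE_SIZE))
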
@@ -293,10 +293,10 @@ def get_paging_param(param):
def process_webmention_links(e):
- """Generates pretty HTML for the links in a :class:`Webmentions` entity.
+ """Generates pretty HTML for the links in a :class:`models.Webmentions` entity.
Args:
- e: :class:`Webmentions` subclass (:class:`Response` or :class:`BlogPost`)
+ e (models.Response or models.BlogPost)
"""
def link(url, g):
return util.pretty_link(
diff --git a/publish.py b/publish.py
index 0dd6cb78..fe7db3d5 100644
--- a/publish.py
+++ b/publish.py
@@ -74,8 +74,8 @@ class PublishBase(webmention.Webmention):
also override other methods.
Attributes:
- fetched: :class:`requests.Response` from fetching source_url
- shortlink: rel-shortlink found in the original post, if any
+    fetched (requests.Response): response from fetching ``source_url``
+ shortlink (str): rel-shortlink found in the original post, if any
"""
PREVIEW = None
@@ -85,7 +85,7 @@ class PublishBase(webmention.Webmention):
def authorize(self):
"""Returns True if the current user is authorized for this request.
- Otherwise, should call :meth:`self.error()` to provide an appropriate
+ Otherwise, should call :meth:`error` to provide an appropriate
error message.
"""
return True
@@ -299,11 +299,12 @@ def _find_source(self, source_cls, url, domain):
"""Returns the source that should publish a post URL, or None if not found.
Args:
- source_cls: :class:`models.Source` subclass for this silo
- url: string
- domain: string, url's domain
+ source_cls (models.Source): subclass for this silo
+ url (str)
+ domain (str): url's domain
- Returns: :class:`models.Source`
+ Returns:
+ models.Source:
"""
domain = domain.lower()
if util.domain_or_parent_in(domain, util.DOMAINS):
@@ -344,10 +345,10 @@ def attempt_single_item(self, item):
"""Attempts to preview or publish a single mf2 item.
Args:
- item: mf2 item dict from mf2py
+ item (dict): mf2 item from mf2py
Returns:
- CreationResult
+ granary.source.CreationResult:
"""
self.maybe_inject_silo_content(item)
obj = microformats2.json_to_object(item)
@@ -417,10 +418,10 @@ def delete(self, source_url):
"""Attempts to delete or preview delete a published post.
Args:
- source_url: string, original post URL
+ source_url (str): original post URL
Returns:
- dict response data with at least id and url
+ dict: response data with at least ``id`` and ``url``
"""
assert self.entity
if ((self.entity.status != 'complete' or self.entity.type == 'preview') and
@@ -463,7 +464,7 @@ def preprocess(self, activity):
Specifically, expands inReplyTo/object URLs with rel=syndication URLs.
Args:
- activity: an ActivityStreams activity or object being published
+ activity (dict): ActivityStreams activity or object being published
"""
self.source.preprocess_for_publish(activity)
self.expand_target_urls(activity)
@@ -480,7 +481,7 @@ def expand_target_urls(self, activity):
This method modifies the dict in place.
Args:
- activity: an ActivityStreams dict of the activity being published
+ activity (dict): ActivityStreams activity being published
"""
for field in ('inReplyTo', 'object'):
# microformats2.json_to_object de-dupes, no need to do it here
@@ -539,7 +540,7 @@ def get_or_add_publish_entity(self, source_url):
...and if necessary, :class:`models.PublishedPage` entity.
Args:
- source_url: string
+ source_url (str)
"""
try:
return self._get_or_add_publish_entity(source_url)
@@ -585,10 +586,11 @@ def _render_preview(self, result, include_link=False):
"""Renders a preview CreationResult as HTML.
Args:
- result: CreationResult
- include_link: boolean
+ result (CreationResult)
+ include_link (bool)
- Returns: CreationResult with the rendered HTML in content
+ Returns:
+ CreationResult: result, with rendered HTML in content
"""
state = {
'source_key': self.source.key.urlsafe().decode(),
diff --git a/reddit.py b/reddit.py
index 6344de6b..82d01ea6 100644
--- a/reddit.py
+++ b/reddit.py
@@ -32,7 +32,7 @@ def new(auth_entity=None, **kwargs):
"""Creates and returns a :class:`Reddit` entity.
Args:
- auth_entity: :class:`oauth_dropins.reddit.RedditAuth`
+ auth_entity (oauth_dropins.reddit.RedditAuth):
kwargs: property values
"""
assert 'username' not in kwargs
@@ -74,7 +74,7 @@ def search_for_links(self):
"""Searches for activities with links to any of this source's web sites.
Returns:
- sequence of ActivityStreams activity dicts
+ list of dict: ActivityStreams activities
"""
urls = {util.schemeless(util.fragmentless(url), slashes=False)
for url in self.domain_urls
diff --git a/superfeedr.py b/superfeedr.py
index be14c0cb..33fe6991 100644
--- a/superfeedr.py
+++ b/superfeedr.py
@@ -1,11 +1,8 @@
"""Superfeedr.
-https://superfeedr.com/users/snarfed
-http://documentation.superfeedr.com/subscribers.html
-http://documentation.superfeedr.com/schema.html
-
-If/when I add support for arbitrary RSS/Atom feeds, I should use
-http://feediscovery.appspot.com/ for feed discovery based on front page URL.
+* https://superfeedr.com/users/snarfed
+* http://documentation.superfeedr.com/subscribers.html
+* http://documentation.superfeedr.com/schema.html
"""
import logging
@@ -35,7 +32,7 @@ def subscribe(source):
http://documentation.superfeedr.com/subscribers.html#addingfeedswithpubsubhubbub
Args:
- source: Blogger, Tumblr, or WordPress
+    source (Blogger, Tumblr, or WordPress)
"""
if appengine_info.LOCAL_SERVER:
logger.info('Running locally, not subscribing to Superfeedr')
@@ -66,12 +63,12 @@ def handle_feed(feed, source):
Creates :class:`models.BlogPost` entities and adds propagate-blogpost tasks
for new items.
- http://documentation.superfeedr.com/schema.html#json
- http://documentation.superfeedr.com/subscribers.html#pubsubhubbubnotifications
+ * http://documentation.superfeedr.com/schema.html#json
+ * http://documentation.superfeedr.com/subscribers.html#pubsubhubbubnotifications
Args:
- feed: unicode string, Superfeedr JSON feed
- source: Blogger, Tumblr, or WordPress
+ feed (str): Superfeedr JSON feed
+ source (Blogger, Tumblr, or WordPress)
"""
logger.info(f'Source: {source.label()} {source.key_id()}')
logger.info(f'Raw feed: {feed}')
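
Per Superfeedr's JSON schema (linked above), each notification carries an
``items`` list; extracting candidate posts looks roughly like this sketch,
where the field fallbacks are assumptions::

    import json

    def extract_posts(feed):
        parsed = json.loads(feed) if isinstance(feed, str) else feed
        for item in parsed.get('items', []):
            url = item.get('permalinkUrl') or item.get('id')
            if url:
                yield url, item.get('content') or item.get('summary', '')
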
@@ -126,7 +123,7 @@ def handle_feed(feed, source):
class Notify(View):
"""Handles a Superfeedr notification.
- Abstract; subclasses must set the SOURCE_CLS attr.
+ Abstract; subclasses must set the :attr:`SOURCE_CLS` attr.
http://documentation.superfeedr.com/subscribers.html#pubsubhubbubnotifications
"""
diff --git a/tasks.py b/tasks.py
index b9dcae0f..33b4681d 100644
--- a/tasks.py
+++ b/tasks.py
@@ -1,6 +1,4 @@
-# coding=utf-8
-"""Task queue handlers.
-"""
+"""Task queue handlers."""
import datetime
import gc
import logging
@@ -40,19 +38,21 @@ class Poll(View):
Request parameters:
- * source_key: string key of source entity
- * last_polled: timestamp, YYYY-MM-DD-HH-MM-SS
+ * ``source_key``: string key of source entity
+ * ``last_polled``: timestamp, ``YYYY-MM-DD-HH-MM-SS``
Inserts a propagate task for each response that hasn't been seen before.
Steps:
- 1: Fetch activities: posts by the user, links to the user's domain(s).
- 2: Extract responses, store their activities.
- 3: Filter out responses we've already seen, using Responses in the datastore.
- 4: Store new responses and enqueue propagate tasks.
- 5: Possibly refetch updated syndication urls.
- 1-4 are in backfeed(); 5 is in poll().
+ 1. Fetch activities: posts by the user, links to the user's domain(s).
+ 2. Extract responses, store their activities.
+ 3. Filter out responses we've already seen, using :class:`models.Response`\s
+ in the datastore.
+ 4. Store new responses and enqueue propagate tasks.
+ 5. Possibly refetch updated syndication urls.
+
+ 1-4 are in :meth:`backfeed`; 5 is in :meth:`poll`.
"""
RESTART_EXISTING_TASKS = False # overridden in Discover
@@ -119,7 +119,7 @@ def dispatch_request(self):
def poll(self, source):
"""Actually runs the poll.
- Stores property names and values to update in source.updates.
+ Stores property names and values to update in ``source.updates``.
"""
if source.last_activities_etag or source.last_activity_id:
logger.debug(f'Using ETag {source.last_activities_etag}, last activity id {source.last_activity_id}')
@@ -224,9 +224,9 @@ def backfeed(self, source, responses=None, activities=None):
Stores property names and values to update in source.updates.
Args:
- source: Source
- responses: dict mapping AS response id to AS object
- activities: dict mapping AS activity id to AS object
+ source (models.Source)
+ responses (dict): maps AS response id to AS object
+ activities (dict): maps AS activity id to AS object
"""
if responses is None:
responses = {}
@@ -414,7 +414,7 @@ def repropagate_old_responses(self, source, relationships):
We look through as many responses as we can until the datastore query expires.
Args:
- source: :class:`models.Source`
+ source (models.Source):
relationships: refetch result
"""
for response in (Response.query(Response.source == source.key)
@@ -461,8 +461,8 @@ class Discover(Poll):
Request parameters:
- * source_key: string key of source entity
- * post_id: string, silo post id(s)
+  * source_key (str): key of source entity
+  * post_id (str): silo post id(s)
Inserts a propagate task for each response that hasn't been seen before.
"""
@@ -519,8 +519,8 @@ class SendWebmentions(View):
Attributes:
- * entity: :class:`models.Webmentions` subclass instance (set in :meth:`lease_entity`)
- * source: :class:`models.Source` entity (set in :meth:`send_webmentions`)
+ * entity (models.Webmentions): subclass instance (set in :meth:`lease_entity`)
+ * source (models.Source): entity (set in :meth:`send_webmentions`)
"""
# request deadline (10m) plus some padding
LEASE_LENGTH = datetime.timedelta(minutes=12)
@@ -531,10 +531,10 @@ def source_url(self, target_url):
Subclasses must implement.
Args:
- target_url: string
+ target_url (str)
Returns:
- string
+ str
"""
raise NotImplementedError()
@@ -636,13 +636,14 @@ def do_send_webmentions(self):
def lease(self, key):
"""Attempts to acquire and lease the :class:`models.Webmentions` entity.
- Also loads and sets `g.source`, and returns False if the source doesn't
+ Also loads and sets ``g.source``, and returns False if the source doesn't
exist or is disabled.
Args:
- key: :class:`ndb.Key`
+ key (ndb.Key):
- Returns: True on success, False or None otherwise
+ Returns:
+ bool: True on success, False or None otherwise
"""
self.entity = key.get()
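A hedged sketch of the lease pattern; the ``status`` and ``leased_until``
fields are assumptions, not the entity's actual schema::

    from datetime import datetime, timedelta, timezone
    from google.cloud import ndb

    @ndb.transactional()
    def lease(key, length=timedelta(minutes=12)):
        entity = key.get()
        if entity is None:
            return None
        now = datetime.now(timezone.utc)
        if entity.status == 'processing' and now < entity.leased_until:
            return False  # another task holds the lease
        entity.status = 'processing'
        entity.leased_until = now + length
        entity.put()
        return True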
@@ -700,7 +701,7 @@ def release(self, new_status):
"""Attempts to unlease the :class:`models.Webmentions` entity.
Args:
- new_status: string
+ new_status (str):
"""
existing = self.entity.key.get()
# TODO: send_webmentions() edits self.entity.unsent etc, so if it fails and hits here, those values may be lost mid flight!
@@ -719,8 +720,8 @@ def record_source_webmention(self, endpoint, target):
"""Sets this source's last_webmention_sent and maybe webmention_endpoint.
Args:
- endpoint: str, URL
- target: str, URL
+ endpoint (str): URL
+ target (str): URL
"""
g.source = g.source.key.get()
logger.info('Setting last_webmention_sent')
@@ -743,7 +744,7 @@ class PropagateResponse(SendWebmentions):
Request parameters:
- * response_key: string key of :class:`models.Response` entity
+ * response_key (str): key of :class:`models.Response` entity
"""
def dispatch_request(self):
@@ -803,7 +804,7 @@ class PropagateBlogPost(SendWebmentions):
Request parameters:
- * key: string key of :class:`models.BlogPost` entity
+ * key (str): key of :class:`models.BlogPost` entity
"""
def dispatch_request(self):
diff --git a/tests/test_blog_webmention.py b/tests/test_blog_webmention.py
index 0c788f54..95606b44 100644
--- a/tests/test_blog_webmention.py
+++ b/tests/test_blog_webmention.py
@@ -1,6 +1,4 @@
-# coding=utf-8
-"""Unit tests for blog_webmention.py.
-"""
+"""Unit tests for blog_webmention.py."""
import urllib.request, urllib.parse, urllib.error
from mox3 import mox
diff --git a/tests/test_blogger.py b/tests/test_blogger.py
index 8a22e233..b24e150f 100644
--- a/tests/test_blogger.py
+++ b/tests/test_blogger.py
@@ -1,6 +1,4 @@
-# coding=utf-8
-"""Unit tests for blogger.py.
-"""
+"""Unit tests for blogger.py."""
import urllib.request, urllib.parse, urllib.error
from flask import get_flashed_messages
diff --git a/tests/test_handlers.py b/tests/test_handlers.py
index f93311dd..19b70f04 100644
--- a/tests/test_handlers.py
+++ b/tests/test_handlers.py
@@ -1,6 +1,4 @@
-# coding=utf-8
-"""Unit tests for handlers.py.
-"""
+"""Unit tests for handlers.py."""
import html
import io
import urllib.request, urllib.error, urllib.parse
diff --git a/tests/test_medium.py b/tests/test_medium.py
index 28fd703f..6ad9125e 100644
--- a/tests/test_medium.py
+++ b/tests/test_medium.py
@@ -1,6 +1,4 @@
-# coding=utf-8
-"""Unit tests for medium.py.
-"""
+"""Unit tests for medium.py."""
import urllib.request, urllib.parse, urllib.error
from flask import get_flashed_messages
diff --git a/tests/test_models.py b/tests/test_models.py
index b3ec89bc..6ee1d9a8 100644
--- a/tests/test_models.py
+++ b/tests/test_models.py
@@ -1,6 +1,4 @@
-# coding=utf-8
-"""Unit tests for models.py.
-"""
+"""Unit tests for models.py."""
from datetime import datetime, timedelta, timezone
from unittest import skip
import copy
diff --git a/tests/test_original_post_discovery.py b/tests/test_original_post_discovery.py
index 8c26b805..f3956a29 100644
--- a/tests/test_original_post_discovery.py
+++ b/tests/test_original_post_discovery.py
@@ -1,6 +1,4 @@
-# coding=utf-8
-"""Unit tests for original_post_discovery.py
-"""
+"""Unit tests for original_post_discovery.py"""
from datetime import datetime, timezone
from string import hexdigits
diff --git a/tests/test_pages.py b/tests/test_pages.py
index 91281413..d7bcb21e 100644
--- a/tests/test_pages.py
+++ b/tests/test_pages.py
@@ -1,4 +1,3 @@
-# coding=utf-8
"""Unit tests for pages.py."""
from datetime import datetime, timedelta, timezone
import urllib.request, urllib.parse, urllib.error
diff --git a/tests/test_publish.py b/tests/test_publish.py
index 7aa8a4c8..dcba4955 100644
--- a/tests/test_publish.py
+++ b/tests/test_publish.py
@@ -1,6 +1,4 @@
-# coding=utf-8
-"""Unit tests for publish.py.
-"""
+"""Unit tests for publish.py."""
import html
import socket
import urllib.request, urllib.parse, urllib.error
diff --git a/tests/test_superfeedr.py b/tests/test_superfeedr.py
index 7c8df426..5373a09c 100644
--- a/tests/test_superfeedr.py
+++ b/tests/test_superfeedr.py
@@ -1,6 +1,4 @@
-# coding=utf-8
-"""Unit tests for superfeedr.py.
-"""
+"""Unit tests for superfeedr.py."""
from flask import Flask
from google.cloud.ndb.key import _MAX_KEYPART_BYTES
from google.cloud.ndb._datastore_types import _MAX_STRING_LENGTH
diff --git a/tests/test_tasks.py b/tests/test_tasks.py
index 8edd5105..b0e9cfc0 100644
--- a/tests/test_tasks.py
+++ b/tests/test_tasks.py
@@ -1,6 +1,4 @@
-# coding=utf-8
-"""Unit tests for tasks.py.
-"""
+"""Unit tests for tasks.py."""
import copy
import datetime
import http.client
diff --git a/tests/test_tumblr.py b/tests/test_tumblr.py
index ff54dd65..52a859db 100644
--- a/tests/test_tumblr.py
+++ b/tests/test_tumblr.py
@@ -1,6 +1,4 @@
-# coding=utf-8
-"""Unit tests for tumblr.py.
-"""
+"""Unit tests for tumblr.py."""
from flask import get_flashed_messages
from mox3 import mox
from oauth_dropins.tumblr import TumblrAuth
diff --git a/tests/test_util.py b/tests/test_util.py
index e284ec22..a29c5499 100644
--- a/tests/test_util.py
+++ b/tests/test_util.py
@@ -1,4 +1,3 @@
-# coding=utf-8
"""Unit tests for util.py."""
from datetime import datetime, timezone
import time
diff --git a/tests/test_wordpress_rest.py b/tests/test_wordpress_rest.py
index 2c3ce068..22de183f 100644
--- a/tests/test_wordpress_rest.py
+++ b/tests/test_wordpress_rest.py
@@ -1,6 +1,4 @@
-# coding=utf-8
-"""Unit tests for wordpress_rest.py.
-"""
+"""Unit tests for wordpress_rest.py."""
import urllib.request, urllib.parse, urllib.error
from flask import get_flashed_messages
diff --git a/tests/testutil.py b/tests/testutil.py
index 07930ae5..f3049783 100644
--- a/tests/testutil.py
+++ b/tests/testutil.py
@@ -1,6 +1,4 @@
-# coding=utf-8
-"""Unit test utilities.
-"""
+"""Unit test utilities."""
import copy
from datetime import datetime, timedelta, timezone
import logging
diff --git a/tumblr.py b/tumblr.py
index d2a151df..3cfb34ee 100644
--- a/tumblr.py
+++ b/tumblr.py
@@ -1,27 +1,30 @@
"""Tumblr + Disqus blog webmention implementation.
To use, go to your Tumblr dashboard, click Customize, Edit HTML, then put this
-in the head section:
+in the head section::
-
+
-http://disqus.com/api/docs/
-http://disqus.com/api/docs/posts/create/
-http://help.disqus.com/customer/portal/articles/466253-what-html-tags-are-allowed-within-comments-
-create returns id, can lookup by id w/getContext?
+Misc notes and background:
-guest post (w/arbitrary author, url):
-http://spirytoos.blogspot.com/2013/12/not-so-easy-posting-as-guest-via-disqus.html
-http://stackoverflow.com/questions/15416688/disqus-api-create-comment-as-guest
-http://jonathonhill.net/2013-07-11/disqus-guest-posting-via-api/
+* http://disqus.com/api/docs/
+* http://disqus.com/api/docs/posts/create/
+* http://help.disqus.com/customer/portal/articles/466253-what-html-tags-are-allowed-within-comments-
-can send url and not look up disqus thread id!
-http://stackoverflow.com/questions/4549282/disqus-api-adding-comment
-https://disqus.com/api/docs/forums/listThreads/
+Guest post (w/arbitrary author, url):
-test command line:
-curl localhost:8080/webmention/tumblr \
- -d 'source=http://localhost/response.html&target=http://snarfed.tumblr.com/post/60428995188/glen-canyon-http-t-co-fzc4ehiydp?foo=bar#baz'
+* http://spirytoos.blogspot.com/2013/12/not-so-easy-posting-as-guest-via-disqus.html
+* http://stackoverflow.com/questions/15416688/disqus-api-create-comment-as-guest
+* http://jonathonhill.net/2013-07-11/disqus-guest-posting-via-api/
+
+Can send url and not look up disqus thread id!
+
+* http://stackoverflow.com/questions/4549282/disqus-api-adding-comment
+* https://disqus.com/api/docs/forums/listThreads/
+
+Test command line::
+
+ curl localhost:8080/webmention/tumblr -d 'source=http://localhost/response.html&target=http://snarfed.tumblr.com/post/60428995188/glen-canyon-http-t-co-fzc4ehiydp?foo=bar#baz'
"""
import collections
import logging
@@ -89,8 +92,8 @@ def new(auth_entity=None, blog_name=None, **kwargs):
"""Creates and returns a :class:`Tumblr` for the logged in user.
Args:
- auth_entity: :class:`oauth_dropins.tumblr.TumblrAuth`
- blog_name: which blog. optional. passed to urls_and_domains.
+ auth_entity (oauth_dropins.tumblr.TumblrAuth):
+ blog_name (str): which blog, optional, passed to :meth:`urls_and_domains`
"""
urls, domains = Tumblr.urls_and_domains(auth_entity, blog_name=blog_name)
if not urls or not domains:
@@ -112,12 +115,12 @@ def urls_and_domains(auth_entity, blog_name=None):
"""Returns this blog's URL and domain.
Args:
- auth_entity: :class:`oauth_dropins.tumblr.TumblrAuth`
- blog_name: which blog. optional. matches the 'name' field for one of the
- blogs in auth_entity.user_json['user']['blogs'].
+ auth_entity (oauth_dropins.tumblr.TumblrAuth)
+ blog_name (str): which blog, optional, matches the ``name`` field for one
+ of the blogs in ``auth_entity.user_json['user']['blogs']``
Returns:
- ([string url], [string domain])
+ ([str url], [str domain]) tuple:
"""
for blog in json_loads(auth_entity.user_json).get('user', {}).get('blogs', []):
if ((blog_name and blog_name == blog.get('name')) or
@@ -160,13 +163,13 @@ def create_comment(self, post_url, author_name, author_url, content):
Must be implemented by subclasses.
Args:
- post_url: string
- author_name: string
- author_url: string
- content: string
+ post_url (str)
+ author_name (str)
+ author_url (str)
+ content (str)
Returns:
- JSON response dict with 'id' and other fields
+ dict: JSON response with ``id`` and other fields
"""
if not self.disqus_shortname:
resp = util.requests_get(post_url)
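A hedged sketch of extracting the Disqus shortname from the fetched post page;
the regex is an assumption based on embed URLs like
``https://SHORTNAME.disqus.com/embed.js``::

    import re

    match = re.search(r'([\w-]+)\.disqus\.com/embed\.js', resp.text)
    if match:
        self.disqus_shortname = match.group(1)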
@@ -207,13 +210,13 @@ def disqus_call(method, url, params, **kwargs):
"""Makes a Disqus API call.
Args:
- method: requests function to use, e.g. requests.get
- url: string
- params: query parameters
+ method (callable): requests function to use, e.g. :func:`requests.get`
+ url (str)
+ params (dict): query parameters
kwargs: passed through to method
Returns:
- dict, JSON response
+ dict: JSON response
"""
logger.info(f"Calling Disqus {url.split('/')[-2:]} with {params}")
params.update({
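A hypothetical call; the endpoint URL is from the Disqus docs linked above,
and the parameter names are assumptions::

    resp = disqus_call(util.requests_post,
                       'https://disqus.com/api/3.0/posts/create.json',
                       {'thread': thread_id, 'message': content})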
diff --git a/twitter.py b/twitter.py
index fe62077a..62ffeb82 100644
--- a/twitter.py
+++ b/twitter.py
@@ -1,9 +1,6 @@
"""Twitter source code and datastore model classes.
-Twitter's rate limiting window is currently 15m. A normal poll with nothing new
-hits /statuses/user_timeline and /search/tweets once each. Both allow 180 calls
-per window before they're rate limited.
-https://dev.twitter.com/docs/rate-limiting/1.1/limits
+The Twitter API is dead, and so is this code.
"""
import logging
@@ -48,7 +45,7 @@ def new(auth_entity=None, **kwargs):
"""Creates and returns a :class:`Twitter` entity.
Args:
- auth_entity: :class:`oauth_dropins.twitter.TwitterAuth`
+ auth_entity (oauth_dropins.twitter.TwitterAuth)
kwargs: property values
"""
assert 'username' not in kwargs
@@ -120,9 +117,9 @@ def get_like(self, activity_user_id, activity_id, like_user_id, **kwargs):
first, if we have one, and only re-scrape HTML as a fallback.
Args:
- activity_user_id: string id of the user who posted the original activity
- activity_id: string activity id
- like_user_id: string id of the user who liked the activity
+ activity_user_id (str): id of the user who posted the original activity
+ activity_id (str): activity id
+ like_user_id (str): id of the user who liked the activity
kwargs: passed to :meth:`granary.source.Source.get_comment`
"""
id = self.gr_source.tag_uri(f'{activity_id}_favorited_by_{like_user_id}')
@@ -133,14 +130,14 @@ def get_like(self, activity_user_id, activity_id, like_user_id, **kwargs):
def is_private(self):
"""Returns True if this Twitter account is protected.
- https://dev.twitter.com/rest/reference/get/users/show#highlighter_25173
- https://support.twitter.com/articles/14016
- https://support.twitter.com/articles/20169886
+ * https://dev.twitter.com/rest/reference/get/users/show#highlighter_25173
+ * https://support.twitter.com/articles/14016
+ * https://support.twitter.com/articles/20169886
"""
return json_loads(self.auth_entity.get().user_json).get('protected')
def canonicalize_url(self, url, activity=None, **kwargs):
- """Normalize /statuses/ to /status/.
+ """Normalize ``/statuses/`` to ``/status/``.
https://github.com/snarfed/bridgy/issues/618
"""
@@ -155,7 +152,7 @@ def start_oauth_flow(self, feature):
"""Redirects to Twitter's OAuth endpoint to start the OAuth flow.
Args:
- feature: 'listen' or 'publish'
+ feature (str): ``listen`` or ``publish``
"""
features = feature.split(',') if feature else []
for feature in features:
@@ -188,9 +185,9 @@ def finish(self, auth_entity, state=None):
class Start(oauth_twitter.Start, Auth):
- """Custom OAuth start handler so we can use access_type=read for state=listen.
+ """Custom OAuth start handler that uses ``access_type=read`` for ``state=listen``.
- Tweepy converts access_type to x_auth_access_type for Twitter's
+ Tweepy converts access_type to ``x_auth_access_type`` for Twitter's
oauth/request_token endpoint. Details:
https://dev.twitter.com/docs/api/1/post/oauth/request_token
"""
diff --git a/util.py b/util.py
index 0bfa89e8..811e4d76 100644
--- a/util.py
+++ b/util.py
@@ -1,6 +1,4 @@
-# coding=utf-8
-"""Misc utility constants and classes.
-"""
+"""Misc utility constants and classes."""
import binascii
import collections
import copy
@@ -108,7 +106,7 @@
def add_poll_task(source, now=False):
"""Adds a poll task for the given source entity.
- Pass now=True to insert a poll-now task.
+ Pass ``now=True`` to insert a ``poll-now`` task.
"""
if now:
queue = 'poll-now'
@@ -145,9 +143,9 @@ def add_task(queue, eta_seconds=None, **kwargs):
"""Adds a Cloud Tasks task for the given entity.
Args:
- queue: string, queue name
- entity: Source or Webmentions instance
- eta_seconds: integer, optional
+ queue (str): queue name
+ entity (Source or Webmentions)
+ eta_seconds (int): optional
kwargs: added to task's POST body (form-encoded)
"""
params = {
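For reference, a minimal Cloud Tasks sketch of what ``add_task`` does; the
project, location, and queue names are placeholders::

    import urllib.parse
    from google.cloud import tasks_v2

    client = tasks_v2.CloudTasksClient()
    task = {'app_engine_http_request': {
        'http_method': tasks_v2.HttpMethod.POST,
        'relative_uri': '/_ah/queue/poll',
        'body': urllib.parse.urlencode(params).encode(),
        'headers': {'Content-Type': 'application/x-www-form-urlencoded'},
    }}
    client.create_task(parent=client.queue_path('my-project', 'us-central1', 'poll'),
                       task=task)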
@@ -197,10 +195,9 @@ def redirect(path, code=302, logins=None):
Specifically, raises :class:`werkzeug.routing.RequestRedirect`.
Args:
- url: str
- code: int, HTTP status code
- logins: optional, list of :class:`util.Login` to be set in a Set-Cookie HTTP
- header
+ path (str)
+ code (int): HTTP status code
+ logins (list of util.Login): optional, set in a ``Set-Cookie`` HTTP header
"""
logger.info(f'Redirecting to {path}')
rr = Redirect(host_url(path))
@@ -212,11 +209,11 @@ def redirect(path, code=302, logins=None):
def webmention_endpoint_cache_key(url):
"""Returns cache key for a cached webmention endpoint for a given URL.
- Example: 'https snarfed.org /'
+ Example: ``https snarfed.org /``
- If the URL is the home page, ie path is / , the key includes a / at the end,
- so that we cache webmention endpoints for home pages separate from other pages.
- https://github.com/snarfed/bridgy/issues/701
+ If the URL is the home page, ie path is ``/``, the key includes a ``/`` at
+ the end, so that we cache webmention endpoints for home pages separate from
+ other pages. https://github.com/snarfed/bridgy/issues/701
"""
domain = util.domain_from_link(url)
scheme = urllib.parse.urlparse(url).scheme
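A sketch that reproduces the documented key format with only the standard
library (the real code uses ``util.domain_from_link``)::

    import urllib.parse

    def cache_key(url):
        parsed = urllib.parse.urlparse(url)
        key = f'{parsed.scheme} {parsed.netloc}'
        if parsed.path in ('', '/'):
            key += ' /'  # cache home page endpoints separately from other pages
        return key

    assert cache_key('https://snarfed.org/') == 'https snarfed.org /'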
@@ -234,7 +231,7 @@ def report_error(msg, **kwargs):
https://cloud.google.com/error-reporting/docs/reference/libraries#client-libraries-install-python
Args:
- msg: string
+ msg (str)
"""
try:
error_reporting_client.report(msg, **kwargs)
@@ -246,10 +243,10 @@ def report_error(msg, **kwargs):
def requests_get(url, **kwargs):
"""Wraps :func:`requests.get` with extra semantics and our user agent.
- If a server tells us a response will be too big (based on Content-Length), we
- hijack the response and return 599 and an error response body instead. We pass
- stream=True to :func:`requests.get` so that it doesn't fetch the response body
- until we access :attr:`requests.Response.content` (or
+ If a server tells us a response will be too big (based on ``Content-Length``),
+ we hijack the response and return 599 and an error response body instead. We
+ pass ``stream=True`` to :func:`requests.get` so that it doesn't fetch the
+ response body until we access :attr:`requests.Response.content` (or
:attr:`requests.Response.text`).
http://docs.python-requests.org/en/latest/user/advanced/#body-content-workflow
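A hedged sketch of the size check; ``MAX_RESPONSE_SIZE`` is a hypothetical
constant::

    import requests

    def requests_get(url, **kwargs):
        resp = requests.get(url, stream=True, **kwargs)
        length = resp.headers.get('Content-Length')
        if length and int(length) > MAX_RESPONSE_SIZE:
            resp.status_code = 599
            resp.reason = f'Content-Length {length} is over our limit'
        # with stream=True, the body isn't fetched until .content or .text
        return resp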
@@ -296,14 +293,15 @@ def get_webmention_target(url, resolve=True, replace_test_domains=True):
tuple will be True! TODO: check callers and reconsider this.
Args:
- url: string
- resolve: whether to follow redirects
- replace_test_domains: whether to replace test user domains with localhost
+ url (str)
+ resolve (bool): whether to follow redirects
+ replace_test_domains (bool): whether to replace test user domains with
+ localhost
Returns:
- (string url, string pretty domain, boolean) tuple. The boolean is
- True if we should send a webmention, False otherwise, e.g. if it's a bad
- URL, not text/html, or in the blocklist.
+ (str url, str pretty domain, bool should send) tuple: target info.
+ ``should send`` is True if we should send a webmention, False otherwise, eg
+ if it's a bad URL, not ``text/html``, or in the blocklist.
"""
url = util.clean_url(url)
try:
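A typical call under these semantics::

    url, domain, send = get_webmention_target('http://example.com/post')
    if send:
        ...  # queue or send a webmention to url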
@@ -333,7 +331,7 @@ def get_webmention_target(url, resolve=True, replace_test_domains=True):
def in_webmention_blocklist(domain):
- """Returns True if the domain or its root domain is in BLOCKLIST."""
+ """Returns True if the domain or its root domain is in ``BLOCKLIST``."""
domain = domain.lower()
return (util.domain_or_parent_in(domain, BLOCKLIST) or
(not appengine_info.LOCAL_SERVER and domain in LOCAL_HOSTS))
@@ -351,10 +349,10 @@ def prune_activity(activity, source):
to field here.
Args:
- activity: ActivityStreams activity dict
+ activity (dict): ActivityStreams activity
Returns:
- pruned activity dict
+ dict: pruned activity
"""
keep = ['id', 'url', 'content', 'fb_id', 'fb_object_id', 'fb_object_type']
if not as1.is_public(activity):
@@ -375,10 +373,10 @@ def prune_response(response):
"""Returns a response object dict with a few fields removed.
Args:
- response: ActivityStreams response object
+ response (dict): ActivityStreams response object
Returns:
- pruned response object
+ dict: pruned response object
"""
obj = response.get('object')
if obj:
@@ -389,13 +387,13 @@ def prune_response(response):
def replace_test_domains_with_localhost(url):
- """Replace domains in LOCALHOST_TEST_DOMAINS with localhost for local testing.
+ """Replace domains in ``LOCALHOST_TEST_DOMAINS` with localhost for testing.
Args:
- url: a string
+ url (str)
Returns:
- a string with certain well-known domains replaced by localhost
+ str: url with certain well-known domains replaced by ``localhost``
"""
if url and appengine_info.LOCAL_SERVER:
for test_domain, local_domain in LOCALHOST_TEST_DOMAINS:
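A minimal sketch, assuming ``LOCALHOST_TEST_DOMAINS`` holds
``(test domain, local domain)`` pairs as the loop above suggests; the example
entry is hypothetical::

    LOCALHOST_TEST_DOMAINS = [('snarfed.org', 'localhost')]

    def replace_test_domains_with_localhost(url):
        for test_domain, local_domain in LOCALHOST_TEST_DOMAINS:
            url = url.replace(f'://{test_domain}', f'://{local_domain}')
        return url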
@@ -412,16 +410,17 @@ def host_url(path_query=None):
def load_source(error_fn=None):
- """Loads a source from the `source_key` or `key` query parameter.
+ """Loads a source from the ``source_key`` or ``key`` query parameter.
Expects the query parameter value to be a URL-safe key. Returns HTTP 400 if
neither parameter is provided or the source doesn't exist.
Args:
- error_fn: callable to be called with errors. Takes one parameter, the string
- error message.
+ error_fn (callable): to be called with errors. Takes one parameter, the
+ string error message.
- Returns: :class:`models.Source`
+ Returns:
+ models.Source:
"""
logger.debug(f'Params: {list(request.values.items())}')
if error_fn is None:
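A hedged sketch of the lookup itself::

    from flask import request
    from google.cloud import ndb

    key_str = request.values.get('source_key') or request.values.get('key')
    if not key_str:
        error_fn('Source key not provided')
    source = ndb.Key(urlsafe=key_str).get()
    if not source:
        error_fn(f'No source found for key {key_str}')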
@@ -443,18 +442,19 @@ def load_source(error_fn=None):
def maybe_add_or_delete_source(source_cls, auth_entity, state, **kwargs):
- """Adds or deletes a source if auth_entity is not None.
+ """Adds or deletes a source if ``auth_entity`` is not None.
Used in each source's oauth-dropins :meth:`Callback.finish()` and
:meth:`Callback.get()` methods, respectively.
Args:
- source_cls: source class, e.g. :class:`instagram.Instagram`
- auth_entity: ouath-dropins auth entity
- state: string, OAuth callback state parameter. a JSON serialized dict
- with operation, feature, and an optional callback URL. For deletes,
- it will also include the source key
- kwargs: passed through to the source_cls constructor
+ source_cls (models.Source subclass): eg
+ :class:`instagram.Instagram`
+ auth_entity (oauth_dropins.models.BaseAuth subclass instance): auth entity
+ state (str): OAuth callback ``state`` parameter. a JSON serialized dict with
+ ``operation``, ``feature``, and an optional callback URL. For deletes, it
+ will also include the source key
+ kwargs: passed through to the ``source_cls`` constructor
Returns:
source entity if it was created or updated, otherwise None
@@ -548,10 +548,12 @@ def construct_state_param_for_add(state=None, **kwargs):
"""Construct the state parameter if one isn't explicitly passed in.
The following keys are common:
- - operation: 'add' or 'delete'
- - feature: 'listen', 'publish', or 'webmention'
- - callback: an optional external callback, that we will redirect to at the end of the authorization handshake
- - source: the source key, only applicable to deletes
+
+ * ``operation``: ``add`` or ``delete``
+ * ``feature``: ``listen``, ``publish``, or ``webmention``
+ * ``callback``: an optional external callback URL that we will redirect to at
+ the end of the authorization handshake
+ * ``source``: the source entity key, only applicable to deletes
"""
state_obj = util.decode_oauth_state(state)
if not state_obj:
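For example, assuming an ``encode_oauth_state`` counterpart to the
``decode_oauth_state`` call above::

    state = util.encode_oauth_state({
        'operation': 'add',
        'feature': 'listen',
        'callback': 'https://example.com/done',  # optional
    })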
@@ -571,7 +573,7 @@ def get_logins():
The logins cookie is set in :meth:`redirect` and :class:`Redirect`.
Returns:
- list of :class:`Login` objects
+ list of Login:
"""
logins_str = request.cookies.get('logins')
if not logins_str:
@@ -593,11 +595,11 @@ def get_logins():
def preprocess_source(source):
"""Prepares a source entity for rendering in the source.html template.
- - convert image URLs to https if we're serving over SSL
- - set 'website_links' attr to list of pretty HTML links to domain_urls
+ * converts image URLs to https if we're serving over SSL
+ * sets ``website_links`` attr to list of pretty HTML links to domain_urls
Args:
- source: :class:`models.Source` entity
+ source (models.Source): entity
"""
if source.picture:
source.picture = util.update_scheme(source.picture, request)
@@ -611,9 +613,9 @@ def oauth_starter(oauth_start_view, **kwargs):
"""Returns an oauth-dropins start view that injects the state param.
Args:
- oauth_start_view: oauth-dropins :class:`Start` to use,
- e.g. :class:`oauth_dropins.twitter.Start`.
- kwargs: passed to :meth:`construct_state_param_for_add()`
+ oauth_start_view (oauth_dropins.views.Start subclass instance):
+ eg :class:`oauth_dropins.twitter.Start`.
+ kwargs: passed to :meth:`construct_state_param_for_add`
"""
class Start(oauth_start_view):
def redirect_url(self, state=None, **ru_kwargs):
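Filled out, the wrapper presumably looks like this; the ``redirect_url`` body
is an assumption, only the signatures appear in this hunk::

    def oauth_starter(oauth_start_view, **kwargs):
        class Start(oauth_start_view):
            def redirect_url(self, state=None, **ru_kwargs):
                return super().redirect_url(
                    state=construct_state_param_for_add(state, **kwargs),
                    **ru_kwargs)
        return Start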
@@ -624,12 +626,12 @@ def redirect_url(self, state=None, **ru_kwargs):
def unwrap_t_umblr_com(url):
- """If url is a t.umblr.com short link, extract its destination URL.
+ """If url is a ``t.umblr.com`` short link, extract its destination URL.
- Otherwise, return url unchanged.
+ Otherwise, return ``url`` unchanged.
- Not in tumblr.py since models imports superfeedr, so it would be a circular
- import.
+ Not in ``tumblr.py`` since ``models`` imports ``superfeedr``, so it would be a
+ circular import.
Background: https://github.com/snarfed/bridgy/issues/609
"""
diff --git a/webmention.py b/webmention.py
index 109858fc..2e00fad0 100644
--- a/webmention.py
+++ b/webmention.py
@@ -38,10 +38,8 @@ class Webmention(View):
"""Webmention base view.
Attributes:
-
- * source: the :class:`models.Source` for this webmention
- * entity: the :class:`models.Publish` or :class:`models.Webmention` entity for
- this webmention
+ * source (models.Source): for this webmention
+ * entity (models.Publish or models.Webmention): for this webmention
"""
source = None
entity = None
@@ -49,19 +47,19 @@ class Webmention(View):
def fetch_mf2(self, url, id=None, require_mf2=True, raise_errors=False):
"""Fetches a URL and extracts its mf2 data.
- Side effects: sets :attr:`entity`\ .html on success, calls :attr:`error()`
- on errors.
+ Side effects: sets ``entity.html`` on success, calls :meth:`error` on
+ errors.
Args:
- url: string
- id: string, optional id of specific element to extract and parse. defaults
+ url (str)
+ id (str): optional id of specific element to extract and parse. defaults
to the whole page.
require_mf2: boolean, whether to return error if no mf2 are found
raise_errors: boolean, whether to let error exceptions propagate up or
handle them
Returns:
- (:class:`requests.Response`, mf2 data dict) on success, None on failure
+ (requests.Response, mf2 data dict) tuple:
"""
try:
resp = util.requests_get(url)
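A minimal sketch of the fetch-and-parse step with mf2py; the ``raise_errors``
handling is elided::

    import mf2py

    resp = util.requests_get(url)
    resp.raise_for_status()
    data = mf2py.parse(doc=resp.text, url=resp.url)
    if require_mf2 and not data.get('items'):
        return self.error(f'No microformats2 found in {resp.url}')
    return resp, data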
@@ -122,13 +120,13 @@ def error(self, error, html=None, status=400, data=None, log_exception=False,
"""Handle an error. May be overridden by subclasses.
Args:
- error: string human-readable error message
- html: string HTML human-readable error message
- status: int HTTP response status code
- data: mf2 data dict parsed from source page
- log_exception: boolean, whether to include a stack trace in the log msg
- report: boolean, whether to report to StackDriver Error Reporting
- extra_json: dict to be merged into the JSON response body
+ error (str): human-readable error message
+ html (str): HTML human-readable error message
+ status (int): HTTP response status code
+ data (dict): mf2 data parsed from source page
+ log_exception (bool): whether to include a stack trace in the log msg
+ report (bool): whether to report to StackDriver Error Reporting
+ extra_json (dict): to be merged into the JSON response body
"""
if self.entity and self.entity.status == 'new':
self.entity.status = 'failed'
diff --git a/wordpress_rest.py b/wordpress_rest.py
index 3dcc9852..f97da37b 100644
--- a/wordpress_rest.py
+++ b/wordpress_rest.py
@@ -1,22 +1,25 @@
"""WordPress REST API (including WordPress.com) hosted blog implementation.
To use, go to your WordPress.com blog's admin console, then go to Appearance,
-Widgets, add a Text widget, and put this in its text section:
+Widgets, add a Text widget, and put this in its text section::
-
+
-(not this, it breaks :/)
-
+Not this, it breaks::
+
+
https://developer.wordpress.com/docs/api/
-create returns id, can lookup by id
-test command line:
-curl localhost:8080/webmention/wordpress \
- -d 'source=http://localhost/response.html&target=http://ryandc.wordpress.com/2013/03/24/mac-os-x/'
+Create returns id, can look up by id.
+
+Test command line::
+
+ curl localhost:8080/webmention/wordpress -d 'source=http://localhost/response.html&target=http://ryandc.wordpress.com/2013/03/24/mac-os-x/'
+
+Making an API call with an access token from the command line::
-making an API call with an access token from the command line:
-curl -H 'Authorization: Bearer [TOKEN]' URL...
+ curl -H 'Authorization: Bearer [TOKEN]' URL...
"""
import collections
import logging
@@ -68,7 +71,7 @@ def new(auth_entity=None, **kwargs):
"""Creates and returns a WordPress for the logged in user.
Args:
- auth_entity: :class:`oauth_dropins.wordpress_rest.WordPressAuth`
+ auth_entity (oauth_dropins.wordpress_rest.WordPressAuth):
"""
site_info = WordPress.get_site_info(auth_entity)
if site_info is None:
@@ -98,26 +101,26 @@ def urls_and_domains(self, auth_entity):
auth_entity: unused
Returns:
- ([string url], [string domain])
+ ([str url], [str domain]) tuple:
"""
return [self.url], [self.key_id()]
def create_comment(self, post_url, author_name, author_url, content):
"""Creates a new comment in the source silo.
- If the last part of the post URL is numeric, e.g. http://site/post/123999,
- it's used as the post id. Otherwise, we extract the last part of
- the path as the slug, e.g. http: / / site / post / the-slug,
- and look up the post id via the API.
+ If the last part of the post URL is numeric, e.g.
+ ``http://site/post/123999``\, it's used as the post id. Otherwise, we
+ extract the last part of the path as the slug, e.g.
+ ``http://site/post/the-slug``\, and look up the post id via the API.
Args:
- post_url: string
- author_name: string
- author_url: string
- content: string
+ post_url (str)
+ author_name (str)
+ author_url (str)
+ content (str)
Returns:
- JSON response dict with 'id' and other fields
+ dict: JSON response with ``id`` and other fields
"""
auth_entity = self.auth_entity.get()
logger.info(f'Determining WordPress.com post id for {post_url}')
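A hedged sketch of the id-or-slug logic; ``API_POST_SLUG_URL`` is a
hypothetical constant for the slug lookup endpoint::

    import urllib.parse

    last = urllib.parse.urlparse(post_url).path.rstrip('/').rsplit('/', 1)[-1]
    if last.isdigit():
        post_id = last
    else:
        resp = self.urlopen(auth_entity,
                            API_POST_SLUG_URL % (auth_entity.blog_id, last))
        post_id = resp['ID']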
@@ -163,10 +166,10 @@ def get_site_info(cls, auth_entity):
"""Fetches the site info from the API.
Args:
- auth_entity: :class:`oauth_dropins.wordpress_rest.WordPressAuth`
+ auth_entity (oauth_dropins.wordpress_rest.WordPressAuth)
Returns:
- site info dict, or None if API calls are disabled for this blog
+ dict: site info, or None if API calls are disabled for this blog
"""
try:
return cls.urlopen(auth_entity, API_SITE_URL % auth_entity.blog_id)