Overhaul of package underneath for python3 only

Nabil Freij 2019-08-19 22:56:57 +01:00
parent 4e9041f0bb
commit 2e3dad57ef
31 changed files with 1591 additions and 1104 deletions

View file

@ -12,20 +12,6 @@ docs-install: &docs-install
version: 2
jobs:
egg-info-27:
docker:
- image: circleci/python:2.7
steps:
- checkout
- run: python setup.py egg_info
egg-info-35:
docker:
- image: circleci/python:3.5
steps:
- checkout
- run: python setup.py egg_info
egg-info-36:
docker:
- image: circleci/python:3.6
@ -40,6 +26,31 @@ jobs:
- checkout
- run: python setup.py egg_info
twine-check:
docker:
- image: circleci/python:3.7
steps:
- checkout
- run: python setup.py sdist
- run: python -m pip install -U --user --force-reinstall twine
- run: python -m twine check dist/*
pip-install:
docker:
- image: continuumio/miniconda3
steps:
- checkout
- run: *apt-install
- run:
name: Update pip
command: pip install -U pip
- run:
name: Install Ablog
command: |
pip install --progress-bar off .[all]
pip install -e .[all]
python setup.py develop
html-docs:
docker:
- image: continuumio/miniconda3
@ -48,6 +59,7 @@ jobs:
- checkout
- run: *apt-install
- run: *docs-install
- run: pip install -e .[all]
- run: python setup.py build_sphinx
- store_artifacts:
path: build/sphinx/html/
@ -61,11 +73,14 @@ workflows:
egg-info:
jobs:
- egg-info-27
- egg-info-35
- egg-info-36
- egg-info-37
misc:
jobs:
- twine-check
- pip-install
documentation:
jobs:
- html-docs

.github/workflows/release.yml (new file, 26 lines added)
View file

@ -0,0 +1,26 @@
on: release
name: Release to PyPi
jobs:
tag-filter:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@master
- name: tag-filter
uses: actions/bin/filter@master
with:
args: tag
- name: check
uses: ross/python-actions/setup-py/3.7@master
with:
args: check
- name: sdist
uses: ross/python-actions/setup-py/3.7@master
with:
args: sdist
- name: upload
uses: ross/python-actions/twine@master
env:
TWINE_PASSWORD: ${{ secrets.TWINE_PASSWORD }}
TWINE_USERNAME: ${{ secrets.TWINE_USERNAME }}
with:
args: upload ./dist/sunpy-sphinx-theme-*.tar.gz

.gitignore (221 lines changed)
View file

@ -1,23 +1,228 @@
### Python: https://raw.githubusercontent.com/github/gitignore/master/Python.gitignore
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
MANIFEST
docs/manual/.ipynb_checkpoints/*
*$py.class
# C extensions
*.so
# Distribution / packaging
.Python
pip-wheel-metadata/
build/
develop-eggs/
dist/
demo/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST
# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec
# Installer logs
pip-log.txt
pip-delete-this-directory.txt
# Unit test / coverage reports
htmlcov/
.tox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
.hypothesis/
.pytest_cache/
junit/
# Translations
*.mo
*.pot
# Django stuff:
*.log
local_settings.py
db.sqlite3
# Flask stuff:
instance/
.webassets-cache
# Scrapy stuff:
.scrapy
# Sphinx documentation
docs/_*
docs/.*
docs/tmp/
docs/api/
docs/_build/
# PyBuilder
target/
# Jupyter Notebook
.ipynb_checkpoints
# pyenv
.python-version
# celery beat schedule file
celerybeat-schedule
# SageMath parsed files
*.sage.py
# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/
# Spyder project settings
.spyderproject
.spyproject
# Rope project settings
.ropeproject
# mkdocs documentation
/site
# mypy
.mypy_cache/
### https://raw.github.com/github/gitignore/master/Global/OSX.gitignore
.DS_Store
.AppleDouble
.LSOverride
# Icon must ends with two \r.
Icon
# Thumbnails
._*
# Files that might appear on external disk
.Spotlight-V100
.Trashes
### Linux: https://raw.githubusercontent.com/github/gitignore/master/Global/Linux.gitignore
*~
.vscode
# temporary files which can be created if a process still has a handle open of a deleted file
.fuse_hidden*
# KDE directory preferences
.directory
# Linux trash folder which might appear on any partition or disk
.Trash-*
# .nfs files are created when an open file is removed but is still being accessed
.nfs*
### MacOS: https://raw.githubusercontent.com/github/gitignore/master/Global/macOS.gitignore
# General
.DS_Store
.AppleDouble
.LSOverride
# Icon must end with two \r
Icon
# Thumbnails
._*
# Files that might appear in the root of a volume
.DocumentRevisions-V100
.fseventsd
.Spotlight-V100
.TemporaryItems
.Trashes
.VolumeIcon.icns
.com.apple.timemachine.donotpresent
# Directories potentially created on remote AFP share
.AppleDB
.AppleDesktop
Network Trash Folder
Temporary Items
.apdisk
### Windows: https://raw.githubusercontent.com/github/gitignore/master/Global/Windows.gitignore
# Windows thumbnail cache files
Thumbs.db
ehthumbs.db
ehthumbs_vista.db
# Dump file
*.stackdump
# Folder config file
[Dd]esktop.ini
# Recycle Bin used on file shares
$RECYCLE.BIN/
# Windows Installer files
*.cab
*.msi
*.msix
*.msm
*.msp
# Windows shortcuts
*.lnk
### VScode: https://raw.githubusercontent.com/github/gitignore/master/Global/VisualStudioCode.gitignore
.vscode/*
### Extra Python Items and SunPy Specific
.hypothesis
.pytest_cache
sunpydata.sqlite
sunpydata.sqlite-journal
sunpy/_compiler.c
sunpy/cython_version.py
docs/_build
docs/generated
docs/api/
docs/whatsnew/latest_changelog.txt
examples/**/*.asdf
# This is incase you run the figure tests
figure_test_images*
tags
### Pycharm(?)
.idea
# Release script
.github_cache
docs/_build/
docs/.doctrees/
docs/_website/
docs/_latex/
test/

View file

@ -11,18 +11,11 @@ sudo: false
env:
matrix:
- PYTHON_VERSION=2.7 SPHINX_VERSION=1.6
- PYTHON_VERSION=2.7 SPHINX_VERSION=1.8
- PYTHON_VERSION=3.6 SPHINX_VERSION=2.0
- PYTHON_VERSION=3.6 SPHINX_VERSION=2.1
- PYTHON_VERSION=3.5 SPHINX_VERSION=1.6
- PYTHON_VERSION=3.5 SPHINX_VERSION=1.8
- PYTHON_VERSION=3.6 SPHINX_VERSION=1.6
- PYTHON_VERSION=3.6 SPHINX_VERSION=1.8
- PYTHON_VERSION=3.7 SPHINX_VERSION=1.6
- PYTHON_VERSION=3.7 SPHINX_VERSION=1.7
- PYTHON_VERSION=3.7 SPHINX_VERSION=1.8
- PYTHON_VERSION=3.7 SPHINX_VERSION=2.0
- PYTHON_VERSION=3.7 SPHINX_VERSION=2.1
global:
- LOCALE=default
@ -33,7 +26,7 @@ env:
install:
- git clone git://github.com/astropy/ci-helpers.git
- source ci-helpers/travis/setup_conda.sh
- pip install -e .
- pip install -e .[all]
script:
- make tests

View file

@ -1,6 +1,8 @@
include README.rst
include LICENSE.rst
include setup.py
include setup.cfg
include pyproject.toml
include ablog/*.py
include ablog/templates/*.html
include ablog/locale/sphinx.pot

View file

@ -2,7 +2,7 @@
demo:
rm -rf demo
printf "demo\nABlog\nABlog Team\nhttp://ablog.readthedocs.org" | ablog start
printf "demo\nABlog\nABlog Team\nhttps://ablog.readthedocs.org" | ablog start
rebuild:
cd docs; watchmedo shell-command --patterns='*.rst' --command='ablog build' --recursive
@ -20,9 +20,9 @@ test3:
cd docs; ablog build -T -b pickle
test4:
mkdir -p test; cd test; printf "\nABlog\nABlog Team\nhttp://ablog.readthedocs.org" | ablog start; ablog build
mkdir -p test; cd test; printf "\nABlog\nABlog Team\nhttps://ablog.readthedocs.org" | ablog start; ablog build
test5:
mkdir -p test; cd test; printf "ablog\nABlog\nABlog Team\nhttp://ablog.readthedocs.org" | ablog start; cd ablog; ablog build
mkdir -p test; cd test; printf "ablog\nABlog\nABlog Team\nhttps://ablog.readthedocs.org" | ablog start; cd ablog; ablog build
tests: test test1 test2 test3 test4 test5

View file

@ -12,7 +12,8 @@ ABlog for Sphinx
Note
----
Please note that is an official new home of `Ahmet Bakan's Ablog Sphinx extension <https://github.com/abakan/ablog/>`_. This version is maintined with the aim to keep it working for SunPy's website and thus new features are unlikely.
Please note that this is the official new home of `Ahmet Bakan's Ablog Sphinx extension <https://github.com/abakan/ablog/>`__.
This version is maintained with the aim of keeping it working for SunPy's website, and thus new features are unlikely.
ABlog
-----
@ -27,15 +28,13 @@ ABlog is a Sphinx extension that converts any documentation or personal website
* `Easy GitHub Pages deploys`_
* `Jupyter Notebook Support for blog posts`_
.. _Atom feeds: http://ablog.readthedocs.org/blog/atom.xml
.. _Archive pages: http://ablog.readthedocs.org/blog/
.. _Blog sidebars: http://ablog.readthedocs.org/manual/ablog-configuration-options/#sidebars
.. _Disqus integration: http://ablog.readthedocs.org/manual/ablog-configuration-options/#disqus-integration
.. _Font-Awesome integration: http://ablog.readthedocs.org/manual/ablog-configuration-options/#fa
.. _Easy GitHub Pages deploys: http://ablog.readthedocs.org/manual/deploy-to-github-pages/
.. _Jupiter Notebook Support for blog posts: http://ablog.readthedocs.org/manual/notebook_support/
.. _installation:
.. _Atom feeds: https://ablog.readthedocs.org/blog/atom.xml
.. _Archive pages: https://ablog.readthedocs.org/blog/
.. _Blog sidebars: https://ablog.readthedocs.org/manual/ablog-configuration-options/#sidebars
.. _Disqus integration: https://ablog.readthedocs.org/manual/ablog-configuration-options/#disqus-integration
.. _Font-Awesome integration: https://ablog.readthedocs.org/manual/ablog-configuration-options/#fa
.. _Easy GitHub Pages deploys: https://ablog.readthedocs.org/manual/deploy-to-github-pages/
.. _Jupyter Notebook Support for blog posts: https://ablog.readthedocs.org/manual/notebook_support/
Installation
------------
@ -49,13 +48,13 @@ or anaconda_::
conda config --add channels conda-forge
conda install ablog
This will also install `Sphinx <http://sphinx-doc.org/>`_, Alabaster_, Werkzeug_, and Invoke_ respectively required for building your website, making it look good, generating feeds, and running deploy commands.
This will also install `Sphinx <http://sphinx-doc.org/>`__, Alabaster_, Werkzeug_, and Invoke_ respectively required for building your website, making it look good, generating feeds, and running deploy commands.
.. _pip: https://pip.pypa.io
.. _pip: https://pip.pypa.io
.. _anaconda: https://www.anaconda.com/
.. _Werkzeug: http://werkzeug.pocoo.org/
.. _Werkzeug: https://werkzeug.pocoo.org/
.. _Alabaster: https://github.com/bitprophet/alabaster
.. _Invoke: http://www.pyinvoke.org/
.. _Invoke: https://www.pyinvoke.org/
Getting Started
---------------
@ -81,12 +80,12 @@ If you already have a project, enable blogging by making following changes in ``
# 2b. if `templates_path` is defined
templates_path.append(ablog.get_html_templates_path())
.. _ABlog Quick Start: http://ablog.readthedocs.org/manual/ablog-quick-start
.. _ABlog Quick Start: https://ablog.readthedocs.org/manual/ablog-quick-start
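Pulling the fragments above together, a minimal ``conf.py`` for an existing project might look roughly like this sketch (the extension name ``ablog`` and the step numbering are assumptions, since only fragment 2b appears in this hunk):

    # conf.py -- illustrative sketch, not taken verbatim from the ABlog docs
    import ablog

    extensions = ["ablog"]                                    # 1. enable the extension (assumed name)
    templates_path = [ablog.get_html_templates_path()]        # 2a. if templates_path is not yet defined
    # templates_path.append(ablog.get_html_templates_path())  # 2b. if it already is (shown above)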
How it works
------------
If you are new to Sphinx_ and reStructuredText markup language, you might find `reStructuredText Primer`_ useful.
If you are new to Sphinx and reStructuredText markup language, you might find `reStructuredText Primer`_ useful.
Once you have content (in ``.rst`` files), you can post *any page* using the ``post`` directive as follows:
.. code-block:: rst
@ -110,27 +109,4 @@ You can also include a list of posts using ``postlist`` directive:
:format: {title}
:sort:
For ABlog documentation, this converts to the following where you can find more about configuring and using ABlog:
.. postlist::
:category: Manual
:list-style: circle
:format: {title}
:sort:
.. _reStructuredText Primer: http://sphinx-doc.org/rest.html
.. only:: html
.. image:: https://secure.travis-ci.org/sunpy/ablog.png?branch=devel
:target: http://travis-ci.org/#!/sunpy/ablog
.. image:: https://readthedocs.org/projects/ablog/badge/?version=latest
:target: http://ablog.readthedocs.org/
.. toctree::
:hidden:
:glob:
*/*

View file

@ -1,87 +1,103 @@
# -*- coding: utf-8 -*-
"""ABlog for Sphinx"""
"""
ABlog for Sphinx.
"""
from __future__ import absolute_import, division, print_function
import os
from .blog import Blog, CONFIG
from .post import (PostDirective, PostListDirective, UpdateDirective,
UpdateNode, process_posts, process_postlist, purge_posts,
generate_archive_pages, generate_atom_feeds,
missing_reference)
from .blog import CONFIG, Blog
from .post import (
PostDirective,
PostListDirective,
UpdateDirective,
UpdateNode,
generate_archive_pages,
generate_atom_feeds,
missing_reference,
process_postlist,
process_posts,
purge_posts,
)
from .version import version as __version__
__version__ = '0.9.5'
__all__ = ['setup']
__all__ = ["setup"]
def anchor(post):
"""Return anchor string for posts that arepage sections."""
"""
Return anchor string for posts that are page sections.
"""
if post.section:
return '#' + post.section
return "#" + post.section
else:
return ''
return ""
def builder_support(builder):
"""Return True when builder is supported. Supported builders output in
html format, but exclude `PickleHTMLBuilder` and `JSONHTMLBuilder`,
which run into issues when serializing blog objects."""
"""
Return True when builder is supported.
if hasattr(builder, 'builder'):
Supported builders output in html format, but exclude
`PickleHTMLBuilder` and `JSONHTMLBuilder`, which run into issues
when serializing blog objects.
"""
if hasattr(builder, "builder"):
builder = builder.builder
not_supported = set(['json', 'pickle'])
return builder.format == 'html' and not builder.name in not_supported
not_supported = {"json", "pickle"}
return builder.format == "html" and not builder.name in not_supported
def html_page_context(app, pagename, templatename, context, doctree):
if builder_support(app):
context['ablog'] = blog = Blog(app)
context['anchor'] = anchor
context["ablog"] = blog = Blog(app)
context["anchor"] = anchor
# following is already available for archive pages
if blog.blog_baseurl and 'feed_path' not in context:
context['feed_path'] = blog.blog_path
context['feed_title'] = blog.blog_title
if blog.blog_baseurl and "feed_path" not in context:
context["feed_path"] = blog.blog_path
context["feed_title"] = blog.blog_title
def setup(app):
"""Setup ABlog extension."""
"""
Setup ABlog extension.
"""
for args in CONFIG:
app.add_config_value(*args)
app.add_directive('post', PostDirective)
app.add_directive('postlist', PostListDirective)
app.add_directive("post", PostDirective)
app.add_directive("postlist", PostListDirective)
app.connect('doctree-read', process_posts)
app.connect("doctree-read", process_posts)
app.connect('env-purge-doc', purge_posts)
app.connect('doctree-resolved', process_postlist)
app.connect('missing-reference', missing_reference)
app.connect('html-collect-pages', generate_archive_pages)
app.connect('html-collect-pages', generate_atom_feeds)
app.connect('html-page-context', html_page_context)
app.connect("env-purge-doc", purge_posts)
app.connect("doctree-resolved", process_postlist)
app.connect("missing-reference", missing_reference)
app.connect("html-collect-pages", generate_archive_pages)
app.connect("html-collect-pages", generate_atom_feeds)
app.connect("html-page-context", html_page_context)
app.add_directive('update', UpdateDirective)
app.add_node(UpdateNode,
html=(lambda s, n: s.visit_admonition(n),
lambda s, n: s.depart_admonition(n)),
latex=(lambda s, n: s.visit_admonition(n),
lambda s, n: s.depart_admonition(n)),
)
app.add_directive("update", UpdateDirective)
app.add_node(
UpdateNode,
html=(lambda s, n: s.visit_admonition(n), lambda s, n: s.depart_admonition(n)),
latex=(lambda s, n: s.visit_admonition(n), lambda s, n: s.depart_admonition(n)),
)
pkgdir = os.path.abspath(os.path.dirname(__file__))
locale_dir = os.path.join(pkgdir, 'locale')
locale_dir = os.path.join(pkgdir, "locale")
app.config.locale_dirs.append(locale_dir)
return {'version': __version__} # identifies the version of our extension
return {"version": __version__} # identifies the version of our extension
def get_html_templates_path():
"""Return path to ABlog templates folder."""
"""
Return path to ABlog templates folder.
"""
pkgdir = os.path.abspath(os.path.dirname(__file__))
return os.path.join(pkgdir, 'templates')
return os.path.join(pkgdir, "templates")

View file

@ -1,26 +1,27 @@
# -*- coding: utf-8 -*-
"""Classes for handling posts and archives."""
"""
Classes for handling posts and archives.
"""
from __future__ import absolute_import, division, print_function
import os
import re
import sys
import datetime as dtmod
try:
from urlparse import urljoin
except ImportError:
from urllib.parse import urljoin
from datetime import datetime
from unicodedata import normalize
from docutils import nodes
from docutils.io import StringOutput
from docutils.utils import new_document
from sphinx import addnodes
from sphinx.util.osutil import relative_uri
try:
from urlparse import urljoin
except ImportError:
from urllib.parse import urljoin
if sys.version_info >= (3, 0):
text_type = str
re_flag = 0
@ -31,64 +32,61 @@ else:
text_type = unicode
re_flag = re.UNICODE
__all__ = ['Blog', 'Post', 'Collection']
__all__ = ["Blog", "Post", "Collection"]
def slugify(string):
"""Slugify *s*."""
"""
Slugify *s*.
"""
string = text_type(string)
string = normalize('NFKD', string)
string = normalize("NFKD", string)
if re_flag is None:
string = re.sub(r'[^\w\s-]', '', string).strip().lower()
return re.sub(r'[-\s]+', '-', string)
string = re.sub(r"[^\w\s-]", "", string).strip().lower()
return re.sub(r"[-\s]+", "-", string)
else:
string = re.sub(r'[^\w\s-]', '', string, flags=re_flag).strip().lower()
return re.sub(r'[-\s]+', '-', string, flags=re_flag)
string = re.sub(r"[^\w\s-]", "", string, flags=re_flag).strip().lower()
return re.sub(r"[-\s]+", "-", string, flags=re_flag)
def os_path_join(path, *paths):
return os.path.join(path, *paths).replace(os.path.sep, '/')
return os.path.join(path, *paths).replace(os.path.sep, "/")
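For illustration, the two helpers above behave roughly as follows on Python 3 (where ``re_flag`` is ``0``):

    slugify("Hello, World!")                 # -> "hello-world": punctuation dropped, whitespace becomes "-"
    slugify("Getting Started")               # -> "getting-started"
    os_path_join("blog", "drafts", "index")  # -> "blog/drafts/index", using "/" even on Windows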
DEBUG = True
CONFIG = [
# name, default, rebuild
('blog_path', 'blog', True),
('blog_title', 'Blog', True),
('blog_baseurl', None, True),
('blog_archive_titles', None, False),
('blog_feed_archives', False, True),
('blog_feed_fulltext', False, True),
('blog_feed_subtitle', None, True),
('blog_feed_titles', None, False),
('blog_feed_length', None, None),
('blog_authors', {}, True),
('blog_default_author', None, True),
('blog_locations', {}, True),
('blog_default_location', None, True),
('blog_languages', {}, True),
('blog_default_language', None, True),
('fontawesome_link_cdn', None, True),
('fontawesome_included', False, True),
('fontawesome_css_file', '', True),
('post_date_format', '%d %B %Y', True),
('post_date_format_short', '%d %B', True),
('post_auto_image', 0, True),
('post_auto_excerpt', 1, True),
('post_auto_orphan', True, True),
('post_redirect_refresh', 5, True),
('post_always_section', False, True),
('disqus_shortname', None, True),
('disqus_drafts', False, True),
('disqus_pages', False, True),
("blog_path", "blog", True),
("blog_title", "Blog", True),
("blog_baseurl", None, True),
("blog_archive_titles", None, False),
("blog_feed_archives", False, True),
("blog_feed_fulltext", False, True),
("blog_feed_subtitle", None, True),
("blog_feed_titles", None, False),
("blog_feed_length", None, None),
("blog_authors", {}, True),
("blog_default_author", None, True),
("blog_locations", {}, True),
("blog_default_location", None, True),
("blog_languages", {}, True),
("blog_default_language", None, True),
("fontawesome_link_cdn", None, True),
("fontawesome_included", False, True),
("fontawesome_css_file", "", True),
("post_date_format", "%d %B %Y", True),
("post_date_format_short", "%d %B", True),
("post_auto_image", 0, True),
("post_auto_excerpt", 1, True),
("post_auto_orphan", True, True),
("post_redirect_refresh", 5, True),
("post_always_section", False, True),
("disqus_shortname", None, True),
("disqus_drafts", False, True),
("disqus_pages", False, True),
]
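Every tuple above is registered with Sphinx as a config value (name, default, rebuild flag). A hypothetical ``conf.py`` could override a handful of them like this; the values are illustrative only:

    blog_path = "blog"
    blog_title = "SunPy Blog"
    blog_baseurl = "https://example.org/"
    blog_authors = {"Nabil": ("Nabil Freij", None)}   # label -> (name, link)
    blog_default_author = "Nabil"
    post_date_format = "%d %B %Y"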
@ -100,7 +98,7 @@ FUTURE = datetime(9999, 12, 31)
def revise_pending_xrefs(doctree, docname):
for node in doctree.traverse(addnodes.pending_xref):
node['refdoc'] = docname
node["refdoc"] = docname
try:
@ -110,9 +108,12 @@ except ImportError:
def link_posts(posts):
"""Link posts after sorting them post by published date."""
"""
Link posts after sorting them by published date.
"""
from operator import attrgetter
posts = filter(attrgetter("order"), posts)
posts = sorted(posts)
posts[0].prev = posts[-1].next = None
@ -124,7 +125,9 @@ def link_posts(posts):
class Blog(Container):
"""Handle blog operations."""
"""
Handle blog operations.
"""
# using a shared state
_dict = {}
@ -136,7 +139,9 @@ class Blog(Container):
self._init(app)
def _init(self, app):
"""Instantiate Blog."""
"""
Instantiate Blog.
"""
self.app = app
self.config = {}
@ -147,75 +152,75 @@ class Blog(Container):
for opt in CONFIG:
self.config[opt[0]] = getattr(app.config, opt[0])
opt = self.config['blog_default_author']
opt = self.config["blog_default_author"]
if opt is not None and not isinstance(opt, list):
self.config['blog_default_author'] = [opt]
self.config["blog_default_author"] = [opt]
opt = self.config['blog_default_location']
opt = self.config["blog_default_location"]
if opt is not None and not isinstance(opt, list):
self.config['blog_default_location'] = [opt]
self.config["blog_default_location"] = [opt]
opt = self.config['blog_default_language']
opt = self.config["blog_default_language"]
if opt is not None and not isinstance(opt, list):
self.config['blog_default_language'] = [opt]
self.config["blog_default_language"] = [opt]
# blog catalog contains all posts
self.blog = Catalog(self, 'blog', 'blog', None)
self.blog = Catalog(self, "blog", "blog", None)
# contains post collections by year
self.archive = Catalog(self, 'archive', 'archive', None, reverse=True)
self.archive.docname += '/archive'
refs['blog-archives'] = (self.archive.docname, 'Archives')
self.archive = Catalog(self, "archive", "archive", None, reverse=True)
self.archive.docname += "/archive"
refs["blog-archives"] = (self.archive.docname, "Archives")
self.catalogs = cat = {} # catalogs of user set labels
self.tags = cat['tags'] = Catalog(self, 'tags', 'tag', 'tag')
refs['blog-tags'] = (self.tags.docname, 'Tags')
self.tags = cat["tags"] = Catalog(self, "tags", "tag", "tag")
refs["blog-tags"] = (self.tags.docname, "Tags")
self.author = cat['author'] = Catalog(self, 'author',
'author', 'author')
refs['blog-authors'] = (self.author.docname, 'Authors')
self.author = cat["author"] = Catalog(self, "author", "author", "author")
refs["blog-authors"] = (self.author.docname, "Authors")
self.location = cat['location'] = Catalog(self, 'location',
'location', 'location')
refs['blog-locations'] = (self.location.docname, 'Locations')
self.location = cat["location"] = Catalog(self, "location", "location", "location")
refs["blog-locations"] = (self.location.docname, "Locations")
self.language = cat['language'] = Catalog(self, 'language',
'language', 'language')
refs['blog-languages'] = (self.language.docname, 'Languages')
self.language = cat["language"] = Catalog(self, "language", "language", "language")
refs["blog-languages"] = (self.language.docname, "Languages")
self.category = cat['category'] = Catalog(self, 'category',
'category', 'category')
refs['blog-categories'] = (self.category.docname, 'Categories')
self.category = cat["category"] = Catalog(self, "category", "category", "category")
refs["blog-categories"] = (self.category.docname, "Categories")
for catname in ['author', 'location', 'language']:
for catname in ["author", "location", "language"]:
catalog = self.catalogs[catname]
items = self.config['blog_' + catname + 's'].items()
items = self.config["blog_" + catname + "s"].items()
for label, (name, link) in items:
catalog[label] = Collection(catalog, label, name, link)
self.posts = self.blog['post'] = Collection(self.blog, 'post',
'Posts', path=self.blog_path)
self.drafts = self.blog['draft'] = Collection(self.blog, 'draft',
'Drafts', path=os_path_join(self.blog_path, 'drafts'))
self.posts = self.blog["post"] = Collection(self.blog, "post", "Posts", path=self.blog_path)
self.drafts = self.blog["draft"] = Collection(
self.blog, "draft", "Drafts", path=os_path_join(self.blog_path, "drafts")
)
# add references to posts and drafts
# e.g. :ref:`blog-posts`
refs['blog-posts'] = (os_path_join(self.config['blog_path'], 'index'), 'Posts')
refs['blog-drafts'] = (os_path_join(self.config['blog_path'], 'drafts', 'index'), 'Drafts')
refs['blog-feed'] = (os_path_join(self.config['blog_path'], 'atom.xml'), self.blog_title + ' Feed')
refs["blog-posts"] = (os_path_join(self.config["blog_path"], "index"), "Posts")
refs["blog-drafts"] = (os_path_join(self.config["blog_path"], "drafts", "index"), "Drafts")
refs["blog-feed"] = (
os_path_join(self.config["blog_path"], "atom.xml"),
self.blog_title + " Feed",
)
# set some internal configuration options
self.config['fontawesome'] = (self.config['fontawesome_included'] or
self.config['fontawesome_link_cdn'] or
self.config['fontawesome_css_file'])
self.config["fontawesome"] = (
self.config["fontawesome_included"]
or self.config["fontawesome_link_cdn"]
or self.config["fontawesome_css_file"]
)
def __getattr__(self, name):
try:
attr = self.config[name]
except KeyError:
raise AttributeError('ABlog has no configuration option {}'
.format(repr(name)))
raise AttributeError("ABlog has no configuration option {}".format(repr(name)))
return attr
def __getitem__(self, key):
@ -236,12 +241,16 @@ class Blog(Container):
@property
def feed_path(self):
"""RSS feed page name."""
"""
RSS feed page name.
"""
return os_path_join(self.blog_path, 'atom.xml')
return os_path_join(self.blog_path, "atom.xml")
def register(self, docname, info):
"""Register post *docname*."""
"""
Register post *docname*.
"""
post = Post(self, docname, info)
if post.date and post.date < TOMORROW:
@ -252,7 +261,9 @@ class Blog(Container):
catalog.add(post)
def recent(self, num, docname=None, **labels):
"""Yield *num* recent posts, excluding the one with `docname`."""
"""
Yield *num* recent posts, excluding the one with `docname`.
"""
if num is None:
num = len(self)
@ -265,95 +276,107 @@ class Blog(Container):
yield post
def page_id(self, pagename):
"""Return pagename, trimming :file:`index` from end when found.
Return value is used as disqus page identifier."""
"""
Return pagename, trimming :file:`index` from end when found.
if self.config['blog_baseurl']:
if pagename.endswith('index'):
Return value is used as disqus page identifier.
"""
if self.config["blog_baseurl"]:
if pagename.endswith("index"):
pagename = pagename[:-5]
pagename = pagename.strip('/')
return '/' + pagename + ('/' if pagename else '')
pagename = pagename.strip("/")
return "/" + pagename + ("/" if pagename else "")
def page_url(self, pagename):
"""Return page URL when :confval:`blog_baseurl` is set, otherwise
``None``. When found, :file:`index.html` is trimmed from the end
of the URL."""
"""
Return page URL when :confval:`blog_baseurl` is set, otherwise
``None``.
if self.config['blog_baseurl']:
url = urljoin(self.config['blog_baseurl'], pagename)
if url.endswith('index'):
When found, :file:`index.html` is trimmed from the end of the
URL.
"""
if self.config["blog_baseurl"]:
url = urljoin(self.config["blog_baseurl"], pagename)
if url.endswith("index"):
url = url[:-5]
return url
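As a quick worked example of the two methods above, assuming ``blog_baseurl = "https://example.org/"`` (the ``[:-5]`` slice is what strips a trailing ``index``):

    blog.page_id("blog/2019/my-post/index")    # -> "/blog/2019/my-post/"
    blog.page_id("index")                      # -> "/"
    blog.page_url("blog/2019/my-post/index")   # -> "https://example.org/blog/2019/my-post/"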
def html_builder_write_doc(self, docname, doctree):
"""Part of :meth:`sphinx.builders.html.StandaloneHTMLBuilder.write_doc`
method used to convert *doctree* to HTML."""
"""
Part of :meth:`sphinx.builders.html.StandaloneHTMLBuilder.write_doc` method
used to convert *doctree* to HTML.
"""
destination = StringOutput(encoding='utf-8')
destination = StringOutput(encoding="utf-8")
doctree.settings = self.docsettings
self.secnumbers = {}
self.imgpath = relative_uri(self.get_target_uri(docname), '_images')
self.dlpath = relative_uri(self.get_target_uri(docname), '_downloads')
self.imgpath = relative_uri(self.get_target_uri(docname), "_images")
self.dlpath = relative_uri(self.get_target_uri(docname), "_downloads")
self.current_docname = docname
self.docwriter.write(doctree, destination)
self.docwriter.assemble_parts()
return self.docwriter.parts['fragment']
return self.docwriter.parts["fragment"]
class BlogPageMixin(object):
class BlogPageMixin:
def __str__(self):
return self.title
def __repr__(self):
return str(self) + ' <' + text_type(self.docname) + '>'
return str(self) + " <" + text_type(self.docname) + ">"
@property
def blog(self):
"""Reference to :class:`~ablog.blog.Blog` object."""
"""
Reference to :class:`~ablog.blog.Blog` object.
"""
return self._blog
@property
def title(self):
return getattr(self, 'name', getattr(self, '_title'))
return getattr(self, "name", getattr(self, "_title"))
class Post(BlogPageMixin):
"""Handle post metadata."""
"""
Handle post metadata.
"""
def __init__(self, blog, docname, info):
self._blog = blog
self.docname = docname
self.section = info['section']
self.order = info['order']
self.date = date = info['date']
self.update = info['update']
self.nocomments = info['nocomments']
self.section = info["section"]
self.order = info["order"]
self.date = date = info["date"]
self.update = info["update"]
self.nocomments = info["nocomments"]
self.published = date and date < TOMORROW
self.draft = not self.published
self._title = info['title']
self.excerpt = info['excerpt']
self.doctree = info['doctree']
self._title = info["title"]
self.excerpt = info["excerpt"]
self.doctree = info["doctree"]
self._next = self._prev = -1
self._computed_date = date or FUTURE
#self.language = info.get('language')
# self.language = info.get('language')
# archives
# self.blog = []
if self.published:
self.tags = info.get('tags')
self.author = info.get('author')
self.category = info.get('category')
self.location = info.get('location')
self.language = info.get('language')
self.tags = info.get("tags")
self.author = info.get("author")
self.category = info.get("category")
self.location = info.get("location")
self.language = info.get("language")
if not self.author and blog.blog_default_author:
self.author = blog.blog_default_author
@ -366,14 +389,14 @@ class Post(BlogPageMixin):
self.archive.add(self)
else:
self.tags = info.get('tags')
self.author = info.get('author')
self.category = info.get('category')
self.location = info.get('location')
self.language = info.get('language')
self.tags = info.get("tags")
self.author = info.get("author")
self.category = info.get("category")
self.location = info.get("location")
self.language = info.get("language")
self.archive = []
self.redirect = info.get('redirect')
self.redirect = info.get("redirect")
self.options = info
@ -381,12 +404,16 @@ class Post(BlogPageMixin):
return (self._computed_date, self.title) < (other._computed_date, other.title)
def to_html(self, pagename, fulltext=False, drop_h1=True):
"""Return excerpt or *fulltext* as HTML after resolving references
with respect to *pagename*. By default, first `<h1>` tag is dropped
from the output. More than one can be dropped by setting *drop_h1*
to the desired number of tags to be dropped."""
"""
Return excerpt or *fulltext* as HTML after resolving references with
respect to *pagename*.
doctree = new_document('')
By default, first `<h1>` tag is dropped from the output. More
than one can be dropped by setting *drop_h1* to the desired
number of tags to be dropped.
"""
doctree = new_document("")
if fulltext:
deepcopy = self.doctree.deepcopy()
if isinstance(deepcopy, nodes.document):
@ -401,20 +428,21 @@ class Post(BlogPageMixin):
revise_pending_xrefs(doctree, pagename)
app.env.resolve_references(doctree, pagename, app.builder)
add_permalinks, app.builder.add_permalinks = (
app.builder.add_permalinks, False)
add_permalinks, app.builder.add_permalinks = (app.builder.add_permalinks, False)
html = html_builder_write_doc(app.builder, pagename, doctree)
app.builder.add_permalinks = add_permalinks
if drop_h1:
html = re.sub('<h1>(.*?)</h1>', '', html, count=abs(int(drop_h1)))
html = re.sub("<h1>(.*?)</h1>", "", html, count=abs(int(drop_h1)))
return html
@property
def next(self):
"""Next published post in chronological order."""
"""
Next published post in chronological order.
"""
if self._next == -1:
link_posts(self._blog.posts)
@ -422,13 +450,17 @@ class Post(BlogPageMixin):
@next.setter
def next(self, post):
"""Set next published post in chronological order."""
"""
Set next published post in chronological order.
"""
self._next = post
@property
def prev(self):
"""Previous published post in chronological order."""
"""
Previous published post in chronological order.
"""
if self._prev == -1:
link_posts(self._blog.posts)
@ -436,14 +468,18 @@ class Post(BlogPageMixin):
@prev.setter
def prev(self, post):
"""Set previous published post in chronological order."""
"""
Set previous published post in chronological order.
"""
self._prev = post
class Catalog(BlogPageMixin):
"""Handles collections of posts."""
"""
Handles collections of posts.
"""
def __init__(self, blog, name, xref, path, reverse=False):
@ -492,8 +528,10 @@ class Catalog(BlogPageMixin):
yield self.collections[key]
def add(self, post):
"""Add post to appropriate collection(s) and replace collections
labels with collection objects."""
"""
Add post to appropriate collection(s) and replace collections labels
with collection objects.
"""
colls = []
for label in getattr(post, self.name, []):
@ -504,19 +542,21 @@ class Catalog(BlogPageMixin):
setattr(post, self.name, colls)
def _minmax(self):
"""Return minimum and maximum sizes of collections."""
"""
Return minimum and maximum sizes of collections.
"""
if (self._coll_lens is None or
len(self._coll_lens) != len(self.collections)):
self._coll_lens = [len(coll) for coll in self.collections.values()
if len(coll)]
if self._coll_lens is None or len(self._coll_lens) != len(self.collections):
self._coll_lens = [len(coll) for coll in self.collections.values() if len(coll)]
self._min_max = min(self._coll_lens), max(self._coll_lens)
return self._min_max
class Collection(BlogPageMixin):
"""Posts sharing a label, i.e. tag, category, author, or location."""
"""
Posts sharing a label, i.e. tag, category, author, or location.
"""
def __init__(self, catalog, label, name=None, href=None, path=None, page=0):
@ -529,7 +569,7 @@ class Collection(BlogPageMixin):
self._posts = {}
self._posts_iter = None
self._path = path
self.xref = self.catalog.xref + '-' + slugify(label)
self.xref = self.catalog.xref + "-" + slugify(label)
self._slug = None
self._html = None
@ -558,8 +598,7 @@ class Collection(BlogPageMixin):
posts.sort(reverse=True)
self._posts_iter = posts
for post in self._posts_iter:
yield post
yield from self._posts_iter
def __getitem__(self, key):
@ -576,28 +615,34 @@ class Collection(BlogPageMixin):
return self._catalog
def add(self, post):
"""Add post to the collection."""
"""
Add post to the collection.
"""
post_name = post.docname
if post.section:
post_name += '#' + post.section
post_name += "#" + post.section
self._posts[post_name] = post
def relsize(self, maxsize=5, minsize=1):
"""Relative size used in tag clouds."""
"""
Relative size used in tag clouds.
"""
min_, max_ = self.catalog._minmax()
diff = maxsize - minsize
if len(self.catalog) == 1 or min_ == max_:
return int(round(diff / 2. + minsize))
return int(round(diff / 2.0 + minsize))
size = int(1. * (len(self) - min_) / (max_ - min_) * diff + minsize)
size = int(1.0 * (len(self) - min_) / (max_ - min_) * diff + minsize)
return size
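For instance, with the default ``maxsize=5`` and ``minsize=1``, a collection of 5 posts in a catalog whose smallest collection has 1 post and largest has 10 comes out as:

    size = int(1.0 * (5 - 1) / (10 - 1) * 4 + 1)   # int(2.77...) -> 2
    # a single-collection catalog (or min_ == max_) short-circuits to int(round(4 / 2.0 + 1)) == 3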
@property
def docname(self):
"""Collection page document name."""
"""
Collection page document name.
"""
if self._path is None:
self._path = os_path_join(self.catalog.path, slugify(self.name))

View file

@ -1,70 +1,79 @@
from __future__ import absolute_import, division, print_function
import os
import io
import sys
import glob
import ablog
import shutil
import argparse
from distutils.version import LooseVersion
from sphinx import __version__
import ablog
BUILDDIR = '_website'
DOCTREES = '.doctrees'
SPHINX_LT_17 = LooseVersion(__version__) < LooseVersion('1.7')
from .start import ablog_start
__all__ = ['ablog_build', 'ablog_clean',
'ablog_serve', 'ablog_deploy', 'ablog_main']
BUILDDIR = "_website"
DOCTREES = ".doctrees"
__all__ = ["ablog_build", "ablog_clean", "ablog_serve", "ablog_deploy", "ablog_main"]
def find_confdir(sourcedir=None):
"""Return path to current directory or its parent that contains conf.py"""
"""
Return path to current directory or its parent that contains conf.py.
"""
from os.path import isfile, join, abspath
confdir = (sourcedir or os.getcwd())
def parent(d): return abspath(join(d, '..'))
confdir = sourcedir or os.getcwd()
while not isfile(join(confdir, 'conf.py')) and confdir != parent(confdir):
def parent(d):
return abspath(join(d, ".."))
while not isfile(join(confdir, "conf.py")) and confdir != parent(confdir):
confdir = parent(confdir)
conf = join(confdir, 'conf.py')
conf = join(confdir, "conf.py")
if isfile(conf) and 'ablog' in open(conf).read():
if isfile(conf) and "ablog" in open(conf).read():
return confdir
else:
sys.exit("Current directory and its parents doesn't "
"contain configuration file (conf.py).")
sys.exit(
"Current directory and its parents doesn't " "contain configuration file (conf.py)."
)
def read_conf(confdir):
"""Return conf.py file as a module."""
"""
Return conf.py file as a module.
"""
sys.path.insert(0, confdir)
conf = __import__('conf')
conf = __import__("conf")
sys.path.pop(0)
return conf
parser = argparse.ArgumentParser(
description="ABlog for blogging with Sphinx",
epilog="See 'ablog <command> -h' for more information on a specific "
"command.")
epilog="See 'ablog <command> -h' for more information on a specific " "command.",
)
parser.add_argument('-v', '--version',
help="print ABlog version and exit",
action='version', version=ablog.__version__)
parser.add_argument(
"-v",
"--version",
help="print ABlog version and exit",
action="version",
version=ablog.__version__,
)
commands = ablog_commands = parser.add_subparsers(title='commands')
commands = ablog_commands = parser.add_subparsers(title="commands")
def cmd(func=None, **kwargs):
if func is None:
def cmd_inner(func):
return cmd(func, **kwargs)
return cmd_inner
else:
command = commands.add_parser(**kwargs)
@ -81,8 +90,10 @@ def arg(*args, **kwargs):
else:
func = None
if func is None:
def arg_inner(func):
return arg(func, *args, **kwargs)
return arg_inner
else:
func.command.add_argument(*args, **kwargs)
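The ``cmd``/``arg`` pair above is a thin decorator layer over argparse sub-commands. Stacking them, as the ``post`` command further below does, is roughly equivalent to this hand-written sketch (the ``set_defaults`` wiring is an assumption about the part of ``cmd()`` elided from this hunk):

    import argparse

    parser = argparse.ArgumentParser(description="ABlog for blogging with Sphinx")
    commands = parser.add_subparsers(title="commands")

    post = commands.add_parser("post", help="create a blank post")
    post.add_argument("-t", dest="title", type=str, help="post title; default is formed from filename")
    post.add_argument(dest="filename", type=str, help="filename, e.g. my-nth-post (.rst appended)")
    post.set_defaults(func=ablog_post)  # assumed: cmd() ties the parser back to the decorated function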
@ -91,146 +102,197 @@ def arg(*args, **kwargs):
def arg_website(func):
arg(func, '-w', dest='website', type=str,
arg(
func,
"-w",
dest="website",
type=str,
help="path for website, default is %s when `ablog_website` "
"is not set in conf.py" % BUILDDIR)
"is not set in conf.py" % BUILDDIR,
)
return func
def arg_doctrees(func):
arg(func, '-d', dest='doctrees', type=str,
arg(
func,
"-d",
dest="doctrees",
type=str,
help="path for the cached environment and doctree files, "
"default %s when `ablog_doctrees` is not set in conf.py" %
DOCTREES)
"default %s when `ablog_doctrees` is not set in conf.py" % DOCTREES,
)
return func
from .start import ablog_start
cmd(ablog_start, name='start', help='start a new blog project',
cmd(
ablog_start,
name="start",
help="start a new blog project",
description="Start a new blog project by answering a few questions. "
"You will end up with a configuration file and sample pages.")
"You will end up with a configuration file and sample pages.",
)
@arg('-P', dest='runpdb',
action='store_true', default=False,
help="run pdb on exception")
@arg('-T', dest='traceback',
action='store_true', default=False,
help="show full traceback on exception")
@arg('-W', dest='werror',
action='store_true', default=False,
help='turn warnings into errors')
@arg('-N', dest='no_colors',
action='store_true', default=False,
help='do not emit colored output')
@arg('-Q', dest='extra_quiet',
action='store_true', default=False,
help='no output at all, not even warnings')
@arg('-q', dest='quiet',
action='store_true', default=False,
help='no output on stdout, just warnings on stderr')
@arg('-v', dest='verbosity',
action='count', default=0,
help='increase verbosity (can be repeated)')
@arg("-P", dest="runpdb", action="store_true", default=False, help="run pdb on exception")
@arg(
"-T",
dest="traceback",
action="store_true",
default=False,
help="show full traceback on exception",
)
@arg("-W", dest="werror", action="store_true", default=False, help="turn warnings into errors")
@arg("-N", dest="no_colors", action="store_true", default=False, help="do not emit colored output")
@arg(
"-Q",
dest="extra_quiet",
action="store_true",
default=False,
help="no output at all, not even warnings",
)
@arg(
"-q",
dest="quiet",
action="store_true",
default=False,
help="no output on stdout, just warnings on stderr",
)
@arg("-v", dest="verbosity", action="count", default=0, help="increase verbosity (can be repeated)")
@arg_doctrees
@arg_website
@arg('-s', dest='sourcedir', type=str,
help="root path for source files, "
"default is path to the folder that contains conf.py")
@arg('-b', dest='builder', type=str,
help="builder to use, default `ablog_builder` or dirhtml")
@arg('-a', dest='allfiles', action='store_true', default=False,
help="write all files; default is to only write new and changed files")
@cmd(name='build', help='build your blog project',
description="Path options can be set in conf.py. "
"Default values of paths are relative to conf.py.")
def ablog_build(builder=None, sourcedir=None, website=None, doctrees=None,
traceback=False, runpdb=False, allfiles=False, werror=False, verbosity=0,
quiet=False, extra_quiet=False, no_colors=False, **kwargs):
@arg(
"-s",
dest="sourcedir",
type=str,
help="root path for source files, " "default is path to the folder that contains conf.py",
)
@arg("-b", dest="builder", type=str, help="builder to use, default `ablog_builder` or dirhtml")
@arg(
"-a",
dest="allfiles",
action="store_true",
default=False,
help="write all files; default is to only write new and changed files",
)
@cmd(
name="build",
help="build your blog project",
description="Path options can be set in conf.py. "
"Default values of paths are relative to conf.py.",
)
def ablog_build(
builder=None,
sourcedir=None,
website=None,
doctrees=None,
traceback=False,
runpdb=False,
allfiles=False,
werror=False,
verbosity=0,
quiet=False,
extra_quiet=False,
no_colors=False,
**kwargs,
):
confdir = find_confdir(sourcedir)
conf = read_conf(confdir)
website = (website or
os.path.join(confdir, getattr(conf, 'ablog_website', BUILDDIR)))
doctrees = (doctrees or
os.path.join(confdir, getattr(conf, 'ablog_doctrees', DOCTREES)))
sourcedir = (sourcedir or confdir)
website = website or os.path.join(confdir, getattr(conf, "ablog_website", BUILDDIR))
doctrees = doctrees or os.path.join(confdir, getattr(conf, "ablog_doctrees", DOCTREES))
sourcedir = sourcedir or confdir
argv = sys.argv[:1]
argv.extend(['-b', builder or getattr(conf, 'ablog_builder', 'dirhtml')])
argv.extend(['-d', doctrees])
argv.extend(["-b", builder or getattr(conf, "ablog_builder", "dirhtml")])
argv.extend(["-d", doctrees])
if traceback:
argv.extend(['-T'])
argv.extend(["-T"])
if runpdb:
argv.extend(['-P'])
argv.extend(["-P"])
if allfiles:
argv.extend(['-a'])
argv.extend(["-a"])
if werror:
argv.extend(['-W'])
argv.extend(["-W"])
if verbosity > 0:
argv.extend(['-v'] * verbosity)
argv.extend(["-v"] * verbosity)
if quiet:
argv.extend(['-q'])
argv.extend(["-q"])
if extra_quiet:
argv.extend(['-Q'])
argv.extend(["-Q"])
if no_colors:
argv.extend(['-N'])
argv.extend(["-N"])
argv.extend([sourcedir, website])
if SPHINX_LT_17:
from sphinx import main
sys.exit(main(argv))
else:
from sphinx.cmd.build import main
# As of Sphinx 1.7, the first argument is now no longer ignored
sys.exit(main(argv[1:]))
from sphinx.cmd.build import main
sys.exit(main(argv[1:]))
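Concretely, a call such as ``ablog build -T -a`` with nothing overridden in ``conf.py`` hands Sphinx an argument list roughly like the following (``<confdir>`` is a placeholder for the directory containing ``conf.py``):

    argv[1:] == [
        "-b", "dirhtml",             # getattr(conf, "ablog_builder", "dirhtml")
        "-d", "<confdir>/.doctrees",
        "-T",                        # full tracebacks
        "-a",                        # rewrite all files
        "<confdir>",                 # sourcedir defaults to confdir
        "<confdir>/_website",
    ]
    # sphinx.cmd.build.main() receives everything except the program name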
@arg('-D', dest='deep', action='store_true', default=False,
help="deep clean, remove cached environment and doctree files")
@arg(
"-D",
dest="deep",
action="store_true",
default=False,
help="deep clean, remove cached environment and doctree files",
)
@arg_doctrees
@arg_website
@cmd(name='clean', help='clean your blog build files',
description="Path options can be set in conf.py. "
"Default values of paths are relative to conf.py.")
@cmd(
name="clean",
help="clean your blog build files",
description="Path options can be set in conf.py. "
"Default values of paths are relative to conf.py.",
)
def ablog_clean(website=None, doctrees=None, deep=False, **kwargs):
confdir = find_confdir()
conf = read_conf(confdir)
website = (website or
os.path.join(confdir, getattr(conf, 'ablog_website', BUILDDIR)))
website = website or os.path.join(confdir, getattr(conf, "ablog_website", BUILDDIR))
doctrees = (doctrees or
os.path.join(confdir, getattr(conf, 'ablog_doctrees', DOCTREES)))
doctrees = doctrees or os.path.join(confdir, getattr(conf, "ablog_doctrees", DOCTREES))
nothing = True
if glob.glob(os.path.join(website, '*')):
if glob.glob(os.path.join(website, "*")):
shutil.rmtree(website)
print('Removed {}.'.format(os.path.relpath(website)))
print("Removed {}.".format(os.path.relpath(website)))
nothing = False
if deep and glob.glob(os.path.join(doctrees, '*')):
if deep and glob.glob(os.path.join(doctrees, "*")):
shutil.rmtree(doctrees)
print('Removed {}.'.format(os.path.relpath(doctrees)))
print("Removed {}.".format(os.path.relpath(doctrees)))
nothing = False
if nothing:
print('Nothing to clean.')
print("Nothing to clean.")
@arg('--patterns', dest='patterns', default='*.rst;*.txt',
help="patterns for triggering rebuilds")
@arg('-r', dest='rebuild', action='store_true', default=False,
help="rebuild when a file matching patterns change or get added")
@arg('-n', dest='view', action='store_false', default=True,
help="do not open website in a new browser tab")
@arg('-p', dest='port', type=int, default=8000,
help='port number for HTTP server; default is 8000')
@arg("--patterns", dest="patterns", default="*.rst;*.txt", help="patterns for triggering rebuilds")
@arg(
"-r",
dest="rebuild",
action="store_true",
default=False,
help="rebuild when a file matching patterns change or get added",
)
@arg(
"-n",
dest="view",
action="store_false",
default=True,
help="do not open website in a new browser tab",
)
@arg("-p", dest="port", type=int, default=8000, help="port number for HTTP server; default is 8000")
@arg_website
@cmd(name='serve', help='serve and view your project',
description="Serve options can be set in conf.py. "
"Default values of paths are relative to conf.py.")
def ablog_serve(website=None, port=8000, view=True, rebuild=False,
patterns='*.rst;*.txt', **kwargs):
@cmd(
name="serve",
help="serve and view your project",
description="Serve options can be set in conf.py. "
"Default values of paths are relative to conf.py.",
)
def ablog_serve(
website=None, port=8000, view=True, rebuild=False, patterns="*.rst;*.txt", **kwargs
):
confdir = find_confdir()
conf = read_conf(confdir)
@ -252,35 +314,36 @@ def ablog_serve(website=None, port=8000, view=True, rebuild=False,
httpd = socketserver.TCPServer(("", port), Handler)
ip, port = httpd.socket.getsockname()
print("Serving HTTP on {}:{}.".format(ip, port))
print(f"Serving HTTP on {ip}:{port}.")
print("Quit the server with Control-C.")
website = (website or
os.path.join(confdir, getattr(conf, 'ablog_website', '_website')))
website = website or os.path.join(confdir, getattr(conf, "ablog_website", "_website"))
os.chdir(website)
if rebuild:
#from watchdog.watchmedo import observe_with
# from watchdog.watchmedo import observe_with
from watchdog.observers import Observer
from watchdog.tricks import ShellCommandTrick
patterns = patterns.split(';')
ignore_patterns = [os.path.join(website, '*')]
handler = ShellCommandTrick(shell_command='ablog build -s ' + confdir,
patterns=patterns,
ignore_patterns=ignore_patterns,
ignore_directories=False,
wait_for_process=True,
drop_during_process=False)
patterns = patterns.split(";")
ignore_patterns = [os.path.join(website, "*")]
handler = ShellCommandTrick(
shell_command="ablog build -s " + confdir,
patterns=patterns,
ignore_patterns=ignore_patterns,
ignore_directories=False,
wait_for_process=True,
drop_during_process=False,
)
observer = Observer(timeout=1)
observer.schedule(handler, confdir, recursive=True)
observer.start()
try:
if view:
(webbrowser.open_new_tab('http://127.0.0.1:{}'.format(port)) and
httpd.serve_forever())
(webbrowser.open_new_tab(f"http://127.0.0.1:{port}") and httpd.serve_forever())
else:
httpd.serve_forever()
except KeyboardInterrupt:
@ -289,20 +352,17 @@ def ablog_serve(website=None, port=8000, view=True, rebuild=False,
else:
if view:
(webbrowser.open_new_tab('http://127.0.0.1:{}'.format(port)) and
httpd.serve_forever())
(webbrowser.open_new_tab(f"http://127.0.0.1:{port}") and httpd.serve_forever())
else:
httpd.serve_forever()
@arg('-t', dest='title', type=str,
help='post title; default is formed from filename')
@arg(dest='filename', type=str,
help='filename, e.g. my-nth-post (.rst appended)')
@cmd(name='post', help='create a blank post',)
@arg("-t", dest="title", type=str, help="post title; default is formed from filename")
@arg(dest="filename", type=str, help="filename, e.g. my-nth-post (.rst appended)")
@cmd(name="post", help="create a blank post")
def ablog_post(filename, title=None, **kwargs):
POST_TEMPLATE = u'''
POST_TEMPLATE = """
%(title)s
%(equal)s
@ -310,85 +370,111 @@ def ablog_post(filename, title=None, **kwargs):
:tags:
:category:
'''
"""
from datetime import date
from os import path
# Generate basic post params.
today = date.today()
if not filename.lower().endswith('.rst'):
filename += '.rst'
if not filename.lower().endswith(".rst"):
filename += ".rst"
today = today.strftime("%b %d, %Y")
if not title:
title = filename[:-4].replace('-', ' ').title()
title = filename[:-4].replace("-", " ").title()
pars = {'date': today,
'title': title,
'equal': '=' * len(title)
}
pars = {"date": today, "title": title, "equal": "=" * len(title)}
if path.isfile(filename):
pass
# read the file, and add post directive
# and save it
else:
with io.open(filename, 'w', encoding='utf-8') as out:
with open(filename, "w", encoding="utf-8") as out:
post_text = POST_TEMPLATE % pars
out.write(post_text)
print('Blog post created: %s' % filename)
print("Blog post created: %s" % filename)
@arg('--github-token', dest='github_token', type=str,
help="environment variable name storing GitHub access token")
@arg('--push-quietly', dest='push_quietly', action='store_true', default=False,
help="be more quiet when pushing changes")
@arg('-f', dest='push_force', action='store_true', default=False,
help="owerwrite last commit, i.e. `commit --amend; push -f`")
@arg('-m', dest='message', type=str, help="commit message")
@arg('-g', dest='github_pages', type=str,
help="GitHub username for deploying to GitHub pages")
@arg('-p', dest='repodir', type=str,
help="path to the location of repository to be deployed, e.g. "
"`../username.github.io`, default is folder containing `conf.py`")
@arg(
"--github-token",
dest="github_token",
type=str,
help="environment variable name storing GitHub access token",
)
@arg(
"--push-quietly",
dest="push_quietly",
action="store_true",
default=False,
help="be more quiet when pushing changes",
)
@arg(
"-f",
dest="push_force",
action="store_true",
default=False,
help="owerwrite last commit, i.e. `commit --amend; push -f`",
)
@arg("-m", dest="message", type=str, help="commit message")
@arg("-g", dest="github_pages", type=str, help="GitHub username for deploying to GitHub pages")
@arg(
"-p",
dest="repodir",
type=str,
help="path to the location of repository to be deployed, e.g. "
"`../username.github.io`, default is folder containing `conf.py`",
)
@arg_website
@cmd(name='deploy', help='deploy your website build files',
description="Path options can be set in conf.py. "
"Default values of paths are relative to conf.py.")
def ablog_deploy(website, message=None, github_pages=None,
push_quietly=False, push_force=False, github_token=None, repodir=None,
**kwargs):
@cmd(
name="deploy",
help="deploy your website build files",
description="Path options can be set in conf.py. "
"Default values of paths are relative to conf.py.",
)
def ablog_deploy(
website,
message=None,
github_pages=None,
push_quietly=False,
push_force=False,
github_token=None,
repodir=None,
**kwargs,
):
confdir = find_confdir()
conf = read_conf(confdir)
github_pages = (github_pages or getattr(conf, 'github_pages', None))
github_pages = github_pages or getattr(conf, "github_pages", None)
website = (website or
os.path.join(confdir, getattr(conf, 'ablog_builddir', '_website')))
website = website or os.path.join(confdir, getattr(conf, "ablog_builddir", "_website"))
tomove = glob.glob(os.path.join(website, '*'))
tomove = glob.glob(os.path.join(website, "*"))
if not tomove:
print('Nothing to deploy, build first.')
print("Nothing to deploy, build first.")
return
try:
from invoke import run
except ImportError:
raise ImportError("invoke is required by deploy command, "
"run `pip install invoke`")
raise ImportError("invoke is required by deploy command, " "run `pip install invoke`")
if github_pages:
if repodir is None:
repodir = os.path.join(confdir, "{0}.github.io".format(github_pages))
repodir = os.path.join(confdir, f"{github_pages}.github.io")
if os.path.isdir(repodir):
os.chdir(repodir)
run("git pull", echo=True)
else:
run("git clone https://github.com/{0}/{0}.github.io.git {1}"
.format(github_pages, repodir), echo=True)
run(
"git clone https://github.com/{0}/{0}.github.io.git {1}".format(
github_pages, repodir
),
echo=True,
)
git_add = []
for tm in tomove:
@ -406,41 +492,43 @@ def ablog_deploy(website, message=None, github_pages=None,
os.renames(fn, fnnew)
git_add.append(fnnew)
print('Moved {} files to {}.github.io'
.format(len(git_add), github_pages))
print("Moved {} files to {}.github.io".format(len(git_add), github_pages))
os.chdir(repodir)
run("git add -f " + " ".join(['"{}"'.format(os.path.relpath(p))
for p in git_add]), echo=True)
if not os.path.isfile('.nojekyll'):
open('.nojekyll', 'w')
run(
"git add -f " + " ".join(['"{}"'.format(os.path.relpath(p)) for p in git_add]),
echo=True,
)
if not os.path.isfile(".nojekyll"):
open(".nojekyll", "w")
run("git add -f .nojekyll")
commit = 'git commit -m "{}"'.format(message or 'Updates.')
commit = 'git commit -m "{}"'.format(message or "Updates.")
if push_force:
commit += ' --amend'
commit += " --amend"
run(commit, echo=True)
if github_token:
with open(os.path.join(repodir, '.git/credentials'), 'w') as out:
out.write('https://{}:@github.com'
.format(os.environ[github_token]))
with open(os.path.join(repodir, ".git/credentials"), "w") as out:
out.write("https://{}:@github.com".format(os.environ[github_token]))
run('git config credential.helper "store --file=.git/credentials"')
push = 'git push'
push = "git push"
if push_quietly:
push += ' -q'
push += " -q"
if push_force:
push += ' -f'
push += ' origin master'
push += " -f"
push += " origin master"
run(push, echo=True)
else:
print('No place to deploy.')
print("No place to deploy.")
def ablog_main():
"Ablog Main"
"""
Ablog Main.
"""
if len(sys.argv) == 1:
parser.print_help()
else:

View file

@ -1,75 +1,96 @@
# -*- coding: utf-8 -*-
"""post and postlist directives."""
"""
post and postlist directives.
"""
from __future__ import absolute_import, division, print_function
import io
import os
import sys
from string import Formatter
from datetime import datetime
from docutils import nodes
from docutils.parsers.rst import Directive, directives
from docutils.parsers.rst.directives.admonitions import BaseAdmonition
from sphinx.locale import _
from sphinx.util.nodes import set_source_info
import ablog
from .blog import Blog, os_path_join, revise_pending_xrefs, slugify
try:
from dateutil.parser import parse as date_parser
except ImportError:
date_parser = None
from docutils import nodes
from sphinx.locale import _
from sphinx.util.nodes import set_source_info
from docutils.parsers.rst import directives, Directive
from docutils.parsers.rst.directives.admonitions import BaseAdmonition
import ablog
from .blog import Blog, slugify, os_path_join, revise_pending_xrefs
text_type = str
if sys.version_info >= (3, 0):
text_type = str
else:
text_type = unicode
__all__ = [
"PostNode",
"PostList",
"UpdateNode",
"PostDirective",
"UpdateDirective",
"PostListDirective",
"purge_posts",
"process_posts",
"process_postlist",
"generate_archive_pages",
"generate_atom_feeds",
"register_posts",
]
__all__ = ['PostNode', 'PostList', 'UpdateNode', 'PostDirective',
'UpdateDirective', 'PostListDirective', 'purge_posts',
'process_posts', 'process_postlist', 'generate_archive_pages',
'generate_atom_feeds', 'register_posts']
class PostNode(nodes.Element):
"""Represent ``post`` directive content and options in document tree."""
"""
Represent ``post`` directive content and options in document tree.
"""
pass
class PostList(nodes.General, nodes.Element):
"""Represent ``postlist`` directive converted to a list of links."""
"""
Represent ``postlist`` directive converted to a list of links.
"""
pass
class UpdateNode(nodes.admonition):
"""Represent ``update`` directive."""
"""
Represent ``update`` directive.
"""
pass
class PostDirective(Directive):
"""Handle ``post`` directives."""
"""
Handle ``post`` directives.
"""
def _split(a): return [s.strip() for s in (a or '').split(',') if s.strip()]
def _split(a):
return [s.strip() for s in (a or "").split(",") if s.strip()]
has_content = True
required_arguments = 0
optional_arguments = 1
final_argument_whitespace = True
option_spec = {
'tags': _split,
'author': _split,
'category': _split,
'location': _split,
'language': _split,
'redirect': _split,
'title': lambda a: a.strip(),
'image': int,
'excerpt': int,
'exclude': directives.flag,
'nocomments': directives.flag,
"tags": _split,
"author": _split,
"category": _split,
"location": _split,
"language": _split,
"redirect": _split,
"title": lambda a: a.strip(),
"image": int,
"excerpt": int,
"exclude": directives.flag,
"nocomments": directives.flag,
}
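
The _split helper above is what turns a comma-separated option value such as :tags: into a clean list. A standalone copy for illustration (not imported from ablog):

    def _split(a):
        return [s.strip() for s in (a or "").split(",") if s.strip()]

    print(_split("python, sphinx, blogging,"))  # ['python', 'sphinx', 'blogging']
    print(_split(None))                         # []
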
def run(self):
@ -77,21 +98,20 @@ class PostDirective(Directive):
node = PostNode()
node.document = self.state.document
set_source_info(self, node)
self.state.nested_parse(self.content, self.content_offset,
node, match_titles=1)
self.state.nested_parse(self.content, self.content_offset, node, match_titles=1)
node['date'] = self.arguments[0] if self.arguments else None
node['tags'] = self.options.get('tags', [])
node['author'] = self.options.get('author', [])
node['category'] = self.options.get('category', [])
node['location'] = self.options.get('location', [])
node['language'] = self.options.get('language', [])
node['redirect'] = self.options.get('redirect', [])
node['title'] = self.options.get('title', None)
node['image'] = self.options.get('image', None)
node['excerpt'] = self.options.get('excerpt', None)
node['exclude'] = 'exclude' in self.options
node['nocomments'] = 'nocomments' in self.options
node["date"] = self.arguments[0] if self.arguments else None
node["tags"] = self.options.get("tags", [])
node["author"] = self.options.get("author", [])
node["category"] = self.options.get("category", [])
node["location"] = self.options.get("location", [])
node["language"] = self.options.get("language", [])
node["redirect"] = self.options.get("redirect", [])
node["title"] = self.options.get("title", None)
node["image"] = self.options.get("image", None)
node["excerpt"] = self.options.get("excerpt", None)
node["exclude"] = "exclude" in self.options
node["nocomments"] = "nocomments" in self.options
return [node]
@ -100,30 +120,34 @@ class UpdateDirective(BaseAdmonition):
node_class = UpdateNode
def run(self):
ad = super(UpdateDirective, self).run()
ad[0]['date'] = self.arguments[0] if self.arguments else ''
ad = super().run()
ad[0]["date"] = self.arguments[0] if self.arguments else ""
return ad
class PostListDirective(Directive):
"""Handle ``postlist`` directives."""
"""
Handle ``postlist`` directives.
"""
def _split(a):
return {s.strip() for s in a.split(",")}
def _split(a): return set(s.strip() for s in a.split(','))
has_content = False
required_arguments = 0
optional_arguments = 1
final_argument_whitespace = False
option_spec = {
'tags': _split,
'author': _split,
'category': _split,
'location': _split,
'language': _split,
'format': lambda a: a.strip(),
'date': lambda a: a.strip(),
'sort': directives.flag,
'excerpts': directives.flag,
'list-style': lambda a: a.strip(),
"tags": _split,
"author": _split,
"category": _split,
"location": _split,
"language": _split,
"format": lambda a: a.strip(),
"date": lambda a: a.strip(),
"sort": directives.flag,
"excerpts": directives.flag,
"list-style": lambda a: a.strip(),
}
def run(self):
@ -131,37 +155,40 @@ class PostListDirective(Directive):
node = PostList()
node.document = self.state.document
set_source_info(self, node)
self.state.nested_parse(self.content, self.content_offset,
node, match_titles=1)
self.state.nested_parse(self.content, self.content_offset, node, match_titles=1)
node['length'] = int(self.arguments[0]) if self.arguments else None
node['tags'] = self.options.get('tags', [])
node['author'] = self.options.get('author', [])
node['category'] = self.options.get('category', [])
node['location'] = self.options.get('location', [])
node['language'] = self.options.get('language', [])
node['format'] = self.options.get('format', '{date} - {title}')
node['date'] = self.options.get('date', None)
node['sort'] = 'sort' in self.options
node['excerpts'] = 'excerpts' in self.options
node['image'] = 'image' in self.options
node['list-style'] = self.options.get('list-style', 'none')
node["length"] = int(self.arguments[0]) if self.arguments else None
node["tags"] = self.options.get("tags", [])
node["author"] = self.options.get("author", [])
node["category"] = self.options.get("category", [])
node["location"] = self.options.get("location", [])
node["language"] = self.options.get("language", [])
node["format"] = self.options.get("format", "{date} - {title}")
node["date"] = self.options.get("date", None)
node["sort"] = "sort" in self.options
node["excerpts"] = "excerpts" in self.options
node["image"] = "image" in self.options
node["list-style"] = self.options.get("list-style", "none")
return [node]
def purge_posts(app, env, docname):
"""Remove post and reference to it from the standard domain when its
document is removed or changed."""
"""
Remove post and reference to it from the standard domain when its document
is removed or changed.
"""
if hasattr(env, 'ablog_posts'):
if hasattr(env, "ablog_posts"):
env.ablog_posts.pop(docname, None)
filename = os.path.split(docname)[1]
env.domains['std'].data['labels'].pop(filename, None)
env.domains["std"].data["labels"].pop(filename, None)
def _get_section_title(section):
"""Return section title as text."""
"""
Return section title as text.
"""
for title in section.traverse(nodes.title):
return title.astext()
@ -171,48 +198,51 @@ def _get_section_title(section):
def _get_update_dates(section, docname, post_date_format):
"""Return list of dates of updates found section."""
"""
Return list of dates of updates found section.
"""
update_nodes = list(section.traverse(UpdateNode))
update_dates = []
for update_node in update_nodes:
try:
update = datetime.strptime(update_node['date'], post_date_format)
update = datetime.strptime(update_node["date"], post_date_format)
except ValueError:
if date_parser:
try:
update = date_parser(update_node['date'])
update = date_parser(update_node["date"])
except ValueError:
raise ValueError('invalid post date in: ' + docname)
raise ValueError("invalid post date in: " + docname)
else:
raise ValueError('invalid post date (%s) in ' % (date) +
docname +
". Expected format: %s" % post_date_format)
raise ValueError(
"invalid post date (%s) in " % (date)
+ docname
+ ". Expected format: %s" % post_date_format
)
# Insert a new title element which contains the `Updated on {date}` logic.
substitute = nodes.title(u'',
'Updated on '
+ update.strftime(post_date_format)
)
substitute = nodes.title("", "Updated on " + update.strftime(post_date_format))
update_node.insert(0, substitute)
update_node['classes'] = ['note', 'update']
update_node["classes"] = ["note", "update"]
update_dates.append(update)
return update_dates
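
The parsing above tries the configured post_date_format first and only falls back to dateutil when it is installed. A small standalone illustration; the "%b %d, %Y" format mirrors the date style that ablog start writes later in this commit, while the real value comes from the post_date_format configuration option:

    from datetime import datetime

    try:
        from dateutil.parser import parse as date_parser
    except ImportError:
        date_parser = None

    def parse_post_date(value, post_date_format="%b %d, %Y"):
        # Strict parse first, permissive dateutil parse second (if available).
        try:
            return datetime.strptime(value, post_date_format)
        except ValueError:
            if date_parser:
                return date_parser(value)
            raise

    print(parse_post_date("Aug 19, 2019"))
    print(parse_post_date("2019-08-19") if date_parser else "dateutil not installed")
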
def process_posts(app, doctree):
"""Process posts and map posted document names to post details in the
environment."""
"""
Process posts and map posted document names to post details in the
environment.
"""
env = app.builder.env
if not hasattr(env, 'ablog_posts'):
if not hasattr(env, "ablog_posts"):
env.ablog_posts = {}
post_nodes = list(doctree.traverse(PostNode))
if not post_nodes:
return
post_date_format = app.config['post_date_format']
should_auto_orphan = app.config['post_auto_orphan']
post_date_format = app.config["post_date_format"]
should_auto_orphan = app.config["post_auto_orphan"]
docname = env.docname
if should_auto_orphan:
@ -220,15 +250,15 @@ def process_posts(app, doctree):
# "document isn't included in any toctree" warning is not issued
# We do not simply assign to should_auto_orphan because if auto-orphan
# is false, we still want to respect the per-post :rst:dir`orphan` setting
app.env.metadata[docname]['orphan'] = True
app.env.metadata[docname]["orphan"] = True
blog = Blog(app)
auto_excerpt = blog.post_auto_excerpt
multi_post = len(post_nodes) > 1 or blog.post_always_section
for order, node in enumerate(post_nodes, start=1):
if node['excerpt'] is None:
node['excerpt'] = auto_excerpt
if node["excerpt"] is None:
node["excerpt"] = auto_excerpt
if multi_post:
# section title, and first few paragraphs of the section of post
@ -247,34 +277,34 @@ def process_posts(app, doctree):
# Making sure that post has a title because all post titles
# are needed when resolving post lists in documents
title = node['title'] or _get_section_title(section)
title = node["title"] or _get_section_title(section)
# creating a summary here, before references are resolved
excerpt = []
if node.children:
if node['exclude']:
if node["exclude"]:
node.replace_self([])
else:
node.replace_self(node.children)
for child in node.children:
excerpt.append(child.deepcopy())
elif node['excerpt']:
elif node["excerpt"]:
count = 0
for nod in section.traverse(nodes.paragraph):
excerpt.append(nod.deepcopy())
count += 1
if count >= (node['excerpt'] or 0):
if count >= (node["excerpt"] or 0):
break
node.replace_self([])
else:
node.replace_self([])
nimg = node['image'] or blog.post_auto_image
nimg = node["image"] or blog.post_auto_image
if nimg:
for img, nod in enumerate(section.traverse(nodes.image), start=1):
if img == nimg:
excerpt.append(nod.deepcopy())
break
date = node['date']
date = node["date"]
if date:
try:
date = datetime.strptime(date, post_date_format)
@ -283,11 +313,13 @@ def process_posts(app, doctree):
try:
date = date_parser(date)
except ValueError:
raise ValueError('invalid post date in: ' + docname)
raise ValueError("invalid post date in: " + docname)
else:
raise ValueError('invalid post date (%s) in ' % (date) +
docname +
". Expected format: %s" % post_date_format)
raise ValueError(
"invalid post date (%s) in " % (date)
+ docname
+ ". Expected format: %s" % post_date_format
)
else:
date = None
@ -296,20 +328,20 @@ def process_posts(app, doctree):
# a potential problem here is that there may be files/folders with the
# same name, so issuing a warning when that's the case may be a good idea
folder, label = os.path.split(docname)
if label == 'index':
if label == "index":
folder, label = os.path.split(folder)
if not label:
label = slugify(title)
section_name = ''
section_name = ""
if multi_post and section.parent is not doctree:
section_name = section.attributes['ids'][0]
label += '-' + section_name
section_name = section.attributes["ids"][0]
label += "-" + section_name
else:
# create a reference for the post
# if it is posting the document
# ! this does not work for sections
app.env.domains['std'].data['labels'][label] = (docname, label, title)
app.env.domains["std"].data["labels"][label] = (docname, label, title)
if section.parent is doctree:
section_copy = section[0].deepcopy()
@ -318,29 +350,29 @@ def process_posts(app, doctree):
# multiple posting may result having post nodes
for nn in section_copy.traverse(PostNode):
if nn['exclude']:
if nn["exclude"]:
nn.replace_self([])
else:
nn.replace_self(node.children)
postinfo = {
'docname': docname,
'section': section_name,
'order': order,
'date': date,
'update': max(update_dates + [date]),
'title': title,
'excerpt': excerpt,
'tags': node['tags'],
'author': node['author'],
'category': node['category'],
'location': node['location'],
'language': node['language'],
'redirect': node['redirect'],
'nocomments': node['nocomments'],
'image': node['image'],
'exclude': node['exclude'],
'doctree': section_copy
"docname": docname,
"section": section_name,
"order": order,
"date": date,
"update": max(update_dates + [date]),
"title": title,
"excerpt": excerpt,
"tags": node["tags"],
"author": node["author"],
"category": node["category"],
"location": node["location"],
"language": node["language"],
"redirect": node["redirect"],
"nocomments": node["nocomments"],
"image": node["image"],
"exclude": node["exclude"],
"doctree": section_copy,
}
if docname not in env.ablog_posts:
@ -349,25 +381,28 @@ def process_posts(app, doctree):
# instantiate catalogs and collections here
# so that references are created and no warnings are issued
if app.builder.format == 'html':
stdlabel = env.domains['std'].data['labels']
if app.builder.format == "html":
stdlabel = env.domains["std"].data["labels"]
else:
stdlabel = env.intersphinx_inventory.setdefault('std:label', {})
baseurl = getattr(env.config, 'blog_baseurl').rstrip('/') + '/'
stdlabel = env.intersphinx_inventory.setdefault("std:label", {})
baseurl = getattr(env.config, "blog_baseurl").rstrip("/") + "/"
project, version = env.config.project, text_type(env.config.version)
for key in ['tags', 'author', 'category', 'location', 'language']:
for key in ["tags", "author", "category", "location", "language"]:
catalog = blog.catalogs[key]
for label in postinfo[key]:
coll = catalog[label]
if postinfo['date']:
coll = blog.archive[postinfo['date'].year]
if postinfo["date"]:
coll = blog.archive[postinfo["date"].year]
def process_postlist(app, doctree, docname):
"""Replace `PostList` nodes with lists of posts. Also, register all posts
if they have not been registered yet."""
"""
Replace `PostList` nodes with lists of posts.
Also, register all posts if they have not been registered yet.
"""
blog = Blog(app)
if not blog:
@ -375,7 +410,7 @@ def process_postlist(app, doctree, docname):
for node in doctree.traverse(PostList):
colls = []
for cat in ['tags', 'author', 'category', 'location', 'language']:
for cat in ["tags", "author", "category", "location", "language"]:
for coll in node[cat]:
if coll in blog.catalogs[cat].collections:
colls.append(blog.catalogs[cat].collections[coll])
@ -386,27 +421,24 @@ def process_postlist(app, doctree, docname):
posts = posts & set(coll)
posts = list(posts)
posts.sort(reverse=True)
posts = posts[:node.attributes['length']]
posts = posts[: node.attributes["length"]]
else:
posts = list(blog.recent(node.attributes['length'], docname,
**node.attributes))
posts = list(blog.recent(node.attributes["length"], docname, **node.attributes))
if node.attributes['sort']:
if node.attributes["sort"]:
posts.sort() # in reverse chronological order, so no reverse=True
fmts = list(Formatter().parse(node.attributes['format']))
not_in = set(['date', 'title', 'author', 'location', 'language',
'category', 'tags', None])
fmts = list(Formatter().parse(node.attributes["format"]))
not_in = {"date", "title", "author", "location", "language", "category", "tags", None}
for text, key, __, __ in fmts:
if key not in not_in:
raise KeyError('{} is not recognized in postlist format'
.format(key))
raise KeyError(f"{key} is not recognized in postlist format")
excerpts = node.attributes['excerpts']
date_format = node.attributes['date'] or _(blog.post_date_format_short)
excerpts = node.attributes["excerpts"]
date_format = node.attributes["date"] or _(blog.post_date_format_short)
bl = nodes.bullet_list()
bl.attributes['classes'].append('postlist-style-' + node['list-style'])
bl.attributes['classes'].append('postlist')
bl.attributes["classes"].append("postlist-style-" + node["list-style"])
bl.attributes["classes"].append("postlist")
for post in posts:
bli = nodes.list_item()
bl.append(bli)
@ -418,30 +450,30 @@ def process_postlist(app, doctree, docname):
par.append(nodes.Text(text))
if key is None:
continue
if key == 'date':
if key == "date":
par.append(nodes.Text(post.date.strftime(date_format)))
else:
if key == 'title':
if key == "title":
items = [post]
else:
items = getattr(post, key)
for i, item in enumerate(items, start=1):
if key == 'title':
if key == "title":
ref = nodes.reference()
ref['refuri'] = app.builder.get_relative_uri(docname, item.docname)
ref['ids'] = []
ref['backrefs'] = []
ref['dupnames'] = []
ref['classes'] = []
ref['names'] = []
ref['internal'] = True
ref["refuri"] = app.builder.get_relative_uri(docname, item.docname)
ref["ids"] = []
ref["backrefs"] = []
ref["dupnames"] = []
ref["classes"] = []
ref["names"] = []
ref["internal"] = True
ref.append(nodes.Text(text_type(item)))
else:
ref = _missing_reference(app, item.xref, docname)
par.append(ref)
if i < len(items):
par.append(nodes.Text(', '))
par.append(nodes.Text(", "))
if excerpts and post.excerpt:
for enode in post.excerpt:
enode = enode.deepcopy()
@ -455,9 +487,8 @@ def process_postlist(app, doctree, docname):
def missing_reference(app, env, node, contnode):
target = node['reftarget']
return _missing_reference(app, target, node.get('refdoc'),
contnode, node.get('refexplicit'))
target = node["reftarget"]
return _missing_reference(app, target, node.get("refdoc"), contnode, node.get("refexplicit"))
def _missing_reference(app, target, refdoc, contnode=None, refexplicit=False):
@ -466,15 +497,14 @@ def _missing_reference(app, target, refdoc, contnode=None, refexplicit=False):
if target in blog.references:
docname, dispname = blog.references[target]
if 'html' in app.builder.name:
if "html" in app.builder.name:
internal = True
uri = app.builder.get_relative_uri(refdoc, docname)
else:
internal = False
uri = blog.blog_baseurl + '/' + docname
uri = blog.blog_baseurl + "/" + docname
newnode = nodes.reference('', '', internal=internal, refuri=uri,
reftitle=dispname)
newnode = nodes.reference("", "", internal=internal, refuri=uri, reftitle=dispname)
if refexplicit:
newnode.append(contnode)
else:
@ -486,8 +516,10 @@ def _missing_reference(app, target, refdoc, contnode=None, refexplicit=False):
def generate_archive_pages(app):
"""Generate archive pages for all posts, categories, tags, authors, and
drafts."""
"""
Generate archive pages for all posts, categories, tags, authors, and
drafts.
"""
if not ablog.builder_support(app):
return
@ -495,80 +527,77 @@ def generate_archive_pages(app):
blog = Blog(app)
for post in blog.posts:
for redirect in post.redirect:
yield (redirect, {'redirect': post.docname, 'post': post},
'redirect.html')
yield (redirect, {"redirect": post.docname, "post": post}, "redirect.html")
found_docs = app.env.found_docs
atom_feed = bool(blog.blog_baseurl)
feed_archives = blog.blog_feed_archives
blog_path = blog.blog_path
for title, header, catalog in [
(_('Authors'), _('Posts by'), blog.author),
(_('Locations'), _('Posts from'), blog.location),
(_('Languages'), _('Posts in'), blog.language),
(_('Categories'), _('Posts in'), blog.category),
(_('All posts'), _('Posted in'), blog.archive),
(_('Tags'), _('Posts tagged'), blog.tags), ]:
(_("Authors"), _("Posts by"), blog.author),
(_("Locations"), _("Posts from"), blog.location),
(_("Languages"), _("Posts in"), blog.language),
(_("Categories"), _("Posts in"), blog.category),
(_("All posts"), _("Posted in"), blog.archive),
(_("Tags"), _("Posts tagged"), blog.tags),
]:
if not catalog:
continue
context = {
'parents': [],
'title': title,
'header': header,
'catalog': catalog,
'summary': True,
"parents": [],
"title": title,
"header": header,
"catalog": catalog,
"summary": True,
}
if catalog.docname not in found_docs:
yield (catalog.docname, context, 'catalog.html')
yield (catalog.docname, context, "catalog.html")
for collection in catalog:
if not collection:
continue
context = {
'parents': [],
'title': u'{0} {1}'.format(header, collection),
'header': header,
'collection': collection,
'summary': True,
'feed_path': collection.path if feed_archives else blog_path,
'archive_feed': atom_feed and feed_archives
"parents": [],
"title": f"{header} {collection}",
"header": header,
"collection": collection,
"summary": True,
"feed_path": collection.path if feed_archives else blog_path,
"archive_feed": atom_feed and feed_archives,
}
context['feed_title'] = context['title']
context["feed_title"] = context["title"]
if collection.docname not in found_docs:
yield (collection.docname, context, 'collection.html')
yield (collection.docname, context, "collection.html")
#ppp = 5
# ppp = 5
# for page, i in enumerate(range(0, len(blog.posts), ppp)):
if 1:
context = {
'parents': [],
'title': _('All Posts'),
'header': _('All'),
'collection': blog.posts,
'summary': True,
'atom_feed': atom_feed,
'feed_path': blog.blog_path,
"parents": [],
"title": _("All Posts"),
"header": _("All"),
"collection": blog.posts,
"summary": True,
"atom_feed": atom_feed,
"feed_path": blog.blog_path,
}
docname = blog.posts.docname
# if page:
# docname += '/' + str(page)
yield (docname, context, 'collection.html')
yield (docname, context, "collection.html")
context = {
'parents': [],
'title': _('Drafts'),
'collection': blog.drafts,
'summary': True,
}
yield (blog.drafts.docname, context, 'collection.html')
context = {"parents": [], "title": _("Drafts"), "collection": blog.drafts, "summary": True}
yield (blog.drafts.docname, context, "collection.html")
def generate_atom_feeds(app):
"""Generate archive pages for all posts, categories, tags, authors, and
drafts."""
"""
Generate archive pages for all posts, categories, tags, authors, and
drafts.
"""
if not ablog.builder_support(app):
return
@ -585,23 +614,28 @@ def generate_atom_feeds(app):
app.warn("werkzeug is not found, continue without atom feeds support.")
return
feed_path = os.path.join(app.builder.outdir, blog.blog_path, 'atom.xml')
feed_path = os.path.join(app.builder.outdir, blog.blog_path, "atom.xml")
feeds = [(blog.posts,
blog.blog_path,
feed_path,
blog.blog_title,
os_path_join(url, blog.blog_path, 'atom.xml'))]
feeds = [
(
blog.posts,
blog.blog_path,
feed_path,
blog.blog_title,
os_path_join(url, blog.blog_path, "atom.xml"),
)
]
if blog.blog_feed_archives:
for header, catalog in [
(_('Posts by'), blog.author),
(_('Posts from'), blog.location),
(_('Posts in'), blog.language),
(_('Posts in'), blog.category),
(_('Posted in'), blog.archive),
(_('Posts tagged'), blog.tags), ]:
(_("Posts by"), blog.author),
(_("Posts from"), blog.location),
(_("Posts in"), blog.language),
(_("Posts in"), blog.category),
(_("Posted in"), blog.archive),
(_("Posts tagged"), blog.tags),
]:
for coll in catalog:
# skip collections containing only drafts
@ -611,12 +645,15 @@ def generate_atom_feeds(app):
if not os.path.isdir(folder):
os.makedirs(folder)
feeds.append((coll,
coll.path,
os.path.join(folder, 'atom.xml'),
blog.blog_title + u' - ' + header +
u' ' + text_type(coll),
os_path_join(url, coll.path, 'atom.xml')))
feeds.append(
(
coll,
coll.path,
os.path.join(folder, "atom.xml"),
blog.blog_title + " - " + header + " " + text_type(coll),
os_path_join(url, coll.path, "atom.xml"),
)
)
# Config options
feed_length = blog.blog_feed_length
@ -624,42 +661,45 @@ def generate_atom_feeds(app):
for feed_posts, pagename, feed_path, feed_title, feed_url in feeds:
feed = AtomFeed(feed_title,
title_type='text',
url=url,
feed_url=feed_url,
subtitle=blog.blog_feed_subtitle,
generator=('ABlog', 'http://ablog.readthedocs.org',
ablog.__version__))
feed = AtomFeed(
feed_title,
title_type="text",
url=url,
feed_url=feed_url,
subtitle=blog.blog_feed_subtitle,
generator=("ABlog", "https://ablog.readthedocs.org", ablog.__version__),
)
for i, post in enumerate(feed_posts):
if feed_length and i == feed_length:
break
post_url = os_path_join(
url, app.builder.get_target_uri(post.docname))
post_url = os_path_join(url, app.builder.get_target_uri(post.docname))
if post.section:
post_url += '#' + post.section
post_url += "#" + post.section
if blog.blog_feed_titles:
content = None
else:
content = post.to_html(pagename, fulltext=feed_fulltext)
feed.add(post.title,
content=content,
title_type='text',
content_type='html',
author=', '.join(a.name for a in post.author),
url=post_url,
id=post_url,
updated=post.update, published=post.date)
feed.add(
post.title,
content=content,
title_type="text",
content_type="html",
author=", ".join(a.name for a in post.author),
url=post_url,
id=post_url,
updated=post.update,
published=post.date,
)
parent_dir = os.path.dirname(feed_path)
if not os.path.isdir(parent_dir):
os.makedirs(parent_dir)
with io.open(feed_path, 'w', encoding='utf-8') as out:
with open(feed_path, "w", encoding="utf-8") as out:
feed_str = feed.to_string()
try:
out.write(feed_str.encode('utf-8'))
out.write(feed_str.encode("utf-8"))
except TypeError:
out.write(feed_str)
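
The feed objects here come from werkzeug's AtomFeed helper, which was later removed from werkzeug, so the sketch below assumes an older werkzeug release; the entry values are made up purely for illustration:

    from datetime import datetime
    from werkzeug.contrib.atom import AtomFeed  # not available in werkzeug >= 1.0

    feed = AtomFeed("Example Blog", title_type="text",
                    url="https://example.org", feed_url="https://example.org/blog/atom.xml")
    feed.add("First Post",
             content="<p>Hello, world.</p>", content_type="html", title_type="text",
             author="Jane Doe", url="https://example.org/first-post",
             id="https://example.org/first-post",
             updated=datetime(2019, 8, 19), published=datetime(2019, 8, 19))
    with open("atom.xml", "w", encoding="utf-8") as out:
        out.write(feed.to_string())
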
@ -670,9 +710,11 @@ def generate_atom_feeds(app):
def register_posts(app):
"""Register posts found in the Sphinx build environment."""
"""
Register posts found in the Sphinx build environment.
"""
blog = Blog(app)
for docname, posts in getattr(app.env, 'ablog_posts', {}).items():
for docname, posts in getattr(app.env, "ablog_posts", {}).items():
for postinfo in posts:
blog.register(docname, postinfo)

View file

@ -1,46 +1,25 @@
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
import sys
import time
import datetime
from distutils.version import LooseVersion
from os import path
from io import open
from docutils.utils import column_width
from os import path
from textwrap import wrap
from sphinx import __version__
from docutils.utils import column_width
from pkg_resources import DistributionNotFound, get_distribution
from sphinx.cmd.quickstart import do_prompt, ensuredir, is_path
from sphinx.util import texescape
from sphinx.util.console import bold, nocolor, color_terminal
from sphinx.util.console import bold, color_terminal, nocolor
from sphinx.util.osutil import make_filename
SPHINX_LT_17 = LooseVersion(__version__) < LooseVersion('1.7')
from .version import version as __version__
if SPHINX_LT_17:
from sphinx.quickstart import do_prompt, is_path, ensuredir
else:
from sphinx.cmd.quickstart import do_prompt, is_path, ensuredir
w = lambda t, ls=80: "\n".join(wrap(t, ls))
from ablog import __version__
__all__ = ["generate", "ask_user", "ablog_start"]
if sys.version_info >= (3, 0):
text_type = str
else:
text_type = unicode
w = lambda t, ls=80: '\n'.join(wrap(t, ls))
__all__ = ['generate', 'ask_user', 'ablog_start']
ABLOG_CONF = u''
# prevents that the file is checked for being written in Python 2.x syntax
if sys.version_info >= (3, 0):
ABLOG_CONF = u'#!/usr/bin/env python3\n'
ABLOG_CONF += u'''# -*- coding: utf-8 -*-
ABLOG_CONF = "#!/usr/bin/env python3\n"
ABLOG_CONF += """# -*- coding: utf-8 -*-
# %(project)s build configuration file, created by
# `ablog start` on %(now)s.
@ -94,7 +73,7 @@ blog_authors = {
# keys should be used in ``post`` directive to refer to the locations.
# Default is ``{}``.
#blog_locations = {
# 'Earth': ('The Blue Planet', 'http://en.wikipedia.org/wiki/Earth),
# 'Earth': ('The Blue Planet', 'https://en.wikipedia.org/wiki/Earth),
#}
@ -383,9 +362,9 @@ html_static_path = ['%(dot)sstatic']
htmlhelp_basename = '%(project_fn)sdoc'
'''
"""
ABLOG_INDEX = u'''
ABLOG_INDEX = """
.. %(project)s index file, created by `ablog start` on %(now)s.
You can adapt this file completely to your liking, but it should at least
contain the root `toctree` directive.
@ -414,9 +393,9 @@ Here is a list of most recent posts:
about.rst
'''
"""
ABLOG_ABOUT = u'''
ABLOG_ABOUT = """
.. _about:
About %(author)s
@ -424,9 +403,9 @@ About %(author)s
The world wants to know more about you.
'''
"""
ABLOG_POST = u'''
ABLOG_POST = """
.. %(project)s post example, created by `ablog start` on %(post_date)s.
.. post:: %(post_date)s
@ -439,64 +418,74 @@ First Post
World, hello again! This very first paragraph of the post will be used
as excerpt in archives and feeds. Find out how to control how much is shown
in `Post Excerpts and Images
<http://ablog.readthedocs.org/manual/post-excerpts-and-images/>`_. Remember
<https://ablog.readthedocs.org/manual/post-excerpts-and-images/>`_. Remember
that you can refer to posts by file name, e.g. ``:ref:`first-post``` results
in :ref:`first-post`. Find out more at `Cross-Referencing Blog Pages
<http://ablog.readthedocs.org/manual/cross-referencing-blog-pages/>`_.
'''
<https://ablog.readthedocs.org/manual/cross-referencing-blog-pages/>`_.
"""
CONF_DEFAULTS = {
'sep': False,
'dot': '_',
'language': None,
'suffix': '.rst',
'master': 'index',
'makefile': False,
'batchfile': False,
'epub': False,
'ext_todo': False,
"sep": False,
"dot": "_",
"language": None,
"suffix": ".rst",
"master": "index",
"makefile": False,
"batchfile": False,
"epub": False,
"ext_todo": False,
}
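
Roughly how these defaults feed into generate() below, using the names defined in this module; the extra keys are a guess at the minimum that ask_user() would normally collect, so treat this as a sketch rather than a supported API:

    d = dict(CONF_DEFAULTS)
    d.update({
        "path": ".",                   # project root
        "project": "My Blog",
        "author": "Jane Doe",
        "version": "", "release": "",  # left empty, as ask_user() does
        "blog_baseurl": "https://example.org",
    })
    generate(d)  # writes conf.py, index.rst, about.rst and a first post
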
def generate(d, overwrite=True, silent=False):
'''Borrowed from Sphinx 1.3b3'''
"""
Borrowed from Sphinx 1.3b3.
"""
"""Generate project based on values in *d*."""
texescape.init()
if 'mastertoctree' not in d:
d['mastertoctree'] = ''
if 'mastertocmaxdepth' not in d:
d['mastertocmaxdepth'] = 2
if "mastertoctree" not in d:
d["mastertoctree"] = ""
if "mastertocmaxdepth" not in d:
d["mastertocmaxdepth"] = 2
d['project_fn'] = make_filename(d['project'])
d['project_manpage'] = d['project_fn'].lower()
d['now'] = time.asctime()
d['project_underline'] = column_width(d['project']) * '='
d["project_fn"] = make_filename(d["project"])
d["project_manpage"] = d["project_fn"].lower()
d["now"] = time.asctime()
d["project_underline"] = column_width(d["project"]) * "="
d['copyright'] = time.strftime('%Y') + ', ' + d['author']
d['author_texescaped'] = text_type(d['author']
).translate(texescape.tex_escape_map)
d['project_doc'] = d['project'] + ' Documentation'
d['project_doc_texescaped'] = text_type(d['project'] + ' Documentation'
).translate(texescape.tex_escape_map)
d["copyright"] = time.strftime("%Y") + ", " + d["author"]
d["author_texescaped"] = str(d["author"]).translate(texescape.tex_escape_map)
d["project_doc"] = d["project"] + " Documentation"
d["project_doc_texescaped"] = str(d["project"] + " Documentation").translate(
texescape.tex_escape_map
)
# escape backslashes and single quotes in strings that are put into
# a Python string literal
for key in ('project', 'project_doc', 'project_doc_texescaped',
'author', 'author_texescaped', 'copyright',
'version', 'release', 'master'):
d[key + '_str'] = d[key].replace('\\', '\\\\').replace("'", "\\'")
for key in (
"project",
"project_doc",
"project_doc_texescaped",
"author",
"author_texescaped",
"copyright",
"version",
"release",
"master",
):
d[key + "_str"] = d[key].replace("\\", "\\\\").replace("'", "\\'")
if not path.isdir(d['path']):
ensuredir(d['path'])
if not path.isdir(d["path"]):
ensuredir(d["path"])
srcdir = d['sep'] and path.join(d['path'], 'source') or d['path']
srcdir = d["sep"] and path.join(d["path"], "source") or d["path"]
ensuredir(srcdir)
d['exclude_patterns'] = ''
d["exclude_patterns"] = ""
# TODO: Work if we want this.
# if d['sep']:
# builddir = path.join(d['path'], 'build')
@ -505,41 +494,42 @@ def generate(d, overwrite=True, silent=False):
# builddir = path.join(srcdir, d['dot'] + 'build')
# d['exclude_patterns'] = repr(d['dot'] + 'build')
# ensuredir(builddir)
ensuredir(path.join(srcdir, d['dot'] + 'templates'))
ensuredir(path.join(srcdir, d['dot'] + 'static'))
ensuredir(path.join(srcdir, d["dot"] + "templates"))
ensuredir(path.join(srcdir, d["dot"] + "static"))
def write_file(fpath, content, newline=None):
if overwrite or not path.isfile(fpath):
print('Creating file %s.' % fpath)
f = open(fpath, 'wt', encoding='utf-8', newline=newline)
print("Creating file %s." % fpath)
f = open(fpath, "wt", encoding="utf-8", newline=newline)
try:
f.write(content)
finally:
f.close()
else:
print('File %s already exists, skipping.' % fpath)
print("File %s already exists, skipping." % fpath)
conf_text = ABLOG_CONF % d
write_file(path.join(srcdir, 'conf.py'), conf_text)
write_file(path.join(srcdir, "conf.py"), conf_text)
masterfile = path.join(srcdir, d['master'] + d['suffix'])
masterfile = path.join(srcdir, d["master"] + d["suffix"])
write_file(masterfile, ABLOG_INDEX % d)
about = path.join(srcdir, 'about' + d['suffix'])
about = path.join(srcdir, "about" + d["suffix"])
write_file(about, ABLOG_ABOUT % d)
d['post_date'] = datetime.datetime.today().strftime('%b %d, %Y')
firstpost = path.join(srcdir, 'first-post' + d['suffix'])
d["post_date"] = datetime.datetime.today().strftime("%b %d, %Y")
firstpost = path.join(srcdir, "first-post" + d["suffix"])
write_file(firstpost, ABLOG_POST % d)
if silent:
return
print(bold('Finished: An initial directory structure has been created.'))
print(bold("Finished: An initial directory structure has been created."))
def ask_user(d):
"""Borrowed from Sphinx 1.3b3
"""
Borrowed from Sphinx 1.3b3.
Ask the user for quickstart values missing from *d*.
@ -554,84 +544,89 @@ def ask_user(d):
d.update(CONF_DEFAULTS)
print(bold('Welcome to the ABlog %s quick start utility.') % __version__)
print('')
print(w('Please enter values for the following settings (just press Enter '
'to accept a default value, if one is given in brackets).'))
print(bold("Welcome to the ABlog %s quick start utility.") % __version__)
print("")
print(
w(
"Please enter values for the following settings (just press Enter "
"to accept a default value, if one is given in brackets)."
)
)
print('')
if 'path' in d:
print(bold('Selected root path: %s' % d['path']))
print("")
if "path" in d:
print(bold("Selected root path: %s" % d["path"]))
else:
print('Enter the root path for your blog project.')
if SPHINX_LT_17:
do_prompt(d, 'path', 'Root path for your project', '.', is_path)
else:
d['path'] = do_prompt('Root path for your project', '.', is_path)
print("Enter the root path for your blog project.")
d["path"] = do_prompt("Root path for your project", ".", is_path)
while path.isfile(path.join(d['path'], 'conf.py')) or \
path.isfile(path.join(d['path'], 'source', 'conf.py')):
print('')
print(bold(w('Error: an existing conf.py has been found in the '
'selected root path.')))
print('ablog start will not overwrite existing Sphinx projects.')
print('')
if SPHINX_LT_17:
do_prompt(d, 'path','Please enter a new root path (or just Enter to exit)', '', is_path)
else:
d['path'] = do_prompt('Please enter a new root path (or just Enter to exit)', '', is_path)
if not d['path']:
while path.isfile(path.join(d["path"], "conf.py")) or path.isfile(
path.join(d["path"], "source", "conf.py")
):
print("")
print(bold(w("Error: an existing conf.py has been found in the " "selected root path.")))
print("ablog start will not overwrite existing Sphinx projects.")
print("")
d["path"] = do_prompt("Please enter a new root path (or just Enter to exit)", "", is_path)
if not d["path"]:
sys.exit(1)
if 'project' not in d:
print('')
print(w('Project name will occur in several places in the website, '
'including blog archive pages and atom feeds. Later, you can '
'set separate names for different parts of the website in '
'configuration file.'))
if SPHINX_LT_17:
do_prompt(d, 'project', 'Project name')
else:
d['project'] = do_prompt('Project name')
if "project" not in d:
print("")
print(
w(
"Project name will occur in several places in the website, "
"including blog archive pages and atom feeds. Later, you can "
"set separate names for different parts of the website in "
"configuration file."
)
)
d["project"] = do_prompt("Project name")
if 'author' not in d:
print(w('This of author as the copyright holder of the content. '
'If your blog has multiple authors, you might want to enter '
'a team name here. Later, you can specify individual authors '
'using `blog_authors` configuration option.'))
if SPHINX_LT_17:
do_prompt(d, 'author', 'Author name(s)')
else:
d['author'] = do_prompt('Author name(s)')
if "author" not in d:
print(
w(
"This of author as the copyright holder of the content. "
"If your blog has multiple authors, you might want to enter "
"a team name here. Later, you can specify individual authors "
"using `blog_authors` configuration option."
)
)
d["author"] = do_prompt("Author name(s)")
d['release'] = d['version'] = ''
d["release"] = d["version"] = ""
while path.isfile(path.join(d['path'], d['master'] + d['suffix'])) or \
path.isfile(path.join(d['path'], 'source', d['master'] + d['suffix'])):
print('')
print(bold(w('Error: the master file %s has already been found in the '
'selected root path.' % (d['master'] + d['suffix']))))
print('ablog-start will not overwrite the existing file.')
print('')
if SPHINX_LT_17:
do_prompt(d, 'master', w('Please enter a new file name, or rename the '
'existing file and press Enter'), d['master'])
else:
d['master'] = do_prompt(w('Please enter a new file name, or rename the '
'existing file and press Enter'), d['master'])
while path.isfile(path.join(d["path"], d["master"] + d["suffix"])) or path.isfile(
path.join(d["path"], "source", d["master"] + d["suffix"])
):
print("")
print(
bold(
w(
"Error: the master file %s has already been found in the "
"selected root path." % (d["master"] + d["suffix"])
)
)
)
print("ablog-start will not overwrite the existing file.")
print("")
d["master"] = do_prompt(
w("Please enter a new file name, or rename the " "existing file and press Enter"),
d["master"],
)
if 'blog_baseurl' not in d:
print('')
print(w('Please enter the base URL for your project. Blog feeds will '
'be generated relative to this URL. If you don\'t have one yet, '
'you can set it in configuration file later.'))
if SPHINX_LT_17:
# APR: Not sure how do_prompt() worked prior to Sphinx 1.7; likely to be `lambda x: x` here too
do_prompt(d, 'blog_baseurl', 'Base URL for your project', None, lambda x: True)
else:
d['blog_baseurl'] = do_prompt('Base URL for your project', None, lambda x: x)
if "blog_baseurl" not in d:
print("")
print(
w(
"Please enter the base URL for your project. Blog feeds will "
"be generated relative to this URL. If you don't have one yet, "
"you can set it in configuration file later."
)
)
d["blog_baseurl"] = do_prompt("Base URL for your project", None, lambda x: x)
print('')
print("")
def ablog_start(**kwargs):
@ -643,8 +638,8 @@ def ablog_start(**kwargs):
try:
ask_user(d)
except (KeyboardInterrupt, EOFError):
print('')
print('[Interrupted.]')
print("")
print("[Interrupted.]")
return
generate(d)

View file

@ -46,8 +46,8 @@
(document.getElementsByTagName('head')[0] || document.getElementsByTagName('body')[0]).appendChild(dsq);
})();
</script>
<noscript>Please enable JavaScript to view the <a href="http://disqus.com/?ref_noscript">comments powered by Disqus.</a></noscript>
<a href="http://disqus.com" class="dsq-brlink">comments powered by <span class="logo-disqus">Disqus</span></a>
<noscript>Please enable JavaScript to view the <a href="https://disqus.com/?ref_noscript">comments powered by Disqus.</a></noscript>
<a href="https://disqus.com" class="dsq-brlink">comments powered by <span class="logo-disqus">Disqus</span></a>
</div>
{% endif %}
</div>

7
ablog/version.py Normal file
View file

@ -0,0 +1,7 @@
# This file is for compatibility with astropy_helpers
from pkg_resources import DistributionNotFound, get_distribution
try:
version = get_distribution("ablog").version
except DistributionNotFound:
version = "unknown.dev"

View file

@ -1,23 +1,9 @@
# AppVeyor.com is a Continuous Integration service to build and run tests under Windows
environment:
global:
PYTHON: "C:\\conda"
MINICONDA_VERSION: "latest"
CMD_IN_ENV: "cmd /E:ON /V:ON /C .\\ci-helpers\\appveyor\\windows_sdk.cmd"
PYTHON_ARCH: "64" # needs to be set for CMD_IN_ENV to succeed. If a mix
# of 32 bit and 64 bit builds are needed, move this
# to the matrix section.
CONDA_CHANNELS: "conda-forge"
CONDA_DEPENDENCIES: "sphinx werkzeug alabaster invoke graphviz nbsphinx"
PIP_DEPENDENCIES: "sphinx-automodapi"
matrix:
- PYTHON_VERSION: "2.7"
- PYTHON_VERSION: "3.5"
- PYTHON_VERSION: "3.6"
- PYTHON_VERSION: "3.7"
matrix:
- PY_MAJOR_VER: 3
PYTHON_ARCH: "x86_64"
build: false
@ -36,12 +22,16 @@ install:
cinst graphviz --no-progress
}
- ps: $env:Path += ";C:\Program Files\Pandoc\"
- git clone git://github.com/astropy/ci-helpers.git
- powershell ci-helpers/appveyor/install-miniconda.ps1
- SET PATH=%PYTHON%;%PYTHON%\\Scripts;%PATH%
- activate test
- pip install -e .
- ps: Start-FileDownload "https://repo.continuum.io/miniconda/Miniconda$env:PY_MAJOR_VER-latest-Windows-$env:PYTHON_ARCH.exe" C:\Miniconda.exe; echo "Finished downloading miniconda"
- cmd: C:\Miniconda.exe /S /D=C:\Py
- cmd: SET PATH=C:\Py;C:\Py\Scripts;C:\Py\Library\bin;%PATH%
- cmd: conda config --set always_yes yes
- cmd: conda update conda --quiet
- cmd: conda config --add channels conda-forge
- cmd: conda config --set channel_priority strict
- cmd: conda install sphinx werkzeug alabaster invoke graphviz nbsphinx --quiet
- cmd: python -m pip install --upgrade pip
- cmd: pip install -e .[all]
test_script:
- python setup.py build_sphinx

19
docs/Makefile Normal file
View file

@ -0,0 +1,19 @@
# Minimal makefile for Sphinx documentation
#
# You can set these variables from the command line.
SPHINXOPTS =
SPHINXBUILD = sphinx-build
SOURCEDIR = .
BUILDDIR = _build
# Put it first so that "make" without argument is like "make help".
help:
@$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
.PHONY: help Makefile
# Catch-all target: route all unknown targets to Sphinx using the new
# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
%: Makefile
@$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)

16
docs/_static/a.svg vendored
View file

@ -1,12 +1,12 @@
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<svg
xmlns:dc="http://purl.org/dc/elements/1.1/"
xmlns:cc="http://creativecommons.org/ns#"
xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
xmlns:svg="http://www.w3.org/2000/svg"
xmlns="http://www.w3.org/2000/svg"
xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
xmlns:dc="https://purl.org/dc/elements/1.1/"
xmlns:cc="https://creativecommons.org/ns#"
xmlns:rdf="https://www.w3.org/1999/02/22-rdf-syntax-ns#"
xmlns:svg="https://www.w3.org/2000/svg"
xmlns="https://www.w3.org/2000/svg"
xmlns:sodipodi="https://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
xmlns:inkscape="https://www.inkscape.org/namespaces/inkscape"
height="850"
width="850"
id="svg2"
@ -20,7 +20,7 @@
rdf:about="">
<dc:format>image/svg+xml</dc:format>
<dc:type
rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
rdf:resource="https://purl.org/dc/dcmitype/StillImage" />
<dc:title></dc:title>
</cc:Work>
</rdf:RDF>


View file

@ -1,12 +1,12 @@
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<svg
xmlns:dc="http://purl.org/dc/elements/1.1/"
xmlns:cc="http://creativecommons.org/ns#"
xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
xmlns:svg="http://www.w3.org/2000/svg"
xmlns="http://www.w3.org/2000/svg"
xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
xmlns:dc="https://purl.org/dc/elements/1.1/"
xmlns:cc="https://creativecommons.org/ns#"
xmlns:rdf="https://www.w3.org/1999/02/22-rdf-syntax-ns#"
xmlns:svg="https://www.w3.org/2000/svg"
xmlns="https://www.w3.org/2000/svg"
xmlns:sodipodi="https://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
xmlns:inkscape="https://www.inkscape.org/namespaces/inkscape"
height="1200"
width="3600"
id="svg4026"
@ -23,7 +23,7 @@
rdf:about="">
<dc:format>image/svg+xml</dc:format>
<dc:type
rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
rdf:resource="https://purl.org/dc/dcmitype/StillImage" />
<dc:title></dc:title>
</cc:Work>
</rdf:RDF>


View file

@ -1,6 +1,6 @@
/*!
* Font Awesome 4.2.0 by @davegandy - http://fontawesome.io - @fontawesome
* License - http://fontawesome.io/license (Font: SIL OFL 1.1, CSS: MIT License)
* Font Awesome 4.2.0 by @davegandy - https://fontawesome.io - @fontawesome
* License - https://fontawesome.io/license (Font: SIL OFL 1.1, CSS: MIT License)
*/
/* FONT PATH
* -------------------------- */

View file

@ -1,6 +1,6 @@
<?xml version="1.0" standalone="no"?>
<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd" >
<svg xmlns="http://www.w3.org/2000/svg">
<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN" "https://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd" >
<svg xmlns="https://www.w3.org/2000/svg">
<metadata></metadata>
<defs>
<font id="fontawesomeregular" horiz-adv-x="1536" >
@ -517,4 +517,4 @@
<glyph unicode="&#xf20e;" horiz-adv-x="1792" />
<glyph unicode="&#xf500;" horiz-adv-x="1792" />
</font>
</defs></svg>
</defs></svg>


View file

@ -1,75 +1,85 @@
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
import os
import re
import sys
import alabaster
from pkg_resources import get_distribution
from sphinx import addnodes
import ablog
ablog_builder = 'dirhtml'
ablog_website = '_website'
ablog_builder = "dirhtml"
ablog_website = "_website"
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.ifconfig',
'sphinx.ext.extlinks',
'sphinx_automodapi.automodapi',
'alabaster',
'nbsphinx',
'ablog'
"sphinx.ext.autodoc",
"sphinx.ext.doctest",
"sphinx.ext.intersphinx",
"sphinx.ext.todo",
"sphinx.ext.ifconfig",
"sphinx.ext.extlinks",
"sphinx_automodapi.automodapi",
"alabaster",
"nbsphinx",
"ablog",
]
#language = 'de'
#language = 'tr'
# language = 'de'
# language = 'tr'
# PROJECT
version = release = ablog.__version__
project = u'ABlog'
copyright = u'2014-2018, ABlog Team'
master_doc = 'index'
source_suffix = '.rst'
exclude_patterns = ['_build']
versionmod = get_distribution('ablog')
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
# The short X.Y version.
version = '.'.join(versionmod.version.split('.')[:3])
# The full version, including alpha/beta/rc tags.
release = versionmod.version.split('+')[0]
# Is this version a development release
is_development = '.dev' in release
project = "ABlog"
copyright = "2014-2019, ABlog Team"
master_doc = "index"
source_suffix = ".rst"
exclude_patterns = ["_build"]
# HTML OUTPUT
html_title = "ABlog"
html_static_path = ['_static']
html_static_path = ["_static"]
html_use_index = True
html_domain_indices = False
html_show_sourcelink = True
html_favicon = '_static/ablog.ico'
html_favicon = "_static/ablog.ico"
# ABLOG
templates_path = [ablog.get_html_templates_path()]
blog_title = 'ABlog'
blog_baseurl = 'http://ablog.readthedocs.org'
blog_title = "ABlog"
blog_baseurl = "https://ablog.readthedocs.org"
blog_locations = {
'Pittsburgh': ('Pittsburgh, PA', 'http://en.wikipedia.org/wiki/Pittsburgh'),
'SF': ('San Francisco, CA', 'http://en.wikipedia.org/wiki/San_Francisco'),
'Denizli': ('Denizli, Turkey', 'http://en.wikipedia.org/wiki/Denizli'),
"Pittsburgh": ("Pittsburgh, PA", "https://en.wikipedia.org/wiki/Pittsburgh"),
"SF": ("San Francisco, CA", "https://en.wikipedia.org/wiki/San_Francisco"),
"Denizli": ("Denizli, Turkey", "https://en.wikipedia.org/wiki/Denizli"),
}
blog_languages = {
'en': ('English', None),
}
blog_default_language = 'en'
blog_languages = {"en": ("English", None)}
blog_default_language = "en"
blog_authors = {
'Ahmet': ('Ahmet Bakan', 'http://ahmetbakan.com'),
'Luc': ('Luc Saffre', 'http://saffre-rumma.net/luc/'),
'Mehmet': (u'Mehmet Gerçeker', 'https://github.com/mehmetg'),
"Ahmet": ("Ahmet Bakan", "https://ahmetbakan.com"),
"Luc": ("Luc Saffre", "https://saffre-rumma.net/luc/"),
"Mehmet": ("Mehmet Gerçeker", "https://github.com/mehmetg"),
}
blog_feed_archives = True
blog_feed_fulltext = True
blog_feed_length = None
disqus_shortname = 'ablogforsphinx'
disqus_shortname = "ablogforsphinx"
disqus_pages = True
fontawesome_css_file = 'css/font-awesome.css'
fontawesome_css_file = "css/font-awesome.css"
# blog_feed_titles = False
# blog_archive_titles = False
@ -77,53 +87,55 @@ fontawesome_css_file = 'css/font-awesome.css'
# THEME
html_style = 'alabaster.css'
html_theme = 'alabaster'
html_style = "alabaster.css"
html_theme = "alabaster"
html_sidebars = {
'**': ['about.html',
'postcard.html', 'recentposts.html',
'tagcloud.html', 'categories.html',
'archives.html',
'searchbox.html']
"**": [
"about.html",
"postcard.html",
"recentposts.html",
"tagcloud.html",
"categories.html",
"archives.html",
"searchbox.html",
]
}
html_theme_path = [alabaster.get_path()]
html_theme_options = {
'travis_button': True,
'github_user': 'sunpy',
'github_repo': 'ablog',
'description': 'ABlog for blogging with Sphinx',
'logo': 'ablog.png',
"travis_button": True,
"github_user": "sunpy",
"github_repo": "ablog",
"description": "ABlog for blogging with Sphinx",
"logo": "ablog.png",
}
# SPHINX
intersphinx_mapping = {
'python': ('http://docs.python.org/', None),
'sphinx': ('http://sphinx-doc.org/', None)
"python": ("https://docs.python.org/", None),
"sphinx": ("http://www.sphinx-doc.org/en/latest/", None),
}
extlinks = {
'wiki': ('http://en.wikipedia.org/wiki/%s', ''),
'issue': ('https://github.com/sunpy/ablog/issues/%s', 'issue '),
'pull': ('https://github.com/sunpy/ablog/pull/%s', 'pull request '),
"wiki": ("https://en.wikipedia.org/wiki/%s", ""),
"issue": ("https://github.com/sunpy/ablog/issues/%s", "issue "),
"pull": ("https://github.com/sunpy/ablog/pull/%s", "pull request "),
}
exclude_patterns = ['docs/manual/.ipynb_checkpoints/*']
exclude_patterns = ["docs/manual/.ipynb_checkpoints/*"]
rst_epilog = '''
rst_epilog = """
.. _Sphinx: http://sphinx-doc.org/
.. _Python: http://python.org
.. _Disqus: http://disqus.com/
.. _Python: https://python.org
.. _Disqus: https://disqus.com/
.. _GitHub: https://github.com/sunpy/ablog
.. _PyPI: https://pypi.python.org/pypi/ablog
.. _Read The Docs: https://readthedocs.org/
.. _Alabaster: https://github.com/bitprophet/alabaster
'''
import re
from sphinx import addnodes
"""
event_sig_re = re.compile(r'([a-zA-Z-]+)\s*\((.*)\)')
event_sig_re = re.compile(r"([a-zA-Z-]+)\s*\((.*)\)")
def parse_event(env, sig, signode):
@ -134,7 +146,7 @@ def parse_event(env, sig, signode):
name, args = m.groups()
signode += addnodes.desc_name(name, name)
plist = addnodes.desc_parameterlist()
for arg in args.split(','):
for arg in args.split(","):
arg = arg.strip()
plist += addnodes.desc_parameter(arg, arg)
signode += plist
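
event_sig_re splits an event signature of the form "name (args)" into the event name and its parameter list, which parse_event then renders as a signature node. For instance (standalone):

    import re

    event_sig_re = re.compile(r"([a-zA-Z-]+)\s*\((.*)\)")
    m = event_sig_re.match("html-page-context (app, pagename, templatename, context, doctree)")
    name, args = m.groups()
    print(name)                                  # html-page-context
    print([arg.strip() for arg in args.split(",")])
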
@ -144,11 +156,13 @@ def parse_event(env, sig, signode):
def setup(app):
from sphinx.ext.autodoc import cut_lines
from sphinx.util.docfields import GroupedField
app.connect('autodoc-process-docstring', cut_lines(4, what=['module']))
app.add_object_type('confval', 'confval',
objname='configuration value',
indextemplate='pair: %s; configuration value')
fdesc = GroupedField('parameter', label='Parameters',
names=['param'], can_collapse=True)
app.add_object_type('event', 'event', 'pair: %s; event', parse_event,
doc_field_types=[fdesc])
app.connect("autodoc-process-docstring", cut_lines(4, what=["module"]))
app.add_object_type(
"confval",
"confval",
objname="configuration value",
indextemplate="pair: %s; configuration value",
)
fdesc = GroupedField("parameter", label="Parameters", names=["param"], can_collapse=True)
app.add_object_type("event", "event", "pair: %s; event", parse_event, doc_field_types=[fdesc])

View file

@ -11,12 +11,12 @@ website project into a full-fledged blog with:
* `Font-Awesome integration`_
* `Easy GitHub Pages deploys`_
.. _Atom feeds: http://ablog.readthedocs.org/blog/atom.xml
.. _Archive pages: http://ablog.readthedocs.org/blog/
.. _Blog sidebars: http://ablog.readthedocs.org/manual/ablog-configuration-options/#sidebars
.. _Disqus integration: http://ablog.readthedocs.org/manual/ablog-configuration-options/#disqus-integration
.. _Font-Awesome integration: http://ablog.readthedocs.org/manual/ablog-configuration-options/#fa
.. _Easy GitHub Pages deploys: http://ablog.readthedocs.org/manual/deploy-to-github-pages/
.. _Atom feeds: https://ablog.readthedocs.org/blog/atom.xml
.. _Archive pages: https://ablog.readthedocs.org/blog/
.. _Blog sidebars: https://ablog.readthedocs.org/manual/ablog-configuration-options/#sidebars
.. _Disqus integration: https://ablog.readthedocs.org/manual/ablog-configuration-options/#disqus-integration
.. _Font-Awesome integration: https://ablog.readthedocs.org/manual/ablog-configuration-options/#fa
.. _Easy GitHub Pages deploys: https://ablog.readthedocs.org/manual/deploy-to-github-pages/
.. _installation:
@ -33,9 +33,9 @@ making it look good, generating feeds, running deploy commands, and parsing
dates.
.. _pip: https://pip.pypa.io
.. _Werkzeug: http://werkzeug.pocoo.org/
.. _Werkzeug: https://werkzeug.pocoo.org/
.. _Alabaster: https://github.com/bitprophet/alabaster
.. _Invoke: http://www.pyinvoke.org/
.. _Invoke: https://www.pyinvoke.org/
.. _dateutil: https://pypi.python.org/pypi/python-dateutil
Getting Started
@ -62,7 +62,7 @@ If you already have a project, enable blogging by making following changes in ``
# 2b. if `templates_path` is defined
templates_path.append(ablog.get_html_templates_path())
.. _ABlog Quick Start: http://ablog.readthedocs.org/manual/ablog-quick-start
.. _ABlog Quick Start: https://ablog.readthedocs.org/manual/ablog-quick-start
How it works
@ -109,10 +109,10 @@ can find more about configuring and using ABlog:
.. only:: html
.. image:: https://secure.travis-ci.org/sunpy/ablog.png?branch=devel
:target: http://travis-ci.org/#!/sunpy/ablog
:target: https://travis-ci.org/#!/sunpy/ablog
.. image:: https://readthedocs.org/projects/ablog/badge/?version=latest
:target: http://ablog.readthedocs.org/
:target: https://ablog.readthedocs.org/
.. toctree::

35
docs/make.bat Normal file
View file

@ -0,0 +1,35 @@
@ECHO OFF
pushd %~dp0
REM Command file for Sphinx documentation
if "%SPHINXBUILD%" == "" (
set SPHINXBUILD=sphinx-build
)
set SOURCEDIR=.
set BUILDDIR=_build
if "%1" == "" goto help
%SPHINXBUILD% >NUL 2>NUL
if errorlevel 9009 (
echo.
echo.The 'sphinx-build' command was not found. Make sure you have Sphinx
echo.installed, then set the SPHINXBUILD environment variable to point
echo.to the full path of the 'sphinx-build' executable. Alternatively you
echo.may add the Sphinx directory to PATH.
echo.
echo.If you don't have Sphinx installed, grab it from
echo.https://sphinx-doc.org/
exit /b 1
)
%SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS%
goto end
:help
%SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS%
:end
popd

View file

@ -49,7 +49,7 @@ Authors, languages, & locations
blog_authors = {
'Ahmet': ('Ahmet Bakan', 'http://ahmetbakan.com'),
'Durden': ('Tyler Durden',
'http://en.wikipedia.org/wiki/Tyler_Durden'),
'https://en.wikipedia.org/wiki/Tyler_Durden'),
}
.. confval:: blog_languages
@ -177,7 +177,7 @@ Font awesome
ABlog templates will use of `Font Awesome`_ icons if one of the following
is set:
.. _Font Awesome: http://fontawesome.io/
.. _Font Awesome: https://fontawesome.io/
.. confval:: fontawesome_link_cdn
@ -185,14 +185,14 @@ is set:
URL to `Font Awesome`_ :file:`.css` hosted at `Bootstrap CDN`_ or anywhere
else. Default: ``None``
.. _Bootstrap CDN: http://www.bootstrapcdn.com/fontawesome/
.. _Bootstrap CDN: https://www.bootstrapcdn.com/fontawesome/
.. update:: Jul 29, 2015
:confval:`fontawesome_link_cdn` was a *boolean* option, and now became a
*string* to enable using desired version of `Font Awesome`_.
To get the old behavior, use
``http://netdna.bootstrapcdn.com/font-awesome/4.0.3/css/font-awesome.min.css'``.
``https://netdna.bootstrapcdn.com/font-awesome/4.0.3/css/font-awesome.min.css'``.
.. confval:: fontawesome_included

View file

@ -58,4 +58,4 @@ involves the following steps:
.. _init_catalog: http://babel.edgewall.org/wiki/Documentation/setup.html#init-catalog
.. _update_catalog: http://babel.edgewall.org/wiki/Documentation/setup.html#update-catalog
.. _compile_catalog: http://babel.edgewall.org/wiki/Documentation/setup.html#id4
.. _compile_catalog: http://babel.edgewall.org/wiki/Documentation/setup.html#id4

View file

@ -85,7 +85,7 @@ Analytics
ABlog uses Alabaster_ theme by default. You can use theme options to set
your `Google Analytics`_ identifier to enable tracking.
.. _Google Analytics: http://www.google.com/analytics/
.. _Google Analytics: https://www.google.com/analytics/
Configuration
-------------
@ -121,5 +121,3 @@ not mention yet. Here they are:
* :file:`.doctree` folder, created after build command is called, is
where Sphinx_ stores the state of your project. Files in this folder
saves time when you rebuild your project.

View file

@ -12,7 +12,7 @@ Automate GitHub Pages Deploys
If being away from your personal computer is holding you back from blogging, keep
reading. This post will show you how to automate builds and deploys using
Travis CI. Once you set this up, all you need to do post an article will be
pushing to GitHub or creating a new file on `GitHub.com <http://github.com>`_
pushing to GitHub or creating a new file on `GitHub.com <https://github.com>`_
from any computer!
For this to work, you need to be hosting your website on GitHub pages.
@ -108,4 +108,4 @@ See :ref:`deploy-to-github-pages` and :ref:`commands` to find out more about
deploy options.
Finally, you can find out more about :file:`.travis.yml` file and customizing your built on
Travis CI `user documentation <http://docs.travis-ci.com/user/customizing-the-build/>`_.
Travis CI `user documentation <https://docs.travis-ci.com/user/customizing-the-build/>`_.

View file

@ -41,4 +41,4 @@ to get published:
Let us know how this works for you!
.. _Jekyll: http://jekyllrb.com/
.. _Jekyll: https://jekyllrb.com/

25
pyproject.toml Normal file
View file

@@ -0,0 +1,25 @@
[build-system]
requires = ["setuptools", "setuptools_scm", "wheel"]
build-backend = 'setuptools.build_meta'
[tool.black]
line-length = 100
include = '\.pyi?$'
exclude = '''
(
/(
\.eggs
| \.git
| \.mypy_cache
| \.tox
| \.venv
| _build
| buck-out
| build
| dist
| astropy_helpers
| docs
)/
| ah_bootstrap.py
)
'''

44
setup.cfg Normal file
View file

@@ -0,0 +1,44 @@
[metadata]
name = ablog
author = The SunPy Community
author_email = sunpy@googlegroups.com
description = A Sphinx extension that converts any documentation or personal website project into a full-fledged blog.
long_description = file: README.rst
license = MIT
url = https://ablog.readthedocs.org/
edit_on_github = True
github_project = sunpy/ablog
[options]
python_requires = >=3.6
packages = find:
include_package_data = True
setup_requires = setuptools_scm
install_requires =
werkzeug
sphinx>=2.0
alabaster
invoke
python-dateutil
sphinx-automodapi
[options.extras_require]
notebook =
nbsphinx
ipython
[options.entry_points]
console_scripts =
ablog = ablog.commands:ablog_main
[tool:isort]
line_length = 100
not_skip = __init__.py
sections = FUTURE, STDLIB, THIRDPARTY, FIRSTPARTY, LOCALFOLDER
default_section = THIRDPARTY
known_first_party = ablog
multi_line_output = 3
balanced_wrapping = True
include_trailing_comma = True
length_sort = False
length_sort_stdlib = True
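For readers unfamiliar with ``console_scripts`` entry points: the single entry above
means the installed ``ablog`` command behaves roughly like the wrapper below (an
illustration of the mechanism, not a file in this changeset)::

    # roughly what setuptools generates for 'ablog = ablog.commands:ablog_main'
    import sys

    from ablog.commands import ablog_main

    if __name__ == '__main__':
        sys.exit(ablog_main())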

View file

@@ -1,68 +1,20 @@
from itertools import chain
from setuptools import setup
from setuptools.config import read_configuration
__version__ = ''
with open('ablog/__init__.py') as inp:
for line in inp:
if (line.startswith('__version__')):
exec(line.strip())
break
long_description = '''
ABlog for Sphinx
================
################################################################################
# Programmatically generate some extras combos.
################################################################################
extras = read_configuration("setup.cfg")["options"]["extras_require"]
Please note that this is an official continuation of
`Eric Holscher's Ablog Sphinx extension <https://github.com/abakan/ablog/>`_.
# Dev is everything
extras["dev"] = list(chain(*extras.values()))
A Sphinx extension that converts any documentation or personal website project
into a full-fledged blog. See http://ablog.readthedocs.org for details.
# All is everything but tests and docs
exclude_keys = ("tests", "docs", "dev")
ex_extras = dict(filter(lambda i: i[0] not in exclude_keys, extras.items()))
# Concatenate all the values together for 'all'
extras["all"] = list(chain.from_iterable(ex_extras.values()))
.. image:: https://secure.travis-ci.org/sunpy/ablog.png?branch=devel
:target: http://travis-ci.org/#!/sunpy/ablog
.. image:: https://readthedocs.org/projects/ablog/badge/?version=latest
:target: http://ablog.readthedocs.org/
'''
setup(
name='ablog',
version=__version__,
author='SunPy Developers',
author_email='nabil.freij@gmail.com',
description='ABlog allows you to blog with Sphinx',
long_description=long_description,
url='http://ablog.readthedocs.org/',
packages=['ablog'],
package_dir={'ablog': 'ablog'},
package_data={'ablog': [
'templates/*.html',
'locale/sphinx.pot',
'locale/*/LC_MESSAGES/sphinx.*o']},
license='MIT License',
keywords=('Sphinx, extension, blogging, atom feeds'),
classifiers=[
'Development Status :: 4 - Beta',
'Topic :: Software Development :: Documentation',
'License :: OSI Approved :: MIT License',
'Operating System :: MacOS',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3',
],
provides=['ablog'],
install_requires=['werkzeug', 'sphinx>=1.6', 'alabaster', 'invoke',
'python-dateutil', 'sphinx-automodapi'],
extra_requires={'notebook': ['nbsphinx', 'ipython']},
message_extractors={
'ablog': [
('**.html', 'jinja2', None),
('**.py', 'python', None),
]
},
entry_points={
'console_scripts': [
'ablog = ablog.commands:ablog_main',
],
},
)
setup(extras_require=extras, use_scm_version=True)
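To make the extras logic in the new ``setup.py`` easier to follow, here is a standalone
sketch of the same composition, seeded with the single ``notebook`` extra declared in
``setup.cfg`` (only values visible in this changeset are used)::

    from itertools import chain

    extras = {'notebook': ['nbsphinx', 'ipython']}

    # 'dev' pulls in every declared extra
    extras['dev'] = list(chain(*extras.values()))

    # 'all' is everything except tests, docs and dev
    exclude_keys = ('tests', 'docs', 'dev')
    ex_extras = {k: v for k, v in extras.items() if k not in exclude_keys}
    extras['all'] = list(chain.from_iterable(ex_extras.values()))

    # result: {'notebook': ['nbsphinx', 'ipython'],
    #          'dev': ['nbsphinx', 'ipython'],
    #          'all': ['nbsphinx', 'ipython']}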