+blog = controllers.blog
+blog.enabled = True
+blog.path = "/blog"
+blog.name = "i3 - improved tiling wm - blog"
+blog.description = "News about the i3 window manager"
+blog.timezone = "Europe/Berlin"
+blog.posts_per_page = 5
+blog.disqus.enabled = False
--- /dev/null
+import logging
+
+from blogofile.cache import bf
+
+import archives
+import categories
+import chronological
+import feed
+import permapage
+import post
+
+config = {
+ "name": "Blog",
+ "description": "Creates a Blog",
+ "priority": 90.0,
+
+ #Posts
+ "post.date_format": "%Y/%m/%d %H:%M:%S"
+ }
+
+def run():
+ blog = bf.config.controllers.blog
+
+ #Parse the posts
+ blog.posts = post.parse_posts("_posts")
+ blog.dir = bf.util.path_join(bf.writer.output_dir, blog.path)
+
+ # Find all the categories and archives before we write any pages
+ blog.archived_posts = {} ## "/archive/Year/Month" -> [post, post, ... ]
+ blog.archive_links = [] ## [("/archive/2009/12", name, num_in_archive1), ...] (sorted in reverse by date)
+ blog.categorized_posts = {} ## "Category Name" -> [post, post, ... ]
+ blog.all_categories = [] ## [("Category 1",num_in_category_1), ...] (sorted alphabetically)
+ archives.sort_into_archives()
+ categories.sort_into_categories()
+
+ blog.logger = logging.getLogger(config['name'])
+
+ permapage.run()
+ chronological.run()
+ archives.run()
+ categories.run()
+ feed.run()
+
--- /dev/null
+################################################################################
+## Archives controller
+##
+## Writes out monthly archives.
+## Each archive is navigable to the next and previous archive
+## in which posts were made.
+################################################################################
+
+import operator
+
+from blogofile.cache import bf
+import chronological
+
+blog = bf.config.controllers.blog
+
+
+def run():
+ write_monthly_archives()
+
+
+def sort_into_archives():
+ #This is called from the blog controller's run() in __init__.py
+ for post in blog.posts:
+ link = post.date.strftime("archive/%Y/%m")
+ try:
+ blog.archived_posts[link].append(post)
+ except KeyError:
+ blog.archived_posts[link] = [post]
+ for archive, posts in sorted(
+ blog.archived_posts.items(), key=operator.itemgetter(0), reverse=True):
+ name = posts[0].date.strftime("%B %Y")
+ blog.archive_links.append((archive, name, len(posts)))
+
+
+def write_monthly_archives():
+ for link, posts in blog.archived_posts.items():
+ name = posts[0].date.strftime("%B %Y")
+ chronological.write_blog_chron(posts, root=link)
--- /dev/null
+import os
+import shutil
+import operator
+import feed
+from blogofile.cache import bf
+
+blog = bf.config.controllers.blog
+
+
+def run():
+ write_categories()
+
+
+def sort_into_categories():
+ categories = set()
+ for post in blog.posts:
+ categories.update(post.categories)
+ for category in categories:
+ category_posts = [post for post in blog.posts
+ if category in post.categories]
+ blog.categorized_posts[category] = category_posts
+ for category, posts in sorted(
+ blog.categorized_posts.items(), key=operator.itemgetter(0)):
+ blog.all_categories.append((category, len(posts)))
+
+
+def write_categories():
+ """Write all the blog posts in categories"""
+ root = bf.util.path_join(blog.path, blog.category_dir)
+ #Find all the categories:
+ categories = set()
+ for post in blog.posts:
+ categories.update(post.categories)
+ for category, category_posts in blog.categorized_posts.items():
+ page_num = 1
+ while True:
+ path = bf.util.path_join(root, category.url_name,
+ str(page_num), "index.html")
+ page_posts = category_posts[:blog.posts_per_page]
+ category_posts = category_posts[blog.posts_per_page:]
+ #Forward and back links
+ if page_num > 1:
+ prev_link = bf.util.site_path_helper(
+ blog.path, blog.category_dir, category.url_name,
+ str(page_num - 1))
+ else:
+ prev_link = None
+ if len(category_posts) > 0:
+ next_link = bf.util.site_path_helper(
+ blog.path, blog.category_dir, category.url_name,
+ str(page_num + 1))
+ else:
+ next_link = None
+
+ env = {
+ "category": category,
+ "posts": page_posts,
+ "prev_link": prev_link,
+ "next_link": next_link
+ }
+ bf.writer.materialize_template("chronological.mako", path, env)
+
+ #Copy category/1 to category/index.html
+ if page_num == 1:
+ shutil.copyfile(
+ bf.util.path_join(bf.writer.output_dir, path),
+ bf.util.path_join(
+ bf.writer.output_dir, root, category.url_name,
+ "index.html"))
+ #Prepare next iteration
+ page_num += 1
+ if len(category_posts) == 0:
+ break
--- /dev/null
+# Write all the blog posts in reverse chronological order
+import os
+from blogofile.cache import bf
+
+blog = bf.config.controllers.blog
+
+
+def run():
+ write_blog_chron(posts=blog.posts, root=blog.pagination_dir.lstrip("/"))
+ write_blog_first_page()
+
+
+def write_blog_chron(posts, root):
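+ # Writes paginated pages to <blog.path>/<root>/<page_num>/index.html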
+ page_num = 1
+ post_num = 0
+ html = []
+ while len(posts) > post_num:
+ #Write the pages, num_per_page posts per page:
+ page_posts = posts[post_num:post_num + blog.posts_per_page]
+ post_num += blog.posts_per_page
+ if page_num > 1:
+ prev_link = "../" + str(page_num - 1)
+ else:
+ prev_link = None
+ if len(posts) > post_num:
+ next_link = "../" + str(page_num + 1)
+ else:
+ next_link = None
+ page_dir = bf.util.path_join(blog.path, root, str(page_num))
+ fn = bf.util.path_join(page_dir, "index.html")
+ env = {
+ "posts": page_posts,
+ "next_link": next_link,
+ "prev_link": prev_link
+ }
+ bf.writer.materialize_template("chronological.mako", fn, env)
+ page_num += 1
+
+
+def write_blog_first_page():
+ if not blog.custom_index:
+ page_posts = blog.posts[:blog.posts_per_page]
+ path = bf.util.path_join(blog.path, "index.html")
+ blog.logger.info(u"Writing blog index page: " + path)
+ if len(blog.posts) > blog.posts_per_page:
+ next_link = bf.util.site_path_helper(
+ blog.path, blog.pagination_dir+"/2")
+ else:
+ next_link = None
+ env = {
+ "posts": page_posts,
+ "next_link": next_link,
+ "prev_link": None
+ }
+ bf.writer.materialize_template("chronological.mako", path, env)
--- /dev/null
+from blogofile.cache import bf
+
+blog = bf.config.controllers.blog
+
+
+def run():
+ write_feed(blog.posts, blog.path, blog.path + "/rss.xml", "rss.mako")
+ write_feed(blog.posts, blog.path, blog.path + "/atom.xml", "atom.mako")
+
+def write_feed(posts, root, path, template):
+ blog.logger.info("Writing RSS/Atom feed: " + path)
+ env = {"posts": posts, "root": root}
+ bf.writer.materialize_template(template, path, env)
--- /dev/null
+import urlparse
+from blogofile.cache import bf
+import re
+
+blog = bf.config.controllers.blog
+
+
+def run():
+ write_permapages()
+
+
+def write_permapages():
+ "Write blog posts to their permalink locations"
+ site_re = re.compile(bf.config.site.url, re.IGNORECASE)
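+ # site_re strips the configured site URL from a permalink, leaving just the path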
+ num_posts = len(blog.posts)
+
+ for i, post in enumerate(blog.posts):
+ if post.permalink:
+ path = site_re.sub("", post.permalink)
+ blog.logger.info(u"Writing permapage for post: {0}".format(path))
+ else:
+ #Permalinks MUST be specified. No permalink, no page.
+ blog.logger.info(u"Post has no permalink: {0}".format(post.title))
+ continue
+
+ env = {
+ "post": post,
+ "posts": blog.posts
+ }
+
+ #Find the next and previous posts chronologically
+ if i < num_posts - 1:
+ env['prev_post'] = blog.posts[i + 1]
+ if i > 0:
+ env['next_post'] = blog.posts[i - 1]
+
+ bf.writer.materialize_template(
+ "permapage.mako", bf.util.path_join(path, "index.html"), env)
--- /dev/null
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+"""
+post.py parses post sources from the ./_post directory.
+"""
+
+__author__ = "Ryan McGuire (ryan@enigmacurry.com)"
+__date__ = "Mon Feb 2 21:21:04 2009"
+
+import os
+import sys
+import datetime
+import re
+import operator
+import urlparse
+import hashlib
+import codecs
+
+import pytz
+import yaml
+import logging
+import BeautifulSoup
+
+import blogofile_bf as bf
+
+logger = logging.getLogger("blogofile.post")
+
+config = bf.config.controllers.blog.post
+config.mod = sys.modules[globals()["__name__"]]
+
+# These are all the Blogofile reserved field names for posts. It is not
+# recommended that users re-use any of these field names for purposes other
+# than the one stated.
+reserved_field_names = {
+ "title" :"A one-line free-form title for the post",
+ "date" :"The date that the post was originally created",
+ "updated" :"The date that the post was last updated",
+ "categories" :"A list of categories that the post pertains to, "\
+ "each separated by commas",
+ "tags" :"A list of tags that the post pertains to, "\
+ "each separated by commas",
+ "permalink" :"The full permanent URL for this post. "\
+ "Automatically created if not provided",
+ "path" :"The path from the permalink of the post",
+ "guid" :"A unique hash for the post, if not provided it "\
+ "is assumed that the permalink is the guid",
+ "slug" :"The title part of the URL for the post, if not "\
+ "provided it is automatically generated from the title. "\
+ "It is not used if the permalink does not contain :title",
+ "author" :"The name of the author of the post",
+ "filters" :"The filter chain to apply to the entire post. "\
+ "If not specified, a default chain based on the file extension is "\
+ "applied. If set to 'None' it disables all filters, even default ones.",
+ "filter" :"synonym for filters",
+ "draft" :"If 'true' or 'True', the post is considered to be only a "\
+ "draft and not to be published.",
+ "source" :"Reserved internally",
+ "yaml" :"Reserved internally",
+ "content" :"Reserved internally",
+ "filename" :"Reserved internally"
+ }
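+
+# For example, a post source begins with a YAML header like the ones under
+# _posts:
+#
+#   ---
+#   date: 2011/01/07 10:00:00
+#   title: i3 in grml
+#   ---
+#
+# followed by the post body. Fields such as categories or tags take
+# comma-separated values.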
+
+
+class PostParseException(Exception):
+
+ def __init__(self, value):
+ self.value = value
+
+ def __str__(self):
+ return repr(self.value)
+
+
+class Post(object):
+ """
+ Class to describe a blog post and associated metadata
+ """
+ def __init__(self, source, filename="Untitled"):
+ self.source = source
+ self.yaml = None
+ self.title = None
+ self.__timezone = bf.config.controllers.blog.timezone
+ self.date = None
+ self.updated = None
+ self.categories = set()
+ self.tags = set()
+ self.permalink = None
+ self.content = u""
+ self.excerpt = u""
+ self.filename = filename
+ self.author = ""
+ self.guid = None
+ self.slug = None
+ self.draft = False
+ self.filters = None
+ self.__parse()
+ self.__post_process()
+
+ def __repr__(self): #pragma: no cover
+ return u"<Post title='{0}' date='{1}'>".format(
+ self.title, self.date.strftime("%Y/%m/%d %H:%M:%S"))
+
+ def __parse(self):
+ """Parse the yaml and fill fields"""
+ yaml_sep = re.compile("^---$", re.MULTILINE)
+ content_parts = yaml_sep.split(self.source, maxsplit=2)
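+ # content_parts: [text before the first "---" (normally empty), YAML header, post body]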
+ if len(content_parts) < 2:
+ raise PostParseException(u"{0}: Post has no YAML section".format(
+ self.filename))
+ else:
+ #Extract the yaml at the top
+ self.__parse_yaml(content_parts[1])
+ post_src = content_parts[2]
+ self.__apply_filters(post_src)
+ #Do post excerpting
+ self.__parse_post_excerpting()
+
+ def __apply_filters(self, post_src):
+ """Apply filters to the post"""
+ #Apply block level filters (filters on only part of the post)
+ # TODO: block level filters on posts
+ #Apply post level filters (filters on the entire post)
+ #If filter is unspecified, use the default filter based on
+ #the file extension:
+ if self.filters is None:
+ try:
+ file_extension = os.path.splitext(self.filename)[-1][1:]
+ self.filters = bf.config.controllers.blog.post_default_filters[
+ file_extension]
+ except KeyError:
+ self.filters = []
+ self.content = bf.filter.run_chain(self.filters, post_src)
+
+ def __parse_post_excerpting(self):
+ if bf.config.controllers.blog.post_excerpts.enabled:
+ length = bf.config.controllers.blog.post_excerpts.word_length
+ try:
+ self.excerpt = bf.config.post_excerpt(self.content, length)
+ except AttributeError:
+ self.excerpt = self.__excerpt(length)
+
+ def __excerpt(self, num_words=50):
+ #Default post excerpting function
+ #Can be overridden in _config.py by
+ #defining post_excerpt(content,num_words)
+ if len(self.excerpt) == 0:
+ # Retrieve the excerpt from the article body
+ s = BeautifulSoup.BeautifulSoup(self.content)
+ # get rid of javascript, noscript and css
+ [[tree.extract() for tree in s(elem)] for elem in (
+ 'script', 'noscript', 'style')]
+ # get rid of doctype
+ subtree = s.findAll(text=re.compile("DOCTYPE|xml"))
+ [tree.extract() for tree in subtree]
+ # remove headers
+ [[tree.extract() for tree in s(elem)] for elem in (
+ 'h1', 'h2', 'h3', 'h4', 'h5', 'h6')]
+ text = ''.join(s.findAll(text=True))\
+ .replace("\n", "").split(" ")
+ return " ".join(text[:num_words]) + '...'
+
+ def __post_process(self):
+ # fill in empty default value
+ if not self.title:
+ self.title = u"Untitled - {0}".format(
+ datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"))
+
+ if not self.slug:
+ self.slug = re.sub("[ ?]", "-", self.title).lower()
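+ # e.g. "tree branch preview release" -> "tree-branch-preview-release"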
+
+ if not self.date:
+ self.date = datetime.datetime.now(pytz.timezone(self.__timezone))
+ if not self.updated:
+ self.updated = self.date
+
+ if not self.categories or len(self.categories) == 0:
+ self.categories = set([Category('Uncategorized')])
+ if not self.permalink and \
+ bf.config.controllers.blog.auto_permalink.enabled:
+ self.permalink = bf.config.site.url.rstrip("/") + \
+ bf.config.controllers.blog.auto_permalink.path
+ self.permalink = \
+ re.sub(":blog_path", bf.config.blog.path, self.permalink)
+ self.permalink = \
+ re.sub(":year", self.date.strftime("%Y"), self.permalink)
+ self.permalink = \
+ re.sub(":month", self.date.strftime("%m"), self.permalink)
+ self.permalink = \
+ re.sub(":day", self.date.strftime("%d"), self.permalink)
+ self.permalink = \
+ re.sub(":title", self.slug, self.permalink)
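+ # Example: with a (hypothetical) auto_permalink.path of
+ # ":blog_path/:year/:month/:day/:title", the post "i3 in grml" dated
+ # 2011/01/07 ends up at <site.url>/blog/2011/01/07/i3-in-grml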
+
+ # TODO: slugification should be abstracted out somewhere reusable
+ self.permalink = re.sub(
+ ":filename", re.sub(
+ "[ ?]", "-", self.filename).lower(), self.permalink)
+
+ # Generate sha hash based on title
+ self.permalink = re.sub(":uuid", hashlib.sha1(
+ self.title.encode('utf-8')).hexdigest(), self.permalink)
+
+ logger.debug(u"Permalink: {0}".format(self.permalink))
+
+ def __parse_yaml(self, yaml_src):
+ y = yaml.load(yaml_src)
+ # Load all the fields that require special processing first:
+ fields_need_processing = ('permalink', 'guid', 'date', 'updated',
+ 'categories', 'tags', 'draft')
+ try:
+ self.permalink = y['permalink']
+ if self.permalink.startswith("/"):
+ self.permalink = urlparse.urljoin(bf.config.site.url,
+ self.permalink)
+ #Ensure that the permalink is for the same site as bf.config.site.url
+ if not self.permalink.startswith(bf.config.site.url):
+ raise PostParseException(u"{0}: permalink for a different site"
+ " than configured".format(self.filename))
+ logger.debug(u"path from permalink: {0}".format(self.path))
+ except KeyError:
+ pass
+ try:
+ self.guid = y['guid']
+ except KeyError:
+ self.guid = self.permalink
+ try:
+ self.date = pytz.timezone(self.__timezone).localize(
+ datetime.datetime.strptime(y['date'], config.date_format))
+ except KeyError:
+ pass
+ try:
+ self.updated = pytz.timezone(self.__timezone).localize(
+ datetime.datetime.strptime(y['updated'], config.date_format))
+ except KeyError:
+ pass
+ try:
+ self.categories = set([Category(x.strip()) for x in \
+ y['categories'].split(",")])
+ except:
+ pass
+ try:
+ self.tags = set([x.strip() for x in y['tags'].split(",")])
+ except:
+ pass
+ try:
+ self.filters = y['filter'] #filter is a synonym for filters
+ except KeyError:
+ pass
+ try:
+ if y['draft']:
+ self.draft = True
+ logger.info(u"Post {0} is set to draft, "
+ "ignoring this post".format(self.filename))
+ else:
+ self.draft = False
+ except KeyError:
+ self.draft = False
+ # Load the rest of the fields that don't need processing:
+ for field, value in y.items():
+ if field not in fields_need_processing:
+ setattr(self,field,value)
+
+ def permapath(self):
+ """Get just the path portion of a permalink"""
+ return urlparse.urlparse(self.permalink)[2]
+
+ def __cmp__(self, other_post):
+ "Posts should be comparable by date"
+ return cmp(self.date, other_post.date)
+
+ def __eq__(self, other_post):
+ return self is other_post
+
+ def __getattr__(self, name):
+ if name == "path":
+ #Always generate the path from the permalink
+ return self.permapath()
+ else:
+ raise AttributeError, name
+
+
+class Category(object):
+
+ def __init__(self, name):
+ self.name = unicode(name)
+ # TODO: slugification should be abstracted out somewhere reusable
+ # TODO: consider making url_name and path read-only properties?
+ self.url_name = self.name.lower().replace(" ", "-")
+ self.path = bf.util.site_path_helper(
+ bf.config.controllers.blog.path,
+ bf.config.controllers.blog.category_dir,
+ self.url_name)
+
+ def __eq__(self, other):
+ if self.name == other.name:
+ return True
+ return False
+
+ def __hash__(self):
+ return hash(self.name)
+
+ def __repr__(self):
+ return self.name
+
+ def __cmp__(self, other):
+ return cmp(self.name, other.name)
+
+
+def parse_posts(directory):
+ """Retrieve all the posts from the directory specified.
+
+ Returns a list of the posts sorted in reverse by date."""
+ posts = []
+ post_filename_re = re.compile(
+ ".*((\.textile$)|(\.markdown$)|(\.org$)|(\.html$)|(\.txt$)|(\.rst$))")
+ if not os.path.isdir(directory):
+ logger.warn("This site has no {0} directory.".format(directory))
+ return []
+ post_paths = [f.decode("utf-8") for f in bf.util.recursive_file_list(
+ directory, post_filename_re) if post_filename_re.match(f)]
+
+ for post_path in post_paths:
+ post_fn = os.path.split(post_path)[1]
+ logger.debug(u"Parsing post: {0}".format(post_path))
+ #IMO codecs.open is broken on Win32.
+ #It refuses to open files without replacing newlines with CR+LF
+ #reverting to regular open and decode:
+ try:
+ src = open(post_path, "r").read().decode(
+ bf.config.controllers.blog.post_encoding)
+ except:
+ logger.exception(u"Error reading post: {0}".format(post_path))
+ raise
+ try:
+ p = Post(src, filename=post_fn)
+ except PostParseException as e:
+ logger.warning(u"{0} : Skipping this post.".format(e.value))
+ continue
+ #Exclude some posts
+ if not (p.permalink is None or p.draft is True):
+ posts.append(p)
+ posts.sort(key=operator.attrgetter('date'), reverse=True)
+ return posts
--- /dev/null
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+"""
+org.py convert org source file into html file
+"""
+
+__author__ = "Jaemok Jeong(jmjeong@gmail.com)"
+__date__ = "Tue Aug 11 12:50:17 2009"
+
+
+import os
+import tempfile
+import logging
+import re
+import sys
+import commands
+import codecs
+import datetime
+import pytz
+from BeautifulSoup import BeautifulSoup
+
+import blogofile_bf as bf
+
+logger = logging.getLogger("blogofile.org")
+
+
+class EmacsNotFoundException(Exception):
+ pass
+
+
+post = bf.config.controllers.blog.post.mod
+
+
+class org(object):
+ """
+ Class to convert an org source file into an HTML file.
+
+ It composes the org content from the source plus an optional preamble,
+ launches Emacs, and converts the org content into an HTML file.
+
+ The generated HTML file is processed with the BeautifulSoup module to
+ extract the body section, the title, and the categories.
+
+ self.content = body
+ self.title = title (the first '*' heading in the org file)
+ self.categories = categories (the tags on the first '*' heading)
+ self.date = date (taken from the timestamp in the first heading)
+
+ """
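+ # A typical org post source (assumed layout) looks like:
+ #
+ #   * Post title    :category1:category2:
+ #     <2009-08-22 Sat 15:22>
+ #     Body text ...
+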
+ def __init__(self, source):
+ self.source = source
+ return self.__convert()
+
+ def __convert(self):
+ temp_file = tempfile.NamedTemporaryFile(suffix='.org')
+ try:
+ temp_file.write(bf.config.blog.emacs_orgmode_preamble)
+ temp_file.write("\n")
+ except AttributeError:
+ pass
+ temp_file.write(self.source.encode(bf.config.blog_post_encoding))
+ temp_file.flush()
+
+ pname = ""
+ try:
+ pname = bf.config.blog.emacs_binary
+ except AttributeError:
+ raise EmacsNotFoundException("Emacs binary is not defined")
+
+ pname += " --batch"
+ try:
+ if bf.config.blog.emacs_preload_elisp:
+ pname += " --load={0}".format(
+ bf.config.blog.emacs_preload_elisp)
+ except AttributeError:
+ pass
+
+ pname += " --visit={0} --funcall org-export-as-html-batch".format(
+ temp_file.name)
+ logger.debug("Exec name::: %s" % pname)
+
+ status, output = commands.getstatusoutput(pname)
+ logger.debug("Convert output:::\n\t%s"%output)
+ if status:
+ raise EmacsNotFoundException("orgfile filter failed")
+
+ html = temp_file.name[:-4] + '.html'
+ temp_file.close()
+
+ #IMO codecs.open is broken on Win32.
+ #It refuses to open files without replacing newlines with CR+LF
+ #reverting to regular open and decode:
+ content = open(html, "rb").read().decode(bf.config.blog_post_encoding)
+
+ # remove the temporary file
+ os.remove(html)
+
+ soup = BeautifulSoup(content)
+
+ # the first h2 section will be used for title, category, and date
+ metaline = soup.find('div', {'id': 'outline-container-1'}).h2
+
+ # extract title
+ try:
+ self.title = re.sub('&nbsp;', '', metaline.contents[0]).strip()
+ except AttributeError:
+ self.title = None
+
+ # extract category
+ try:
+ categories = metaline('span', {'class':'tag'})[0].string
+ self.categories = set([post.Category(x)
+ for x in categories.split(' ')])
+ except:
+ self.categories = None
+
+ # extract date
+ try:
+ date = metaline('span', {'class':'timestamp'})[0].string # 2009-08-22 Sat 15:22
+ # date_format = "%Y/%m/%d %H:%M:%S"
+ self.date = datetime.datetime.strptime(date, "%Y-%m-%d %a %H:%M")
+ self.date = self.date.replace(
+ tzinfo=pytz.timezone(bf.config.blog_timezone))
+ except:
+ self.date = None
+
+ # delete first h2 section (which is title and category)
+ try:
+ metaline.extract()
+ except AttributeError:
+ pass
+
+ # print soup.body
+ try:
+ toc = soup.find('div',{'id': 'table-of-contents'})
+ content = soup.find('div', {'id': 'outline-container-1'})
+
+ if toc != None:
+ content = str(toc) + str(content)
+
+ self.content = str(content).decode(bf.config.blog_post_encoding)
+ except:
+ pass
+
+
+if __name__ == '__main__':
+ import doctest
+ doctest.testmod(verbose=True)
+
--- /dev/null
+import markdown
+import logging
+
+config = {
+ 'name': "Markdown",
+ 'description': "Renders markdown formatted text to HTML",
+ 'aliases': ['markdown']
+ }
+
+
+#Markdown logging is noisy, turn it down:
+logging.getLogger("MARKDOWN").setLevel(logging.ERROR)
+
+
+def run(content):
+ return markdown.markdown(content)
--- /dev/null
+import docutils.core
+
+config = {
+ 'name': "reStructuredText",
+ 'description': "Renders reStructuredText formatted text to HTML",
+ 'aliases': ['rst']
+ }
+
+
+def run(content):
+ return docutils.core.publish_parts(content, writer_name='html')['html_body']
--- /dev/null
+import re
+import os
+
+import pygments
+from pygments import formatters, util, lexers
+import blogofile_bf as bf
+
+config = {"name": "Syntax Highlighter",
+ "description": "Highlights blocks of code based on syntax",
+ "author": "Ryan McGuire",
+ "css_dir": "/css",
+ "preload_styles": []}
+
+
+def init():
+ #This filter normally only loads pygments styles when needed.
+ #This will force a particular style to get loaded at startup.
+ for style in bf.config.filters.syntax_highlight.preload_styles:
+ css_class = "pygments_{0}".format(style)
+ formatter = pygments.formatters.HtmlFormatter(
+ linenos=False, cssclass=css_class, style=style)
+ write_pygments_css(style, formatter)
+
+
+example = """
+
+This is normal text.
+
+The following is a python code block:
+
+$$code(lang=python)
+import this
+
+prices = {'apple' : 0.50, #Prices of fruit
+ 'orange' : 0.65,
+ 'pear' : 0.90}
+
+def print_prices():
+ for fruit, price in prices.items():
+ print "An %s costs %s" % (fruit, price)
+$$/code
+
+This is a ruby code block:
+
+$$code(lang=ruby)
+class Person
+ attr_reader :name, :age
+ def initialize(name, age)
+ @name, @age = name, age
+ end
+ def <=>(person) # Comparison operator for sorting
+ @age <=> person.age
+ end
+ def to_s
+ "#@name (#@age)"
+ end
+end
+
+group = [
+ Person.new("Bob", 33),
+ Person.new("Chris", 16),
+ Person.new("Ash", 23)
+]
+
+puts group.sort.reverse
+$$/code
+
+This is normal text
+"""
+
+css_files_written = set()
+
+code_block_re = re.compile(
+ r"(?:^|\s)" # $$code Must start as a new word
+ r"\$\$code" # $$code is the start of the block
+ r"(?P<args>\([^\r\n]*\))?" # optional arguments are passed in brackets
+ r"[^\r\n]*\r?\n" # ignore everything else on the 1st line
+ r"(?P<code>.*?)\s\$\$/code" # code block continues until $$/code
+ , re.DOTALL)
+
+argument_re = re.compile(
+ r"[ ]*" # eat spaces at the beginning
+ "(?P<arg>" # start of argument
+ ".*?" # the name of the argument
+ "=" # the assignment
+ r"""(?:(?:[^"']*?)""" # a non-quoted value
+ r"""|(?:"[^"]*")""" # or, a double-quoted value
+ r"""|(?:'[^']*')))""" # or, a single-quoted value
+ "[ ]*" # eat spaces at the end
+ "[,\r\n]" # ends in a comma or newline
+ )
+
+
+def highlight_code(code, language, formatter):
+ try:
+ lexer = pygments.lexers.get_lexer_by_name(language)
+ except pygments.util.ClassNotFound:
+ lexer = pygments.lexers.get_lexer_by_name("text")
+ #Highlight with pygments and surround by blank lines
+ #(blank lines required for markdown syntax)
+ highlighted = "\n\n{0}\n\n".format(
+ pygments.highlight(code, lexer, formatter))
+ return highlighted
+
+
+def parse_args(args):
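+ # Turn the raw "(...)" argument string into a dict, e.g.
+ # parse_args("(lang=python, linenums=True)") -> {'lang': 'python', 'linenums': 'True'}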
+ #Make sure the args are newline terminated (req'd by regex)
+ opts = {}
+ if args is None:
+ return opts
+ args = args.lstrip("(").rstrip(")")
+ if args[-1] != "\n":
+ args = args+"\n"
+ for m in argument_re.finditer(args):
+ arg = m.group('arg').split('=')
+ opts[arg[0]] = arg[1]
+ return opts
+
+
+def write_pygments_css(style, formatter,
+ location=bf.config.filters.syntax_highlight.css_dir):
+ path = bf.util.path_join("_site", bf.util.fs_site_path_helper(location))
+ bf.util.mkdir(path)
+ css_file = "pygments_{0}.css".format(style)
+ css_path = os.path.join(path, css_file)
+ css_site_path = css_path.replace("_site", "")
+ if css_site_path in css_files_written:
+ return #already written, no need to overwrite it.
+ f = open(css_path, "w")
+ css_class = ".pygments_{0}".format(style)
+ f.write(formatter.get_style_defs(css_class))
+ f.close()
+ css_files_written.add(css_site_path)
+
+
+def run(src):
+ substitutions = {}
+ for m in code_block_re.finditer(src):
+ args = parse_args(m.group('args'))
+ #Make default args
+ if args.has_key('lang'):
+ lang = args['lang']
+ elif args.has_key('language'):
+ lang = args['language']
+ else:
+ lang = 'text'
+ try:
+ if args.has_key('linenums'):
+ linenums = args['linenums']
+ elif args.has_key("linenos"):
+ linenums = args['linenos']
+ if linenums.lower().strip() == "true":
+ linenums = True
+ else:
+ linenums = False
+ except:
+ linenums = False
+ try:
+ style = args['style']
+ except KeyError:
+ style = bf.config.filters.syntax_highlight.style
+ try:
+ css_class = args['cssclass']
+ except KeyError:
+ css_class = "pygments_{0}".format(style)
+ formatter = pygments.formatters.HtmlFormatter(
+ linenos=linenums, cssclass=css_class, style=style)
+ write_pygments_css(style, formatter)
+ substitutions[m.group()] = highlight_code(
+ m.group('code'), lang, formatter)
+ if len(substitutions) > 0:
+ p = re.compile('|'.join(map(re.escape, substitutions)))
+ src = p.sub(lambda x: substitutions[x.group(0)], src)
+ return src
+ else:
+ return src
--- /dev/null
+import textile
+
+config = {
+ 'name': "Textile",
+ 'description': "Renders textile formatted text to HTML",
+ 'aliases': ['textile']
+ }
+
+
+def run(content):
+ return textile.textile(content)
--- /dev/null
+---
+date: 2010/11/08 10:00:00
+title: i3lock supports numpad
+---
+
+i3lock supports numpad keys now. Please upgrade to the latest git and confirm
+that everything is still working. An i3lock release should follow sometime this
+week.
--- /dev/null
+---
+date: 2010/12/06 10:00:00
+title: tree branch preview release
+---
+
+The first preview version of the i3 tree branch has been released. Check <a
+href="http://i3.zekjur.net/tree/">i3.zekjur.net/tree/</a>.
--- /dev/null
+---
+date: 2011/01/05 10:00:00
+title: support for RandR changes in tree
+---
+
+The latest git version of the tree branch now has support for RandR changes.
+That means you can configure your outputs with xrandr or some graphical
+frontend, and i3 will correctly pick up these changes.
--- /dev/null
+---
+date: 2011/01/07 10:00:00
+title: i3 in grml
+---
+
+The most recent version 2010.12 of <a href="http://www.grml.org/">grml, a live
+Linux distribution for sysadmins and texttool users</a>, comes with i3 included
+by default (in the GRML-FULL flavor). Just press x followed by 3 after grml has
+booted to start X11 with i3.
+
+As I am a long-time grml user myself, this makes me quite happy :). Thanks to
+Mika for building such an excellent Linux distribution and for encouraging me
+at FrOSCon to get i3 into grml. You (and all contributors to grml) rock!
+
+For all i3 users, this also marks an important step: There now is a live CD
+(also usable on your USB thumb drive or by booting from the network, of course)
+which comes with i3. It’s a nice way to demonstrate i3 and a lot of other
+command-line tools to other people (just boot from USB).
+
+![grml 2010.12 with i3](/img/grml.png "screenshot of grml 2010.12 with i3")
--- /dev/null
+---
+date: 2011/01/19 10:00:00
+title: i3 v3.ε-bf2 was just released
+---
+
+i3 v3.ε-bf2 was just released. Check <a href="http://i3.zekjur.net/downloads/RELEASE-NOTES-3.e-bf2.txt">the release announcement</a>.
--- /dev/null
+---
+date: 2011/03/07 10:00:00
+title: "tree branch: second preview release"
+---
+
+The second preview version of the i3 tree branch has been released. Check <a href="http://i3.zekjur.net/tree/">i3.zekjur.net/tree/</a>.
--- /dev/null
+<?xml version="1.0" encoding="UTF-8"?><% from datetime import datetime %>
+<feed
+ xmlns="http://www.w3.org/2005/Atom"
+ xmlns:thr="http://purl.org/syndication/thread/1.0"
+ xml:lang="en"
+ >
+ <title type="text">${bf.config.blog.name}</title>
+ <subtitle type="text">${bf.config.blog.description}</subtitle>
+
+ <updated>${datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%SZ")}</updated>
+ <generator uri="http://blogofile.com/">Blogofile</generator>
+
+ <link rel="alternate" type="text/html" href="${bf.config.blog.url}" />
+ <id>${bf.config.blog.url}/feed/atom/</id>
+ <link rel="self" type="application/atom+xml" href="${bf.config.blog.url}/feed/atom/" />
+% for post in posts[:10]:
+ <entry>
+ <author>
+ <name>${post.author}</name>
+ <uri>${bf.config.blog.url}</uri>
+ </author>
+ <title type="html"><![CDATA[${post.title}]]></title>
+ <link rel="alternate" type="text/html" href="${post.permalink}" />
+ <id>${post.permalink}</id>
+ <updated>${post.updated.strftime("%Y-%m-%dT%H:%M:%SZ")}</updated>
+ <published>${post.date.strftime("%Y-%m-%dT%H:%M:%SZ")}</published>
+% for category in post.categories:
+ <category scheme="${bf.config.blog.url}" term="${category}" />
+% endfor
+ <summary type="html"><![CDATA[${post.title}]]></summary>
+ <content type="html" xml:base="${post.permalink}"><![CDATA[${post.content}]]></content>
+ </entry>
+% endfor
+</feed>
--- /dev/null
+<%!
+ section = "docs"
+%>
+<%inherit file="i3.mako" />
+
+<div id="content">
+% for post in posts:
+ <%include file="post.mako" args="post=post" />
+ <br>
+% endfor
+% if prev_link:
+ <a href="${prev_link}">« Previous Page</a>
+% endif
+% if prev_link and next_link:
+ --
+% endif
+% if next_link:
+ <a href="${next_link}">Next Page »</a>
+% endif
+
+</div>
--- /dev/null
+<%!
+ section = "docs"
+%>
+<%inherit file="i3.mako" />
+<div id="content">
+<%include file="post.mako" args="post=post" />
+</div>
--- /dev/null
+<%page args="post"/>
+<div class="blog_post">
+ <a name="${post.slug}"></a>
+ <h2 class="blog_post_title"><a href="${post.permapath()}" rel="bookmark" title="Permanent Link to ${post.title}">${post.date.strftime("%Y-%m-%d")}: ${post.title}</a></h2>
+ <div class="post_prose">
+ ${self.post_prose(post)}
+ </div>
+</div>
+
+<%def name="post_prose(post)">
+ ${post.content}
+</%def>
--- /dev/null
+<?xml version="1.0" encoding="UTF-8"?><% from datetime import datetime %>
+<rss version="2.0"
+ xmlns:content="http://purl.org/rss/1.0/modules/content/"
+ xmlns:sy="http://purl.org/rss/1.0/modules/syndication/"
+ xmlns:atom="http://www.w3.org/2005/Atom"
+ xmlns:dc="http://purl.org/dc/elements/1.1/"
+ xmlns:wfw="http://wellformedweb.org/CommentAPI/"
+ >
+ <channel>
+ <title>${bf.config.blog.name}</title>
+ <link>${bf.config.blog.url}</link>
+ <description>${bf.config.blog.description}</description>
+ <pubDate>${datetime.utcnow().strftime("%a, %d %b %Y %H:%M:%S GMT")}</pubDate>
+ <generator>Blogofile</generator>
+ <sy:updatePeriod>hourly</sy:updatePeriod>
+ <sy:updateFrequency>1</sy:updateFrequency>
+% for post in posts[:10]:
+ <item>
+ <title>${post.title}</title>
+ <link>${post.permalink}</link>
+ <pubDate>${post.date.strftime("%a, %d %b %Y %H:%M:%S %Z")}</pubDate>
+% for category in post.categories:
+ <category><![CDATA[${category}]]></category>
+% endfor
+% if post.guid:
+ <guid>${post.guid}</guid>
+% else:
+ <guid isPermaLink="true">${post.permalink}</guid>
+% endif
+ <description>${post.title}</description>
+ <content:encoded><![CDATA[${post.content}]]></content:encoded>
+ </item>
+% endfor
+ </channel>
+</rss>