path: root/roles/wiki
author     Opal <847966@proton.me>  2022-10-15 15:27:41 -0700
committer  Opal <847966@proton.me>  2022-10-15 15:27:41 -0700
commit     a287e741842f67d0a04c48276221d85f16079d55 (patch)
tree       2db596b4ba6709fe10168942bcd0fc5ed2850d02 /roles/wiki
parent     d86946ec21f2175d3a5aad58c1ae236291c74b7a (diff)
download   emacsconf-ansible-a287e741842f67d0a04c48276221d85f16079d55.tar.xz
           emacsconf-ansible-a287e741842f67d0a04c48276221d85f16079d55.zip
Merge code from the old emacsconf repo into sachac's emacsconf repo
Diffstat (limited to 'roles/wiki')
-rw-r--r--  roles/wiki/tasks/docker.yml           |  45
-rw-r--r--  roles/wiki/tasks/main.yml             |  57
-rw-r--r--  roles/wiki/templates/Scrubber.pm      | 749
-rw-r--r--  roles/wiki/templates/copyright.pm     |  60
-rw-r--r--  roles/wiki/templates/emacsconf.setup  | 440
-rwxr-xr-x  roles/wiki/templates/htmlscrubber.pm  | 132
-rw-r--r--  roles/wiki/templates/license.pm       |  59
7 files changed, 1542 insertions, 0 deletions
diff --git a/roles/wiki/tasks/docker.yml b/roles/wiki/tasks/docker.yml
new file mode 100644
index 0000000..a5d73e2
--- /dev/null
+++ b/roles/wiki/tasks/docker.yml
@@ -0,0 +1,45 @@
+- name: Install packages needed when running inside Docker
+ apt:
+ pkg:
+ - lighttpd
+ - supervisor
+- name: Create the anon user
+ user:
+ name: anon
+- name: Set up Ikiwiki setup
+ template:
+ src: emacsconf.setup
+ dest: "{{ ikiwiki_path }}/emacsconf.setup"
+ owner: www-data
+ group: www-data
+- name: Set up the ikiwiki directories
+ file:
+ dest: /var/www/html
+ state: directory
+ owner: www-data
+ group: www-data
+- name: Clone the bare git repo
+ git:
+ bare: true
+ repo: "{{ ikiwiki_git_source_mount }}"
+ dest: "{{ ikiwiki_bare_git_dir }}"
+ version: "{{ ikiwiki_git_branch }}"
+- name: Change owner
+ file:
+ dest: "{{ ikiwiki_bare_git_dir }}"
+ recurse: true
+ owner: www-data
+ group: www-data
+- name: Clone the working git repo
+ git:
+ repo: "{{ ikiwiki_bare_git_dir }}"
+ dest: "{{ ikiwiki_src_dir }}"
+ version: "{{ ikiwiki_git_branch }}"
+- name: Copy supervisor config
+ template:
+ src: supervisord.conf
+ dest: /etc/supervisor/conf.d/ikiwiki.conf
+- name: Start lighttpd
+ service:
+ name: lighttpd
+ state: started
diff --git a/roles/wiki/tasks/main.yml b/roles/wiki/tasks/main.yml
new file mode 100644
index 0000000..0fa2336
--- /dev/null
+++ b/roles/wiki/tasks/main.yml
@@ -0,0 +1,57 @@
+---
+- name: Set up packages
+ ansible.builtin.apt:
+ update_cache: true
+ pkg:
+ - ikiwiki
+ - git
+ - openssh-server
+ - libimage-magick-perl
+ - libtext-csv-perl
+ - libxml-writer-perl
+ - imagemagick
+ - nginx
+ - wget
+ state: present
+- name: Create ikiwiki group
+ group:
+ name: ikiwiki
+ state: present
+- name: Create ikiwiki user
+ user:
+ name: ikiwiki
+ group: ikiwiki
+ state: present
+- debug:
+ var: docker
+- name: Set up or update repositories
+ ansible.builtin.git:
+ repo: git://git.emacsconf.org/emacsconf-wiki
+ dest: "{{ ikiwiki_src_dir }}"
+ when: not docker|d(false)
+- name: Template the config
+ ansible.builtin.template:
+ src: emacsconf.setup
+ dest: "{{ ikiwiki_path }}/emacsconf.setup"
+ owner: ikiwiki
+- name: Copy Ikiwiki plugins
+ template:
+ src: "{{ item }}"
+ dest: "{{ ikiwiki_plugin_path }}"
+ loop:
+ - copyright.pm
+ - htmlscrubber.pm
+ - license.pm
+- include: docker.yml
+ when: docker | d(false)
+- name: Chown all the files to ikiwiki
+ file:
+ dest: "{{ ikiwiki_path }}"
+ owner: ikiwiki
+ group: ikiwiki
+ recurse: true
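+# Note: --rebuild regenerates every page from source, and --wrappers also
+# regenerates the CGI and git wrappers configured in emacsconf.setup
+# (see ikiwiki(1) for the exact semantics of these flags).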
+- name: Regenerate all the files
+ shell: ikiwiki --setup "{{ ikiwiki_path }}/emacsconf.setup" --rebuild --wrappers
+ register: output
+- debug:
+ var: output
diff --git a/roles/wiki/templates/Scrubber.pm b/roles/wiki/templates/Scrubber.pm
new file mode 100644
index 0000000..2efaa10
--- /dev/null
+++ b/roles/wiki/templates/Scrubber.pm
@@ -0,0 +1,749 @@
+package HTML::Scrubber;
+
+# ABSTRACT: Perl extension for scrubbing/sanitizing html
+
+
+use 5.008; # enforce minimum perl version of 5.8
+use strict;
+use warnings;
+use HTML::Parser 3.47 ();
+use HTML::Entities;
+use Scalar::Util ('weaken');
+
+our ( @_scrub, @_scrub_fh );
+
+our $VERSION = '0.15'; # VERSION
+our $AUTHORITY = 'cpan:NIGELM'; # AUTHORITY
+
+# my my my my, these here to prevent foolishness like
+# http://perlmonks.org/index.pl?node_id=251127#Stealing+Lexicals
+(@_scrub) = ( \&_scrub, "self, event, tagname, attr, attrseq, text" );
+(@_scrub_fh) = ( \&_scrub_fh, "self, event, tagname, attr, attrseq, text" );
+
+sub new {
+ my $package = shift;
+ my $p = HTML::Parser->new(
+ api_version => 3,
+ default_h => \@_scrub,
+ marked_sections => 0,
+ strict_comment => 0,
+ unbroken_text => 1,
+ case_sensitive => 0,
+ boolean_attribute_value => undef,
+ empty_element_tags => 1,
+ );
+
+ my $self = {
+ _p => $p,
+ _rules => { '*' => 0, },
+ _comment => 0,
+ _process => 0,
+ _r => "",
+ _optimize => 1,
+ _script => 0,
+ _style => 0,
+ };
+
+ $p->{"\0_s"} = bless $self, $package;
+ weaken( $p->{"\0_s"} );
+
+ return $self unless @_;
+
+ my (%args) = @_;
+
+ for my $f (qw[ default allow deny rules process comment ]) {
+ next unless exists $args{$f};
+ if ( ref $args{$f} ) {
+ $self->$f( @{ $args{$f} } );
+ }
+ else {
+ $self->$f( $args{$f} );
+ }
+ }
+
+ return $self;
+}
+
+
+sub comment {
+ return $_[0]->{_comment}
+ if @_ == 1;
+ $_[0]->{_comment} = $_[1];
+ return;
+}
+
+
+sub process {
+ return $_[0]->{_process}
+ if @_ == 1;
+ $_[0]->{_process} = $_[1];
+ return;
+}
+
+
+sub script {
+ return $_[0]->{_script}
+ if @_ == 1;
+ $_[0]->{_script} = $_[1];
+ return;
+}
+
+
+sub style {
+ return $_[0]->{_style}
+ if @_ == 1;
+ $_[0]->{_style} = $_[1];
+ return;
+}
+
+
+sub allow {
+ my $self = shift;
+ for my $k (@_) {
+ $self->{_rules}{ lc $k } = 1;
+ }
+ $self->{_optimize} = 1; # each time a rule changes, reoptimize when parse
+
+ return;
+}
+
+
+sub deny {
+ my $self = shift;
+
+ for my $k (@_) {
+ $self->{_rules}{ lc $k } = 0;
+ }
+
+ $self->{_optimize} = 1; # each time a rule changes, reoptimize when parse
+
+ return;
+}
+
+
+sub rules {
+ my $self = shift;
+ my (%rules) = @_;
+ for my $k ( keys %rules ) {
+ $self->{_rules}{ lc $k } = $rules{$k};
+ }
+
+ $self->{_optimize} = 1; # each time a rule changes, reoptimize when parse
+
+ return;
+}
+
+
+sub default {
+ return $_[0]->{_rules}{'*'}
+ if @_ == 1;
+
+ $_[0]->{_rules}{'*'} = $_[1] if defined $_[1];
+ $_[0]->{_rules}{'_'} = $_[2] if defined $_[2] and ref $_[2];
+ $_[0]->{_optimize} = 1; # each time a rule changes, reoptimize when parse
+
+ return;
+}
+
+
+sub scrub_file {
+ if ( @_ > 2 ) {
+ return unless defined $_[0]->_out( $_[2] );
+ }
+ else {
+ $_[0]->{_p}->handler( default => @_scrub );
+ }
+
+ $_[0]->_optimize(); #if $_[0]->{_optimize};
+
+ $_[0]->{_p}->parse_file( $_[1] );
+
+ return delete $_[0]->{_r} unless exists $_[0]->{_out};
+ print { $_[0]->{_out} } $_[0]->{_r} if length $_[0]->{_r};
+ delete $_[0]->{_out};
+ return 1;
+}
+
+
+sub scrub {
+ if ( @_ > 2 ) {
+ return unless defined $_[0]->_out( $_[2] );
+ }
+ else {
+ $_[0]->{_p}->handler( default => @_scrub );
+ }
+
+ $_[0]->_optimize(); # if $_[0]->{_optimize};
+
+ $_[0]->{_p}->parse( $_[1] ) if defined( $_[1] );
+ $_[0]->{_p}->eof();
+
+ return delete $_[0]->{_r} unless exists $_[0]->{_out};
+ delete $_[0]->{_out};
+ return 1;
+}
+
+
+sub _out {
+ my ( $self, $o ) = @_;
+
+ unless ( ref $o and ref \$o ne 'GLOB' ) {
+ open my $F, '>', $o or return;
+ binmode $F;
+ $self->{_out} = $F;
+ }
+ else {
+ $self->{_out} = $o;
+ }
+
+ $self->{_p}->handler( default => @_scrub_fh );
+
+ return 1;
+}
+
+
+sub _validate {
+ my ( $s, $t, $r, $a, $as ) = @_;
+ return "<$t>" unless %$a;
+
+ $r = $s->{_rules}->{$r};
+ my %f;
+
+ for my $k ( keys %$a ) {
+ my $check = exists $r->{$k} ? $r->{$k} : exists $r->{'*'} ? $r->{'*'} : next;
+
+ if ( ref $check eq 'CODE' ) {
+ my @v = $check->( $s, $t, $k, $a->{$k}, $a, \%f );
+ next unless @v;
+ $f{$k} = shift @v;
+ }
+ elsif ( ref $check || length($check) > 1 ) {
+ $f{$k} = $a->{$k} if $a->{$k} =~ m{$check};
+ }
+ elsif ($check) {
+ $f{$k} = $a->{$k};
+ }
+ }
+
+ if (%f) {
+ my %seen;
+ return "<$t $r>"
+ if $r = join ' ', map {
+ defined $f{$_}
+ ? qq[$_="] . encode_entities( $f{$_} ) . q["]
+ : $_; # boolean attribute (TODO?)
+ } grep { exists $f{$_} and !$seen{$_}++; } @$as;
+ }
+
+ return "<$t>";
+}
+
+
+sub _scrub_str {
+ my ( $p, $e, $t, $a, $as, $text ) = @_;
+
+ my $s = $p->{"\0_s"};
+ my $outstr = '';
+
+ if ( $e eq 'start' ) {
+ if ( exists $s->{_rules}->{$t} ) # is there a specific rule
+ {
+ if ( ref $s->{_rules}->{$t} ) # is it complicated?(not simple;)
+ {
+ $outstr .= $s->_validate( $t, $t, $a, $as );
+ }
+ elsif ( $s->{_rules}->{$t} ) # validate using default attribute rule
+ {
+ $outstr .= $s->_validate( $t, '_', $a, $as );
+ }
+ }
+ elsif ( $s->{_rules}->{'*'} ) # default allow tags
+ {
+ $outstr .= $s->_validate( $t, '_', $a, $as );
+ }
+ }
+ elsif ( $e eq 'end' ) {
+ my $place = 0;
+ if ( exists $s->{_rules}->{$t} ) {
+ $place = 1 if $s->{_rules}->{$t};
+ }
+ elsif ( $s->{_rules}->{'*'} ) {
+ $place = 1;
+ }
+ if ($place) {
+ if ( length $text ) {
+ $outstr .= "</$t>";
+ }
+ else {
+ substr $s->{_r}, -1, 0, ' /';
+ }
+ }
+ }
+ elsif ( $e eq 'comment' ) {
+ if ( $s->{_comment} ) {
+
+ # only copy comments through if they are well formed...
+ $outstr .= $text if ( $text =~ m|^<!--.*-->$|ms );
+ }
+ }
+ elsif ( $e eq 'process' ) {
+ $outstr .= $text if $s->{_process};
+ }
+ elsif ( $e eq 'text' or $e eq 'default' ) {
+ $text =~ s/</&lt;/g; #https://rt.cpan.org/Ticket/Attachment/8716/10332/scrubber.patch
+ $text =~ s/>/&gt;/g;
+
+ $outstr .= $text;
+ }
+ elsif ( $e eq 'start_document' ) {
+ $outstr = "";
+ }
+
+ return $outstr;
+}
+
+
+sub _scrub_fh {
+ my $self = $_[0]->{"\0_s"};
+ print { $self->{_out} } $self->{'_r'} if length $self->{_r};
+ $self->{'_r'} = _scrub_str(@_);
+}
+
+
+sub _scrub {
+
+ $_[0]->{"\0_s"}->{_r} .= _scrub_str(@_);
+}
+
+sub _optimize {
+ my ($self) = @_;
+
+ my (@ignore_elements) = grep { not $self->{"_$_"} } qw(script style);
+ $self->{_p}->ignore_elements(@ignore_elements); # if @ is empty, we reset ;)
+
+ return unless $self->{_optimize};
+
+ #sub allow
+ # return unless $self->{_optimize}; # till I figure it out (huh)
+
+ if ( $self->{_rules}{'*'} ) { # default allow
+ $self->{_p}->report_tags(); # so clear it
+ }
+ else {
+
+ my (@reports) =
+ grep { # report only tags we want
+ $self->{_rules}{$_}
+ } keys %{ $self->{_rules} };
+
+ $self->{_p}->report_tags( # default deny, so optimize
+ @reports
+ ) if @reports;
+ }
+
+ # sub deny
+ # return unless $self->{_optimize}; # till I figure it out (huh)
+ my (@ignores) =
+ grep { not $self->{_rules}{$_} } grep { $_ ne '*' } keys %{ $self->{_rules} };
+
+ $self->{_p}->ignore_tags( # always ignore stuff we don't want
+ @ignores
+ ) if @ignores;
+
+ $self->{_optimize} = 0;
+ return;
+}
+
+1;
+
+#print sprintf q[ '%-12s => %s,], "$_'", $h{$_} for sort keys %h;# perl!
+#perl -ne"chomp;print $_;print qq'\t\t# test ', ++$a if /ok\(/;print $/" test.pl >test2.pl
+#perl -ne"chomp;print $_;if( /ok\(/ ){s/\#test \d+$//;print qq'\t\t# test ', ++$a }print $/" test.pl >test2.pl
+#perl -ne"chomp;if(/ok\(/){s/# test .*$//;print$_,qq'\t\t# test ',++$a}else{print$_}print$/" test.pl >test2.pl
+
+__END__
+
+=pod
+
+=for stopwords html cpan callback homepage Perlbrew perltidy respository
+
+=head1 NAME
+
+HTML::Scrubber - Perl extension for scrubbing/sanitizing html
+
+=head1 VERSION
+
+version 0.15
+
+=head1 SYNOPSIS
+
+ use HTML::Scrubber;
+
+ my $scrubber = HTML::Scrubber->new( allow => [ qw[ p b i u hr br ] ] );
+ print $scrubber->scrub('<p><b>bold</b> <em>missing</em></p>');
+ # output is: <p><b>bold</b> </p>
+
+ # more complex input
+ my $html = q[
+ <style type="text/css"> BAD { background: #666; color: #666;} </style>
+ <script language="javascript"> alert("Hello, I am EVIL!"); </script>
+ <HR>
+ a => <a href=1>link </a>
+ br => <br>
+ b => <B> bold </B>
+ u => <U> UNDERLINE </U>
+ ];
+
+ print $scrubber->scrub($html);
+
+ $scrubber->deny( qw[ p b i u hr br ] );
+
+ print $scrubber->scrub($html);
+
+=head1 DESCRIPTION
+
+If you want to "scrub" or "sanitize" html input in a reliable and flexible
+fashion, then this module is for you.
+
+I wasn't satisfied with HTML::Sanitizer because it is based on
+HTML::TreeBuilder, so I thought I'd write something similar that works directly
+with HTML::Parser.
+
+=head1 METHODS
+
+First a note on documentation: just study the L<EXAMPLE|"EXAMPLE"> below. It's
+all the documentation you could need.
+
+Also, be sure to read all the comments as well as L<How does it work?|"How does
+it work?">.
+
+If you're new to perl, good luck to you.
+
+=head2 comment
+
+ warn "comments are ", $p->comment ? 'allowed' : 'not allowed';
+ $p->comment(0); # off by default
+
+=head2 process
+
+ warn "process instructions are ", $p->process ? 'allowed' : 'not allowed';
+ $p->process(0); # off by default
+
+=head2 script
+
+ warn "script tags (and everything in between) are supressed"
+ if $p->script; # off by default
+ $p->script( 0 || 1 );
+
+B<**> Please note that this is implemented using HTML::Parser's ignore_elements
+function, so if C<script> is set to true, all script tags encountered will be
+validated like all other tags.
+
+=head2 style
+
+ warn "style tags (and everything in between) are supressed"
+ if $p->style; # off by default
+ $p->style( 0 || 1 );
+
+B<**> Please note that this is implemented using HTML::Parser's ignore_elements
+function, so if C<style> is set to true, all style tags encountered will be
+validated like all other tags.
+
+=head2 allow
+
+ $p->allow(qw[ t a g s ]);
+
+=head2 deny
+
+ $p->deny(qw[ t a g s ]);
+
+=head2 rules
+
+ $p->rules(
+ img => {
+ src => qr{^(?!http://)}i, # only relative image links allowed
+ alt => 1, # alt attribute allowed
+ '*' => 0, # deny all other attributes
+ },
+ a => {
+ href => sub { ... }, # check or adjust with a callback
+ },
+ b => 1,
+ ...
+ );
+
+Updates set of attribute rules. Each rule can be 1/0, regular expression or a
+callback. Values longer than 1 char are treated as regexps. Callback is called
+with the following arguments: this object, tag name, attribute name and
+attribute value, should return empty list to drop attribute, C<undef> to keep
+it without value or a new scalar value.
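+
+As an illustrative sketch, a callback that keeps only absolute http/https
+links and otherwise drops the attribute could look like this; the argument
+order follows the list above:
+
+ $p->rules(
+     a => {
+         href => sub {
+             my ( $scrubber, $tag, $attr, $value ) = @_;
+             return $value if $value =~ m{^https?://}i; # keep the value as-is
+             return;                                    # empty list: drop href
+         },
+     },
+ );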
+
+=head2 default
+
+ print "default is ", $p->default();
+ $p->default(1); # allow tags by default
+ $p->default(
+ undef, # don't change
+ { # default attribute rules
+ '*' => 1, # allow attributes by default
+ }
+ );
+
+=head2 scrub_file
+
+ $html = $scrubber->scrub_file('foo.html'); ## returns giant string
+ die "Eeek $!" unless defined $html; ## opening foo.html may have failed
+ $scrubber->scrub_file('foo.html', 'new.html') or die "Eeek $!";
+ $scrubber->scrub_file('foo.html', *STDOUT)
+ or die "Eeek $!"
+ if fileno STDOUT;
+
+=head2 scrub
+
+ print $scrubber->scrub($html); ## returns giant string
+ $scrubber->scrub($html, 'new.html') or die "Eeek $!";
+ $scrubber->scrub($html, *STDOUT)
+ or die "Eeek $!"
+ if fileno STDOUT;
+
+=for comment _out
+ $scrubber->_out(*STDOUT) if fileno STDOUT;
+ $scrubber->_out('foo.html') or die "eeek $!";
+
+=for comment _validate
+Uses $self->{_rules} to do attribute validation.
+Takes tag, rule('_' || $tag), attrref.
+
+=for comment _scrub_str
+
+I<default> handler, used by both _scrub and _scrub_fh Moved all the common code
+(basically all of it) into a single routine for ease of maintenance
+
+=for comment _scrub_fh
+
+I<default> handler, does the scrubbing if we're scrubbing out to a file. Now
+calls _scrub_str and pushes that out to a file.
+
+=for comment _scrub
+
+I<default> handler, does the scrubbing if we're returning a giant string. Now
+calls _scrub_str and appends that to the output string.
+
+=head1 How does it work?
+
+When a tag is encountered, HTML::Scrubber allows/denies the tag using the
+explicit rule if one exists.
+
+If no explicit rule exists, Scrubber applies the default rule.
+
+If an explicit rule exists, but it's a simple rule(1), the default attribute
+rule is applied.
+
+=head2 EXAMPLE
+
+=for example begin
+
+ #!/usr/bin/perl -w
+ use HTML::Scrubber;
+ use strict;
+
+ my @allow = qw[ br hr b a ];
+
+ my @rules = (
+ script => 0,
+ img => {
+ src => qr{^(?!http://)}i, # only relative image links allowed
+ alt => 1, # alt attribute allowed
+ '*' => 0, # deny all other attributes
+ },
+ );
+
+ my @default = (
+ 0 => # default rule, deny all tags
+ {
+ '*' => 1, # default rule, allow all attributes
+ 'href' => qr{^(?:http|https|ftp)://}i,
+ 'src' => qr{^(?:http|https|ftp)://}i,
+
+ # If your perl doesn't have qr
+ # just use a string with length greater than 1
+ 'cite' => '(?i-xsm:^(?:http|https|ftp):)',
+ 'language' => 0,
+ 'name' => 1, # could be sneaky, but hey ;)
+ 'onblur' => 0,
+ 'onchange' => 0,
+ 'onclick' => 0,
+ 'ondblclick' => 0,
+ 'onerror' => 0,
+ 'onfocus' => 0,
+ 'onkeydown' => 0,
+ 'onkeypress' => 0,
+ 'onkeyup' => 0,
+ 'onload' => 0,
+ 'onmousedown' => 0,
+ 'onmousemove' => 0,
+ 'onmouseout' => 0,
+ 'onmouseover' => 0,
+ 'onmouseup' => 0,
+ 'onreset' => 0,
+ 'onselect' => 0,
+ 'onsubmit' => 0,
+ 'onunload' => 0,
+ 'src' => 0,
+ 'type' => 0,
+ }
+ );
+
+ my $scrubber = HTML::Scrubber->new();
+ $scrubber->allow(@allow);
+ $scrubber->rules(@rules); # key/value pairs
+ $scrubber->default(@default);
+ $scrubber->comment(1); # 1 allow, 0 deny
+
+ ## preferred way to create the same object
+ $scrubber = HTML::Scrubber->new(
+ allow => \@allow,
+ rules => \@rules,
+ default => \@default,
+ comment => 1,
+ process => 0,
+ );
+
+ require Data::Dumper, die Data::Dumper::Dumper($scrubber) if @ARGV;
+
+ my $it = q[
+ <?php echo(" EVIL EVIL EVIL "); ?> <!-- asdf -->
+ <hr>
+ <I FAKE="attribute" > IN ITALICS WITH FAKE="attribute" </I><br>
+ <B> IN BOLD </B><br>
+ <A NAME="evil">
+ <A HREF="javascript:alert('die die die');">HREF=JAVA &lt;!&gt;</A>
+ <br>
+ <A HREF="image/bigone.jpg" ONMOUSEOVER="alert('die die die');">
+ <IMG SRC="image/smallone.jpg" ALT="ONMOUSEOVER JAVASCRIPT">
+ </A>
+ </A> <br>
+ ];
+
+ print "#original text", $/, $it, $/;
+ print
+ "#scrubbed text (default ", $scrubber->default(), # no arguments returns the current value
+ " comment ", $scrubber->comment(), " process ", $scrubber->process(), " )", $/, $scrubber->scrub($it), $/;
+
+ $scrubber->default(1); # allow all tags by default
+ $scrubber->comment(0); # deny comments
+
+ print
+ "#scrubbed text (default ",
+ $scrubber->default(),
+ " comment ",
+ $scrubber->comment(),
+ " process ",
+ $scrubber->process(),
+ " )", $/,
+ $scrubber->scrub($it),
+ $/;
+
+ $scrubber->process(1); # allow process instructions (dangerous)
+ $default[0] = 1; # allow all tags by default
+ $default[1]->{'*'} = 0; # deny all attributes by default
+ $scrubber->default(@default); # set the default again
+
+ print
+ "#scrubbed text (default ",
+ $scrubber->default(),
+ " comment ",
+ $scrubber->comment(),
+ " process ",
+ $scrubber->process(),
+ " )", $/,
+ $scrubber->scrub($it),
+ $/;
+
+=for example end
+
+=head2 FUN
+
+If you have Test::Inline (and you've installed HTML::Scrubber), try
+
+ pod2test Scrubber.pm >scrubber.t
+ perl scrubber.t
+
+=head1 SEE ALSO
+
+L<HTML::Parser>, L<Test::Inline>.
+
+The C<HTML::Sanitizer> module is no longer available on CPAN.
+
+=head1 VERSION REQUIREMENTS
+
+As of version 0.14 I have added a perl minimum version requirement of 5.8. This
+is basically due to failures on the smokers' perl 5.6 installations - which
+appears to be down to installation mechanisms and requirements.
+
+Since I don't want to spend the time supporting a version that is so old (and
+may not work for reasons such as UTF support), I have added a C<use 5.008;> to
+the main module.
+
+If this is problematic I am very willing to accept patches to fix this up,
+although I do not personally see a good reason to support a release that has
+been obsolete for 13 years.
+
+=head1 CONTRIBUTING
+
+If you want to contribute to the development of this module, the code is on
+L<GitHub|http://github.com/nigelm/html-scrubber>. You'll need a perl
+environment with L<Dist::Zilla>, and if you're just getting started, there's
+some documentation on using Vagrant and Perlbrew
+L<here|http://mrcaron.github.io/2015/03/06/Perl-CPAN-Pull-Request.html>.
+
+There is now a C<.perltidyrc> and a C<.tidyallrc> file within the repository
+for the standard perltidy settings used - I will apply these before new
+releases. Please do not let formatting prevent you from sending in patches etc
+- this can be sorted out as part of the release process. Info on C<tidyall>
+can be found at
+L<https://metacpan.org/pod/distribution/Code-TidyAll/bin/tidyall>.
+
+=head1 INSTALLATION
+
+See perlmodinstall for information and options on installing Perl modules.
+
+=head1 BUGS AND LIMITATIONS
+
+You can make new bug reports, and view existing ones, through the
+web interface at L<http://rt.cpan.org/Public/Dist/Display.html?Name=HTML-Scrubber>.
+
+=head1 AVAILABILITY
+
+The project homepage is L<https://metacpan.org/release/HTML-Scrubber>.
+
+The latest version of this module is available from the Comprehensive Perl
+Archive Network (CPAN). Visit L<http://www.perl.com/CPAN/> to find a CPAN
+site near you, or see L<https://metacpan.org/module/HTML::Scrubber/>.
+
+=head1 AUTHORS
+
+=over 4
+
+=item *
+
+Ruslan Zakirov <Ruslan.Zakirov@gmail.com>
+
+=item *
+
+Nigel Metheringham <nigelm@cpan.org>
+
+=item *
+
+D. H. <podmaster@cpan.org>
+
+=back
+
+=head1 COPYRIGHT AND LICENSE
+
+This software is copyright (c) 2015 by Ruslan Zakirov, Nigel Metheringham, 2003-2004 D. H..
+
+This is free software; you can redistribute it and/or modify it under
+the same terms as the Perl 5 programming language system itself.
+
+=cut
diff --git a/roles/wiki/templates/copyright.pm b/roles/wiki/templates/copyright.pm
new file mode 100644
index 0000000..16acacc
--- /dev/null
+++ b/roles/wiki/templates/copyright.pm
@@ -0,0 +1,60 @@
+# A plugin for ikiwiki to implement adding a footer with copyright information
+# based on a default value taken out of a file.
+
+# Copyright © 2007, 2008 Thomas Schwinge <tschwinge@gnu.org>
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 2, or (at your option) any later
+# version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+# for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+
+# Unless overridden with the `meta' plugin, a footer with copyright information
+# will be added to every page using a source file `copyright' (e.g.,
+# `copyright.mdwn') (using the same ``locating rules'' as for the sidebar
+# plugin).
+#
+# The state which page's copyright text was gathered from which source is not
+# tracked, so you'll need a full wiki-rebuild if the `copyright' file is
+# changed.
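+#
+# For example, a page can override the default by way of the `meta' plugin
+# with a directive along these lines (illustrative only):
+#
+#     [[!meta copyright="Copyright © 2022 Example Author"]]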
+
+package IkiWiki::Plugin::copyright;
+
+use warnings;
+use strict;
+use IkiWiki 2.00;
+
+my %copyright;
+
+sub import
+{
+ hook (type => "scan", id => "copyright", call => \&scan);
+}
+
+sub scan (@)
+{
+ my %params = @_;
+ my $page = $params{page};
+
+ return if defined $pagestate{$page}{meta}{copyright};
+
+ my $content;
+ my $copyright_page = bestlink ($page, "copyright") || return;
+ my $copyright_file = $pagesources{$copyright_page} || return;
+
+ # Only an optimization to avoid reading the same file again and again.
+ $copyright{$copyright_file} = readfile (srcfile ($copyright_file))
+ unless defined $copyright{$copyright_file};
+
+ $pagestate{$page}{meta}{copyright} = $copyright{$copyright_file};
+}
+
+1
diff --git a/roles/wiki/templates/emacsconf.setup b/roles/wiki/templates/emacsconf.setup
new file mode 100644
index 0000000..7ab3916
--- /dev/null
+++ b/roles/wiki/templates/emacsconf.setup
@@ -0,0 +1,440 @@
+# This file is managed by the wiki role in git@git.emacsconf.org:pub/emacsconf-ansible
+# Please make sure your changes are also reflected there.
+#
+# IkiWiki::Setup::Yaml - YAML formatted setup file
+#
+# Setup file for ikiwiki.
+#
+# Passing this to ikiwiki --setup will make ikiwiki generate
+# wrappers and build the wiki.
+#
+# Remember to re-run ikiwiki --setup any time you edit this file.
+#
+# name of the wiki
+wikiname: EmacsConf
+# contact email for wiki
+adminemail: {{ ikiwiki_admin }}
+# users who are wiki admins
+adminuser:
+- bandali
+# users who are banned from the wiki
+banned_users: []
+# where the source of the wiki is located
+srcdir: {{ ikiwiki_src_dir }}
+# where to build the wiki
+destdir: {{ ikiwiki_dest }}
+# base url to the wiki
+url: {{ ikiwiki_url }}
+# url to the ikiwiki.cgi
+#cgiurl: https://emacsconf.org/ikiwiki.cgi
+# do not adjust cgiurl if CGI is accessed via different URL
+reverse_proxy: 0
+# filename of cgi wrapper to generate
+#cgi_wrapper: /home/ikiwiki/public_html/emacsconf/ikiwiki.cgi
+cgi_wrapper: {{ ikiwiki_path }}/ikiwiki.cgi
+# mode for cgi_wrapper (can safely be made suid)
+cgi_wrappermode: 06755
+# number of seconds to delay CGI requests when overloaded
+cgi_overload_delay: ''
+# message to display when overloaded (may contain html)
+cgi_overload_message: ''
+# enable optimization of only refreshing committed changes?
+only_committed_changes: 0
+# rcs backend to use
+rcs: git
+# plugins to add to the default configuration
+add_plugins:
+{% for plugin in ikiwiki_plugins %}
+- {{ plugin }}
+{% endfor %}
+# plugins to disable
+disable_plugins: []
+#disable_plugins:
+#- editpage
+# additional directory to search for template files
+#templatedir: /usr/share/ikiwiki/templates
+#templatedir: /home/ikiwiki/.ikiwiki/templates
+# base wiki source location
+underlaydir: {{ ikiwiki_underlay }}
+# display verbose messages?
+#verbose: 1
+# log to syslog?
+syslog: 1
+# create output files named page/index.html?
+usedirs: 1
+# use '!'-prefixed preprocessor directives?
+prefix_directives: 1
+# use page/index.mdwn source files
+indexpages: 0
+# enable Discussion pages?
+discussion: 1
+# name of Discussion pages
+discussionpage: discussion
+# use elements new in HTML5 like <section>?
+html5: 0
+# only send cookies over SSL connections?
+sslcookie: 0
+# extension to use for new pages
+default_pageext: md
+# extension to use for html files
+htmlext: html
+# strftime format string to display date
+#timeformat: '%c'
+#timeformat: '%a %d %b %Y %r %Z'
+timeformat: '%A %e %B %Y at %k:%M (%Z)'
+# UTF-8 locale to use
+#locale: en_US.UTF-8
+#locale: en_CA.UTF-8
+# put user pages below specified page
+userdir: ''
+# how many backlinks to show before hiding excess (0 to show all)
+numbacklinks: 10
+# attempt to hardlink source files? (optimisation for large files)
+hardlink: 0
+# force ikiwiki to use a particular umask (keywords public, group or private, or a number)
+#umask: public
+# group for wrappers to run in
+#wrappergroup: ikiwiki
+# extra library and plugin directories
+libdirs: []
+# extra library and plugin directory (searched after libdirs)
+libdir: {{ ikiwiki_plugin_path }}
+# environment variables
+ENV: {}
+# time zone name
+timezone: :/etc/localtime
+# regexp of normally excluded files to include
+#include: ^\.htaccess$
+# regexp of files that should be skipped
+#exclude: ^(*\.private|Makefile)$
+# specifies the characters that are allowed in source filenames
+wiki_file_chars: -[:alnum:]+/.:_
+# allow symlinks in the path leading to the srcdir (potentially insecure)
+allow_symlinks_before_srcdir: 0
+# cookie control
+cookiejar:
+ file: {{ ikiwiki_cookie_jar }}
+# set custom user agent string for outbound HTTP requests e.g. when fetching aggregated RSS feeds
+useragent: ikiwiki/3.20190228-1
+# theme has a responsive layout? (mobile-optimized)
+responsive_layout: 1
+# try harder to produce deterministic output
+deterministic: 0
+
+######################################################################
+# core plugins
+# (editpage, git, htmlscrubber, inline, link, meta, parentlinks,
+# templatebody)
+######################################################################
+
+# git plugin
+# git hook to generate
+#git_wrapper: /home/ikiwiki/emacsconf.git/hooks/post-update
+git_wrapper: {{ ikiwiki_git_wrapper }}
+# shell command for git_wrapper to run, in the background
+#git_wrapper_background_command: git push github
+# mode for git_wrapper (can safely be made suid)
+#git_wrappermode: 06755
+# git pre-receive hook to generate
+#git_test_receive_wrapper: /git/wiki.git/hooks/pre-receive
+git_test_receive_wrapper: {{ ikiwiki_git_test_receive_wrapper }}
+# unix users whose commits should be checked by the pre-receive hook
+#untrusted_committers: []
+untrusted_committers:
+- anon
+# gitweb url to show file history ([[file]] substituted)
+#historyurl: http://git.example.com/gitweb.cgi?p=wiki.git;a=history;f=[[file]];hb=HEAD
+historyurl: {{ ikiwiki_git_base_url }}/log/[[file]]
+# gitweb url to show a diff ([[file]], [[sha1_to]], [[sha1_from]], [[sha1_commit]], and [[sha1_parent]] substituted)
+#diffurl: http://git.example.com/gitweb.cgi?p=wiki.git;a=blobdiff;f=[[file]];h=[[sha1_to]];hp=[[sha1_from]];hb=[[sha1_commit]];hpb=[[sha1_parent]]
+diffurl: {{ ikiwiki_git_base_url }}/diff/[[file]]?id=[[sha1_commit]]&id2=[[sha1_parent]]
+# where to pull and push changes (set to empty string to disable)
+#gitorigin_branch: origin
+# branch that the wiki is stored in
+#gitmaster_branch: {{ ikiwiki_git_branch }}
+
+# htmlscrubber plugin
+# PageSpec specifying pages not to scrub
+#htmlscrubber_skip: '!*/Discussion'
+
+# inline plugin
+# enable rss feeds by default?
+rss: 1
+# enable atom feeds by default?
+atom: 1
+# allow rss feeds to be used?
+#allowrss: 0
+# allow atom feeds to be used?
+#allowatom: 0
+# urls to ping (using XML-RPC) on feed update
+#pingurl: http://rpc.technorati.com/rpc/ping
+
+######################################################################
+# auth plugins
+# (anonok, blogspam, emailauth, httpauth, lockedit, moderatedcomments,
+# opendiscussion, openid, passwordauth, signinedit)
+######################################################################
+
+# anonok plugin
+# PageSpec to limit which pages anonymous users can edit
+#anonok_pagespec: '*/discussion'
+
+# blogspam plugin
+# PageSpec of pages to check for spam
+#blogspam_pagespec: postcomment(*)
+# options to send to blogspam server
+#blogspam_options: blacklist=1.2.3.4,blacklist=8.7.6.5,max-links=10
+# blogspam server JSON url
+#blogspam_server: ''
+
+# emailauth plugin
+# email address to send emailauth mails as (default: adminemail)
+#emailauth_sender: ''
+
+# httpauth plugin
+# url to redirect to when authentication is needed
+#cgiauthurl: http://example.com/wiki/auth/ikiwiki.cgi
+# PageSpec of pages where only httpauth will be used for authentication
+#httpauth_pagespec: '!*/Discussion'
+
+# lockedit plugin
+# PageSpec controlling which pages are locked
+#locked_pages: '!*/Discussion'
+#locked_pages: 'index or edit'
+locked_pages: 'edit'
+
+# moderatedcomments plugin
+# PageSpec matching users or comment locations to moderate
+#moderate_pagespec: '*'
+
+# openid plugin
+# url pattern of openid realm (default is cgiurl)
+#openid_realm: ''
+# url to ikiwiki cgi to use for openid authentication (default is cgiurl)
+#openid_cgiurl: ''
+
+# passwordauth plugin
+# a password that must be entered when signing up for an account
+#account_creation_password: s3cr1t
+# cost of generating a password using Authen::Passphrase::BlowfishCrypt
+#password_cost: 8
+
+######################################################################
+# format plugins
+# (creole, highlight, hnb, html, mdwn, otl, rawhtml, rst, textile, txt)
+######################################################################
+
+# highlight plugin
+# types of source files to syntax highlight
+#tohighlight: .c .h .cpp .pl .py Makefile:make
+# location of highlight's filetypes.conf
+#filetypes_conf: /etc/highlight/filetypes.conf
+# location of highlight's langDefs directory
+#langdefdir: /usr/share/highlight/langDefs
+
+# mdwn plugin
+# enable multimarkdown features?
+#multimarkdown: 0
+# disable use of markdown discount?
+#nodiscount: 0
+# enable footnotes in Markdown (where supported)?
+#mdwn_footnotes: 1
+# interpret line like 'A. First item' as ordered list when using Discount?
+#mdwn_alpha_lists: 0
+
+######################################################################
+# special-purpose plugins
+# (osm, underlay)
+######################################################################
+
+# osm plugin
+# the default zoom when you click on the map link
+#osm_default_zoom: 15
+# the icon shown on links and on the main map
+#osm_default_icon: ikiwiki/images/osm.png
+# the alt tag of links, defaults to empty
+#osm_alt: ''
+# the output format for waypoints, can be KML, GeoJSON or CSV (one or many, comma-separated)
+#osm_format: KML
+# the icon attached to a tag, displayed on the map for tagged pages
+#osm_tag_default_icon: icon.png
+# Url for the OpenLayers.js file
+#osm_openlayers_url: http://www.openlayers.org/api/OpenLayers.js
+# Layers to use in the map. Can be either the 'OSM' string or a type option for Google maps (GoogleNormal, GoogleSatellite, GoogleHybrid or GooglePhysical). It can also be an arbitrary URL in a syntax acceptable for OpenLayers.Layer.OSM.url parameter.
+#osm_layers:
+# OSM: GoogleSatellite
+# Google maps API key, Google layer not used if missing, see https://code.google.com/apis/console/ to get an API key
+#osm_google_apikey: ''
+
+# underlay plugin
+# extra underlay directories to add
+#add_underlays:
+#- /home/ikiwiki/wiki.underlay
+
+######################################################################
+# web plugins
+# (404, attachment, comments, editdiff, edittemplate, getsource, google,
+# goto, mirrorlist, remove, rename, repolist, search, theme, userlist,
+# websetup, wmd)
+######################################################################
+
+# attachment plugin
+# enhanced PageSpec specifying what attachments are allowed
+#allowed_attachments: virusfree() and mimetype(image/*) and maxsize(50kb)
+allowed_attachments: (mimetype(text/*) or *.m3u or *.svg) and maxsize(300kb)
+# virus checker program (reads STDIN, returns nonzero if virus found)
+#virus_checker: clamdscan -
+
+# comments plugin
+# PageSpec of pages where comments are allowed
+#comments_pagespec: blog/* and !*/Discussion
+# PageSpec of pages where posting new comments is not allowed
+#comments_closed_pagespec: blog/controversial or blog/flamewar
+# Base name for comments, e.g. "comment_" for pages like "sandbox/comment_12"
+#comments_pagename: ''
+# Interpret directives in comments?
+#comments_allowdirectives: 0
+# Allow anonymous commenters to set an author name?
+#comments_allowauthor: 0
+# commit comments to the VCS
+#comments_commit: 1
+# Restrict formats for comments to (no restriction if empty)
+#comments_allowformats: mdwn txt
+
+# getsource plugin
+# Mime type for returned source.
+#getsource_mimetype: text/plain; charset=utf-8
+
+# mirrorlist plugin
+# list of mirrors
+#mirrorlist: {}
+# generate links that point to the mirrors' ikiwiki CGI
+#mirrorlist_use_cgi: 1
+
+# repolist plugin
+# URIs of repositories containing the wiki's source
+#repositories:
+#- svn://svn.example.org/wiki/trunk
+repositories:
+- https://git.emacsconf.org/emacsconf-wiki
+- git://git.emacsconf.org/emacsconf-wiki
+- ssh://anon@git.emacsconf.org:emacsconf-wiki
+
+# search plugin
+# path to the omega cgi program
+#omega_cgi: /usr/lib/cgi-bin/omega/omega
+# use google site search rather than internal xapian index?
+#google_search: 1
+
+# theme plugin
+# name of theme to enable
+#theme: actiontabs
+theme: actiontabs
+
+# websetup plugin
+# list of plugins that cannot be enabled/disabled via the web interface
+#websetup_force_plugins: []
+# list of additional setup field keys to treat as unsafe
+#websetup_unsafe: []
+# show unsafe settings, read-only, in web interface?
+#websetup_show_unsafe: 1
+
+######################################################################
+# widget plugins
+# (calendar, color, conditional, cutpaste, date, format, fortune,
+# graphviz, haiku, headinganchors, img, linkmap, listdirectives, map,
+# more, orphans, pagecount, pagestats, poll, polygen, postsparkline,
+# progress, shortcut, sparkline, table, template, teximg, toc, toggle,
+# version)
+######################################################################
+
+# calendar plugin
+# base of the archives hierarchy
+#archivebase: archives
+# PageSpec of pages to include in the archives, if option `calendar_autocreate` is true.
+#archive_pagespec: page(posts/*) and !*/Discussion
+# autocreate new calendar pages?
+#calendar_autocreate: 1
+# if set, when building calendar pages, also build pages of year and month when no pages were published (building empty calendars).
+#calendar_fill_gaps: 1
+
+# img plugin
+# Image formats to process (jpeg, png, gif, svg, pdf or 'everything' to accept all)
+#img_allowed_formats: ''
+
+# listdirectives plugin
+# directory in srcdir that contains directive descriptions
+#directive_description_dir: ikiwiki/directive
+
+# teximg plugin
+# Should teximg use dvipng to render, or dvips and convert?
+#teximg_dvipng: ''
+# LaTeX prefix for teximg plugin
+#teximg_prefix: |
+# \documentclass{article}
+# \usepackage[utf8]{inputenc}
+# \usepackage{amsmath}
+# \usepackage{amsfonts}
+# \usepackage{amssymb}
+# \pagestyle{empty}
+# \begin{document}
+# LaTeX postfix for teximg plugin
+#teximg_postfix: \end{document}
+
+######################################################################
+# other plugins
+# (aggregate, autoindex, brokenlinks, camelcase, ddate, embed, favicon,
+# filecheck, flattr, goodstuff, htmlbalance, localstyle, loginselector,
+# notifyemail, pagetemplate, pingee, pinger, prettydate, recentchanges,
+# recentchangesdiff, relativedate, rsync, sidebar, smiley,
+# sortnaturally, tag, testpagespec, trail, transient)
+######################################################################
+
+# aggregate plugin
+# enable aggregation to internal pages?
+#aggregateinternal: 1
+# allow aggregation to be triggered via the web?
+#aggregate_webtrigger: 0
+
+# autoindex plugin
+# commit autocreated index pages
+#autoindex_commit: 1
+
+# camelcase plugin
+# list of words to not turn into links
+#camelcase_ignore: []
+
+# flattr plugin
+# userid or user name to use by default for Flattr buttons
+#flattr_userid: joeyh
+
+# pinger plugin
+# how many seconds to try pinging before timing out
+#pinger_timeout: 15
+
+# prettydate plugin
+# format to use to display date
+#prettydateformat: '%X, %B %o, %Y'
+
+# recentchanges plugin
+# name of the recentchanges page
+#recentchangespage: recentchanges
+recentchangespage: recent-changes
+# number of changes to track
+#recentchangesnum: 100
+
+# rsync plugin
+# command to run to sync updated pages
+#rsync_command: rsync -qa --delete . user@host:/path/to/docroot/
+
+# sidebar plugin
+# show sidebar page on all pages?
+#global_sidebars: 1
+
+# tag plugin
+# parent page tags are located under
+#tagbase: tag
+# autocreate new tag pages?
+tag_autocreate: 1
+# commit autocreated tag pages
+#tag_autocreate_commit: 1
diff --git a/roles/wiki/templates/htmlscrubber.pm b/roles/wiki/templates/htmlscrubber.pm
new file mode 100755
index 0000000..904a2dc
--- /dev/null
+++ b/roles/wiki/templates/htmlscrubber.pm
@@ -0,0 +1,132 @@
+#!/usr/bin/perl
+package IkiWiki::Plugin::htmlscrubber;
+
+use warnings;
+use strict;
+use IkiWiki 3.00;
+
+# This regexp matches urls that are in a known safe scheme.
+# Feel free to use it from other plugins.
+our $safe_url_regexp;
+
+sub import {
+ hook(type => "getsetup", id => "htmlscrubber", call => \&getsetup);
+ hook(type => "sanitize", id => "htmlscrubber", call => \&sanitize);
+
+ # Only known uri schemes are allowed to avoid all the ways of
+ # embedding javascript.
+ # List at http://en.wikipedia.org/wiki/URI_scheme
+ my $uri_schemes=join("|", map quotemeta,
+ # IANA registered schemes
+ "http", "https", "ftp", "mailto", "file", "telnet", "gopher",
+ "aaa", "aaas", "acap", "cap", "cid", "crid",
+ "dav", "dict", "dns", "fax", "go", "h323", "im", "imap",
+ "ldap", "mid", "news", "nfs", "nntp", "pop", "pres",
+ "sip", "sips", "snmp", "tel", "urn", "wais", "xmpp",
+ "z39.50r", "z39.50s",
+ # Selected unofficial schemes
+ "aim", "callto", "cvs", "ed2k", "feed", "fish", "gg",
+ "irc", "ircs", "lastfm", "ldaps", "magnet", "mms",
+ "msnim", "notes", "rsync", "secondlife", "skype", "ssh",
+ "sftp", "smb", "sms", "snews", "webcal", "ymsgr",
+ "bitcoin", "git", "svn", "bzr", "darcs", "hg"
+ );
+ # data is a special case. Allow a few data:image/ types,
+ # but disallow data:text/javascript and everything else.
+ $safe_url_regexp=qr/^(?:(?:$uri_schemes):|data:image\/(?:png|jpeg|gif)|[^:]+(?:$|[\/\?#]))|^#/i;
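+ # For illustration: "https://example.org/x", "data:image/png;base64,...",
+ # "relative/path" and "#anchor" all match this pattern, while
+ # "javascript:alert(1)" and "data:text/html,..." do not, so the scrubber
+ # drops attributes carrying such values.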
+}
+
+sub getsetup () {
+ return
+ plugin => {
+ safe => 1,
+ rebuild => undef,
+ section => "core",
+ },
+ htmlscrubber_skip => {
+ type => "pagespec",
+ example => "!*/Discussion",
+ description => "PageSpec specifying pages not to scrub",
+ link => "ikiwiki/PageSpec",
+ safe => 1,
+ rebuild => undef,
+ },
+}
+
+sub sanitize (@) {
+ my %params=@_;
+
+ if (exists $config{htmlscrubber_skip} &&
+ length $config{htmlscrubber_skip} &&
+ exists $params{page} &&
+ pagespec_match($params{page}, $config{htmlscrubber_skip})) {
+ return $params{content};
+ }
+
+ return scrubber()->scrub($params{content});
+}
+
+my $_scrubber;
+sub scrubber {
+ return $_scrubber if defined $_scrubber;
+
+ eval q{use HTML::Scrubber};
+ error($@) if $@;
+ # Lists based on http://feedparser.org/docs/html-sanitization.html
+ # With html5 tags added.
+ $_scrubber = HTML::Scrubber->new(
+ allow => [qw{
+ svg rect text g title
+ a abbr acronym address area b big blockquote br br/
+ button caption center cite code col colgroup dd del
+ dfn dir div dl dt em fieldset font form h1 h2 h3 h4
+ h5 h6 hr hr/ i img input ins kbd label legend li map
+ track
+ menu ol optgroup option p p/ pre q s samp select small
+ span strike strong sub sup table tbody td textarea
+ tfoot th thead tr tt u ul var
+ video audio source section nav article aside hgroup
+ header footer figure figcaption time mark canvas
+ datalist progress meter ruby rt rp details summary
+ }],
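+ # The leading undef below leaves the default tag rule (deny) unchanged;
+ # the hash that follows provides the default attribute rules applied to
+ # the allowed tags above.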
+ default => [undef, { (
+ map { $_ => 1 } qw{
+ version xmlns x y fill font-size stroke stroke-dasharray transform
+ data-start data-end data-video data-target data-tracks
+ kind label srclang default
+ abbr accept accept-charset accesskey
+ align alt axis border cellpadding cellspacing
+ char charoff charset checked class
+ clear cols colspan color compact coords
+ datetime dir disabled enctype for frame
+ headers height hreflang hspace id ismap
+ kind srclang src default
+ label lang maxlength media method
+ multiple name nohref noshade nowrap prompt
+ readonly rel rev rows rowspan rules scope
+ selected shape size span start summary
+ tabindex target title type valign
+ value vspace width
+ autofocus autoplay preload loopstart
+ loopend end playcount controls pubdate
+ loop muted
+ placeholder min max step low high optimum
+ form required autocomplete novalidate pattern
+ list formenctype formmethod formnovalidate
+ formtarget reversed spellcheck open hidden
+ } ),
+ "/" => 1, # emit proper <hr /> XHTML
+ href => $safe_url_regexp,
+ src => $safe_url_regexp,
+ action => $safe_url_regexp,
+ formaction => $safe_url_regexp,
+ cite => $safe_url_regexp,
+ longdesc => $safe_url_regexp,
+ poster => $safe_url_regexp,
+ usemap => $safe_url_regexp
+ }],
+ );
+ return $_scrubber;
+}
+
+1
diff --git a/roles/wiki/templates/license.pm b/roles/wiki/templates/license.pm
new file mode 100644
index 0000000..651c039
--- /dev/null
+++ b/roles/wiki/templates/license.pm
@@ -0,0 +1,59 @@
+# A plugin for ikiwiki to implement adding a footer with licensing information
+# based on a default value taken out of a file.
+
+# Copyright © 2007, 2008 Thomas Schwinge <tschwinge@gnu.org>
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 2, or (at your option) any later
+# version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+# for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+
+# Unless overridden with the `meta' plugin, a footer with licensing information
+# will be added to every page using a source file `license' (e.g.,
+# `license.mdwn') (using the same ``locating rules'' as for the sidebar
+# plugin).
+#
+# The state which page's license text was gathered from which source is not
+# tracked, so you'll need a full wiki-rebuild if the `license' file is changed.
+
+package IkiWiki::Plugin::license;
+
+use warnings;
+use strict;
+use IkiWiki 2.00;
+
+my %license;
+
+sub import
+{
+ hook (type => "scan", id => "license", call => \&scan);
+}
+
+sub scan (@)
+{
+ my %params = @_;
+ my $page = $params{page};
+
+ return if defined $pagestate{$page}{meta}{license};
+
+ my $content;
+ my $license_page = bestlink ($page, "license") || return;
+ my $license_file = $pagesources{$license_page} || return;
+
+ # Only an optimization to avoid reading the same file again and again.
+ $license{$license_file} = readfile (srcfile ($license_file))
+ unless defined $license{$license_file};
+
+ $pagestate{$page}{meta}{license} = $license{$license_file};
+}
+
+1