diff --git a/.travis.yml b/.travis.yml
index 41ca4cf2c3887201dae403a4e65d816de3718d0f..f8168067e6d160fa5c835826737f87bd4b7a8c9f 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -35,10 +35,17 @@ install:
   - pip install -r requirements.txt
   - pip install .
   - pip install coveralls
+env:
+  global:
+    - PYTHONHASHSEED=0
+  matrix:
+    # Enable this eventually after getting tensorflow to build on travis:
+    # - KERAS_BACKEND=tensorflow
+    - KERAS_BACKEND=theano
 script:
   # download data and models, then run tests
   - mhcflurry-downloads fetch
   - mhcflurry-downloads info  # just to test this command works
-  - PYTHONHASHSEED=0 nosetests test --with-coverage --cover-package=mhcflurry  && ./lint.sh
+  - nosetests test --with-coverage --cover-package=mhcflurry  && ./lint.sh
 after_success:
   coveralls
diff --git a/Dockerfile b/Dockerfile
index 813d9e6f6c09c299f779b00de7d59ddf53283bda..a704f2f04e1580abe00ac810db5c5947165ab15d 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -56,6 +56,9 @@ RUN virtualenv venv-py3 --python=python3 && \
         scikit-learn \
         seaborn
 
+ENV KERAS_BACKEND theano
+# RUN venv-py3/bin/pip install --upgrade https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-0.10.0-cp35-cp35m-linux_x86_64.whl
+
 # Install mhcflurry and download data and models.
 COPY . ./mhcflurry
 RUN venv-py3/bin/pip install ./mhcflurry && venv-py3/bin/mhcflurry-downloads fetch
diff --git a/README.md b/README.md
index 6ce73bd743804d70f0d2393b490193780123fdce..1227198bef04c6408950bce7c2aa1fd9c6090804 100644
--- a/README.md
+++ b/README.md
@@ -31,7 +31,24 @@ From a checkout you can run the unit tests with:
 nosetests .
 ```
 
-## Making predictions
+## Making predictions from the command-line
+
+```shell
+$ mhcflurry-predict --alleles HLA-A0201 HLA-A0301 --peptides SIINFEKL SIINFEKD SIINFEKQ
+Predicting for 2 alleles and 3 peptides = 6 predictions
+allele,peptide,mhcflurry_prediction
+HLA-A0201,SIINFEKL,10672.34765625
+HLA-A0201,SIINFEKD,26042.716796875
+HLA-A0201,SIINFEKQ,26375.794921875
+HLA-A0301,SIINFEKL,25532.703125
+HLA-A0301,SIINFEKD,24997.876953125
+HLA-A0301,SIINFEKQ,28262.828125
+```
+
+You can also specify the input and output as CSV files. Run `mhcflurry-predict -h` for details.
+
+
+## Making predictions from Python
 
 ```python
 from mhcflurry import predict
@@ -47,7 +64,9 @@ The predictions returned by `predict` are affinities (KD) in nM.
 
 ## Training your own models
 
-This [unit test](https://github.com/hammerlab/mhcflurry/blob/master/test/test_class1_binding_predictor_A0205.py) gives a simple example of how to train a predictor in Python. There is also a script called `mhcflurry-class1-allele-specific-cv-and-train` that will perform cross validation and model selection given a CSV file of training data. Try `mhcflurry-class1-allele-specific-cv-and-train --help` for details.
+See the [class1_allele_specific_models.ipynb](https://github.com/hammerlab/mhcflurry/blob/master/examples/class1_allele_specific_models.ipynb) notebook for an overview of the Python API, including predicting, fitting, and scoring models.
+
+There is also a script called `mhcflurry-class1-allele-specific-cv-and-train` that will perform cross validation and model selection given a CSV file of training data. Try `mhcflurry-class1-allele-specific-cv-and-train --help` for details.
 
 ## Details on the downloaded class I allele-specific models
 
diff --git a/downloads-generation/models_class1_allele_specific_single/GENERATE.sh b/downloads-generation/models_class1_allele_specific_single/GENERATE.sh
index 889a961f41661d0100e21b68dd68ca2658d9ec7c..f49fc99c20d62ee71e7502a1b0eee291a3384250 100755
--- a/downloads-generation/models_class1_allele_specific_single/GENERATE.sh
+++ b/downloads-generation/models_class1_allele_specific_single/GENERATE.sh
@@ -13,6 +13,7 @@ DOWNLOAD_NAME=models_class1_allele_specific_single
 SCRATCH_DIR=/tmp/mhcflurry-downloads-generation
 SCRIPT_ABSOLUTE_PATH="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)/$(basename "${BASH_SOURCE[0]}")"
 SCRIPT_DIR=$(dirname "$SCRIPT_ABSOLUTE_PATH")
+export PYTHONUNBUFFERED=1
 
 mkdir -p "$SCRATCH_DIR"
 rm -rf "$SCRATCH_DIR/$DOWNLOAD_NAME"
diff --git a/downloads-generation/models_class1_allele_specific_single_kim2014_only/GENERATE.sh b/downloads-generation/models_class1_allele_specific_single_kim2014_only/GENERATE.sh
new file mode 100755
index 0000000000000000000000000000000000000000..943cc6fcce0f71ff30923754771d1557bd507ab6
--- /dev/null
+++ b/downloads-generation/models_class1_allele_specific_single_kim2014_only/GENERATE.sh
@@ -0,0 +1,54 @@
+#!/bin/bash
+
+if [[ $# -eq 0 ]] ; then
+    echo 'WARNING: This script is intended to be called with additional arguments to pass to mhcflurry-class1-allele-specific-cv-and-train'
+    echo 'At minimum you probably want to pass --dask-scheduler <IP:PORT> as training many models on one node is extremely'
+    echo 'slow.'
+fi
+
+set -e
+set -x
+
+DOWNLOAD_NAME=models_class1_allele_specific_single_kim2014_only
+SCRATCH_DIR=/tmp/mhcflurry-downloads-generation
+SCRIPT_ABSOLUTE_PATH="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)/$(basename "${BASH_SOURCE[0]}")"
+SCRIPT_DIR=$(dirname "$SCRIPT_ABSOLUTE_PATH")
+export PYTHONUNBUFFERED=1
+
+mkdir -p "$SCRATCH_DIR"
+rm -rf "$SCRATCH_DIR/$DOWNLOAD_NAME"
+mkdir "$SCRATCH_DIR/$DOWNLOAD_NAME"
+
+# Send stdout and stderr to a logfile included with the archive.
+exec >  >(tee -ia "$SCRATCH_DIR/$DOWNLOAD_NAME/LOG.txt")
+exec 2> >(tee -ia "$SCRATCH_DIR/$DOWNLOAD_NAME/LOG.txt" >&2)
+
+# Log some environment info
+date
+pip freeze
+git rev-parse HEAD
+git status
+
+cd $SCRATCH_DIR/$DOWNLOAD_NAME
+
+mkdir models
+
+cp $SCRIPT_DIR/models.py $SCRIPT_DIR/imputer.json .
+python models.py > models.json
+
+time mhcflurry-class1-allele-specific-cv-and-train \
+    --model-architectures models.json \
+    --imputer-description imputer.json \
+    --train-data "$(mhcflurry-downloads path data_kim2014)/bdata.2009.mhci.public.1.txt" \
+    --test-data "$(mhcflurry-downloads path data_kim2014)/bdata.2013.mhci.public.blind.1.txt" \
+    --min-samples-per-allele 50 \
+    --out-cv-results cv.csv \
+    --out-production-results production.csv \
+    --out-models models \
+    --verbose \
+    "$@"
+
+cp $SCRIPT_ABSOLUTE_PATH .
+tar -cjf "../${DOWNLOAD_NAME}.tar.bz2" *
+
+echo "Created archive: $SCRATCH_DIR/$DOWNLOAD_NAME.tar.bz2"
diff --git a/downloads-generation/models_class1_allele_specific_single_kim2014_only/README.md b/downloads-generation/models_class1_allele_specific_single_kim2014_only/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..6cecfebb82cf745063e5244b36877980801cb818
--- /dev/null
+++ b/downloads-generation/models_class1_allele_specific_single_kim2014_only/README.md
@@ -0,0 +1,4 @@
+# Class I allele specific models (single) trained and tested in Kim 2014 dataset
+
+This is a reimplementation of the analysis in [Predicting Peptide-MHC Binding Affinities With Imputed Training Data](http://biorxiv.org/content/early/2016/05/22/054775).
+
diff --git a/downloads-generation/models_class1_allele_specific_single_kim2014_only/imputer.json b/downloads-generation/models_class1_allele_specific_single_kim2014_only/imputer.json
new file mode 100644
index 0000000000000000000000000000000000000000..c17f86cc5c2ebecb0bfcf333cab86dbb8cf1b96a
--- /dev/null
+++ b/downloads-generation/models_class1_allele_specific_single_kim2014_only/imputer.json
@@ -0,0 +1,8 @@
+{
+    "imputation_method_name": "mice",
+    "n_burn_in": 5,
+    "n_imputations": 50,
+    "n_nearest_columns": 25,
+    "min_observations_per_peptide": 2,
+    "min_observations_per_allele": 2
+}
diff --git a/downloads-generation/models_class1_allele_specific_single_kim2014_only/models.py b/downloads-generation/models_class1_allele_specific_single_kim2014_only/models.py
new file mode 100644
index 0000000000000000000000000000000000000000..6375cd4510bfebadd4df529a6884c1eb1632f162
--- /dev/null
+++ b/downloads-generation/models_class1_allele_specific_single_kim2014_only/models.py
@@ -0,0 +1,16 @@
+import sys
+from mhcflurry.class1_allele_specific.train import HYPERPARAMETER_DEFAULTS
+import json
+
+models = HYPERPARAMETER_DEFAULTS.models_grid(
+    #impute=[False, True],
+    impute=[False],
+    activation=["tanh"],
+    layer_sizes=[[12], [64], [128]],
+    embedding_output_dim=[8, 32, 64],
+    dropout_probability=[0, .1, .25],
+    # fraction_negative=[0, .1, .2],
+    n_training_epochs=[250])
+
+sys.stderr.write("Models: %d\n" % len(models))
+print(json.dumps(models, indent=4))
diff --git a/examples/class1_allele_specific_models.ipynb b/examples/class1_allele_specific_models.ipynb
new file mode 100644
index 0000000000000000000000000000000000000000..0fda9cd6f10d65a21810e10db8abeec646015ad3
--- /dev/null
+++ b/examples/class1_allele_specific_models.ipynb
@@ -0,0 +1,1307 @@
+{
+ "cells": [
+  {
+   "cell_type": "code",
+   "execution_count": 1,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [
+    {
+     "name": "stderr",
+     "output_type": "stream",
+     "text": [
+      "Using Theano backend.\n",
+      "/Users/tim/miniconda3/envs/py3k/lib/python3.5/site-packages/matplotlib/__init__.py:872: UserWarning: axes.color_cycle is deprecated and replaced with axes.prop_cycle; please use the latter.\n",
+      "  warnings.warn(self.msg_depr % (key, alt_key))\n"
+     ]
+    }
+   ],
+   "source": [
+    "import mhcflurry\n",
+    "import numpy\n",
+    "import seaborn\n",
+    "import logging\n",
+    "from matplotlib import pyplot\n",
+    "\n",
+    "% matplotlib inline\n",
+    "logging.basicConfig(level=\"DEBUG\")\n"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# Making predictions\n",
+    "Note: if you haven't already, run `mhcflurry-downloads fetch` in a shell to download the trained models."
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Simplest way to run predictions: `mhcflurry.predict()`"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 2,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Help on function predict in module mhcflurry.predict:\n",
+      "\n",
+      "predict(alleles, peptides, loaders=None)\n",
+      "    Make predictions across all combinations of the specified alleles and\n",
+      "    peptides.\n",
+      "    \n",
+      "    Parameters\n",
+      "    ----------\n",
+      "    alleles : list of str\n",
+      "        Names of alleles to make predictions for.\n",
+      "    \n",
+      "    peptides : list of str\n",
+      "        Peptide amino acid sequences.\n",
+      "    \n",
+      "    loaders : list of Class1AlleleSpecificPredictorLoader, optional\n",
+      "        Loaders to try. Will be tried in the order given.\n",
+      "    \n",
+      "    Returns DataFrame with columns \"Allele\", \"Peptide\", and \"Prediction\"\n",
+      "\n"
+     ]
+    }
+   ],
+   "source": [
+    "help(mhcflurry.predict)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 3,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [
+    {
+     "data": {
+      "text/html": [
+       "<div>\n",
+       "<table border=\"1\" class=\"dataframe\">\n",
+       "  <thead>\n",
+       "    <tr style=\"text-align: right;\">\n",
+       "      <th></th>\n",
+       "      <th>Allele</th>\n",
+       "      <th>Peptide</th>\n",
+       "      <th>Prediction</th>\n",
+       "    </tr>\n",
+       "  </thead>\n",
+       "  <tbody>\n",
+       "    <tr>\n",
+       "      <th>0</th>\n",
+       "      <td>HLA-A0201</td>\n",
+       "      <td>SIINFEKL</td>\n",
+       "      <td>10672.347656</td>\n",
+       "    </tr>\n",
+       "    <tr>\n",
+       "      <th>1</th>\n",
+       "      <td>HLA-A0201</td>\n",
+       "      <td>SIINFEQL</td>\n",
+       "      <td>7828.974121</td>\n",
+       "    </tr>\n",
+       "  </tbody>\n",
+       "</table>\n",
+       "</div>"
+      ],
+      "text/plain": [
+       "      Allele   Peptide    Prediction\n",
+       "0  HLA-A0201  SIINFEKL  10672.347656\n",
+       "1  HLA-A0201  SIINFEQL   7828.974121"
+      ]
+     },
+     "execution_count": 3,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "mhcflurry.predict(alleles=[\"HLA-A0201\"], peptides=[\"SIINFEKL\", \"SIINFEQL\"])"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    " "
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Instantiating a model"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 4,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       "array([ 10672.34765625,  30577.02539062,  10565.78222656], dtype=float32)"
+      ]
+     },
+     "execution_count": 4,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "model = mhcflurry.class1_allele_specific.load.from_allele_name(\"HLA-A0201\")\n",
+    "model.predict([\"SIINFEKL\", \"SIQNPEKP\", \"SYNFPEPI\"])\n"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    " "
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Instantiating a model from a custom set of models on disk"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 5,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       "'/Users/tim/Library/Application Support/mhcflurry/4/0.0.8/models_class1_allele_specific_single/'"
+      ]
+     },
+     "execution_count": 5,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "models_dir = mhcflurry.downloads.get_path(\"models_class1_allele_specific_single\")\n",
+    "models_dir"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 6,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       "array([ 10672.34765625,  30577.02539062,  10565.78222656], dtype=float32)"
+      ]
+     },
+     "execution_count": 6,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "# Make a Loader first\n",
+    "loader = mhcflurry.class1_allele_specific.load.Class1AlleleSpecificPredictorLoader(models_dir)\n",
+    "model = loader.from_allele_name(\"HLA-A0201\")\n",
+    "model.predict([\"SIINFEKL\", \"SIQNPEKP\", \"SYNFPEPI\"])"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    " "
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# Loading a `Dataset`"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 7,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       "Dataset(n=192550, alleles=['ELA-A1', 'Gogo-B0101', 'H-2-DB', 'H-2-DD', 'H-2-KB', 'H-2-KBM8', 'H-2-KD', 'H-2-KK', 'H-2-LD', 'H-2-LQ', 'HLA-A0101', 'HLA-A0201', 'HLA-A0202', 'HLA-A0203', 'HLA-A0204', 'HLA-A0205', 'HLA-A0206', 'HLA-A0207', 'HLA-A0210', 'HLA-A0211', 'HLA-A0212', 'HLA-A0216', 'HLA-A0217', 'HLA-A0219', 'HLA-A0250', 'HLA-A0301', 'HLA-A0302', 'HLA-A0319', 'HLA-A1', 'HLA-A11', 'HLA-A1101', 'HLA-A1102', 'HLA-A2', 'HLA-A2301', 'HLA-A24', 'HLA-A2402', 'HLA-A2403', 'HLA-A2501', 'HLA-A26', 'HLA-A2601', 'HLA-A2602', 'HLA-A2603', 'HLA-A2902', 'HLA-A3', 'HLA-A3/11', 'HLA-A3001', 'HLA-A3002', 'HLA-A3101', 'HLA-A3201', 'HLA-A3207', 'HLA-A3215', 'HLA-A3301', 'HLA-A6601', 'HLA-A6801', 'HLA-A6802', 'HLA-A6823', 'HLA-A6901', 'HLA-A7401', 'HLA-A8001', 'HLA-B0702', 'HLA-B0801', 'HLA-B0802', 'HLA-B0803', 'HLA-B1401', 'HLA-B1402', 'HLA-B1501', 'HLA-B1502', 'HLA-B1503', 'HLA-B1509', 'HLA-B1517', 'HLA-B1542', 'HLA-B1801', 'HLA-B27', 'HLA-B2701', 'HLA-B2702', 'HLA-B2703', 'HLA-B2704', 'HLA-B2705', 'HLA-B2706', 'HLA-B2710', 'HLA-B2720', 'HLA-B3501', 'HLA-B3503', 'HLA-B3508', 'HLA-B3701', 'HLA-B3801', 'HLA-B39', 'HLA-B3901', 'HLA-B40', 'HLA-B4001', 'HLA-B4002', 'HLA-B4013', 'HLA-B4201', 'HLA-B4202', 'HLA-B44', 'HLA-B4402', 'HLA-B4403', 'HLA-B4501', 'HLA-B4506', 'HLA-B4601', 'HLA-B4801', 'HLA-B51', 'HLA-B5101', 'HLA-B5201', 'HLA-B5301', 'HLA-B5401', 'HLA-B5701', 'HLA-B5702', 'HLA-B5703', 'HLA-B58', 'HLA-B5801', 'HLA-B5802', 'HLA-B60', 'HLA-B62', 'HLA-B7', 'HLA-B7301', 'HLA-B8', 'HLA-B8101', 'HLA-B8301', 'HLA-BOLA102101', 'HLA-BOLA200801', 'HLA-BOLA201201', 'HLA-BOLA402401', 'HLA-BOLA601301', 'HLA-BOLA601302', 'HLA-BOLAHD6', 'HLA-C0303', 'HLA-C0401', 'HLA-C0501', 'HLA-C0602', 'HLA-C0702', 'HLA-C0802', 'HLA-C1', 'HLA-C1203', 'HLA-C1402', 'HLA-C1502', 'HLA-C4', 'HLA-E0101', 'HLA-E0103', 'HLA-EQCA100101', 'HLA-RT1A', 'HLA-RT1BL', 'HLA-SLA10401', 'Mamu-A01', 'Mamu-A02', 'Mamu-A07', 'Mamu-A100101', 'Mamu-A100201', 'Mamu-A101101', 'Mamu-A11', 'Mamu-A20102', 'Mamu-A2201', 'Mamu-A2601', 'Mamu-A70103', 'Mamu-B01', 'Mamu-B01704', 'Mamu-B03', 'Mamu-B04', 'Mamu-B06502', 'Mamu-B08', 'Mamu-B1001', 'Mamu-B17', 'Mamu-B3901', 'Mamu-B52', 'Mamu-B6601', 'Mamu-B8301', 'Mamu-B8701', 'Patr-A0101', 'Patr-A0301', 'Patr-A0401', 'Patr-A0602', 'Patr-A0701', 'Patr-A0901', 'Patr-B0101', 'Patr-B0901', 'Patr-B1301', 'Patr-B1701', 'Patr-B2401'])"
+      ]
+     },
+     "execution_count": 7,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "full_training_data = mhcflurry.dataset.Dataset.from_csv(\n",
+    "    mhcflurry.downloads.get_path(\"data_combined_iedb_kim2014\", \"combined_human_class1_dataset.csv\"))\n",
+    "full_training_data"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 8,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       "(179692, 137654, 27680)"
+      ]
+     },
+     "execution_count": 8,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "kim2014_full = mhcflurry.dataset.Dataset.from_csv(\n",
+    "    mhcflurry.downloads.get_path(\"data_kim2014\", \"bdata.20130222.mhci.public.1.txt\"))\n",
+    "\n",
+    "kim2014_train = mhcflurry.dataset.Dataset.from_csv(\n",
+    "    mhcflurry.downloads.get_path(\"data_kim2014\", \"bdata.2009.mhci.public.1.txt\"))\n",
+    "kim2014_test = mhcflurry.dataset.Dataset.from_csv(\n",
+    "    mhcflurry.downloads.get_path(\"data_kim2014\", \"bdata.2013.mhci.public.blind.1.txt\"))\n",
+    "\n",
+    "len(kim2014_full), len(kim2014_train), len(kim2014_test)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    " "
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# Predicting affinities from a `Dataset`\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 9,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       "array([  3514.14550781,  12429.5390625 ,   4227.02197266, ...,\n",
+       "         5949.32763672,  17837.0859375 ,   6724.96728516], dtype=float32)"
+      ]
+     },
+     "execution_count": 9,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "model = mhcflurry.class1_allele_specific.load.from_allele_name(\"HLA-A0201\")\n",
+    "model.predict(kim2014_train.get_allele(\"HLA-A0201\").peptides)\n"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    " "
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# Fit a model"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 10,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Help on class Class1BindingPredictor in module mhcflurry.class1_allele_specific.class1_binding_predictor:\n",
+      "\n",
+      "class Class1BindingPredictor(mhcflurry.class1_allele_specific.class1_allele_specific_kmer_ic50_predictor_base.Class1AlleleSpecificKmerIC50PredictorBase)\n",
+      " |  Allele-specific Class I MHC binding predictor which uses\n",
+      " |  fixed-length (k-mer) index encoding for inputs and outputs\n",
+      " |  a value between 0 and 1 (where 1 is the strongest binder).\n",
+      " |  \n",
+      " |  Method resolution order:\n",
+      " |      Class1BindingPredictor\n",
+      " |      mhcflurry.class1_allele_specific.class1_allele_specific_kmer_ic50_predictor_base.Class1AlleleSpecificKmerIC50PredictorBase\n",
+      " |      mhcflurry.ic50_predictor_base.IC50PredictorBase\n",
+      " |      builtins.object\n",
+      " |  \n",
+      " |  Methods defined here:\n",
+      " |  \n",
+      " |  __getstate__(self)\n",
+      " |  \n",
+      " |  __init__(self, model=None, name=None, max_ic50=50000.0, allow_unknown_amino_acids=True, kmer_size=9, n_amino_acids=20, verbose=False, **hyperparameters)\n",
+      " |      Initialize self.  See help(type(self)) for accurate signature.\n",
+      " |  \n",
+      " |  __setstate__(self, state)\n",
+      " |  \n",
+      " |  fit_kmer_encoded_arrays(self, X, ic50, sample_weights=None, right_censoring_mask=None, X_pretrain=None, ic50_pretrain=None, sample_weights_pretrain=None, n_random_negative_samples=None, pretrain_decay=None, n_training_epochs=None, batch_size=None, verbose=False)\n",
+      " |      Train predictive model from index encoding of fixed length k-mer\n",
+      " |      peptides.\n",
+      " |      \n",
+      " |      Parameters\n",
+      " |      ----------\n",
+      " |      X : array\n",
+      " |          Training data with shape (n_samples, n_dims)\n",
+      " |      \n",
+      " |      ic50 : array\n",
+      " |          Training IC50 values with shape (n_samples,)\n",
+      " |      \n",
+      " |      sample_weights : array\n",
+      " |          Weight of each training sample with shape (n_samples,)\n",
+      " |      \n",
+      " |      right_censoring_mask : array, optional\n",
+      " |          Boolean array which indicates whether each IC50 value is actually\n",
+      " |          right censored (a lower bound on the true value). Censored values\n",
+      " |          are transformed during training by sampling between the observed\n",
+      " |          and maximum values on each iteration.\n",
+      " |      \n",
+      " |      X_pretrain : array\n",
+      " |          Extra samples used for soft pretraining of the predictor,\n",
+      " |          should have same number of dimensions as X.\n",
+      " |          During training the weights of these samples will decay after\n",
+      " |          each epoch.\n",
+      " |      \n",
+      " |      ic50_pretrain : array\n",
+      " |          IC50 values for extra samples, shape\n",
+      " |      \n",
+      " |      pretrain_decay : int -> float function\n",
+      " |          decay function for pretraining, mapping epoch number to decay\n",
+      " |          factor\n",
+      " |      \n",
+      " |      sample_weights_pretrain : array\n",
+      " |          Initial weights for the rows of X_pretrain. If not specified then\n",
+      " |          initialized to ones.\n",
+      " |      \n",
+      " |      n_random_negative_samples : int\n",
+      " |          Number of random samples to generate as negative examples.\n",
+      " |      \n",
+      " |      n_training_epochs : int\n",
+      " |      \n",
+      " |      verbose : bool\n",
+      " |      \n",
+      " |      batch_size : int\n",
+      " |  \n",
+      " |  get_weights(self)\n",
+      " |      Returns weights, which can be passed to set_weights later.\n",
+      " |  \n",
+      " |  predict_ic50_for_kmer_encoded_array(self, X)\n",
+      " |      Given an encoded array of amino acid indices,\n",
+      " |      returns a vector of IC50 predictions.\n",
+      " |  \n",
+      " |  predict_scores_for_kmer_encoded_array(self, X)\n",
+      " |      Given an encoded array of amino acid indices, returns a vector\n",
+      " |      of affinity scores (values between 0 and 1).\n",
+      " |  \n",
+      " |  set_weights(self, weights)\n",
+      " |      Reset the model weights.\n",
+      " |  \n",
+      " |  ----------------------------------------------------------------------\n",
+      " |  Data and other attributes defined here:\n",
+      " |  \n",
+      " |  fit_hyperparameter_defaults = <mhcflurry.hyperparameters.Hyperparamete...\n",
+      " |  \n",
+      " |  hyperparameter_defaults = <mhcflurry.hyperparameters.HyperparameterDef...\n",
+      " |  \n",
+      " |  network_hyperparameter_defaults = <mhcflurry.hyperparameters.Hyperpara...\n",
+      " |  \n",
+      " |  ----------------------------------------------------------------------\n",
+      " |  Methods inherited from mhcflurry.class1_allele_specific.class1_allele_specific_kmer_ic50_predictor_base.Class1AlleleSpecificKmerIC50PredictorBase:\n",
+      " |  \n",
+      " |  __repr__(self)\n",
+      " |      Return repr(self).\n",
+      " |  \n",
+      " |  __str__(self)\n",
+      " |      Return str(self).\n",
+      " |  \n",
+      " |  encode_peptides(self, peptides)\n",
+      " |      Parameters\n",
+      " |      ----------\n",
+      " |      peptides : str list\n",
+      " |          Peptide strings of any length\n",
+      " |      \n",
+      " |      Encode peptides of any length into fixed length vectors.\n",
+      " |      Returns 2d array of encoded peptides and 1d array indicating the\n",
+      " |      original peptide index for each row.\n",
+      " |  \n",
+      " |  fit_dataset(self, dataset, pretraining_dataset=None, sample_censored_affinities=False, **kwargs)\n",
+      " |      Fit the model parameters on the given training data.\n",
+      " |      \n",
+      " |      Parameters\n",
+      " |      ----------\n",
+      " |      dataset : Dataset\n",
+      " |      \n",
+      " |      pretraining_dataset : Dataset\n",
+      " |      \n",
+      " |      sample_censored_affinities : bool\n",
+      " |          If a column named 'inequality' is in the Dataset then every\n",
+      " |          peptide with a value of '>' on each training epoch, gets a\n",
+      " |          randomly sampled IC50 between its observed value and the\n",
+      " |          max_ic50 of the predictor. Default is False.\n",
+      " |      \n",
+      " |      **kwargs : dict\n",
+      " |          Extra arguments are passed on to the fit_encoded_kmer_arrays()\n",
+      " |          method.\n",
+      " |  \n",
+      " |  predict_ic50_for_kmer_peptides(self, peptides)\n",
+      " |  \n",
+      " |  predict_scores(self, peptides, combine_fn=<function mean at 0x109180ae8>)\n",
+      " |      Given a list of peptides of any length, returns an array of predicted\n",
+      " |      normalized affinity values. Unlike IC50, a higher value here\n",
+      " |      means a stronger affinity. Peptides of lengths other than 9 are\n",
+      " |      transformed into a set of k-mers either by deleting or inserting\n",
+      " |      amino acid characters. The prediction for a single peptide will be\n",
+      " |      the average of expanded k-mers.\n",
+      " |  \n",
+      " |  predict_scores_for_kmer_peptides(self, peptides)\n",
+      " |      Predict binding affinity for 9mer peptides\n",
+      " |  \n",
+      " |  ----------------------------------------------------------------------\n",
+      " |  Data descriptors inherited from mhcflurry.class1_allele_specific.class1_allele_specific_kmer_ic50_predictor_base.Class1AlleleSpecificKmerIC50PredictorBase:\n",
+      " |  \n",
+      " |  amino_acids\n",
+      " |      Amino acid alphabet used for encoding peptides, may include\n",
+      " |      \"X\" if allow_unknown_amino_acids is True.\n",
+      " |  \n",
+      " |  max_amino_acid_encoding_value\n",
+      " |  \n",
+      " |  ----------------------------------------------------------------------\n",
+      " |  Methods inherited from mhcflurry.ic50_predictor_base.IC50PredictorBase:\n",
+      " |  \n",
+      " |  fit_dictionary(self, peptide_to_ic50_dict, **kwargs)\n",
+      " |      Fit the model parameters using the given peptide->IC50 dictionary,\n",
+      " |      all samples are given the same weight.\n",
+      " |      \n",
+      " |      Parameters\n",
+      " |      ----------\n",
+      " |      peptide_to_ic50_dict : dict\n",
+      " |          Dictionary that maps peptides to IC50 values.\n",
+      " |  \n",
+      " |  fit_sequences(self, peptides, affinities, sample_weights=None, alleles=None, **kwargs)\n",
+      " |  \n",
+      " |  predict(self, peptides)\n",
+      " |      Predict IC50 affinities for peptides of any length\n",
+      " |  \n",
+      " |  ----------------------------------------------------------------------\n",
+      " |  Data descriptors inherited from mhcflurry.ic50_predictor_base.IC50PredictorBase:\n",
+      " |  \n",
+      " |  __dict__\n",
+      " |      dictionary for instance variables (if defined)\n",
+      " |  \n",
+      " |  __weakref__\n",
+      " |      list of weak references to the object (if defined)\n",
+      "\n"
+     ]
+    }
+   ],
+   "source": [
+    "help(mhcflurry.class1_allele_specific.Class1BindingPredictor)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 11,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       "Dataset(n=3040, alleles=['HLA-A3301'])"
+      ]
+     },
+     "execution_count": 11,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "train_data = kim2014_train.get_allele(\"HLA-A3301\")\n",
+    "train_data"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 12,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       "{'activation': 'tanh',\n",
+       " 'batch_normalization': True,\n",
+       " 'batch_size': 128,\n",
+       " 'dropout_probability': 0.0,\n",
+       " 'embedding_output_dim': 32,\n",
+       " 'fraction_negative': 0.0,\n",
+       " 'init': 'glorot_uniform',\n",
+       " 'kmer_size': 9,\n",
+       " 'layer_sizes': [64],\n",
+       " 'loss': 'mse',\n",
+       " 'max_ic50': 50000.0,\n",
+       " 'n_training_epochs': 250,\n",
+       " 'optimizer': 'rmsprop',\n",
+       " 'output_activation': 'sigmoid',\n",
+       " 'pretrain_decay': 'numpy.exp(-epoch)'}"
+      ]
+     },
+     "execution_count": 12,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "# We'll use the default hyper parameters here. Could also specify them as kwargs:\n",
+    "new_model = mhcflurry.class1_allele_specific.Class1BindingPredictor()\n",
+    "new_model.hyperparameters"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 13,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "CPU times: user 1min 22s, sys: 1.13 s, total: 1min 24s\n",
+      "Wall time: 45.8 s\n"
+     ]
+    }
+   ],
+   "source": [
+    "# This will run faster if you have a GPU.\n",
+    "%time new_model.fit_dataset(train_data)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    " "
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Evaluate the fit model on held-out test data"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### Generate predictions"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 14,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       "<matplotlib.text.Text at 0x122e50400>"
+      ]
+     },
+     "execution_count": 14,
+     "metadata": {},
+     "output_type": "execute_result"
+    },
+    {
+     "data": {
+      "image/png": "iVBORw0KGgoAAAANSUhEUgAAAesAAAFtCAYAAAAnGkJkAAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAAIABJREFUeJzsvXl8I+d55/mtwkmC7G6S4NH3Rbq6JXWTLdtqy5IvWU4m\ndg6v48QfZ7127Gw2s5udTT5JZjPZmc3hzTjHzng+mczknjh2xnE8m0ni8ZHEtqKxJVlqSZZIttTd\n1WSzL94EeBM3qvaPAkAcBaBwF8j3+/m0RACFwlvX+3uf533e55F0XUcgEAgEAoF9kVvdAIFAIBAI\nBKURYi0QCAQCgc0RYi0QCAQCgc0RYi0QCAQCgc0RYi0QCAQCgc0RYi0QCAQCgc1xtroBAoEdURTl\nJHAb+Laqqu/M++wzwMcAv6qqq4qiaOm/s7b5GPBBVVV/IPX6CPCvgTcCSSAC/Iaqqv8ta/tPAteA\nReCqqqqfbuxR1gdFUf4B+HD28Vf4/T8Cfl9V1VfLbPfzwEOqqn68HvsTCNoJYVkLBMWJAG9QFOV4\n+g1FUTqBx4DsBAXFkhXoqe/0A98BvqGq6kVVVS8BPwX8saIo705t+1Hgl1RV/b46H0MzeE8dvi9Z\n3NZKYohK9icQtAXCshYIipMEvgh8BPiN1HsfAL4E/FzWduWE4X8FnlFV9S/Sb6iqOqkoygeAdUVR\nPg08ApxKCXuGfKs9/Rq4APwOsAN0Ar8I/HbW61eBBVVV/2Xqez8G/LCqqj+ct/8Hgd8F+gAN+LSq\nqn+uKMo7MDwBM8BDgBv4aVVVv5X3/T9N/fm0oijvxRDT/wAcB1zAX6qq+puKojhSv/MYEEvt9xPA\nLwFHgM8rivJRVVVfytq3M/WdJ4ElYBlYT332FuC3Uu06jDEQ+klFUX49e38YBslv529nfpkEAvsi\nLGuBoDg68DkMsU7zMeAzFAr004qivJL69yqGSzvNm4Dn8neuqupzqqq+rqrqzwEvA7+gqurvmLSh\n2OsHgQ+lLPVo3uvfAX5cUZT0M/5TwO9n7ygloF8CfkdV1VHgvcCnFEW5nNrkEeD/VVX1YeBPgV81\nOYZPpP58p6qqc8CfA/9JVdU3A5eB9yiK8kHg0dQ2o6nPZoALqqr+K2Ae+LFsoU7xvwHDwDnge4AT\nWZ/9M+D/VlX10dRx/5CiKJdM9vd/mG2XfxwCgd0RYi0QlCA176kpinJJUZRjQJeqqtdMNn2nqqoP\np/5dAn456zMNa8+amYWe/1726/uqqs6avVZVdQJDEN+nKMo54LCqqt/M29cbAI+qql9KfWcB+K/A\nP0l9fldV1aupv18Beku1PTVF8A7g/0kNWF7AsLDHgKtAQlGUK4qifBL4a1VVXyhz7E8Cf6GqalJV\n1RDw+azPfhzoURTll4DfAzqALpP9ldtOIGgLhFgLBOX5c+B/Sv378yLblHKFv4BhWeagKMpPKYry\nsxZ+X0pt7yLXst7O2y7/9e8BP4Hhbv4jk/2aPf8yhvsaIJz1vk7pY9QBR+rvR1VVvZQatDwKfEpV\n1Q0M0f55IAF8UVGUnymxP7PfTGT9/SzwfcB1DC/GXJH2Wd1OILA1QqwFguKkO/X/DPwI8KPAXxTf\nvCh/CLxDUZQPp99QFOWNGG7lyTLfXcZwowP8cKkNTfgr4BLGPPufmnyuAjFFUd6fatOR1G98o8Lf\nSQBuVVW3MAYmv5Da3yEM9/8PKYryPuAp4HlVVT+JMb0wmvV9V8Fe4e+BjyqK4lEUxQt8KGu/DwO/\nqKrq3wLHMNzljuz9WdhOIGgbhFgLBMXRAVRVncdYUnVTVdX17M9M/i5AVdU14J3ABxVFuaooygRG\n4NQnVFX9xzL7+Bng9xRFeRlD3BasNl5V1TiGYD9vtqxKVdUE8H7gZ1Nt+jrwq/lBZBb4G+BZRVEe\nAH4MeIuiKJPA88DnVVX9AvB3wGvAa
4qivIRhcf9q6vt/i2FpP5m33z8Evpv63tMYbn1S1+A3gFcV\nRXkRI7juWQwhzuwPY5BTajuBoG2QRIlMgWBvoiiKD/g28E9NgrcEAkEb0fClW4qifBfYSL28rarq\nTzT6NwWC/Y6iKN8DfAH4EyHUAkH701DLWlEUD/AdVVXf2LAfEQgEAoFgj9Noy3oU8KXSETqAf6mq\n6pUG/6ZAIBAIBHuKRgeYhTCSKnwvRhanz2claRAIBAKBQGCBRlvWN4FpAFVVpxRFCWKk/Jsz21jX\ndV2SxBJIgUAgEOwbLIleo8X6Exg5jH86tYazmxJLTyRJYmVlq8FNan/6+7vFebKIOFfWEOfJGuI8\nWUecK2v093db2q7RYv2fgM8oivIMRsrFT6iqqjX4NwUCgUAg2FM0VKxTSRk+UnZDgUAgEAgERRHB\nXgKBQCAQ2Bwh1gKBQCAQ2Bwh1gKBQCAQ2Bwh1gKBQCAQ2Bwh1gKBQCAQ2Bwh1gKBQCAQ2Bwh1gKB\nQCAQ2Bwh1gKBQCAQ2Bwh1gKBQCAQ2Bwh1gKBQCAQ2Bwh1gKBQCAQ2Bwh1gKBQCAQ2Bwh1gKBQCAQ\ntABdt16EstElMgUCgUDQxmi6znOTC8yu7HCs38djFw8jS1Krm9XWJJJJwtEk4ViSgQFr3xFiLRAI\nBIKiPDe5wD++OgfAzdl1Eskk//Difda2YvR0u/nV//kRvE4hJeXQdY1ITCMSTRBNWLeo0wg3uEAg\nEAiKcn9lm+1QnNXNCNuhGH/51DTL6xHiSY3l9Qi/+icvtrqJNkYnnkiyGYqxsh5hYydWlVCDsKwF\nAkEbI1y01VHuvGV/fmdhk/XtKDqwo+toeu6+1rZizW18G6DpGpGoRjiaIJ6sTpzzEWItEAjalnwX\nLcDbRo80vR12GTSYtQPIvHe03we6zos3lllaDdPV6TI9b89OzPPl79wllkgSisTRNJAk0PXC3/S4\nhYPWQCcWN+aiI7EkJqeqJoRYCwSCtiMtSk99d5adSIKuThcAsys7LWmPXQYNZu0AMu+9cnMFgFgi\nSTSWBKCr01Vw3l68scxWyLCY05a00yGT1HSSeab1kb7O+h9IG6FpGpFYMmVF11uidxFiLRAI2o60\nKO1EEhlR6ep0cazf15L25ItdqwYN5doRSxgC7XY6iMaSqdelz5ssSegpO9HjktGBcNTYjwRshRN1\na3/70Fgr2gwh1gKBoO1Ii5Cvw+jCfF4nT1w6mnH7NoNsl3MoEkfXdaSU67tVg4Zj/b4cizrdjvR7\nbqcD2D1vg70dXD4/WHDeHjk/yNJqmFgiiaYZQVI6IEkS3V4nkeiuQB30uRp7UDZC0zTCsSSRSIJ4\n/uR9hSQ1nZn5DS4og5a2F2ItEAjajrQoSZJEV6eLJy4dbbrbOdvlDHC8v4tOrytnrrjZPHbxMHpq\nThpAB956YQggZ856LhAqObf++MXDSKnvXJ0JsLiaAHQSCY2kpmWEWgfiiWbYla2kfla0ruvMBXYY\nnwoweSvIdjjOB96tWPquEGuBQNB2pMUwP5CqmeS7mDu9Lj785EjT25GNLElIksROxHBNP/3qHBKV\nz5/LkpT5zrP/bj7zvg5EYlom2EySIBpP1qv5tkLTNELRJJFogkSNVvTqZoTx6QDjUwECG5Gq9iHE\nug3RNJ1nJuZbHnkqEBSj0dHR2WJSD6ppbzGXcyvbBPWfP4+brAtOR4XrOnhcjpr2by90orEkoWiS\nWLw2K3onEufqrSDj0wHuLW0XfN7T7WF02G95f0Ks25CnXrpni8hTgaAYdomOtko17W20dV/tOaxm\nEFFqYNDpcbIZime2lSBHxNJWfPuik0hqmfSfWg1WdCyR5MbdNcangty8v46Wt9atw+Pk4tk+xob9\nnBjsysQ4WEGIdRtyZ3Ez53WrIk8F1WGXNbmNxC7R0fkUO/fl2pv+XnAnRp/PnfleIwcg1Z7DSgcR\nm
q7zma9eZ3ImiNvpKBgYvP/xU/zlP94ikdRwOmRAJ5Y1T52Oxm83NF0jWkP6z8x+NJ1b8xtMTAd4\n7fYqsXjuvpwOifMnexgb6Wfk2MHUOawcIdZtyKmhA0yk1ktC6yJPBdXRblZnNTTaRVwtxc59ufam\nv+dyyhm3cKOvWbXnsNJBxHOTC0zOBInGdtdeZw8M3n7pGA6HIyP+f/WtaWKJXWu6Su1pEUZkeyiq\nEYklTJO8WNqLrjMfDDE+tcLkrSBbWZ4HMLwPZ44eYGzYz4One/G6a5daIdZtyLvffIKtrUhLg2sE\n1WNXq7Oe2CEAzIxi575ce1txzZp1DmdXdjLrrsFw5ZYaGPi8LrZCu2Ld6bX/0q2kljSKaNS45Gp1\nM8LEdJDx6RVW1gsDxY70dTI64ufiWT8Hfe5amlyAEOs2RJYb634TNBa7Wp31pNEu4mo52u/jlZsr\nxBJJ3E6HsZSJwvZqem4Q51F/Z9OvWbPO4bF+H+r9NcAQ6otn+nIGBvneCEkCOSsavKfb2/A2VkOm\nylUsWVPEeigSZ3ImyMRUkLtLWwWfH+pyMzrsZ2zYz2Bv47K5CbEWCJqMXa3OfUGW31NH5+a9NeZM\nYgfyBepdl47yxKWjOXPWewWz+zE7hiLfi5BIJDMpSHUdDnTYKRrccHOHU27uao3oeELj+t01JqYD\nqPfMAsUcXDjTx+iwn5ND3U2JORFiLRA0Gbtanfsh8G0uEErlEXexHYpz9fYqvQe8BbED+QI1t7LD\nh58cob+/m5WVQuuqnSl3Px71d+Z4I/Kjv2/c3yzyzeaR1FICXcOaaE3TmZnfZHw6wOu3VwuscadD\n4tyJHsZG/Lzh+KGqA8WqRYi1QFABe1nQ9lvgW1p80mQL9H6YqrBM3v2dyCtWEYm1ZulWPdzcuq6z\nEAwxPhVg4lbANFDs9BEjUOyhM/UJFKsWIdYCQQXsZUHbb4FvoUic2UCuQJtttx+nKrIHpXOB7Yw3\nAow53ERyVxzrHUhVGp14Qkul/qzezb22ZQSKvToVYGU9XPD54b5Oxob9XByuf6AYgEuWcLsdeN3W\npxCEWAsEFbCXBW0/WJPZLt9itZ/zt0tv+8zEfME663anmKcoe1C6HYoTiSWQZQm308HxQR+35rZI\najoOWeLB072Nb2cdylCGIgmuzhgZxe4uFg8UGx32M9SAQDGXQ8bjduB1yykXemX3jxBrgaAC9pKg\nZXfU58/08WhWwYf9YE1WEjvQinXWzaCYpyh7EKrrGtF4EkmS0DSdZMKwsGXZEJv8JCD1QyeaKqAR\nrbKARjyhcePeGuNTAW7eXy+oxe11G4FiYyONCRTzOGXcbgceV3UCnY0Qa4GgAuzuHq1kTj27o769\nuMnW1uE9IUCNYK96VIodV/agNBRNktR0JAkisSQLq6Gc76xtRevapnSwWDiaKBBXK2iazu0FI1Ds\ntZnCQDGHLHHuZA+XGhAoJgFulwOPy7CgZbl++xZiLRCUoZ2CyiqZU9+rAtQImuFRacV9Vuy4sgel\n61tRIrEk6Do6RoCZQ85qVx2aaFjvxlx0NcFiuq6zuJoOFAuyuZObAlUCTh0+wNiIn4dO99LhqZ/0\nSZJRzMTjcuBxy8hSY6LEhVgLBGVop6CySgR4L7n0a6WcUKbFq5HrrFtxnxXzFGVPEcyubLMdjqNj\niF7vAQ/xhJ6Jpn/k3ECVv157AY317SgT0wFenQqwvFYYKDbUmw4U6+NQl6fKdhYiyxLelDi7nTJS\ngwQ6GyHWAkEZ2skCLSbAZmKU3VGfP9PHxdM9LWmzHSgnlGnxauQ662L3WSMtbivz9m9W/Nya2yCR\n1HA4ZN7zxqM4nc6qp4IyS66qLKARju4Git1ZKLwWB32pjGIj9Q0U243glnE5HdTFpVABQqwFgjK0\nkwVazFIqJkbpjnovJvuoBKtVtwLbUVbXwnR4nRzv76qrcBa7z2q
1uGsV++m5TeJJDV2HeFJjem6T\ncycrjQDfzSwWrqKARjyhod5bYzyVUcwsUOyhM0bpyVOH6xcoVmsEdz0RYi0QlMHuQWXZFLOU2sk7\n0AqsVt0KRxOsb0Xp7nQzNbsB1M9VXew+q/Xa1Sr21++tZ8RV12FiZpX51bCl/dWy5ErTde4sbDI+\nZZSejMRMAsVSGcWUE/ULFEtHcHvdMg65tQKdjRBrwb6gFuvCrulBK6GdvAOtwGrVrXTwUyyRBFx1\nHfQUu89qvXZWvQbFno38ueREnuu68BwYS65WN8KsrEcqXnK1ENxhYjrAxHSQjbxAMYDTh7sZG+mv\na6CYx2lY0B63jEO2U67zXYRYC/YF7RQk1gjayTvQCsoNyNKC6XE5CEcSmTSlzRj01Hrt0m3XdZ2d\ncIK5wDbPTMybJkB55eYKV64vcfn8YObzI/5OtkKxTJWtvoOegv2bBYtJLpdloV7fjjI5bcxDL+Yt\nDQMY7OlgbMRIWFKPQLFGLrFqFEKsBfuC/e4GbgfvgJ2XyKUF0mzOuhasHHOt1y7dxivXl9gJJ9iJ\nJDLinJ0AZTsUZysUI5ZIZop1vG30CJfPD7K8FslEf3/Pm44jy3KqzZ08rPSzthmtOFgsHE3w2u1V\nxqcC3FnYLBD2Az43o2eNhCWH+2ofFEkSeF0OQ6QbuMSqUQixFuwLhBvY/tjZ+1HPaPBsgc7OT96o\nY063fXZlJ6diVn4ClFjCSH4ST2hsh+LcX9kG4PHRI0iSlDWgGCKZ1FNWdKKg+EUpEkmNG/fWmZgK\ncOPeWkGgmMfl4MKZXkZH/JweOpDJklb9sYPH7cTrknG7mrPEqlEIsRbsC4Qb2P7sF+9H9qBkdTOC\n2+lIFcpo7DGXS4Dy9y/eIxRJkExqbIVihPNKYUqSTjKpEdiI8OK1ZRZXQwz1GpZ1KQ+IESi2lcoo\nFjQNFFNOHGJs2I9yogeXszZBdciSEcHtknG7mr/EqlEIsRbsC9rBDdwM7Oxq3i/ej2xBdjsdmWA1\naOwxl0uAcn95m1g8kHF3G8FbOs9OzPPtiXniSZ3Xbq/x2u1VFlMJSO6kCmK8ySQxytzKNt96+T4T\n0wHTQLFTh7uN0pOn++j01iZF2VWsDLG3xz1dT4RYCwT7CLu4mkslabm/vE04muD+Sm4g1F4he1Di\n63Byrv8QnV5XgccnfY7ur2wTjiTo8Dg5PlD92u5yA9bjA11MzW0ALhwyHO7tJLAe4ebsBpGsYh0L\nwRBSlns6OyBsYzvKxK0g41PmgWIDPR2MpSpb9XTXFijmckiGi9sGa6CbgRBrgWAf0SpXc74468DT\nRZK0PDMxnxlQ1Hstsx0ws3DNxDc9sNoOxVnfjuJyyvi8RoT12xtwPt56YRBN07i3vE1PtxflVA8J\nTWewp4Nrt1eJJzVcDpkzRw5kLGuAvgNeXr6xzPh0gNvzJoFina6cjGJSDQOvelaxajeEWAsENqbe\nbutWuZrzLXpfntsze9DQjnPXlVwnq1My6ePeicTRNJ1Y3FgS9eL1pTqKtRFQFo4micQSKCd7UfKz\nk+Udx4mhbo4PdnPj3hpboTh/d+UuiWRhoNgbzw1w7sQhzhyuPlBMAtxOOWNBt8MSq0YhxFogsDH1\ndlu3KtCunOBmDxrqNaBo5vx8I6YX8s9DLRZpPpVkFltaDdHZ4ULXdWIJjedfW2QzFCMcLQwUe8Px\nQ4yN+Dl3oofBgW5WVysfaKXXQHvd7bnEqlEIsRYImkQ14lFvK7NVgXb5wvPIuYG85UC7g4Z6DSga\nNT9vdh2ruU5WK31dubbE3aWtVClGZ01VrqIxI2lJNJbMuKs1XecVdaVodHenx8nmTqxofemTQ0ag\n2IUzvXR6XVW1LL0G2pNycbfzEqtGIcRaUHfsHHHcSqoRj70SIW11nhbqN6BolDs9/zrquk4oEs8s\nw/J1OC1dJ6uVvh67eLjgeaq
ERDJJJKYRiiZMy1C+oq7wwrUlYDe6e+T4ISanA4xPB1gIFgaK9R/q\n4NKIn9HhPnq6vRW1J81eWgPdDBou1oqiDAAvA0+qqnqz0b8naD12iTi2G9WIx15ZH94Ki75RA538\n6/bijWV2UilIY4kk5/oPWbpOVu+Has5ddhnKcDxZ0nJOR21rmk4kluCp787yN9+eKQgU604Fil08\n28dCYIeltTC35jZ5WPFYHow7ZCmT4nMvrYFuBg0Va0VRnMAfAIVDM8GexS4Rx3YTtmrEQ6wPr55G\nDXTyr2MaI7GJi06vy5J41X8wYZShDEU1IlllKM0s5/S66ERSQ9N1VjcjBclKwAgUe/B0L2Mj/kyg\n2Ms3lrlyfdl0f2bIsoR3DyYpaTaNtqz/DfD7wC/VY2fCvdoeFOuEGn39zCz6Dzx5oG77r5W9YiWb\nYcdns1EDnfzrmL0MDayLbr3uh6Rm1ImORBMkTNzc+eudF4I73Fk0Sk9enVklHM3NVCZL6UCxPs6d\n7MkULSm2P7P11LIs0elxQrdbCHSdaJhYK4ry48CyqqrfUBTl/6rHPoV7tXaa0akW64Qaff3svuRn\nL1vJ++nZzL+Omq4jUbno1nI/6LpGNG4suYrFkyWrWw31dnJncSu1RCvBxHSQ519fKtjuxGAXYyN+\nLpzpw1ciUCy9v+zXxvGAN7XEyu1y0HPASyJqPW+4oDSNtKw/DmiKorwHGAM+pyjKD6qqulzqS/39\n3UU/C+7EcvLGBndiJbdvFJqm89RL97izuMmpoQO8+80nak44XynVHvc3rtzlmasLANxe3KS728t7\nLp+sZ9MAc4u20dfv/Jk+bi9u5ryG6s/VXqTUvVvLebLLs9kMzI6rGR4cXTfWRIciccLRBLLbgc/t\nopQdv74VRXLIRGIJVjejBZ8P9XXyyINDPPLAEP5DHZba8eRbTuHzuZlb2ebkYDfvfONxOrxOvG5n\nwfKyvXoPtIKGibWqqu9I/60oytPAT5UTaqBkRZs+n5t4Vhm2Pp+75go41ZCdYWni5gpbW5GmWhG1\nVP65PhPMOYfXZ4KMnekt8Y360ejrd/F0D1tbkYyFc/F0D1D6ntpvFLt3a60mZZdns9HUo+pWpWQH\ni1kpQxmJJXj99ioT00FuzW0UBop1uLg43MfYSD9H+lIZxTTN8ppohyzxxhE/jz84gMvpIBlLsB1L\nsJ23XSvOVTtidUDTrKVbVmuQl8Quc352d7eWopVLgRp9/ZrhZrbj3GwlNOretcuzWSn2vZ67mcXC\nWcFixUgkNaburzM+HeD63bWCjGJul8xDp3sZHfZz9sjBij2B6TzcHpe8Zwtl2J2miLWqqk/UYz92\nmfNr57WvrexU7XL9aqHd52Ybde+267W12/XUdI1IVEtlFittReu6zr2lbcanA0zeChYJFDtoZBQz\nCRQrh8sh4/XszzzcdkQkRamCdrUioH07VbvQzl4VaO97txHY43qaL7kqxvJ6mIkpI2HJ2lbhPPSJ\nwS4jo9jZ0oFi+ezm4TbSfDpkc4G2rzdibyPEugqEu9WcatvcTsfazl4VqO+9207XrRitvJ6aphGO\nJYlYyM+9GYoxOR1kYjrAXKBwQOE/6M1Utuo7YD2jWE4ebpd5oYz866zpOl/5zt1M3Wtd13n72FHL\nvymoDiHWWdTa+dSz87Kbe84K1bb52ckFvvzcnd2Hn8aUAKwHwjLdpR3v0Xyafz2N/NwhC0uuorEk\nr99ZZXwqwK35jQKLu6vDxejZPkZH/Bz1+ywX+jByjDuMfxYKZeRf51g8yVYolmnjizeWhVg3ASHW\nWdTa+dSz82qFe67WwUa1bX7x+lLuw1/XEoD1RUwj7GIPF3JtNOd66iSS6WCxpGl+7jRJTWPq/gav\nTgW4cXetYN7a7ZR3M4odOYijRKBYdoGOo32dvOWhITo9zorzcOdf12i8MNOZoPEIsc6i1s6nn
p1X\nK9xztQ422t1FvFdolntaXO/SaLpG1MKSq+xAsau3goQKAsWMwhpjw37On+xJZQQrz+T0CuNTK0iy\nxHxgm65Od1UDk/zrfP5EDzfurWc8YY+cH6x4n4LKEWKdRa2dTz07r1a4W2sdbFTb5kfODbC0Gt59\n+KsuAWh/miGkzXJPW7nee2FeuzJ0YvEk4Vj5YLGV9TDj0wEmpgKsmgSKHR/YDRTr6rAWKCbLEt5U\noYz1rThJHYz/SFUbD/nX+dELQzx/dVFMBTUZIdZZ1CqQ9RTYdqxSVG2bHx89UrS28V6jGULaLPe0\nleu9F+a1rVAuP3earVCMyVtBxqcDzJlcl76DXsaG/YwN++k7aC1QLD/NZzqC+2i/D7UOxoPZdd6L\n19DuCLHOolaBbPf5zFYFT9n9vNXTOmyGkLY0wjnvXN1fzs1r1ex57UZa9lbzc0fjSa7dXmV8OsD0\nXGGgmC8VKDY27Odov7VAsUwtaLeMp0ihDBEMubcQYi3IYHfRbBX1tA7rKaTFhKiVnXT+uTrmzz2+\nZs9r19uy13WdRNKI5o5EExQzopOaxvSsESh2/e5aTipWMALFHjhlBIqdPVo6UCyNnK4F7ZItBYnV\n63nef1MZ9kSItcA22LVTqKc1XE8hLSZErRx05Z+bDq+TJy4dbZl1V69rl84strIeIrBROL8MhpDf\nX95mfCrA5EyQUMQkUOzYIUZH/DxgMVDMIUt43A463DIuZ2tKTe6XqQy7I8RaYBvs2inU0xqup5Ba\nEaJmD4Dyz9Xx/q6WXsParp1ONJ4kHE0SjRlubqdJRrBAKlBsfDpgWtnq2ICPvm4vLpfMiYFuLp7t\nK3kNXLKEx2OfPNx7YYneXkCItcA22LVTsOvcnxUhavYAyG7nqvL27K6JjsSSJIv4ubdCMa7OBBmf\nCpjep30HvIyN+Bkd7uPOwhYvXDPqR88HQgC8KW/FQ7pQhtdtvzzcYomePRBiLbANdu0U7DqXb0WI\nKhkA1cMKt9u5stoeK2uio/EkV15f5LnxWaZnNwrmq31eJxfPGgK9tBpiaS3MnYUtFoK553xx1RBs\nj4U83HbAbgOw/YoQa4FtEJ1CZVgRokoGQHadhmgchps7Ek0SiSdN10QnNZ3pWaP05LU7hYFiLofM\nA6d7GBv2M3zsIA5Z5uUby1y5vgzAncUthno6AGPO2uWUGT5ygIFDXtM83HbEbgOw/YoQa4FtaIdO\nwa5BcMWoZABk12mIepNIJonEjDKUZm5uXdeZXdnJBIrthOM5n0sSDB81Sk8+cKo3tXRql7TlDCDL\ncKDLzUNgAGNrAAAgAElEQVSne1lcC3PUb/97phzt9gzsFWwl1n/8pav0+dwtufjiBhRYod2sz0oG\nQHadhqgHVtzcwY2IESg2FSC4GSn4/Fi/j0cvHmH4cDfdne6iv3W0r5OF4A6yJLEZirG8Fub00AF+\n9InhtuxT8vtGHXi6jZ6BvYKtxPraTDDjZmr2xW+3TngvY+eB0162PvfeNEQq9WcJN/d2OM7kLaP0\nZH4CF4Debg+jI34uDfvxH+qgt9fH6mrhNZdTlay8bgffc/k4XZ1urlxfQts2fiPdt9ipT7H6nOX3\njT5vrmzspWfAzthKrNO04uLv5U643ahk4JTf4eTnLX7/E2+oa9v2svVpp2mIWgZs6dSfxdzcsXiS\na3fXGJ8KMD27XhAo1ulxcvFsH2Mjfo4PdBXNKCZJpPJwOwqSlLxt9AizKzvsZK21tlv2NqvPWbl2\n76VnwM7YUqxbcfH3cidsxl6xXvM7nJv315kN7GRed3d7GTvTW7e27T3rs77U676q1NOl6xqREm7u\npKZza26D8akA1+6sEjMJFDt3qocurwsdnSN9Po6ZCLWEjtdtCLSnTBaxVvcp5c6h1ecs/zgeOTew\nb3L52wlbifUDZ/oyc9bNZr91wnZ2+1fSyc2u7KDrOjvhB
LFEkq2dGF2drkwne2dxs65i3Szr086D\nqVLU676yJiSGm3snmuSFqwvMBUMM9XbysNKPLEnous7cyo5R2epW8UCx0WE/D57q5epMMLMe+t6S\n4RJ/07kBJDCWWLkcDPX5CFo8hlb3KeXOodXnzOw42uFe3GtYFmtFUXoBTVXV9bIbV8lP/tAFVla2\nat5PNR1dM12AduiI7ez2r6STO9bv45WbK2yFYgBoTh0pLNHVaWSaOjV0oPENbgB2HkyVol73VSkh\nya9w9fKN5YzI3lncYjscJ6npTEwHCGwUBood9fsYHTbWQ2cHimVHcTtkifWtKAd9brzuXQu6kuVW\nrZ5WKCfGlTxnuq4zF9hmLrCNDjwuBLvplBRrRVEeBP458AOptxKKogB8Bfi0qqqvN7Z51WH3js5K\n+xot6M1y0TV64PTYxcNcub6UqYXd1eHE1+HiqL+LY/0+3v3mEwSDhYFDdsfOg6lS1Ou+yheSt14Y\nJBxNEIklicaTOdsuroZIajqRaIJQNMHXX7pfsL+ebg9jw35GR/wMHOow/c2jfZ0sBneQJImkpnFy\nqJsOj62cjxVRToytPmfPTS7w5e/czQyIl1bDSNirT90PFL0TFUX5LeAY8BfAP1NVdSv1fhfwDuDX\nFEW5o6rqLzSlpRVg947OSvsaPeBolouu0cchSxKXzw/mBPJcPj+Y+Q3ZQjUjO9Lq+c5qyb+vHr0w\nxDMT8xUPOg0hOUw8YVjRgY1oQTR3LJHk+p01bs1vsLQaLthHp8fJhVTpyROD5oFi2UFi6SjuvTIV\nVi/L/v7yNjuROAlNR8I473brU/cDpYaNX1RV9ZX8N1VV3Qa+CnxVUZQ3NaxlNWD3js5K+xo94GiW\ni64ZAyerAw87TD9YbUur5zurJf++emZivsLBmpGbOx0slsgL1U5qOjPzRqDY67cLA8VkWeLBUz2M\njfQzcuxgKs92Luk5aLMgMWEtFhKOJognNHRNRwc0Tbddn7ofKCrWZkJtss3L9W1OfbB7R2elfXYf\ncFilGcdRiTuvUiu/UQJfri2tnu+0SrnzY3Wwli5BGY0VRnMb86U7TEwZgWLbJoFiZ4+kM4r14HWb\nd2sep4zX48TjlpHL1IIW7NLhdXLQ5yYUNbxXJ4a6bden7gdKucE1IHtYm91D6aqqli/G2iLs3tFZ\naZ/dBxxWsdNxVGPlm4nqYxcP1yzgdp+qsUq5QUepwZqua8TiGuHYbgnKbFY3dzOKmQWKHfH7GBv2\nc3G4jwNFMoq5HDIdHsOKbpdc3HbjeH8XU7MbdPuMc/yW84MiuKwFlHKD/3vg7cDzwBeBZ1RVNa8X\nJ6g7xQRd03W+ceUu12eCLXflWsFOA6dqrHwzUa3HPPxe8ZyUG3QUDtaGiCcShFNu7vyEJDuReKb0\nZHr5VDY93R5Gh/2MDfsZ6DEPFLNzucl2xE4D7v1MKTf4zyqKIgGPAx8C/p2iKN8G/lJV1SvNamCz\nsdO8phnPTS7wzNUF4gnNlpHudqaaTsdMVOthFe+VDrDcoCMdKJaeh17diBbMQ8cSSW7cXWN8KsjN\n++toeZFkHR4nF870Mjbi58Rgt+nz6JQlPG4HHR6HEOg6Y6cB936m5LqElCX9DPCMoigy8E7g04qi\nHFVV9VTjm9d87L7sq1qhqGQQktA0Pvu1G9xf3ub4QBcfe+85nHvAhVhNp2Mmqs9NLtRsFVuvs2zv\nwWOpQUd6HjoSTRBL5s5Da5rOrfkNJqYDvHZ7lVg893OnQ+LcyR4uDfsZOX7INFDMkRZot4zL6UAI\ntGAvY2kRoaIobwQ+CLwfuAt8spGNahWarnPl+hKrmxHcTge+Dqft5hKP9fu4vbgJwHYozlxgm2cm\n5st24pUMQj77tRu8dMOox5tOFPET3/9A3Y7B7gKUjZmoNtMqtvvgsfD86ETjCcLRwnloXdeZD4YY\nn1ph8laQrVBeoBhw+
sgBLo34efB0r2mgmCxLRrpPl4zb1XiBzr9X651rXiCwSqkAs8sYAv2DwG3g\nvwCPqaq62qS2NZ3nJhdYWg0TTQW8gP3mEh+7eJjubi/ffPEu26G45Yo+lVjk+dWHzKoR1UI9BSjd\nmd5f2SYcSdDhcXJ8oKuhA4BmugXbIxBNJ57QiMSShGNJtDw39+pmhInpIOPTK6ysFwaKHe7rZGzE\nz8Wzfg76CgPFZFlKrYVujkBnk3+v1jPXfDsNWgWtp5Rl/TxwH/hvQAAjQcr/nspghqqqe866nl3Z\nyaSpjCWSDPZ22GouMf1wB3eMTEK+Dmcm0YOVyjhWXbfHB7pyUi8eH+iqpdkFWF/OU74zS3em26E4\nW6EY3Z1upuY2AOuVuurRSTaq47VvIFrp9dChSJyrM6uMTwW4u1SYQvhQl9sIFBvxM9jTWfB5KwU6\nm/x7s5655u3uNRHYi1Ji/UmKL93ak6Q7RkOwXVy22RKF9MPtcsqsbUYBMoOLcp14Ja7bj733HEDO\nnHU9sSpAVjqzdGcaSySz/u8qOXh5dnKBLz93J5OiVAfeXmMn2aiO126BaEktmRHoeDJXoOMJjet3\n15iYDnDz/npBecoOj4MLZ/oYHfZzcqgwUEyWwJuK4m6lQGeTf6/WM9d8e3hNBHahVDT4rzaxHbag\nUR1jvayu7Ie5q9OFz+vM5MAu19ZKXLdOWa7rHHU+Vs+zlc4s3Zm6nQ6iMUN80+8X48XrS5k8x9FY\nkhevLSHltccu66btEImraYaLOxpLFiQs0TSdmYXNTEax/LzdToeEcqKHSyN+3mASKJbOJtbhduBx\n20Ogs8m/V+uZa96+XhOBHSkbYKYoyvcCvw70kvUkqap6poHtagnlOsZqRbdeVlf+w335/GAmOvmL\nT03vuXkvK51ZujM1m7O2uiZ9bTsq1k3nkakPHUsSixcGii0EQ4xPB5icDrBZJFBsbNjPQ2fMA8U8\nTtlYC+2xdzax/D6hnrnm7eY1sYqYa28NVqLBfxf4OeA1KEgytK+oVnTrZXWlH+bgTixT97sd572s\nttlKZ1ZqgPXMxLzpmvRHzg2wtBrOuMEPdbkJRXctwv27blonGk8SiSaJxJMFhTPWttKBYgGW1woL\nZxzu6zRKT57t42CXp+Bzt0PG4zHmoR2ybRMgNg07eE2qoR37nL2AFbEOqKr6lYa3pA2oVnTrZXWl\nH+7+/u5M3e96Bms1C6ttrrUzK/Y7j48eQZKkzLnQgadTnQ80dt20/dCJJ5Ksb0VYXo8URHKHIgkj\no9h0gLuLhYFiDlmiw+Pk8gODvPuNxwo+dzlkvBmBbp9kJXZ6XuyGmGtvDVbE+hlFUT4N/D2QWXeh\nquq3G9Yqm1Kt6DbS6qpnsFazaJbLOHtNevbv5AurpusFc9b1wL4dfmEkt+50ZYQ6ntC4cW+N8Snz\nQDGv28FBn5uEpuN2ykiSRDi6W6LUJUt4PO2d7rPW58W+17529tqUT7tgRawfSf3/UtZ7OvBE/Ztj\nb6oV3UZaXfUM1moWzXIZp9ekZ89Zm9Go62OnARLsBoqFo0niZhnF5jYYnw7w2kz5QLHxqQAvXFvK\nfH6834fP68Ttknnx2nKeSDXl8OpKrc+L3a59PdkbUz7tR1mxVlX1Xc1oSDtgR1en1TbZaTTcrPMo\nSxLvuXyyLutiq7GU0h38dihOLJHkyvWlot9rlCVmJVBsYjrA1durrG9Fc74rAacO72YU6/DsdhcP\nK/24HBKBjQjHBrp42+hhZEmuon61Pan1ebHT4Lje2LEf3A9YSjcqaH/adTRsF3diNZbSsX4fr9xc\nySwTW1wN8ZmvXqfT6yo4lvpaYqUDxda3o0xMB3h1yjxQbKi3M1N68lBeoFj2WujvvXyCfBf3XhGp\nWp8XOw2OBXsDIdYm2EUg6km7joarETFN13l2coEXry/hdjsYO9PH46NHarqG1YjQYxc
Pc+X60m7y\nFV1nciZI7wFvwbHULnJGoFg4ZUXnB4qFo7uBYncWCgPFero9XDjTy+iwn8N9ucIiSRjZxDwOPGWS\nlewVkar1eWnXwbHAvthKrP/4S1czS5JaKY7tMt+0FwcV+VQjYs+lMpRthWJIksT9xW2kGjvfakRI\nliQunx9kJ2IEX6ULxKTJPpbqRK50ys94QkO9t8b4dAD1nnmg2ENn+hgb9vPwg0Osr4VyPve4HEbR\nDLeMZHEttBApgaAxlCrk0Qn8MvAjwFFAA+aBvwP+laqqG/VuzLWZIPFUhqRWimO7uPJKDSr2ipBX\nI2KzKzuZ9KNgpCCt9RpWK0LZ3wtF4swGcgW6mv2XzCim69xe2GRiyig9GYnlBoo5ZAnlxCHGRvpR\njh/C5TREOH1vuGQJr9dZ9VrodvXg1Jt2GfAL2odSlvXnge8C7wAWUu8dBj4KfAF4b6Ma1WpxbBdX\nXqlBxV7pLKoRyWP9vkz6UQC301HzNaxGhPIHTI9eGOL5q4umx1Ju/7quEY1rhKOFgWIAC8EdxqcC\nTN4KspEq9JLNqcPdXBr289CZvpxAMTCycvm8TqQDnpR4t9+gzm60y4Bf0D6UEmtFVdX/Ie+9WeBT\niqK81sA2tVwc28WVV2pQsVc6i2pE8rGLh9ExcoC7XA58bgf3l63V/a6nR6J2z4cRKBaKJnnh6gJz\nwRBDvZ08rPQjS1ImUGxiOphTJS3NYE8HYyN+Rof9poFinlSgmMsp89K1pbJpWQXWaZcBfyn2indu\nr1BKrFcURfkR4L+qqqoBKIoiAR8CVhrRmAfO9GXmrFtJu7jySg0qjqYikdPBTUfbsLOoFlmSePvo\nEd4+eoTxmVW+9O1bAGVLZ0KhwOq6npPtLLvDKteZVef5MGpDh2PJTKDYyzeWM2uaZ+Y3ub2wyfp2\njDsLmwUW9gGfm9GzfYyNmASKYRTN8LodeFy789DF0rLaiXYTjnYZ8Jdir3jn9gqlxPojwO8Bf6Io\nSnp++gDwDPCxRjTmJ3/oQiaN5n6hlk6o5KAif71O/ut9wp2sDGa6rnPl+lLJc50vsC/eWM4EiOV3\nWOU6s0o8H/OBHbbDcdNAsfnADuFognA0QSSWLLCiPS6HEck94ufkYDfjUwG+q64w1LvDw0o/HS4H\nXo8Tj9u8aEY7eGHaTTjaZcBfimL3RbsNnPYKpUpk3gd+QFEUJ+DHGJivqKqaKPYdQeU0qhOaC4Qy\ndbnTr5uJXR7oU0MHmLhpOIJ2wgnjXyRR9FznC2w+2R1YOZErZV0d6/cxM7+OjoSm6/g6XGyHd6tX\nabrOnYUtJqYDjE8HMoGXaRyyxBuOH2JsxM+5Ez2ZQLG0Fe52ySyuhjjY6eJtY0dLnqNiaVntRDsM\nKPYaxQab7TZw2itYyWCWABab0JZ9Sb06oXxxPNriObNyD7SZmKe/V0+Bf/ebT7C1FWF2ZYe5wHbG\nSgbzc50vsEld56vfuWs6nVBuXrLQutJJJJNEYhrnT/awsRNjcXV3HhqMxCnjUwEmpgOmgWJ9B7w8\nfvEwF8700enNfXwdssTGdpQOj4NEUiep68xaGKRZTctqlUYM1PbCHHC7UWywKQZOraHU0q2Plvqi\nqqqfq39z2ot6dEr16oTyxfFdY0d44tLRls2ZlXugzcQcqPuIXZZ3BTM7FSaYn+t8gf32+FzuBlnT\nCdbmJXfXQkdjCeLJ3e+/6dwAABvbUZ6dXGB8KmAaKDbQ08HYsBEo1tNdPKOY2+XgxGA30/OVWcn1\nTMsKjbG89sIccLtRzJUvBk6toZRl/QTwQeC/ULiWQwf2vVjXo1OqVyeUL4ZzgRAffnKkqn3Vg3IP\ntJXReb3LfVZzrktNJxSflywu0GkisQSvzawyPh3g9rxJoFiny6gNPezncF8nUtbxZTKKuR24XbkJ\nS+wgao2wvPbCHPBewQ732H6k1Jz1jyuK0gs8q6r
qnzaxTW1DPTqlenVCdhvtlnugi7W3keU+qznX\n1s+rTlJLZxMrrGoFkEhq3Ly/zvhUgBv31kjkibjH5eCh00ag2JnDB5CzylVJgNtCRjE7iJrd7kVB\nfbHDPbYfKTdn/VPA/9iMhrQjduqUahnt1nuO0cr+SrXXTuU+S5/XXYGOmWQTA+Nc3F3cYnwqwGu3\ng4SjhRnFzALF0nicsrEe2mMeyW1HhOUlENSfkmKtquoC8G+q3bmiKDLwx4CCka70n6qqeq3a/dkN\nu3RKtYptvecYreyv2Oi8WeU+rZ4zs3aWSveZZmk1xPi0ESi2vm0eKHawy835kz08+tBQzm+7ZAmP\nx0mHp7qUn61GWF4CQf0pGw2uKMov572lA2HguqqqXy3z9R8AdFVVH1cU5R3Ap4D3V9VSG1KPTqke\nVm2tYltvS7XRlm89BkmVnjNN14gWqQudZmMnxuStAONTARaChYFi/Yc6uDTiR5Lg6oyRt/vVqQAe\nl4M3nx+gw+2kw+OoKuVnvbwjdllyJxAIcrFSdWsYGMHIBw7ww8Am8LiiKO9QVfX/LPZFVVW/pCjK\nl1MvTwFrNbS1ZTSyA6uHVVurONbbnd/o6QFTa7fCa2TlnOl6ag66hEBHYglev73Kq1PmgWLdnS5G\nz/oZHfFzJBUo9pXv3AHS89AyW6EYA4e8litbmVEv74jZfh67eFgI+D5GDODsgRWxVoC3q6oaBVAU\n5Q+Ab6mq+qiiKBNAUbEGUFVVUxTlzzAs6g/W2N6W0IilKOkH4KnvzrITSeDrcGbSWlZKreJYb3d+\n9v6O9vvQdZ0vfHOqoQ96pdeo2DnTdY1Y3BDoSDxpmvgtEyg2HeDGXfNAsQdP9zI27OfMkdxAMYDj\n/T4WV3eQgHhSJxRJ8JdP3arp/NTLm2G2H5EEY39TSQpeQeOwItY9qe2iqdduoCv1tyVTIBVZPgC8\nqCjKeVVVw8W27e/vtrLLphLcieUE/gR3YjW38xtX7vLM1QUi8STb4TgOh8QBn5vzZ/os7Tt7m/c/\n8Qa6u73cWdzk1NAB3v3mEwUCUY4PPHmg4mPIRtN0nnrpXqYN73/iDciyxDeu3OVr37kNwO3FTbq7\nvbzn8knL+7F6LKWukdn5zD5nJwe7eWz0CLFYknA8geR20OF20ZG1va7r3Jrb4MXXF/nu9aWc5Cpg\nrOd+6Ewfjzw4xMVhP25X7lyzJOl0uJx4vU4eGO5nsL+bO4ub7ITi3FncRCJs6fwU4/yZvpwsZFbv\no2z6+7tN93NncbPu9387s9+OPf/ZGp8Jsh0ysu2Vu2f327lqJFbE+j8ALyuK8hXAAXwf8LuKovws\nMFnqi4qifAQ4pqrqbwIRIIkRaFYUO+YG7/O5c9I99vncNbfzeqp2d4fHSTKp43U5eNuFw1w83VN2\n3/393QXbjJ3pzSS1CAa3a2pbNTwzMc9Tr8yyE07wdOI+3722yMffdz5znGmuzwRLJt/ITlwycXOF\nzc2wpVF8sWtkdq4MdM4fP8DpwS6isQQz98xnaJbXwplAsbWtaMHnJwa7GBvxc+FMHz6vsRZ7eyuS\n+dztkPF6jGjuRCzBdswQ+fT1+sI3p0hUcH6KcfF0TyZT27F+n6X7KJv0eTLbz9ZWpO73f7tS/H7a\nu+Q/W7FY0tIzvR/PVTVYHdBYSTf67xVFeRp4EkgAH1RV9XVFUUYwCn2U4q+BzyiK8q3Ub/1M2p3e\nTjQi6jvbDdvV6eKJS0fb2rU4u7LDTjjBVsiIfJ6cCfLc5ELFLvpKCmlkYzWbWCyeTM1DJ9CK1DbZ\n3IkxeSvI+NQK86aBYl5Gh/2MDfvpPeAt+FyWwOtx0ulx4HQUBotlzwGGIvGcz6qd369XBLbZfuyy\n6kHQGvKvvw48XSYToKD+WIkGl4DHU/8cgKwoynVVVafKfVdV1RBGSc22phFLUfZaB3is38cL13ZT\nyLudDmZXdvj
Qu4eB3OPMD1h59MIQz19dzIhXek7MjGJzscWuka7rxBMJwqlAMa2IQqcDxSamg9ya\n2ygMFOtwcXG4j7FhP0f8voL2pZOWdHhyy0+akT0HqOs6x/u76PS6St4HrQzyEUux9jf511/TdST2\nTt/VLlhxg/82RjT4n2L0SR8HTgM/28B2CVpELak8b95fZ3ImiNvpoKvTEB+zjj7b1X1zdp2b99eZ\nDeyKcLZ4VTeKN9J9hqNJkqs7BDcL1zmDESg2NbvB+FSA63dXCwLFpFTe7Yff4Of7Lp80nTt3OSS8\nHicdbgeybC2aO3vAIUkSnV5X2dSwIshLYBfE4K01WBHr7wEuqaqqASiK8lXgakNbtQ9odYRlMVGu\nJZXnx9933rSSVj751vH95W2kLCHMFi/ro3jzfNyeztzCF7quc29pm/HpAFdvBQlF8wLFJImDXW7j\nu24HsiSh6+QItSxLeN0OOtzVrYkuNTVQ7LqISkcCwf7Gilg7U/9iWa+TxTcXWKHaudl6UUyUaxEF\nqyPufLE6PtCVY1lni1fpferEExrRuEY0miBebBIaWF4PZ0pPFg0UG/Zz4Wwf1++s8cK1pcxnQ72d\nSBji3eF24HE7qFSgsyk1BVLsutgpta1AIGg+VsT688B/VxQlnRTlw+wmSBFUSX7nm0+jLadiotwM\nUcgXq+w56/TrZybmi3gZdOKJJKFokhdeW+T+yk6mHnS+J2IzFOOV6SDPT84zFyg8n/6DXsZGjMpW\nfVmBYtm1pU8MdPHWC0N0ep11y81dagBS7Lq0U4xDuybRaNd2C/YHVqLBP6UoyqsYJTNl4F9bSDMq\nKEOrIyyLiXIzRMFMrLJf589pAzzywICR7jNuBIm9fGM5Y/3eWTSWh7zp3ADRWJLX76wyMR1gem6j\nIKlJV4eL0bN9jI74OWoSKAZGcY23jx6pOPVnI+ubt9M8YbvOr7druwX7g6JirSjK27Ne7gBfzv5M\nVdVvN7Jhe51WR1gWE2U7iMLsipHdy+GQ0DWdW/ObjBw/BBjn6RV1hedfWyQcTdDhNW7ha3dXmZ7b\n4PqdtYLylG6nzAOnehkb8XP26EEcRZKseJypNdElSlCWolRnb1XI28mCLka7zq+3a7sF+4NSlvWv\nlfhMx7C0BXWi2SJpB1EuxHBxD/Z4uTmrE44aous/6M2I9MStAMH1CLqusx2JE4omSCS1gsIZsgQj\nxw7x+KWjHO/rLMgolsYhS3SkBNrpqK3ClVlnnxbpK9eXWFoNAzovXDPSlX78fecLBNue16Uy2nV+\nvV3bXSvC/d8eFBVrVVXf1cyGCPYrqUQlcY1oLEk8qbETSeB0yDgdcPGsn4eVfl5RV3jh2hJrmxGi\ncSO+UdMhqeVa0ccHdgPFujpc9Pb6WF3NFVFJAm9qTbQh4tV1TNmd3FF/J6FInNXNCG6nA1+Hk2P9\nvoy1vboZIZQKIHTIUiZpTLsLsxnt6h1o13bXinD/twel3OB/BfyBqqrfLPL5+4BPqKr6w41qnGCv\nkpVJLJ6bqOQVdYUr15czryVgOxTnWxNzrG1GTbOO9R30MpbKKNZ3sDCjWBqPU8bjNlJ/1iNYLLuT\ne+XmCmAkg4klkpzrP8RjFw/zxaemM+/vsLtMLJ00Zi/Srt6Bdm13rQj3f3tQyg3+48CvKIryu8AE\nMIuRbvQU8CbgbzESpAgEFtCJpgQ6WiLV5+Kq4c7WdJ1INME/vjLLX397pmA7hyxxaqib97z5OMcH\nuopmPHPIRlCZ4eaufE10KbI7tVjCsPaN9KMuOr0uZEnKuFZ9HU4isQRJTcfndWUsb4Gg1exX93+7\nUcoNvg38c0VRPokxPz2CUYTjeeAnVFUVwy9BSTLlJuOlBTpN2qW9thUhEjWvHw3gdTv4Fx95GLfT\nfI45nXmswy0z2OsjUO6HqyS7k8tvi1l0/dGHfaDrzAVC+8rNKrA3+9X9325YW
bq1BXypCW1pa0SQ\nhkF2PehoPFlWoHVdZ3Zlm1enjIxiBaUnU8IbjiYy4q3pOpPTQd50biBnW7No7mIWdz3IEWJ/J0gS\nc3WOrhf3laDR7Ff3f7thJSmKwAL7OUgjW6Aj8WTB2mYzAhtGRrHx6QCrm4UZxY71+xgb6efi2T6u\n31nlqy/cJZ7QkIBOtyPjLi9X4SqbUsJXjSjWq5Mr9dtW76tq2p/9nfNn+rh4ukcMBAQCmyLEuk60\nU5BGrR37sX4fb70wSCKhVyTQW6EYV2eCjE8FTM9P3wEvo8N9jI348R/syLz/sNLPd9VlFoIhnA4Z\nWYL17Sjq3TUeHx3CIVtbclVK+Fo52Cr121bvq2ran/2d24ubbG1F9s0AUyBoN6yUyPyPwJ+pqvpS\nE9rTtrRTkEa1HfvTr87ikGVuza2zHYpxSRko+R2AaDzJtXRGsdmNAre4z+vk4lk/YyN9HOs3DxR7\n9WaAeFLnULeHaCxBUoftcJz/PjGPLFu3bksJXysHW+nf2g7FiSWSXLm+lBlAWb2vqml/Ow0wBYL9\njtpzGnYAACAASURBVBXL+grwm4qiDACfA/5cVdXFMt/Zd7RTkEYlnXTaxX13aQtNh1jMiHqeC4a4\nVOQ7SU1nenadiekgr99ZJZ7IXQvtcso8cKqHsWE/w8cO4ihRWjJtRXd6nSSThiWv67tz0ZUITCnh\na+Vg61i/j1durrAVMmrlLK2GM2uwrd5XR/2dvHJzhVgiidvpMObQLfxuuwwwBYL9jpUAs88Bn1MU\n5ThGEY/vKIpyDfgTVVX/ttENbBeaGaShaXqJQhdZ26Vc1/eXtzOpOY/3d3HU31mmky5cZnWoy5NT\n73moN1cMjECxHcanAkzOBNkJx3M+lyQYPnqQsRE/D5zqxVMkoxgYs87uVNISj0vm5GA3t+Y3geJR\n11YoJXytHGw9dvEwV64vZYTW1+HMDEIs31f5199iDXIgZ85a0HhE0KCgGizNWSuKchr4CIZYTwN/\nA/yooigfUFX1ow1sn8CEp166Z8mNnXZ3b4fibIVidHe6mZrd4F2XjvLEpaM5nYWua8QSWtF10NmV\nqNJVrgCCGxHGpwOMTwUIbkYK2nC038fYsJ+LZ/vo7nSXPC6XLOH1OulwO5CzrO2cqOv+6pc/lRK+\nVkbEypLE5fODOZHwlVq5cys7dHW6AFfmtRmarvPs5AIvXjeKoDxyboAPvXuYwYEDrKxsVXcAgorY\nz8GoguqxMmf9HDAIfBb4J6qq3ku9/1lgrtR3BY3hzuJmzutiruD0++mEHcb/Xcyt7PDhJ0cyLu6t\nULzsOmhZkjJLpbbDca68vsT4dID7y9sF2/Z2exgd9jM24qf/UEfB59mk10R3emRczt3Un/vN+nj0\nwhA3769zf3mb4wNdPHphqKLvF3Np559HHfjyc3dyXO6SJPGBJw/U7VgEpRGxAoJqsGJZ/1tVVf86\n+w1FUU6qqnoXQ8QFTebU0AEmUuktobgVlu7ANU0noem4NB1J1whHY/znr6v0dXsYfUNhHWgzYvEk\n1+6uMTEVYGp2vUDYOz1OLp41IrlLZRRLU67C1X6zPp6/ushsYAdJlpgN7PD81cWKjreYGz//PPq8\nzszgDYwBnBCL5iJiBQTVUCo3+HEMM+eTiqK8xO4CVifwNeBc45snMOPdbz7B1lak7PzqYxcPc/P+\nOqubYQ52OpFkCUmWuTW/lRHTpE5BcpE0SU3n1twG41MBrt1ZJZYfKOaQOZ8KFBs5XjpQDCqrcLXf\nrI9aj7eYG99sP26ng2gqUNDtdAixaDLtFIwqsA/lSmS+CzgCZNeuTgBfaWSjBKWxslxJ1zXicQ2P\n28GhLi/JlCkciSZBglA4TjypMXErwMPKrnWt6zpzKzuMTweYuFU8UGx02M+Dp3rxuEuLriyBJ5X6\ns5IKV42yPuzqXm/U8ebv95FzAyBJOXPWQ
iyaS7n4CLveo4LWUio3+CcAFEX5RVVVf6t5TbKGuKEL\n0XWNaKrUZDpRyaEuT0aoAQ73dTIzv8lOxBDh4HqEV9QVTh85wPhUgInpAIGNwkCxQ11uHn1oiNFh\nPwfKBIrlR3On3dzGNZsviE43u3b5qTx14AvfnKr5Wue7hXVdR5Kklt9HjbK2zPYrSxJvzxMLqysM\nBI1nv00BCaxRyg3+v6iq+keAV1GUX87/XFXVTza0ZWWo5obeiwKfFuh0Lu78TGL5UdyX3uDnz/7u\nBvGghkOW0IFvvHSfrTwLGqDT60SWDNe1yynT4XaWFGqXI+3mzo3mTlMsOh0Kr1229fHMxLyla53Q\nND77tRuZIK2PvbdwpibfLfzijeVMFHYrO8ZGRaNb3a/VFQaCxrPfpoAE1ijlBpeK/G0Lqrmh23XE\nmj/I+P53DBOKJIjGk8TixatTQW4UNxgBRb3dXmaXdwqKZoARKHbhbB9jw34mbwW4u7Qb7Z3Ox52N\nJEGH20lHXjS3GcWi08tdO6vX+rNfu8FLN5Zz2vovPn45Z5t8t3C539ovWFlhsBcHu3ZEBKAJzCjl\nBv/D1P9/rXnNsU41N3S7jlifm1zg2xNz6DpM3ArwwvUlzp/oyZlrLkVS05mZNwLFXr+zSiyeGyjm\ndEicP9nL2IifkWMHUwUxYGU9nCPW2YlQykVzm5G+ZukAp3SCk3LX7mgqw1cmO1eR7fOXkZktK8t3\nC+vA06/urkDcrx2jlRUG7TrYbTdEAJrADCvrrD8G/Fsgnd5IAnRVVa1VT6gDZiP6am7o9hqx6sQT\nRhax2wubRGIaO+E4O5E4oWiCjW1jnWyxSG5d15kL7DAxFWDyVrDAzS1JcPbIQUaH+3jwdC9ed+Gt\nkO9Cf9O5ATq91ipcmZG+RmZz1qVPhV7w2uyeOD7QlWP9Hx/oKthVvltY03Uk9m7HaNUatrLCoF0H\nu+2GKFkpMMPKOutfAd6pquprjW5MMYqN6Cu9oe0/YjXSfIajSV54fYHZFUMke7o96EA8aVjEbqdh\nyZq5pVc3dzOKmQWKHfHvZhQ74CsdKCZLEm8+N2AaLFYN1XRCmq7z4o3lnFScc4GQ6T2RnqMuNWdd\njza1E9lxAi9cW+Tm/XU+/r7zBYJtZYVBNYNd4ToXCOqDFbGea6VQQ/1G9HbsmM3SfL58Y5kXrhlL\na+4sbnH5gUHe8sAgE7cCBNcj+DpcJJN6xi29E4lz9VaQ8ekA95YKXb896Yxiw34GekpnFEvjckh4\nPYWpP81oZIf83OQCS6shQpEEOySIxBIcvdRpek84ZZmf+P4Hctr1jSt3uT4TbJhQ2F2MZld2MgF9\nAJMzwUyRkEqpZrArXOfNx+73pKA6rIj1dxVF+Svg60DGVEsV+GgK7eW+Lk86zWckbp6HO99iXloN\n8f1vPcXDSj+vqCush2J0pSK0P/f3N7h5fwMtz1Xc4XFw4Uwfl0b6OTFYPqMYGGuivW4nHR4HLqd1\nN3cjO2RDlHfbkdR0sFg68rnJBZ65ukA8oTVMKNLHrus6r9xc4cr1JS6fH7RNB3ms38cL13aL5Lmd\njqYOdoXrvPmIAdLexIpYHwS2gEez3tMxymU2hUa5r5s5AjVbA12Mod5O7ixu5bw2dmKsm55fDfPs\nxLxpoNi5kz1cGvYzcvxQJlCsFMXWRFdCIzvktNg4ZOO6+LxGbvMPvXs481utnGNN7zO4ESEcS7IT\niWei7O3QQaaz2E3OBDPTCM0u/7mXBtrtgBgg7U2slMj8eDMaUopGua8bPQLVdI1ozFgDHUuUFmhN\n13lFXWFxNcRgTweXHxhkKfX3UF8nX3v+LhO3AmyF8gLFgNNHDnBpxF80UMyMStzc5Whkh1xMbKzc\nE8f6fdzOWpLUCKFI16IORxPoOsQTGtuhuG06SFmS+Pj7zueUSr2/ss0zE/NNsf7tHyey9xADpL1J\nqaQoX
1FV9fsVRbkNhUt5VVU909CWNYH6j0B1kppGNKZbWgOdzSvqSs489YXTvfi8Lp69usjKerhg\n+4O+3YxiB8sEiqXZdXOXXxNdCY3skLPFptL9P3bxMN3d3pw563qTrkW9E4kTTxiJZmKJpK06yPTA\nJju5TLFkNI36bUHzEAOkvUkpM+zzqf//KLDchLY0nfqMQHUSyXSAWDITsV0pi6shNE0nHE0Qjib4\nh5fuF2zT6XEiyxJdnU5kSabT47Qk1NWsiS5HM6cQqs2lLEsS77l8krEzvQ1pV7ptl88Psh2OsxNO\nEEskuXimj8cuHrZdoI9wj+4PxABpb1JKrH9FUZT/D/hDVVUfblaDmkn1I9DdNdDRWJJEqULQZYgn\nNG7cW+P2wqbpUiyXU2YsVRv6tZkgd5e2cTokEknddPs0sizR4Tbmooutia5FTOwUxNLqthTLv201\nTWqzsLt71G6DG4HATpQS6+8AUUBSFCWZ9X7Tk6I0ikpGoDkR3PEkWhUCnZ6XXgju4JBlQtEEr99e\nJRpP5mwnSeBxOTJ5to8PdHH68AGCG5GcjGKDvZ28fGM5k7TkYaU/lfrTgddd3s1di8jZyUor1ZZm\nFKiwWp6y1Zas3d2jZvfjYxcPCwEXCCgt1n+mquonFEX5kqqqP9S0FlVBo0bk6QCxaNy8SEYl6LrO\nP353livXlwhHC8U+HSg2NuxndmU7p2NPW9DpjGLroRiHOt3ous4L15dxOSUWV0Mc7HTxtrGjlttU\ni5jYyUrLbouu64Qi8UyFrq4ub8usWzudI7C/e9Tsfmy110QgsAulxPo/AheApg2///hLV+nzuSsW\n2/o90NUHiBVjbSvKxHSA8ekAy2uFgWJDvZ2MDfsZHe7jYJcn8352x5VeupUuytHb62NtdYevv3Sf\nDreDpK6T1HRmA8Xd4mbUIiaPXTyMnsouBkYEoqbrLS8vGYrEmQ0Y5+7m7Do93d6cbZtp3drdkrUb\nZvej3bwTAkGrKCXW84qizAF9iqLMZL2fdoPXPRr82kyQeMII0KpEbGt7oI0AsWhcIxKtPkAsm1Ak\nwWu3g4xPBXLWS6eRZQmXQ6K/p4NHzg0WFOTIz8mdfg3gkCW6O104NA+nhrqZWah+aVItYiJLEpIk\nZdYUP/3qHBKtLy/5hW9O5X4o5Q63mmnd2t2StRtm9+Nzkwu28k4IBK2ilFh/H3AM+DLwg81pjkGl\no+fKLcTdALFYLEm8hgCxNOlAsfGpADfvrxuZtrLwuh08dKaXTreT2cA2qxtRNI3Mcq3sghz5ZS3T\niUs6PQ48bgcHfB6ioVjNllutYmJHqyf/Xnjb6FG2t6PCum0DzO5H4Z0QCAxKlcjUgHvAqKIovYAP\nQzccwGngbqMa1QgLsR4BYvloms7thU3GpwO8NlMYKOaQJc6d6GFsxI9yYjej2Fe+c4dIbNeCLxbV\n7ZAlOjzGumiHXBjP12rLzY6FHfLvhScfOUkwWJgvvRVtE1ROq+9xgcAuWCmR+SngpwEXEASOAC8D\nl+vdmPOne1ldC1ecYanYA13PALE0um4smRqfCjBxK8jmTqxgm9OHdzOKdXgKT3HRdKIUWtH1SlxS\nKVaEy46FHfLvBVluRezD/kUMeASCxmAlN+WHgePA7wC/DpwAfr4RjTl9+CCTUwGg2gxL6QAxY/1z\nLKHVHCCWZn07FSg2FWCpRKDYxeE+DmUFiplhNiddzopuNlaEy+6FHTRd5+sv3OGp/7+9Ow+P6yrz\nPP6t0uJFtuNFiu04i+3gvDEhjtMkMVkIZGu2wJOwPkBCswxLWJrpnqGHTLMFmp5Ad08zDGs3kAaG\nphOmWQZ4GJqEDJgQspHYSWNex9hJ7HiJLNuxJMuWSqr5495SSuVabklVqlOl3+d58kS36ureU1fl\n+573nHPPue8JAC5Yu5RLygSPXb2DZLPZ8clN7tmyr+bBppWD2Vg2yy0
/3jI+NawqPCK1kyRY73H3\nw2b2CHCOu3/XzD5dj8I8ljePMyS9kWcZyTyzSEb+ALH8+bZzQbGaG+PQsQyPbO/jwW37eWzP8QPF\nTujqjJaeXNM9ITuuJL9PelYAWXQx9Qqq0/k4012b9/Dj3zzOof5jAOw7MDRhEFxh4FzRPZffbu0d\nX05y34GhxMtJFh7rwrOXcffDe48LypUqQc0czO/avIfN2/s4NjzKseGoSyiEcQwirSBJsH7azK4H\nHgDeZ2a7gUX1KMzKZQvYtLV3fLv0jTzL8MhovMTk6HGDuXIK59uGiQO5ihnJjOFPHOShbfvxJ0oM\nFFu1mPVreli5fP6kbqTpdIq5AWXRxdQrqE7ngKFdvYMTxhEMZ0YnBI/CwHnZuStYungOw5lROtvb\nmDe3I3GwKTzW1p2HJjxCBlFQrlQJauam+F29g3S2t40H6tDmSBdpZkmC9duA17v7N83s5cCXgQ/V\nozBXnH8q/f1Hi97IK60BXUzhwK1SA7nGslke23OYhx7dzyM7DnB0+PiBYnbqQtav6cFOWRiv9Vy9\nyWbRhdnWNZefkXjfyWZm9Qqq0zlg6OSeLjb9oY2h+PGyzva2CcGjMFA+2TvIhrVLxx9Hyx0jicJj\n7XxqgFRef3nu/UqVoBBH2Cd1ck8XvvMgwIQ50kVk6pIskbkb+Lv457r0Veek0xNv5GNjYwwNjyZa\nYrKYcgO5APb0DbJp2342bevj6SIDxVYun8/6Z3Vz9uolRQeKJVGLLLow23qid5A0FA3GpTKzaoN4\nPYLqdDfxXrxuOfPmzZrQZ50fPIoFzslWUgqPdcqJ88Yz69z7uTKVO35os55Vo9Qc6SIydZOLQHWS\nzWYZHRut2QCxYgO5cgPFNm3rK5ppn7hoDueu6Wbd6d0sml9+oFg5teyLzs+uBo6McP/v97Fw3ix+\nu7WXe7bsY0MchNKpVMlBUiE0r053GdKpFH/8vJWce/qSou+XCi6TKVPhsYr1WefKVO74IT9XXKmy\nNR2tJs3cpy8yFUEF696DR+g9dKxmx8sN5Bo6luGRHQf46o9+x2N7+o+rACzo6uSc05eMDxRLJfjH\nP5bN8oD3svkP0ej1dauXcMGzl9I1u6PmfdH52dZwZpQ5s9oZODJC/5Fhjo1k2HdgaDxolxokVa/m\n1Wpuns3cxFtJsUA1mcAV8nPFM7HCJxKKoIL1yGitHrSCzOgY/sQhHnp0P79/4uBxA8VmdUQziq1f\n082qZQuqeh4XosFrdz6wiyPHRuhsT3P30AgL582qy42jcO7rvQePjM8zns1C/5FhhjOjDB7NlBwk\nVa/m1WpunqE18db6xt/qWV8Ila0QyiDSCCWDtZmNwYQkdAQYA2YBh929LiPCpyIaKNbPpm37eXh7\nX8mBYuc8q5szT1006YFiAPufPkpbG7S3pRnOZBk8mqnbjSM/2xrLZtm84yC33/s4+w4MMZyJPmNn\ne5TJlxokVa/m1WIDq0otSRlaE2+tb/ytnvWFUNkKoQwijVBuutE0gJl9EbgL+Ja7Z83sVcCLp6l8\niYzPKLZtf/GBYsvms35NN89ZtYS5s6fWmJDriz79pAX8+44DZEaLjzSul3QqxVUbTmPdqkXctXkP\n92zZx74DQ8yb2wGUDsz1al4tvHkOHcuUDFi1KEO57LXYe9WUfap/v1bP+kKobIVQBpFGSBK5Nrj7\nDbkNd/9XM/twHcuUyNMDx9j0h2hlq2IDxdrbUtgpC3nphacdt0xitYqN6L543XKywL1boue4Lzjz\nxJrcOJI2peYCX27w2HQE5mIKb547eyfOw13rgFUuey323iuvXJC47FP9+7V61hdCf3oIZRBphCTB\netDM3gLcBqSB64nmCK/IzNqBrwErgU7gk+7+w8kVFY4OZ3hk+wEe2rafHbsPHzdQrLM9TUec+ba3\npTlh3qwpBepyI7rTqRSXnnMSlya
8cSQNwtU2pTb65lV4/o2bdo9PFQvVB6xK16lc9lptZlvraxd6\n1tfqfeoirSxJsL4O+BzwWaI+69uJAnYS1wH73f1NZrYIeIhoyc3EMqNjbN35zECxzGiRgWKrFnPO\nmm4OHj7KPVueGn+vmilAc+oxR3fhnMm5iSOKBYpmb0qdasCqVFkpl70myWzrGbBqGfzrUc5W71MX\naWVJJkV5HHi5mS129wNVHv824Dvxz2miQWoVjWWzPL73mYFiQ8eOHyh2xikLWb9m4kCxsZMWkEql\nJjxXnUQKmNXZxpxZbczqmPxz0aVusNXMmdzsTan1XiO7XGXgwrOXsXXnIXY+NcApJ87jwrOXARP/\nLkeOjrCzd4BUKhV0wKpHYG32iqCEQS00jZFkicz1wL8Ac83secAvgde6+28r/a67H4mPMZ8oaP9l\nuf139w7wiwd2smnbfg4NHD9Q7LRluRnFFjN3dsdx7+cvkJFER1uURc/ubCOdPn5keLVfylI32Epz\nJuefZ0VPF5etP4kn9x9JlJm22j+cSpWVcpWBux/ey679g6TSKXbtH+Tuh/fyyqUnTPi77IvHN3TN\n7qBrTnuwAasegbXZK4ISBrXQNEaSZvDPAtcC/+zuu83sBuBLwAVJTmBmpwDfBT7n7reW2/fjX73n\nuNeWLZnLhrOWc/6zl9K9cE6SU5aVSmWZ09nO3NkddHa0lZ0A5Wf3PM7Gh/cAsGPvYebPn81VG06b\nsM/YWJY77nuCx/Ye5ok9/bS3p8fz8r7BYXp65rN29RK27z1MW1uKYyOjnHfmUq65/IzxZ7sLz/PS\ni1bxqqvOLPs5enrmJy5jM7nm8jOYP382j+09zMplC7ji/FMTPwPfNzg84XG8vvjJgNzrhweHyYyO\nkc3CwNAIbW0p1q5eMn4tQ7J29RJ25K1CV4tyVrq2IV6HEM3061Ts31mpazLTr1UtJQnWc919i5kB\n4O4/M7O/TXJwM1sK/BR4j7vfmbRQ8+d2cM7p3ZyzppuTlsQzio2NceDA5LOLWe1pZs9qZ3Znmsyx\nDIePZSr+zpbtfYxkxiZsr1+9eMI+GzftHq9lDhyJWvlzj1Et6eqkt7efdasWHbdASV/fwITjVjpP\nvp6e+fT29k/qd5vB+tWLxz9D/nWqZElX54RrsaSrc8LrQ8cypFMpOjrSpNMpuk+YzbpVi8avZUgK\nvzO1Kmepa5v/nZLSdJ2K/zsrdk10rZJJWqFJEqwPmNk5xBOkmNkbgaR91zcCC4EPm9lH4mO8xN2L\nzin6vOcs59mnLmT1SdXPKFZMOp1iTtwX3d6Wptq+6CTNhvnNk11z2pk3p4MV3fOqmg96Ks2TITdt\nNmLhDji+Pzv3/9wz6V1z2kmlUmxYuzTYLoNGj/IXKSX0px5aVSpbYSkrMzsd+DpwPjAEPAq80d23\n1rowT/YOZKeSPefM6sgNFkuTSk1+lrIkwSY/swa4/NwVVd9kqw1q+TXWkPusa3FtpqpZrlWjKQtK\nRtcpOV2rZHp65ie6CSXJrGe7+yVm1gW0ufvheKBZUOrxyFWS7KYWtcypZFEhZ2ChjT4uda0UxGW6\n6Lsmk1VubvCLgTbgK2b2NuI25Hiiky8BZ0xLCctIAbM725g9xUeupiLkYNloITfR59PoVpku+q7J\nZJXLrK8CXgAsBz6e93oG+HI9C1XJ+CNXs9pIT7KZWzXc2spdz529AxwZGuHgwDCpFKzo7uJg/1FS\nqRTZbJaxbDa46xxaC4C0Ln3XZLLKLeTxMQAzux74trtnzKwD6HT3af+GpVIwp7OdObPa4scG6jOb\n00wK4rX8rLnrOXBkhEMD0fjBtnSKjvY0szvbmTe3gzsf2k2qQktEI65/s7QAzKTvZqtqlu+ahCdJ\nn/Ux4EHgbOBU4P+Z2Xvd/Qd1LVks/5GrqQwWK1SqhjuTmqmq+axJ5+wezoySzWaj2hUwkhkjnR4F\
nOibsV4sy1UqzjG6dSd/NVtUs3zUJT5Jg/SHgSgB3/4OZPRf4N6BuwXqqj1wlUaqGm7SZarqynGqX\nfaxGNU1ySefs7mxv40jqmWfYO9rT42tt5/arVZlqpVnGHagJtfk1y3dNwpMkWHe6+77chrs/ZWZ1\naXub3ZFm0bzOoqtc1VqpGm7SZqrpynKqXfaxGtU0ySWds7uwz/q8M08kDYmnT52pzYRJKn8z9dqI\nSLJg/Ssz+zbwrXj7tcDd9SjMkoVz6R0ZrbxjDZSq4SZtppquLKee56mmSW4qc3bXq0ytJEnlb6Ze\nGxFJFqzfA7wPeCfRqlm/BL5Qz0I1UtKgM11ZTj3PU02Azc+ch45m2PnUABs37a55838zNBPWowsk\nSaWsGa6NiNRHueesl7n7XmAp0VKXt+W9vQx4os5lC9p0ZTmhZFO5QJE/K9mjTz4NzLxBTvXoAlET\nt4iUUy6z/gpwNfALojm9UwX/X1330uUJ7bGVwixnLJtl46bdNS9faNlUsw1yGhur/d+lHtcglEqZ\niISp3HPWV8f/XzV9xSkt9MdWQi9frTRbBnjHfU80RRYcWqVMRMJSrhn8a+V+0d3fWvvilBZ6Rhd6\n+WplujPAqbaoPBavCZ3NZhkcynDHA7sAppRhKwsWkelWrhn8F/H/rwbmA/+LaKrR1wFP17lcxwk9\nowu9fLUy3Rlg0haLUkF95bIFbNray+BQhv4jwwDjx2vFxVNEpDWVawb/OoCZvRu40N3H4u3bgN9M\nT/GeEXo2E3r5kmrU2IBS503aYlEqqF9x/qn09x8dz6jnzU02k5qISEiSPLp1ArAY2B9vLwXm1a1E\nJYSezYRevqQa1fde6rxJWyxKBfV0+pm/S/7a2q3a8iEirSlJsP4ksNnM7iJaMnMD0XPX0oIa1fde\n6rxJWywqBfVWafkQkZmpYrB292+a2e3ARUSPbL3L3Z+qe8laWGiPoeWrpu+9lp+j1HmTtlhUCsat\n0vIhIjNTxWBtZp3AW4AziTLq95vZze4+XO/CtaqQH/OqJgOt5eeYauarYCwirSxJM/jngV7gj4im\nG30W8FXg+jqWq6WF/JhXNUGvlp+jVsG2MNu/5vIzpnxMEZFGS7JA9HPd/b8CI+5+BPgT4Nz6Fqu1\nFTYtN+tgpxA/Ry7b37rrED9/8EnuuG9Gz4orIi0iSWadjZvCs/F2d97PMgn5Tb4rerrIZrN8+/ZH\ng+u/riTEQVuF2f1jew+zfvXiupwr5LEHItJakgTrzwC3A8vM7DPAtcBNdS1Vi8tv8t24aTc/f2g3\nEF7/dSUh9hMXDlRbuaw2a38XE/LYAxFpLUmC9U+AB4DLiB7derm7b65rqVpA0qwr5P7rZlSY7V9x\n/qn09Q3U5Vz624nIdEkSrDe6+1rgd/UuTCtJmnXNlGlKp0thtp9O169ZWn87EZkuSYL1JjO7HrgX\nGMq96O4auVNGpawrl3nv7B3g5O4u5sxq55QT5wXR7yvJ1KPPXv3gIlJMkmC9If4v37SvZ91sKmVd\n+Zk3wOXnrlB/Z5OpR5+9+sFFpJgkM5gFsZ51s6mUdeVn2tlslnu27GtoNqWMLgzqBxeRYsqtZ30S\n8DlgDfAr4EZ3P1Rqf5moUtaVn3kPDmWi/45mGpJNjWWz3PLjLWze3kdne1tLZ3ShV0rUDy4ixZTL\nrG8hGgX+D0RrWP890bSjUgP5mfeT+wcYPJoZf2+6s6m7Nu9h8/Y+jg2Pcmx4tCFlmC6hNzOHFssm\nAAAAD7lJREFU+Oy6iDReuWC9wt1fBGBmdwAPTU+RZobjnrVu4PKNu3oH6WxvGw/Uw5nRls3oQm9m\nDvHZdRFpvHLBenyhDncfMTMt3FEnjc6mTu7pwnceBKJAvW71kpbN6NTMLCLNKMlo8Jxgphgdy2b5\n1abd3Pv7aKXOC9Yu5ZLA+h6rUetsqtp+2WKVhVpcyxD7hxtdM
RIRmYxywfosM9uet70i3k4BWXdv\n2KNbd23eww9//Tj9R6Jkf9+BIVKE1ffYSNX2y9ar6TW/HL7zIFt3HmLu7I6GBm41M4tIMyoXrINd\nW3BX7yDDmdHx7eHMaHB9j40USr9s/nkHhzJs3t7H4gWzgxzYJSISspLB2t0fn86CVOPknq4JA6I6\n29vU95gnlH7Z/HIMZ0bpbG8bf0+Vq8bL76ZYu3oJ61Ytang3hYgUV02fdTAuXrecbDY7oc9afY/P\nCKVfNr8cR46OsGv/MwFalavGy++m2LH3MP39R9XaIRKopgzW6VSKS9ev4NL1KxpdlCCF0i+bX45i\ng82ksULpLhGRypoyWLe6Ro+irsf5Q6lAyDNC6S4RkcoUrAPU6Fm2Gn1+mR753RS5PmsRCZOC9RTU\nKwNudPNko88v0yO/taOnZz69vf0NLpGIlJJudAGaWS4D3brrED9/8Enu2rynJsctbI6c7ubJRp9f\nREQmUmY9BfXKQBs9mrvR5xcRkYkUrKegXgN0Gj0Yq9HnFxGRiRSsp0AZqIiITAcF6ylQBnq8Rj92\nJiLSihSsJbEkgXi6HvtSpUBEZhIFa0ksSSCerse+ypVFc16LSKtRsJbEkgTi6ZoVq1xZNOe1iLQa\nBesWV8vm4iSBeLoG3ZUriyZ1EZFWo2Dd4mrZh5wkEE/XoLtyZdGc1yLSapoqWGtQUfVqmWWGNPq9\nXFk057WItJqmCtZaYKJ6MzHL1JzXItJq6h6szWwDcLO7XzbVY6kvsnqauEVEpPnVNVib2QeA64GB\nWhxvJmaJUxVS07U0N3VDiTROvVfd2gZcW6uDXXj2Mk7u7iI7luXk7i4uPHtZrQ4tIhXUa5U5Eams\nrsHa3b8HZGp1vLsf3suu/YOk0il27R/k7of3MpbNsnHTbr59+6Ns3LSbsWy2VqcTkTzqhhJpnOAG\nmPX0zC/5Xt/gMB3t6Qnbm3ccZOPDUQ1/x97DzJ8/m6s2nFb3cjZaueskE+laJVPpOq1dvYQdew9P\n2J6J13YmfubJ0rWqnekK1ok7tsqN3F3S1clIZmzC9pbtfRNe27K9j/WrF0+ymM1BI5yT07VKJsl1\nWrdqEf39R8f7rNetWjTjrq2+T8npWiWTtEIzXcG6Jm3TxUY237V5jwadiUwDDVYUaZy6B2t3fxy4\nqBbHKnaz0KNJIiLS6oLrs66WavsiItLq6v3oloiIiEyRgrWIiEjgFKxFREQC1/R91jOVpn4UEZk5\nFKyblFYgk8kqVtETkbApWDcpTf0ok1WsovfKKxc0skjBUcuVhEbBuklpBbLS8m+0a1cvYd2qRbrR\n5lFFrzK1XEloFKyblCaDKS3/Rrtj72H6+4/qRptHFb3KVKGR0ChYNylNBlOabrTlqaJXmSo0EhoF\na2k5utGWp4peZarQSGgUrKXl5N9oc33WItVQhUZCo2AtLSf/Rqtl+kSkFWgGMxERkcApWIuIiARO\nwVpERCRw6rNuEproQ0Rk5lKwbhKa6ENEZOZSM3iT0EQfIiIzl4J1kyic2EMTfYiIzBxqBm8SmuhD\nRGTmUrBuEproQ0Rk5lIzuIiISOAUrEVERAKnYC0iIhI4BWsREZHAKViLiIgETsFaREQkcArWIiIi\ngVOwFhERCZyCtYiISOA0g1mA8pfDPLmni4vXLddymCIiM5iCdYDyl8PcuusQgJbDFBGZwdQMHiAt\nhykiIvkUrAOk5TBFRCSfmsEDlL8cZq7PWkREZi4F6wDlL4cpIiKiZnAREZHAKViLiIgETsFaREQk\ncArWIiIigVOwFhERCZyCtYiISOAUrEVERAKnYC0iIhI4BWsREZHAKViLiIgETsFaREQkcArWIiIi\ngVOwFhERCZyCtYiISOAUrEVERAKnYC0iIhI4BWsREZHAtdfz4GaWAr4AnAMcBf6Du2+v5zlFRERa\nTb0z62uAWe5+EXAj8N/rf
D4REZGWU+9gfQnwfwHc/R7gvDqfT0REpOXUO1gvAJ7O286YmfrJRURE\nqlDXPmvgMDA/bzvt7mNl9k/19Mwv87bk6Dolp2uVjK5TMrpOyela1U69s9y7gJcCmNnzgIfrfD4R\nEZGWU+/M+nvAVWZ2V7z9ljqfT0REpOWkstlso8sgIiIiZWiwl4iISOAUrEVERAKnYC0iIhK4eg8w\nq0hTklbPzDYAN7v7ZY0uS4jMrB34GrAS6AQ+6e4/bGihAhXPe/CPgAFjwLvc/XeNLVW4zOxE4H7g\nSnff2ujyhMjMHuCZ+TV2uPvbGlmekJnZB4FXAB3AF9z9llL7hpBZa0rSKpjZB4hurrMaXZaAXQfs\nd/dLgZcAn2tweUL2ciDr7pcAHwb+usHlCVZcCfwScKTRZQmVmc0CcPfL4/8UqEswsxcAF8ax74XA\nKeX2DyFYa0rS6mwDrm10IQJ3G1Hggeg7PtLAsgTN3X8AvCPeXAkcbFxpgve3wBeB3Y0uSMDOAbrM\n7KdmdnvcCijFvQh4xMy+D/wf4Efldg4hWGtK0iq4+/eATKPLETJ3P+Lug2Y2H/gO8JeNLlPI3H3M\nzP4J+B/AtxpcnCCZ2ZuBp9z9Z0CqwcUJ2RHgb9z9RcANwLd0Py+pG3gu8Gqia/XP5XYO4SJWOyWp\nSEVmdgrwc+Dr7n5ro8sTOnd/M3AG8BUzm9Pg4oToLUQTPN0JrAe+Efdfy0RbiSt87v4o0Acsb2iJ\nwtUH/NTdM/H4h6Nm1l1q5xCCtaYknRzV7ksws6XAT4G/cPevN7o8ITOz6+JBLhAN8BwlGmgmedz9\nBe5+WTyo8yHgTe7+VKPLFaC3An8HYGYnESViexpaonD9CngxjF+ruUQBvKiGjwZHU5JOlqaeK+1G\nYCHwYTP7CNG1eom7H2tssYL0XeAWM/sF0f3g/bpOFenfXmlfJfo+bSSq9L1VLaXFufuPzez5ZnYv\nUfL1bncv+d3SdKMiIiKBC6EZXERERMpQsBYREQmcgrWIiEjgFKxFREQCp2AtIiISOAVrERGRwIXw\nnLXItDGz04AdwJfd/Ya819cDvwXe7O7faFT5yolnz/qou/8y4f43AW8kWshkAPgQcCtwFtHqdntL\n/N5zgXe6+zvM7O3A4WpmgYunLv0g8E6iRUI+nvR3ixzrrcDz3f0t8XYH0bO85xFNbfmGpKtfxQsn\n3Anc6O6fynv9GqLnzV8IPAB8A3h1uWdeRaabMmuZifqAF8fLs+a8Dmi1GamuA17s7p8BXk8UoP+L\nu19dKlADuPsD7p5b3OMiqljhzcxeBjxZ7vgJjzPLzG4GPsPESUj+FBhw92cDf0YUWKvxJPCqgtde\nS/y3d/dB4GfAuyZTbpF6UWYtM9EA8CBwKfCL+LWrgNtzO5jZi4GbiP6N7ADe7u4Hzew1wJ8Ds4E5\nRAHwV2b258CbiKbrvNfdbzCzPwFemJcV3gl8lGi2ok8TVZYfAd4LfJ4o420DPuXut5pZJ/AVosn+\nHweWFH4QM2sjWgnqLGAp4ETB6O+Bk4Hvm9mtwAXAF8zs/UTrx78AuIxousPFwGqieYrfG2egHwP+\nimit3cvM7BBRRrvK3QfiFoofu/tzCor0F8Dbi5TzauAT8WffTpS595rZC4HPEq2M9hvg2fGUnpfG\nv/oBIH/lppcRr6jm7hvNbImZnezuu/LO9VFgBbAGOBX4qrvnlv7cBiwws9Pc/fF4HvTTgfw1vG+N\ny/LFws8h0ijKrGWmug14DYCZnQdsAobj7W7gvwF/7O7PBf4N+HScib8DeJm7nwt8CvhAHDA/SBRU\nzwPGzCy3eEGpptQ1wGVxIP8QcL+7n08URD9kZiuB9xE1I59FlFE+q8hxLgKOufvF8THnEk2tegPR\nUo4vcfdPAPcDb3P3nxSU6UKiJVfXAa8ws7Ny5Xb3O4iW7vuIu+eW8Ht1/P6bgAnzrpvZImB
NYbO0\nmfUQrQP9CndfD/wa+Fy8PvQ3gNfH13kkVzZ3/5m7f5BovvJ8JzFxrum9RJWSQmcDVwLPAz5oZgvy\n3vsO8d+eKPhPWJrQ3Q8C/WZ2dpHjijSEgrXMRFngh8BL4u3XEWVTuWbxDUQZ2Z1m9iDwHuD0uA/z\nlURN6DcBbwbmufso0YI09xNlzp9390qLF7i7D8Q/Xwm8Kz7XL4ky9rOI+lBvi3feFp+j8CAbgS+a\n2buJlrh8FjAvb5dUkZ/zX/t1vKToEFHGu7hMmW8Bro9/fgPwzYL3T6f4Ws8XAPe4+854+x+IPvPZ\nwD53//f49a+VOXfhZ8hXbO7pO9191N17ibo9TohfzxJd01xTeOHfPucJosqPSBAUrGVGivsmHzKz\n5xM1B9+e93YbsNHd/yjOoM8HXmNmXcB9wEqi5vPPEt/k3f1anunn/Gl83CwTg0BH3s9DBee7zt3P\njc93EdGqYVkm/hsdLfwcZvYKoiUJB4iC3UaqW5EtP3MtLO8E8cC2FWZ2LbC9SL/0GMXXWk9zfKWh\nLd63rYqyAuwCluVtL6d4BaEwIx8/v7tvB9rNbC2wosQAtRG0+pgERMFaZrLvADcTNUHn35jvAS40\ns1xm9VHgb4jWex6N+z/vJMrM28ys28y2AA+7+8eIms3XAfuBtQBmtip+rZifA++O91sObAZOIapA\nvMHMUnEf8UVFfvcK4NZ4BPtTRH291QbAcjJMrGR8g6iSckuRfXdQvEn6HmCDmZ0ab7+T6DP/HliY\n1/T+BiqvaPUToiZ4zOwSYCi/v7oK/0o0HuAHJd5fRdS/LRIEBWuZyX4InAP8S7yd6y/dR7Qu721m\ntglYD/wnon7th8zMiR7x6QdOc/f9wJeB+83sPqLlOf+JKNjuMrPfEw342liiHDcBc8zs4fh3/rO7\n7yAaCNZPNPjpyxRf6/0fiQL6A8D/Bu4mCjTjn6fCz1R4/XbgRjN7Zbx9K1Ez/XFBLu7r/YOZnVnw\n+lNEff3fjz/jpcAN7j5C1Kz+zfi6nczEFodi/icw28weIRopfl2F/Ut9rtuI+rNvLdzHzE4AFrj7\nIwmOLTIttESmiCQSD7C7ATjD3f9jiX2uBl7g7h9IeLybgY+5+5CZ/RlwUpLfrScz+1NgxN01GlyC\nocxaRJL6LlGLwydK7eDuPwKWmdmyUvvk7ZsFDhC1SDwIPB/46/K/VV/xuIQriFoyRIKhzFpERCRw\nyqxFREQCp2AtIiISOAVrERGRwClYi4iIBE7BWkREJHAK1iIiIoH7/8JmZn8R53k+AAAAAElFTkSu\nQmCC\n",
+      "text/plain": [
+       "<matplotlib.figure.Figure at 0x122384da0>"
+      ]
+     },
+     "metadata": {},
+     "output_type": "display_data"
+    }
+   ],
+   "source": [
+    "test_data = kim2014_test.get_allele(\"HLA-A3301\")\n",
+    "predictions = new_model.predict(test_data.peptides)\n",
+    "\n",
+    "seaborn.set_context('notebook')\n",
+    "seaborn.regplot(numpy.log10(test_data.affinities), numpy.log10(predictions))\n",
+    "pyplot.xlim(xmin=0)\n",
+    "pyplot.ylim(ymin=0)\n",
+    "pyplot.xlabel(\"Measured affinity (log10 nM)\")\n",
+    "pyplot.ylabel(\"Predicted affinity (log10 nM)\")\n",
+    "pyplot.title(\"MHCflurry on test data\")"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    " "
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### Calculate AUC, F1, and Kendall's Tau scores"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 15,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Help on function make_scores in module mhcflurry.class1_allele_specific.scoring:\n",
+      "\n",
+      "make_scores(ic50_y, ic50_y_pred, sample_weight=None, threshold_nm=500, max_ic50=50000)\n",
+      "    Calculate AUC, F1, and Kendall Tau scores.\n",
+      "    \n",
+      "    Parameters\n",
+      "    -----------\n",
+      "    ic50_y : float list\n",
+      "        true IC50s (i.e. affinities)\n",
+      "    \n",
+      "    ic50_y_pred : float list\n",
+      "        predicted IC50s\n",
+      "    \n",
+      "    sample_weight : float list [optional]\n",
+      "    \n",
+      "    threshold_nm : float [optional]\n",
+      "    \n",
+      "    max_ic50 : float [optional]\n",
+      "    \n",
+      "    Returns\n",
+      "    -----------\n",
+      "    dict with entries \"auc\", \"f1\", \"tau\"\n",
+      "\n"
+     ]
+    }
+   ],
+   "source": [
+    "help(mhcflurry.class1_allele_specific.scoring.make_scores)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 16,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       "{'auc': 0.84099099099099106,\n",
+       " 'f1': 0.65531914893617027,\n",
+       " 'tau': 0.43387627983717181}"
+      ]
+     },
+     "execution_count": 16,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "mhcflurry.class1_allele_specific.scoring.make_scores(test_data.affinities, predictions)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    " "
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Cross validation for hyperparameter selection"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 17,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Help on function cross_validation_folds in module mhcflurry.class1_allele_specific.cross_validation:\n",
+      "\n",
+      "cross_validation_folds(train_data, alleles=None, n_folds=3, drop_similar_peptides=False, imputer=None, impute_kwargs={'min_observations_per_allele': 2, 'min_observations_per_peptide': 2}, parallel_backend=None)\n",
+      "    Split a Dataset into n_folds cross validation folds for each allele,\n",
+      "    optionally performing imputation.\n",
+      "    \n",
+      "    Parameters\n",
+      "    -----------\n",
+      "    train_data : mhcflurry.Dataset\n",
+      "    \n",
+      "    alleles : string list, optional\n",
+      "        Alleles to run cross validation on. Default: all alleles in\n",
+      "        train_data.\n",
+      "    \n",
+      "    n_folds : int, optional\n",
+      "        Number of cross validation folds for each allele.\n",
+      "    \n",
+      "    drop_similar_peptides : boolean, optional\n",
+      "        For each fold, remove peptides from the test data that are similar\n",
+      "        to peptides in the train data. Similarity is defined as in the\n",
+      "        similar_peptides function.\n",
+      "    \n",
+      "    imputer : fancyimpute.Solver, optional\n",
+      "        Imputer to use. If not specified, no imputation is done.\n",
+      "    \n",
+      "    impute_kwargs : dict, optional\n",
+      "        Additional kwargs to pass to mhcflurry.Dataset.impute_missing_values.\n",
+      "    \n",
+      "    parallel_backend : mhcflurry.parallelism.ParallelBackend, optional\n",
+      "        Futures implementation to use for running on multiple threads,\n",
+      "        processes, or nodes\n",
+      "    \n",
+      "    Returns\n",
+      "    -----------\n",
+      "    list of AlleleSpecificTrainTestFold of length num alleles * n_folds\n",
+      "\n"
+     ]
+    }
+   ],
+   "source": [
+    "help(mhcflurry.class1_allele_specific.cross_validation.cross_validation_folds)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 18,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       "[AlleleSpecificTrainTestFold(allele='HLA-A3301', train=Dataset(n=2026, alleles=['HLA-A3301']), imputed_train=None, test=Dataset(n=1014, alleles=['HLA-A3301'])),\n",
+       " AlleleSpecificTrainTestFold(allele='HLA-A3301', train=Dataset(n=2027, alleles=['HLA-A3301']), imputed_train=None, test=Dataset(n=1013, alleles=['HLA-A3301'])),\n",
+       " AlleleSpecificTrainTestFold(allele='HLA-A3301', train=Dataset(n=2027, alleles=['HLA-A3301']), imputed_train=None, test=Dataset(n=1013, alleles=['HLA-A3301']))]"
+      ]
+     },
+     "execution_count": 18,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "folds = mhcflurry.class1_allele_specific.cross_validation.cross_validation_folds(train_data)\n",
+    "folds"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 19,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       "{'activation': 'tanh',\n",
+       " 'batch_normalization': True,\n",
+       " 'batch_size': 128,\n",
+       " 'dropout_probability': 0.0,\n",
+       " 'embedding_output_dim': 32,\n",
+       " 'fraction_negative': 0.0,\n",
+       " 'impute': False,\n",
+       " 'init': 'glorot_uniform',\n",
+       " 'kmer_size': 9,\n",
+       " 'layer_sizes': [64],\n",
+       " 'loss': 'mse',\n",
+       " 'max_ic50': 50000.0,\n",
+       " 'n_training_epochs': 250,\n",
+       " 'optimizer': 'rmsprop',\n",
+       " 'output_activation': 'sigmoid',\n",
+       " 'pretrain_decay': 'numpy.exp(-epoch)'}"
+      ]
+     },
+     "execution_count": 19,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "# Take a look at what hyperparameters are available for searching over.\n",
+    "mhcflurry.class1_allele_specific.train.HYPERPARAMETER_DEFAULTS.defaults"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 20,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Searching over 2 models.\n",
+      "First model: \n",
+      "{'output_activation': 'sigmoid', 'pretrain_decay': 'numpy.exp(-epoch)', 'n_training_epochs': 250, 'embedding_output_dim': 32, 'optimizer': 'rmsprop', 'loss': 'mse', 'fraction_negative': 0.1, 'batch_normalization': True, 'dropout_probability': 0.0, 'init': 'glorot_uniform', 'activation': 'tanh', 'batch_size': 128, 'impute': False, 'kmer_size': 9, 'max_ic50': 50000.0, 'layer_sizes': [8]}\n"
+     ]
+    }
+   ],
+   "source": [
+    "models_to_search = mhcflurry.class1_allele_specific.train.HYPERPARAMETER_DEFAULTS.models_grid(\n",
+    "    fraction_negative=[.1],\n",
+    "    layer_sizes=[[8], [12]])\n",
+    "print(\"Searching over %d models.\" % len(models_to_search))\n",
+    "print(\"First model: \\n%s\" % models_to_search[0])"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 21,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Help on function train_across_models_and_folds in module mhcflurry.class1_allele_specific.train:\n",
+      "\n",
+      "train_across_models_and_folds(folds, model_descriptions, cartesian_product_of_folds_and_models=True, return_predictors=False, folds_per_task=1, parallel_backend=None)\n",
+      "    Train and optionally test any number of models across any number of folds.\n",
+      "    \n",
+      "    Parameters\n",
+      "    -----------\n",
+      "    folds : list of AlleleSpecificTrainTestFold\n",
+      "    \n",
+      "    model_descriptions : list of dict\n",
+      "        Models to test\n",
+      "    \n",
+      "    cartesian_product_of_folds_and_models : boolean, optional\n",
+      "        If true, then a predictor is trained for each fold and model\n",
+      "        description.\n",
+      "        If false, then len(folds) must equal len(model_descriptions), and\n",
+      "        the i'th model is trained on the i'th fold.\n",
+      "    \n",
+      "    return_predictors : boolean, optional\n",
+      "        Include the trained predictors in the result.\n",
+      "    \n",
+      "    parallel_backend : mhcflurry.parallelism.ParallelBackend, optional\n",
+      "        Futures implementation to use for running on multiple threads,\n",
+      "        processes, or nodes\n",
+      "    \n",
+      "    Returns\n",
+      "    -----------\n",
+      "    pandas.DataFrame\n",
+      "\n"
+     ]
+    }
+   ],
+   "source": [
+    "help(mhcflurry.class1_allele_specific.train.train_across_models_and_folds)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 22,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [
+    {
+     "data": {
+      "text/html": [
+       "<div>\n",
+       "<table border=\"1\" class=\"dataframe\">\n",
+       "  <thead>\n",
+       "    <tr style=\"text-align: right;\">\n",
+       "      <th></th>\n",
+       "      <th>allele</th>\n",
+       "      <th>fold_num</th>\n",
+       "      <th>model_num</th>\n",
+       "      <th>train_size</th>\n",
+       "      <th>test_size</th>\n",
+       "      <th>imputed_train_size</th>\n",
+       "      <th>train_tau</th>\n",
+       "      <th>train_auc</th>\n",
+       "      <th>train_f1</th>\n",
+       "      <th>test_tau</th>\n",
+       "      <th>...</th>\n",
+       "      <th>model_fraction_negative</th>\n",
+       "      <th>model_batch_normalization</th>\n",
+       "      <th>model_dropout_probability</th>\n",
+       "      <th>model_init</th>\n",
+       "      <th>model_activation</th>\n",
+       "      <th>model_batch_size</th>\n",
+       "      <th>model_impute</th>\n",
+       "      <th>model_kmer_size</th>\n",
+       "      <th>model_max_ic50</th>\n",
+       "      <th>model_layer_sizes</th>\n",
+       "    </tr>\n",
+       "  </thead>\n",
+       "  <tbody>\n",
+       "    <tr>\n",
+       "      <th>0</th>\n",
+       "      <td>HLA-A3301</td>\n",
+       "      <td>0</td>\n",
+       "      <td>0</td>\n",
+       "      <td>2026</td>\n",
+       "      <td>1014</td>\n",
+       "      <td>None</td>\n",
+       "      <td>0.710233</td>\n",
+       "      <td>0.989589</td>\n",
+       "      <td>0.902256</td>\n",
+       "      <td>0.429803</td>\n",
+       "      <td>...</td>\n",
+       "      <td>0.1</td>\n",
+       "      <td>True</td>\n",
+       "      <td>0.0</td>\n",
+       "      <td>glorot_uniform</td>\n",
+       "      <td>tanh</td>\n",
+       "      <td>128</td>\n",
+       "      <td>False</td>\n",
+       "      <td>9</td>\n",
+       "      <td>50000.0</td>\n",
+       "      <td>[8]</td>\n",
+       "    </tr>\n",
+       "    <tr>\n",
+       "      <th>1</th>\n",
+       "      <td>HLA-A3301</td>\n",
+       "      <td>0</td>\n",
+       "      <td>1</td>\n",
+       "      <td>2026</td>\n",
+       "      <td>1014</td>\n",
+       "      <td>None</td>\n",
+       "      <td>0.747597</td>\n",
+       "      <td>0.993938</td>\n",
+       "      <td>0.919708</td>\n",
+       "      <td>0.425610</td>\n",
+       "      <td>...</td>\n",
+       "      <td>0.1</td>\n",
+       "      <td>True</td>\n",
+       "      <td>0.0</td>\n",
+       "      <td>glorot_uniform</td>\n",
+       "      <td>tanh</td>\n",
+       "      <td>128</td>\n",
+       "      <td>False</td>\n",
+       "      <td>9</td>\n",
+       "      <td>50000.0</td>\n",
+       "      <td>[12]</td>\n",
+       "    </tr>\n",
+       "    <tr>\n",
+       "      <th>2</th>\n",
+       "      <td>HLA-A3301</td>\n",
+       "      <td>1</td>\n",
+       "      <td>0</td>\n",
+       "      <td>2027</td>\n",
+       "      <td>1013</td>\n",
+       "      <td>None</td>\n",
+       "      <td>0.705507</td>\n",
+       "      <td>0.990185</td>\n",
+       "      <td>0.882466</td>\n",
+       "      <td>0.430678</td>\n",
+       "      <td>...</td>\n",
+       "      <td>0.1</td>\n",
+       "      <td>True</td>\n",
+       "      <td>0.0</td>\n",
+       "      <td>glorot_uniform</td>\n",
+       "      <td>tanh</td>\n",
+       "      <td>128</td>\n",
+       "      <td>False</td>\n",
+       "      <td>9</td>\n",
+       "      <td>50000.0</td>\n",
+       "      <td>[8]</td>\n",
+       "    </tr>\n",
+       "    <tr>\n",
+       "      <th>3</th>\n",
+       "      <td>HLA-A3301</td>\n",
+       "      <td>1</td>\n",
+       "      <td>1</td>\n",
+       "      <td>2027</td>\n",
+       "      <td>1013</td>\n",
+       "      <td>None</td>\n",
+       "      <td>0.745532</td>\n",
+       "      <td>0.993875</td>\n",
+       "      <td>0.924812</td>\n",
+       "      <td>0.395103</td>\n",
+       "      <td>...</td>\n",
+       "      <td>0.1</td>\n",
+       "      <td>True</td>\n",
+       "      <td>0.0</td>\n",
+       "      <td>glorot_uniform</td>\n",
+       "      <td>tanh</td>\n",
+       "      <td>128</td>\n",
+       "      <td>False</td>\n",
+       "      <td>9</td>\n",
+       "      <td>50000.0</td>\n",
+       "      <td>[12]</td>\n",
+       "    </tr>\n",
+       "    <tr>\n",
+       "      <th>4</th>\n",
+       "      <td>HLA-A3301</td>\n",
+       "      <td>2</td>\n",
+       "      <td>0</td>\n",
+       "      <td>2027</td>\n",
+       "      <td>1013</td>\n",
+       "      <td>None</td>\n",
+       "      <td>0.709275</td>\n",
+       "      <td>0.992395</td>\n",
+       "      <td>0.894531</td>\n",
+       "      <td>0.441365</td>\n",
+       "      <td>...</td>\n",
+       "      <td>0.1</td>\n",
+       "      <td>True</td>\n",
+       "      <td>0.0</td>\n",
+       "      <td>glorot_uniform</td>\n",
+       "      <td>tanh</td>\n",
+       "      <td>128</td>\n",
+       "      <td>False</td>\n",
+       "      <td>9</td>\n",
+       "      <td>50000.0</td>\n",
+       "      <td>[8]</td>\n",
+       "    </tr>\n",
+       "    <tr>\n",
+       "      <th>5</th>\n",
+       "      <td>HLA-A3301</td>\n",
+       "      <td>2</td>\n",
+       "      <td>1</td>\n",
+       "      <td>2027</td>\n",
+       "      <td>1013</td>\n",
+       "      <td>None</td>\n",
+       "      <td>0.743498</td>\n",
+       "      <td>0.994674</td>\n",
+       "      <td>0.873518</td>\n",
+       "      <td>0.439221</td>\n",
+       "      <td>...</td>\n",
+       "      <td>0.1</td>\n",
+       "      <td>True</td>\n",
+       "      <td>0.0</td>\n",
+       "      <td>glorot_uniform</td>\n",
+       "      <td>tanh</td>\n",
+       "      <td>128</td>\n",
+       "      <td>False</td>\n",
+       "      <td>9</td>\n",
+       "      <td>50000.0</td>\n",
+       "      <td>[12]</td>\n",
+       "    </tr>\n",
+       "  </tbody>\n",
+       "</table>\n",
+       "<p>6 rows × 31 columns</p>\n",
+       "</div>"
+      ],
+      "text/plain": [
+       "      allele  fold_num  model_num  train_size  test_size imputed_train_size  \\\n",
+       "0  HLA-A3301         0          0        2026       1014               None   \n",
+       "1  HLA-A3301         0          1        2026       1014               None   \n",
+       "2  HLA-A3301         1          0        2027       1013               None   \n",
+       "3  HLA-A3301         1          1        2027       1013               None   \n",
+       "4  HLA-A3301         2          0        2027       1013               None   \n",
+       "5  HLA-A3301         2          1        2027       1013               None   \n",
+       "\n",
+       "   train_tau  train_auc  train_f1  test_tau        ...         \\\n",
+       "0   0.710233   0.989589  0.902256  0.429803        ...          \n",
+       "1   0.747597   0.993938  0.919708  0.425610        ...          \n",
+       "2   0.705507   0.990185  0.882466  0.430678        ...          \n",
+       "3   0.745532   0.993875  0.924812  0.395103        ...          \n",
+       "4   0.709275   0.992395  0.894531  0.441365        ...          \n",
+       "5   0.743498   0.994674  0.873518  0.439221        ...          \n",
+       "\n",
+       "   model_fraction_negative  model_batch_normalization  \\\n",
+       "0                      0.1                       True   \n",
+       "1                      0.1                       True   \n",
+       "2                      0.1                       True   \n",
+       "3                      0.1                       True   \n",
+       "4                      0.1                       True   \n",
+       "5                      0.1                       True   \n",
+       "\n",
+       "  model_dropout_probability      model_init  model_activation  \\\n",
+       "0                       0.0  glorot_uniform              tanh   \n",
+       "1                       0.0  glorot_uniform              tanh   \n",
+       "2                       0.0  glorot_uniform              tanh   \n",
+       "3                       0.0  glorot_uniform              tanh   \n",
+       "4                       0.0  glorot_uniform              tanh   \n",
+       "5                       0.0  glorot_uniform              tanh   \n",
+       "\n",
+       "  model_batch_size model_impute  model_kmer_size  model_max_ic50  \\\n",
+       "0              128        False                9         50000.0   \n",
+       "1              128        False                9         50000.0   \n",
+       "2              128        False                9         50000.0   \n",
+       "3              128        False                9         50000.0   \n",
+       "4              128        False                9         50000.0   \n",
+       "5              128        False                9         50000.0   \n",
+       "\n",
+       "  model_layer_sizes  \n",
+       "0               [8]  \n",
+       "1              [12]  \n",
+       "2               [8]  \n",
+       "3              [12]  \n",
+       "4               [8]  \n",
+       "5              [12]  \n",
+       "\n",
+       "[6 rows x 31 columns]"
+      ]
+     },
+     "execution_count": 22,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "results_df = mhcflurry.class1_allele_specific.train.train_across_models_and_folds(\n",
+    "    folds,\n",
+    "    models_to_search,\n",
+    "    return_predictors=True)\n",
+    "results_df"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 23,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       "0    Class1BindingPredictor(name=None, max_ic50=500...\n",
+       "1    Class1BindingPredictor(name=None, max_ic50=500...\n",
+       "2    Class1BindingPredictor(name=None, max_ic50=500...\n",
+       "3    Class1BindingPredictor(name=None, max_ic50=500...\n",
+       "4    Class1BindingPredictor(name=None, max_ic50=500...\n",
+       "5    Class1BindingPredictor(name=None, max_ic50=500...\n",
+       "Name: predictor, dtype: object"
+      ]
+     },
+     "execution_count": 23,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "# The trained predictors are in the 'predictor' column\n",
+    "results_df.predictor"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 24,
+   "metadata": {
+    "collapsed": false
+   },
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       "model_num\n",
+       "0    0.859859\n",
+       "1    0.847004\n",
+       "Name: test_auc, dtype: float64"
+      ]
+     },
+     "execution_count": 24,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "# Which model had the best average AUC across folds?\n",
+    "results_df.groupby(\"model_num\").test_auc.mean()"
+   ]
+  }
+ ],
+ "metadata": {
+  "anaconda-cloud": {},
+  "kernelspec": {
+   "display_name": "Python [py3k]",
+   "language": "python",
+   "name": "Python [py3k]"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.5.2"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 0
+}
diff --git a/mhcflurry/__init__.py b/mhcflurry/__init__.py
index 3a4433007cf15ab2c7eda935ff9fb820d6050e53..dc67b8bc3bff77fee5d5d845de4f7535eb73a480 100644
--- a/mhcflurry/__init__.py
+++ b/mhcflurry/__init__.py
@@ -18,8 +18,6 @@ from .predict import predict
 from .package_metadata import __version__
 from . import parallelism
 
-parallelism.configure_joblib()
-
 __all__ = [
     "Class1BindingPredictor",
     "predict",
diff --git a/mhcflurry/class1_allele_specific/cross_validation.py b/mhcflurry/class1_allele_specific/cross_validation.py
index b02ea0ede5fd731a7c2e258565be1f48dd558af8..5ceeb4b729e6f13c8dd9fa2ff4d5e12839ace6e1 100644
--- a/mhcflurry/class1_allele_specific/cross_validation.py
+++ b/mhcflurry/class1_allele_specific/cross_validation.py
@@ -20,11 +20,10 @@ from __future__ import (
 import collections
 import logging
 
-from joblib import Parallel, delayed
-
 import pepdata
 
 from .train import impute_and_select_allele, AlleleSpecificTrainTestFold
+from ..parallelism import get_default_backend
 
 gbmr4_transformer = pepdata.reduced_alphabet.make_alphabet_transformer("gbmr4")
 
@@ -100,9 +99,7 @@ def cross_validation_folds(
             'min_observations_per_peptide': 2,
             'min_observations_per_allele': 2,
         },
-        n_jobs=1,
-        verbose=0,
-        pre_dispatch='2*n_jobs'):
+        parallel_backend=None):
     '''
     Split a Dataset into n_folds cross validation folds for each allele,
     optionally performing imputation.
@@ -129,30 +126,22 @@ def cross_validation_folds(
     impute_kwargs : dict, optional
         Additional kwargs to pass to mhcflurry.Dataset.impute_missing_values.
 
-    n_jobs : integer, optional
-        The number of jobs to run in parallel. If -1, then the number of jobs
-        is set to the number of cores.
-
-    verbose : integer, optional
-        The joblib verbosity. If non zero, progress messages are printed. Above
-        50, the output is sent to stdout. The frequency of the messages
-        increases with the verbosity level. If it more than 10, all iterations
-        are reported.
-
-    pre_dispatch : {"all", integer, or expression, as in "3*n_jobs"}
-        The number of joblib batches (of tasks) to be pre-dispatched. Default
-        is "2*n_jobs".
+    parallel_backend : mhcflurry.parallelism.ParallelBackend, optional
+        Futures implementation to use for running on multiple threads,
+        processes, or nodes
 
     Returns
     -----------
     list of AlleleSpecificTrainTestFold of length num alleles * n_folds
 
     '''
+    if parallel_backend is None:
+        parallel_backend = get_default_backend()
+
     if alleles is None:
         alleles = train_data.unique_alleles()
 
-    result = []
-    imputation_tasks = []
+    result_folds = []
     for allele in alleles:
         logging.info("Allele: %s" % allele)
         cv_iter = train_data.cross_validation_iterator(
@@ -176,31 +165,27 @@ def cross_validation_folds(
                 test_split = full_test_split
 
             if imputer is not None:
-                imputation_tasks.append(delayed(impute_and_select_allele)(
+                imputation_future = parallel_backend.submit(
+                    impute_and_select_allele,
                     all_allele_train_split,
                     imputer=imputer,
                     allele=allele,
-                    **impute_kwargs))
+                    **impute_kwargs)
+            else:
+                imputation_future = None
 
             train_split = all_allele_train_split.get_allele(allele)
             fold = AlleleSpecificTrainTestFold(
                 allele=allele,
                 train=train_split,
-                imputed_train=None,
+                imputed_train=imputation_future,
                 test=test_split)
-            result.append(fold)
-
-    if imputer is not None:
-        imputation_results = Parallel(
-            n_jobs=n_jobs,
-            verbose=verbose,
-            pre_dispatch=pre_dispatch)(imputation_tasks)
-
-        result = [
-            result_fold._replace(
-                imputed_train=imputation_result)
-            for (imputation_result, result_fold)
-            in zip(imputation_results, result)
-            if imputation_result is not None
-        ]
-    return result
+            result_folds.append(fold)
+
+    return [
+        result_fold._replace(imputed_train=(
+            result_fold.imputed_train.result()
+            if result_fold.imputed_train is not None
+            else None))
+        for result_fold in result_folds
+    ]
diff --git a/mhcflurry/class1_allele_specific/cv_and_train_command.py b/mhcflurry/class1_allele_specific/cv_and_train_command.py
index 29615bb1419f0cd2c1f5635e5b6d8a19bb3c851e..075e8e3761c5421efc3f7f6d2f30ed30d2e8b565 100644
--- a/mhcflurry/class1_allele_specific/cv_and_train_command.py
+++ b/mhcflurry/class1_allele_specific/cv_and_train_command.py
@@ -22,19 +22,14 @@ What it does:
 
 Features:
  * Supports imputation as a hyperparameter that can be searched over
- * Parallelized with joblib
+ * Parallelized with concurrent.futures
 
 Note:
 
-The joblib-based parallelization is primary intended to be used with an
-alternative joblib backend such as dask-distributed that supports
+The parallelization is primarily intended to be used with an
+alternative concurrent.futures Executor such as dask-distributed that supports
 multi-node parallelization. Theano in particular seems to have deadlocks
 when running with single-node parallelization.
-
-Also, when using the multiprocessing backend for joblib (the default),
-the 'fork' mode causes a library we use to hang. We have to instead use
-the 'spawn' or 'forkserver' modes. See:
-https://pythonhosted.org/joblib/parallel.html#bad-interaction-of-multiprocessing-and-third-party-libraries
 '''
 from __future__ import (
     print_function,
@@ -52,8 +47,8 @@ import hashlib
 import pickle
 
 import numpy
-import joblib
 
+from .. import parallelism
 from ..dataset import Dataset
 from ..imputation_helpers import imputer_from_name
 from .cross_validation import cross_validation_folds
@@ -142,18 +137,17 @@ parser.add_argument(
     help="Host and port of dask distributed scheduler")
 
 parser.add_argument(
-    "--joblib-num-jobs",
-    type=int,
-    default=1,
+    "--num-local-processes",
     metavar="N",
-    help="Number of joblib workers. Set to -1 to use as many jobs as cores. "
-    "Default: %(default)s")
+    type=int,
+    help="Processes (exclusive with --dask-scheduler and --num-local-threads)")
 
 parser.add_argument(
-    "--joblib-pre-dispatch",
-    metavar="STRING",
-    default='2*n_jobs',
-    help="Tasks to initially dispatch to joblib. Default: %(default)s")
+    "--num-local-threads",
+    metavar="N",
+    type=int,
+    default=1,
+    help="Threads (exclusive with --dask-scheduler and --num-local-processes)")
 
 parser.add_argument(
     "--min-samples-per-allele",
@@ -178,28 +172,35 @@ parser.add_argument(
 
 def run(argv=sys.argv[1:]):
     args = parser.parse_args(argv)
-    if not args.quiet:
-        logging.basicConfig(level="INFO")
     if args.verbose:
-        logging.basicConfig(level="DEBUG")
+        logging.root.setLevel(level="DEBUG")
+    elif not args.quiet:
+        logging.root.setLevel(level="INFO")
+
+    logging.info("Running with arguments: %s" % args)
+
+    # Set parallel backend
     if args.dask_scheduler:
-        import distributed.joblib  # for side effects
-        backend = joblib.parallel_backend(
-            'distributed',
-            scheduler_host=args.dask_scheduler)
-        with backend:
-            active_backend = joblib.parallel.get_active_backend()[0]
-            logging.info(
-                "Running with dask scheduler: %s [%d cores]" % (
-                    args.dask_scheduler,
-                    active_backend.effective_n_jobs()))
-
-            go(args)
+        backend = parallelism.DaskDistributedParallelBackend(
+            args.dask_scheduler)
     else:
-        go(args)
+        if args.num_local_processes:
+            backend = parallelism.ConcurrentFuturesParallelBackend(
+                args.num_local_processes,
+                processes=True)
+        else:
+            backend = parallelism.ConcurrentFuturesParallelBackend(
+                args.num_local_threads,
+                processes=False)
+
+    parallelism.set_default_backend(backend)
+    logging.info("Using parallel backend: %s" % backend)
+    go(args)
 
 
 def go(args):
+    backend = parallelism.get_default_backend()
+
     model_architectures = json.loads(args.model_architectures.read())
     logging.info("Read %d model architectures" % len(model_architectures))
     if args.max_models:
@@ -251,10 +252,7 @@ def go(args):
         imputer=imputer,
         impute_kwargs=impute_kwargs,
         drop_similar_peptides=True,
-        alleles=args.alleles,
-        n_jobs=args.joblib_num_jobs,
-        pre_dispatch=args.joblib_pre_dispatch,
-        verbose=1 if not args.quiet else 0)
+        alleles=args.alleles)
 
     logging.info(
         "Training %d model architectures across %d folds = %d models"
@@ -266,10 +264,7 @@ def go(args):
     cv_results = train_across_models_and_folds(
         cv_folds,
         model_architectures,
-        folds_per_task=args.cv_folds_per_task,
-        n_jobs=args.joblib_num_jobs,
-        verbose=1 if not args.quiet else 0,
-        pre_dispatch=args.joblib_pre_dispatch)
+        folds_per_task=args.cv_folds_per_task)
     logging.info(
         "Completed cross validation in %0.2f seconds" % (time.time() - start))
 
@@ -311,7 +306,6 @@ def go(args):
     logging.info("")
     train_folds = []
     train_models = []
-    imputation_tasks = []
     for (allele_num, allele) in enumerate(cv_results.allele.unique()):
         best_index = best_architectures_by_allele[allele]
         architecture = model_architectures[best_index]
@@ -321,14 +315,14 @@ def go(args):
             (allele, best_index, architecture))
 
         if architecture['impute']:
-            imputation_task = joblib.delayed(impute_and_select_allele)(
+            imputation_future = backend.submit(
+                impute_and_select_allele,
                 train_data,
                 imputer=imputer,
                 allele=allele,
                 **impute_kwargs)
-            imputation_tasks.append(imputation_task)
         else:
-            imputation_task = None
+            imputation_future = None
 
         test_data_this_allele = None
         if test_data is not None:
@@ -344,29 +338,17 @@ def go(args):
             # the imputations so we have to queue up the tasks first.
             # If we are not doing imputation then the imputation_task
             # is None.
-            imputed_train=imputation_task,
+            imputed_train=imputation_future,
             test=test_data_this_allele)
         train_folds.append(fold)
 
-    if imputation_tasks:
-        logging.info(
-            "Waiting for %d full-data imputation tasks to complete"
-            % len(imputation_tasks))
-        imputation_results = joblib.Parallel(
-            n_jobs=args.joblib_num_jobs,
-            verbose=1 if not args.quiet else 0,
-            pre_dispatch=args.joblib_pre_dispatch)(imputation_tasks)
-
-        train_folds = [
-            train_fold._replace(
-                # Now we replace imputed_train with the actual imputed
-                # dataset.
-                imputed_train=imputation_results.pop(0)
-                if (train_fold.imputed_train is not None) else None)
-            for train_fold in train_folds
-        ]
-        assert not imputation_results
-        del imputation_tasks
+    train_folds = [
+        result_fold._replace(imputed_train=(
+            result_fold.imputed_train.result()
+            if result_fold.imputed_train is not None
+            else None))
+        for result_fold in train_folds
+    ]
 
     logging.info("Training %d production models" % len(train_folds))
     start = time.time()
@@ -374,10 +356,7 @@ def go(args):
         train_folds,
         train_models,
         cartesian_product_of_folds_and_models=False,
-        return_predictors=args.out_models_dir is not None,
-        n_jobs=args.joblib_num_jobs,
-        verbose=1 if not args.quiet else 0,
-        pre_dispatch=args.joblib_pre_dispatch)
+        return_predictors=args.out_models_dir is not None)
     logging.info(
         "Completed production training in %0.2f seconds"
         % (time.time() - start))
diff --git a/mhcflurry/class1_allele_specific/load.py b/mhcflurry/class1_allele_specific/load.py
index 8ee0f1d2f4e6806e35a083a4ac24879f964d4ff7..619bd0f314ac817cf005668643241b3e156b1b6c 100644
--- a/mhcflurry/class1_allele_specific/load.py
+++ b/mhcflurry/class1_allele_specific/load.py
@@ -25,7 +25,7 @@ from os.path import join
 import pandas
 
 from ..downloads import get_path
-from ..common import normalize_allele_name
+from ..common import normalize_allele_name, UnsupportedAllele
 
 CACHED_LOADER = None
 
@@ -113,7 +113,7 @@ class Class1AlleleSpecificPredictorLoader(object):
             try:
                 predictor_name = self.df.ix[allele_name].predictor_name
             except KeyError:
-                raise ValueError(
+                raise UnsupportedAllele(
                     "No models for allele '%s'. Alleles with models: %s"
                     " in models file: %s" % (
                         allele_name,
diff --git a/mhcflurry/class1_allele_specific/train.py b/mhcflurry/class1_allele_specific/train.py
index c59719b341657f32f0fc280fcb59b0a196ddad9e..7e5824b88163e60d22a846c32adb6b0582297457 100644
--- a/mhcflurry/class1_allele_specific/train.py
+++ b/mhcflurry/class1_allele_specific/train.py
@@ -28,11 +28,11 @@ import pandas
 
 import mhcflurry
 
-from joblib import Parallel, delayed
-
 from .scoring import make_scores
 from .class1_binding_predictor import Class1BindingPredictor
 from ..hyperparameters import HyperparameterDefaults
+from ..parallelism import get_default_backend
+
 
 TRAIN_HYPERPARAMETER_DEFAULTS = HyperparameterDefaults(impute=False)
 HYPERPARAMETER_DEFAULTS = (
@@ -239,9 +239,7 @@ def train_across_models_and_folds(
         cartesian_product_of_folds_and_models=True,
         return_predictors=False,
         folds_per_task=1,
-        n_jobs=1,
-        verbose=0,
-        pre_dispatch='2*n_jobs'):
+        parallel_backend=None):
     '''
     Train and optionally test any number of models across any number of folds.
 
@@ -261,24 +259,17 @@ def train_across_models_and_folds(
     return_predictors : boolean, optional
         Include the trained predictors in the result.
 
-    n_jobs : integer, optional
-        The number of jobs to run in parallel. If -1, then the number of jobs
-        is set to the number of cores.
-
-    verbose : integer, optional
-        The joblib verbosity. If non zero, progress messages are printed. Above
-        50, the output is sent to stdout. The frequency of the messages
-        increases with the verbosity level. If it more than 10, all iterations
-        are reported.
-
-    pre_dispatch : {"all", integer, or expression, as in "3*n_jobs"}
-        The number of joblib batches (of tasks) to be pre-dispatched. Default
-        is "2*n_jobs".
+    parallel_backend : mhcflurry.parallelism.ParallelBackend, optional
+        Futures implementation to use for running on multiple threads,
+        processes, or nodes
 
     Returns
     -----------
     pandas.DataFrame
     '''
+    if parallel_backend is None:
+        parallel_backend = get_default_backend()
+
     if cartesian_product_of_folds_and_models:
         tasks_per_model = int(math.ceil(float(len(folds)) / folds_per_task))
         fold_index_groups = [[] for _ in range(tasks_per_model)]
@@ -307,15 +298,16 @@ def train_across_models_and_folds(
     logging.info("Training %d architectures on %d folds = %d tasks." % (
         len(model_descriptions), len(folds), len(task_model_and_fold_indices)))
 
-    task_results = Parallel(
-        n_jobs=n_jobs,
-        verbose=verbose,
-        pre_dispatch=pre_dispatch)(
-        delayed(train_and_test_one_model)(
+    def train_and_test_one_model_task(model_and_fold_nums_pair):
+        (model_num, fold_nums) = model_and_fold_nums_pair
+        return train_and_test_one_model(
             model_descriptions[model_num],
             [folds[i] for i in fold_nums],
             return_predictor=return_predictors)
-        for (model_num, fold_nums) in task_model_and_fold_indices)
+
+    task_results = parallel_backend.map(
+        train_and_test_one_model_task,
+        task_model_and_fold_indices)
 
     logging.info("Done.")
 
diff --git a/mhcflurry/common.py b/mhcflurry/common.py
index d67d34cdb791e419961289456f5317c443098421..809109c3cdbf95f1353cfcca6f885208f8a7975e 100644
--- a/mhcflurry/common.py
+++ b/mhcflurry/common.py
@@ -20,6 +20,10 @@ from collections import defaultdict
 import numpy as np
 
 
+class UnsupportedAllele(Exception):
+    pass
+
+
 def parse_int_list(s):
     return [int(part.strip()) for part in s.split(",")]
 
diff --git a/mhcflurry/dataset.py b/mhcflurry/dataset.py
index f70273dccf9d502e04b6b451df4f7caa55643d2a..43fff5c96eff9ea0d25f896098254aab711c9029 100644
--- a/mhcflurry/dataset.py
+++ b/mhcflurry/dataset.py
@@ -70,7 +70,9 @@ class Dataset(object):
 
         for expected_column_name in {"allele", "peptide", "affinity"}:
             if expected_column_name not in columns:
-                raise ValueError("Missing column '%s' from DataFrame")
+                raise ValueError(
+                    "Missing column '%s' from DataFrame" %
+                    expected_column_name)
         # make allele and peptide columns the index, and copy it
         # so we can add a column without any observable side-effect in
         # the calling code
diff --git a/mhcflurry/parallelism.py b/mhcflurry/parallelism.py
index 00c8302f12a47ed2dbd21836a7e00d095ba5f81c..18008b4e9057c3b4e0c3b9c6d1a2db81522beb5f 100644
--- a/mhcflurry/parallelism.py
+++ b/mhcflurry/parallelism.py
@@ -1,30 +1,83 @@
-import multiprocessing
+from concurrent import futures
 import logging
 
-import joblib.parallel
+DEFAULT_BACKEND = None
 
 
-def configure_joblib(multiprocessing_mode="spawn"):
+class ParallelBackend(object):
     """
-    Set joblib's default multiprocessing mode.
+    Thin wrapper of futures implementations. Designed to support
+    concurrent.futures as well as dask.distributed's workalike implementation.
+    """
+    def __init__(self, executor, module, verbose=1):
+        self.executor = executor
+        self.module = module
+        self.verbose = verbose
+
+    def submit(self, func, *args, **kwargs):
+        if self.verbose > 0:
+            logging.debug("Submitting: %s %s %s" % (func, args, kwargs))
+        return self.executor.submit(func, *args, **kwargs)
+
+    def map(self, func, iterable):
+        fs = [
+            self.executor.submit(func, arg) for arg in iterable
+        ]
+        return self.wait(fs)
+
+    def wait(self, fs):
+        result_dict = {}
+        for finished_future in self.module.as_completed(fs):
+            result = finished_future.result()
+            logging.info("%3d / %3d tasks completed" % (
+                len(result_dict), len(fs)))
+            result_dict[finished_future] = result
 
-    The default used in joblib is "fork" which causes a library we use to
-    deadlock. This function defaults to setting the multiprocessing mode
-    to "spawn", which does not deadlock. On Python 3.4, you can also try
-    the "forkserver" mode which does not deadlock and has better
-    performance.
+        return [result_dict[future] for future in fs]
+
+
+class DaskDistributedParallelBackend(ParallelBackend):
+    """
+    ParallelBackend that uses dask.distributed
+    """
+    def __init__(self, scheduler_ip_and_port, verbose=1):
+        from dask import distributed  # pylint: disable=import-error
+        executor = distributed.Executor(scheduler_ip_and_port)
+        ParallelBackend.__init__(self, executor, distributed, verbose=verbose)
+        self.scheduler_ip_and_port = scheduler_ip_and_port
 
-    See: https://pythonhosted.org/joblib/parallel.html#bad-interaction-of-multiprocessing-and-third-party-libraries
+    def __str__(self):
+        return "<Dask distributed backend, scheduler=%s, total_cores=%d>" % (
+            self.scheduler_ip_and_port,
+            sum(self.executor.ncores().values()))
 
-    Parameters
-    -------------
-    multiprocessing_mode : string, one of "spawn", "fork", or "forkserver"
 
+class ConcurrentFuturesParallelBackend(ParallelBackend):
+    """
+    ParallelBackend that uses Python's concurrent.futures module.
+    Can use either threads or processes.
     """
-    if hasattr(multiprocessing, "get_context"):
-        joblib.parallel.DEFAULT_MP_CONTEXT = multiprocessing.get_context(
-            multiprocessing_mode)
-    else:
-        logging.warn(
-            "You will probably get deadlocks on Python earlier than 3.4 "
-            "if you set n_jobs to anything other than 1.")
+    def __init__(self, num_workers=1, processes=False, verbose=1):
+        if processes:
+            executor = futures.ProcessPoolExecutor(num_workers)
+        else:
+            executor = futures.ThreadPoolExecutor(num_workers)
+        ParallelBackend.__init__(self, executor, futures, verbose=verbose)
+        self.num_workers = num_workers
+        self.processes = processes
+
+    def __str__(self):
+        return "<Concurrent futures %s parallel backend, num workers = %d>" % (
+            ("processes" if self.processes else "threads"), self.num_workers)
+
+
+def set_default_backend(backend):
+    global DEFAULT_BACKEND
+    DEFAULT_BACKEND = backend
+
+
+def get_default_backend():
+    global DEFAULT_BACKEND
+    if DEFAULT_BACKEND is None:
+        set_default_backend(ConcurrentFuturesParallelBackend())
+    return DEFAULT_BACKEND
diff --git a/mhcflurry/predict.py b/mhcflurry/predict.py
index fee483d39e9ed7a01373c5a614319e0a6b2ee99d..6534971ef67c20c15c0645800c6ab8af828f6ccb 100644
--- a/mhcflurry/predict.py
+++ b/mhcflurry/predict.py
@@ -17,11 +17,14 @@ from collections import OrderedDict
 import pandas as pd
 
 from .class1_allele_specific import load
-from .common import normalize_allele_name
+from .common import normalize_allele_name, UnsupportedAllele
 
 
-def predict(alleles, peptides):
+def predict(alleles, peptides, loaders=None):
     """
+    Make predictions across all combinations of the specified alleles and
+    peptides.
+
     Parameters
     ----------
     alleles : list of str
@@ -30,8 +33,15 @@ def predict(alleles, peptides):
     peptides : list of str
         Peptide amino acid sequences.
 
+    loaders : list of Class1AlleleSpecificPredictorLoader, optional
+        Loaders to try. Will be tried in the order given.
+
     Returns DataFrame with columns "Allele", "Peptide", and "Prediction"
     """
+    if loaders is None:
+        loaders = [
+            load.get_loader_for_downloaded_models(),
+        ]
     result_dict = OrderedDict([
         ("Allele", []),
         ("Peptide", []),
@@ -39,7 +49,22 @@ def predict(alleles, peptides):
     ])
     for allele in alleles:
         allele = normalize_allele_name(allele)
-        model = load.from_allele_name(allele)
+        exceptions = {}  # loader -> UnsupportedAllele exception
+        model = None
+        for loader in loaders:
+            try:
+                model = loader.from_allele_name(allele)
+                break
+            except UnsupportedAllele as e:
+                exceptions[loader] = e
+        if model is None:
+            raise UnsupportedAllele(
+                "No loaders support allele '%s'. Errors were:\n%s" % (
+                    allele,
+                    "\n".join(
+                        ("\t%-20s : %s" % (k, v))
+                        for (k, v) in exceptions.items())))
+
         for i, ic50 in enumerate(model.predict(peptides)):
             result_dict["Allele"].append(allele)
             result_dict["Peptide"].append(peptides[i])
diff --git a/mhcflurry/predict_command.py b/mhcflurry/predict_command.py
new file mode 100644
index 0000000000000000000000000000000000000000..4a010402ecb357d296a574c2c89a93ec116051ad
--- /dev/null
+++ b/mhcflurry/predict_command.py
@@ -0,0 +1,159 @@
+# Copyright (c) 2016. Mount Sinai School of Medicine
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+'''
+Run MHCflurry predictor on specified peptide/allele pairs.
+
+Examples:
+
+Write a CSV file containing the contents of INPUT.csv plus an
+additional column giving MHCflurry binding affinity predictions:
+
+    mhcflurry-predict INPUT.csv --out RESULT.csv
+
+The input CSV file is expected to contain columns 'allele' and 'peptide'.
+The predictions are written to a column called 'mhcflurry_prediction'.
+These default column names may be changed with the --allele-column,
+--peptide-column, and --prediction-column options.
+
+If --out is not specified, results are written to standard out.
+
+You can also run on alleles and peptides specified on the commandline, in
+which case predictions are written for all combinations of alleles and
+peptides:
+
+    mhcflurry-predict --alleles HLA-A0201 H-2Kb --peptides SIINFEKL DENDREKLLL
+'''
+from __future__ import (
+    print_function,
+    division,
+    absolute_import,
+)
+import sys
+import argparse
+import logging
+import pandas
+import itertools
+
+from .downloads import get_path
+from . import class1_allele_specific
+
+parser = argparse.ArgumentParser(
+    description=__doc__,
+    formatter_class=argparse.RawDescriptionHelpFormatter)
+
+parser.add_argument(
+    "input",
+    metavar="FILE.csv",
+    nargs="?",
+    help="Input CSV")
+
+parser.add_argument(
+    "--out",
+    metavar="FILE.csv",
+    help="Output CSV")
+
+parser.add_argument(
+    "--alleles",
+    metavar="ALLELE",
+    nargs="+",
+    help="Alleles to predict (exclusive with --input)")
+
+parser.add_argument(
+    "--peptides",
+    metavar="PEPTIDE",
+    nargs="+",
+    help="Peptides to predict (exclusive with --input)")
+
+parser.add_argument(
+    "--allele-column",
+    metavar="NAME",
+    default="allele",
+    help="Input column name for alleles. Default: '%(default)s'")
+
+parser.add_argument(
+    "--peptide-column",
+    metavar="NAME",
+    default="peptide",
+    help="Input column name for peptides. Default: '%(default)s'")
+
+parser.add_argument(
+    "--prediction-column",
+    metavar="NAME",
+    default="mhcflurry_prediction",
+    help="Output column name for predictions. Default: '%(default)s'")
+
+parser.add_argument(
+    "--models-class1-allele-specific-single",
+    metavar="DIR",
+    default=get_path("models_class1_allele_specific_single"),
+    help="Directory containing class1 allele specific single models. "
+    "Default: '%(default)s'")
+
+
+def run(argv=sys.argv[1:]):
+    args = parser.parse_args(argv)
+
+    if args.input:
+        if args.alleles or args.peptides:
+            parser.error(
+                "If an input file is specified, do not specify --alleles "
+                "or --peptides")
+        df = pandas.read_csv(args.input)
+        print("Read input CSV with %d rows, columns are: %s" % (
+            len(df), ", ".join(df.columns)))
+        for col in [args.allele_column, args.peptide_column]:
+            if col not in df.columns:
+                raise ValueError(
+                    "No such column '%s' in CSV. Columns are: %s" % (
+                        col, ", ".join(["'%s'" % c for c in df.columns])))
+    else:
+        if not args.alleles or not args.peptides:
+            parser.error(
+                "Specify either an input CSV file or both the "
+                "--alleles and --peptides arguments")
+
+        pairs = list(itertools.product(args.alleles, args.peptides))
+        df = pandas.DataFrame({
+            "allele": [p[0] for p in pairs],
+            "peptide": [p[1] for p in pairs],
+        })
+        print("Predicting for %d alleles and %d peptides = %d predictions" % (
+            len(args.alleles), len(args.peptides), len(df)))
+
+    class1_allele_specific_loader = (
+        class1_allele_specific.load.Class1AlleleSpecificPredictorLoader(
+            args.models_class1_allele_specific_single))
+
+    predictions = {}  # allele -> peptide -> value
+    for (allele, sub_df) in df.groupby(args.allele_column):
+        logging.info("Running %d predictions for allele %s" % (
+            len(sub_df), allele))
+        model = class1_allele_specific_loader.from_allele_name(allele)
+        peptides = sub_df[args.peptide_column].values
+        predictions[allele] = dict(
+            (peptide, prediction)
+            for (peptide, prediction)
+            in zip(peptides, model.predict(peptides)))
+
+    logging.info("Collecting result")
+    df[args.prediction_column] = [
+        predictions[row[args.allele_column]][row[args.peptide_column]]
+        for (_, row) in df.iterrows()
+    ]
+
+    if args.out:
+        df.to_csv(args.out, index=False)
+        print("Wrote: %s" % args.out)
+    else:
+        df.to_csv(sys.stdout, index=False)
diff --git a/setup.py b/setup.py
index 32f478f49b9cc341dd6af1344e1354040965ce89..796b1ccb4d4fb5b3c56cccf4192609217d79a46a 100644
--- a/setup.py
+++ b/setup.py
@@ -54,6 +54,7 @@ if __name__ == '__main__':
         entry_points={
             'console_scripts': [
                 'mhcflurry-downloads = mhcflurry.downloads_command:run',
+                'mhcflurry-predict = mhcflurry.predict_command:run',
                 'mhcflurry-class1-allele-specific-cv-and-train = '
                     'mhcflurry.class1_allele_specific.cv_and_train_command:run'
             ]
@@ -81,8 +82,7 @@ if __name__ == '__main__':
             'h5py',
             'typechecks',
             'pepdata',
-            'joblib',
-            'cherrypy',  # for multi-threaded web server
+            'futures',
             'bottle',
             'six',
         ],
diff --git a/test/test_class1_allele_specific_cv_and_train_command.py b/test/test_class1_allele_specific_cv_and_train_command.py
index 204920b29ab525d9451ad5563f7e11469fe2cfbd..f6e52d87a4aa73ba6555f4d0c86c493b58a48a85 100644
--- a/test/test_class1_allele_specific_cv_and_train_command.py
+++ b/test/test_class1_allele_specific_cv_and_train_command.py
@@ -58,9 +58,9 @@ def test_small_run():
         "--out-production-results", join(temp_dir, "production.csv"),
         "--out-models", join(temp_dir, "models"),
         "--cv-num-folds", "2",
-        "--joblib-num-jobs", "1",
         "--alleles", "HLA-A0201", "HLA-A0301",
         "--verbose",
+        "--num-local-threads", "1",
     ]
     print("Running cv_and_train_command with args: %s " % str(args))
 
diff --git a/test/test_cross_validation.py b/test/test_cross_validation.py
index eaa1348a20f18eaf275f3532a27a0155b446f7fc..cf95333b81c993ef0282af397abd85658f737d8b 100644
--- a/test/test_cross_validation.py
+++ b/test/test_cross_validation.py
@@ -28,10 +28,7 @@ def test_imputation():
         n_folds=3,
         imputer=imputer,
         drop_similar_peptides=True,
-        alleles=["HLA-A0201", "HLA-A0202"],
-        n_jobs=2,
-        verbose=5,
-    )
+        alleles=["HLA-A0201", "HLA-A0202"])
 
     eq_(set(x.allele for x in folds), {"HLA-A0201", "HLA-A0202"})
     eq_(len(folds), 6)
@@ -70,11 +67,7 @@ def test_cross_validation_no_imputation():
         n_training_epochs=[3])
     print(models)
 
-    df = train_across_models_and_folds(
-        folds,
-        models,
-        n_jobs=2,
-        verbose=50)
+    df = train_across_models_and_folds(folds, models)
     print(df)
     assert df.test_auc.mean() > 0.6
 
@@ -92,10 +85,7 @@ def test_cross_validation_with_imputation():
         n_folds=3,
         imputer=imputer,
         drop_similar_peptides=True,
-        alleles=["HLA-A0201", "HLA-A0202"],
-        n_jobs=3,
-        verbose=5,
-    )
+        alleles=["HLA-A0201", "HLA-A0202"])
 
     eq_(set(x.allele for x in folds), {"HLA-A0201", "HLA-A0202"})
     eq_(len(folds), 6)
@@ -112,10 +102,6 @@ def test_cross_validation_with_imputation():
         n_training_epochs=[3])
     print(models)
 
-    df = train_across_models_and_folds(
-        folds,
-        models,
-        n_jobs=3,
-        verbose=5)
+    df = train_across_models_and_folds(folds, models)
     print(df)
     assert df.test_auc.mean() > 0.6
diff --git a/test/test_predict_command.py b/test/test_predict_command.py
new file mode 100644
index 0000000000000000000000000000000000000000..7cb64d27b753b2675f632006e0b46011d52263e8
--- /dev/null
+++ b/test/test_predict_command.py
@@ -0,0 +1,62 @@
+import tempfile
+import os
+
+import pandas
+from numpy.testing import assert_equal
+
+from mhcflurry import predict_command
+
+TEST_CSV = '''
+Allele,Peptide,Experiment
+HLA-A0201,SYNFEKKL,17
+HLA-B4403,AAAAAAAAA,17
+HLA-B4403,PPPPPPPP,18
+'''.strip()
+
+
+def test_csv():
+    args = ["--allele-column", "Allele", "--peptide-column", "Peptide"]
+    deletes = []
+    try:
+        with tempfile.NamedTemporaryFile(delete=False, suffix=".csv") as fd:
+            fd.write(TEST_CSV.encode())
+            deletes.append(fd.name)
+        fd_out = tempfile.NamedTemporaryFile(delete=False, suffix=".csv")
+        deletes.append(fd_out.name)
+        full_args = [fd.name] + args + ["--out", fd_out.name]
+        print("Running with args: %s" % full_args)
+        predict_command.run(full_args)
+        result = pandas.read_csv(fd_out.name)
+        print(result)
+    finally:
+        for delete in deletes:
+            os.unlink(delete)
+
+    assert_equal(result.shape, (3, 4))
+
+
+def test_no_csv():
+    args = [
+        "--alleles", "HLA-A0201", "H-2Kb",
+        "--peptides", "SIINFEKL", "DENDREKLLL", "PICKLE",
+        "--prediction-column", "prediction",
+    ]
+
+    deletes = []
+    try:
+        fd_out = tempfile.NamedTemporaryFile(delete=False, suffix=".csv")
+        deletes.append(fd_out.name)
+        full_args = args + ["--out", fd_out.name]
+        print("Running with args: %s" % full_args)
+        predict_command.run(full_args)
+        result = pandas.read_csv(fd_out.name)
+        print(result)
+    finally:
+        for delete in deletes:
+            os.unlink(delete)
+
+    assert_equal(result.shape, (6, 3))
+    sub_result1 = result.ix[result.peptide == "SIINFEKL"].set_index("allele")
+    assert (
+        sub_result1.ix["H-2Kb"].prediction <
+        sub_result1.ix["HLA-A0201"].prediction)