diff --git a/.travis.yml b/.travis.yml
index ff4be129290b4a2657b16f1cf26a8bbda70eb7f9..30e3555e8b1b79318f90fd3e0354de52181bf46d 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -21,7 +21,6 @@ addons:
   apt:
     packages:
       - pandoc
-      - libssl1.0.0
 install:
   - >
       conda create -q -n test-environment python=$TRAVIS_PYTHON_VERSION
@@ -40,6 +39,22 @@ env:
 
 script:
   # download data and models, then run tests
-  - mhcflurry-downloads fetch data_curated models_class1 models_class1_pan allele_sequences
+  - mkdir -p /tmp/downloads
+  # We download using wget to avoid sporadic SSL errors on Travis when downloading from Python.
+  -
+    wget
+      $(mhcflurry-downloads url data_curated)
+      $(mhcflurry-downloads url models_class1)
+      $(mhcflurry-downloads url models_class1_pan)
+      $(mhcflurry-downloads url allele_sequences)
+      -P /tmp/downloads
+  - ls -lh /tmp/downloads
+  -
+    mhcflurry-downloads fetch
+    data_curated
+    models_class1
+    models_class1_pan
+    allele_sequences
+    --already-downloaded-dir /tmp/downloads
   - mhcflurry-downloads info  # just to test this command works
   - nosetests --with-timer -sv test
diff --git a/mhcflurry/downloads_command.py b/mhcflurry/downloads_command.py
index 249269aea94f3a8b3bdcec5f7fce26f41c4e804d..321c32b4af4f839be8536e5af3c15095ae87782a 100644
--- a/mhcflurry/downloads_command.py
+++ b/mhcflurry/downloads_command.py
@@ -7,10 +7,13 @@ Fetch the default downloads:
     $ mhcflurry-downloads fetch
 
 Fetch a specific download:
-    $ mhcflurry-downloads fetch data_kim2014
+    $ mhcflurry-downloads fetch models_class1_pan
 
 Get the path to a download:
-    $ mhcflurry-downloads path data_kim2014
+    $ mhcflurry-downloads path models_class1_pan
+
+Get the URL of a download:
+    $ mhcflurry-downloads url models_class1_pan
 
 Summarize available and fetched downloads:
     $ mhcflurry-downloads info
@@ -32,10 +35,14 @@ from tempfile import NamedTemporaryFile
 from tqdm import tqdm
 tqdm.monitor_interval = 0  # see https://github.com/tqdm/tqdm/issues/481
 
+import posixpath
+
 try:
     from urllib.request import urlretrieve
+    from urllib.parse import urlsplit
 except ImportError:
     from urllib import urlretrieve
+    from urlparse import urlsplit
 
 from .downloads import (
     get_current_release,
@@ -78,6 +85,10 @@ parser_fetch.add_argument(
     "--release",
     default=get_current_release(),
     help="Release to download. Default: %(default)s")
+parser_fetch.add_argument(
+    "--already-downloaded-dir",
+    metavar="DIR",
+    help="Don't download files, get them from DIR")
 
 parser_info = subparsers.add_parser('info')
 
@@ -87,6 +98,12 @@ parser_path.add_argument(
     nargs="?",
     default='')
 
+parser_url = subparsers.add_parser('url')
+parser_url.add_argument(
+    "download_name",
+    nargs="?",
+    default='')
+
 
 def run(argv=sys.argv[1:]):
     args = parser.parse_args(argv)
@@ -99,6 +116,7 @@ def run(argv=sys.argv[1:]):
         "fetch": fetch_subcommand,
         "info": info_subcommand,
         "path": path_subcommand,
+        "url": url_subcommand,
         None: lambda args: parser.print_help(),
     }
     command_functions[args.subparser_name](args)
@@ -204,20 +222,28 @@ def fetch_subcommand(args):
         temp = NamedTemporaryFile(delete=False, suffix=".tar.bz2")
         try:
             for (url_num, url) in enumerate(urls):
-                qprint("Downloading [part %d/%d]: %s" % (
-                    url_num + 1, len(urls), url))
-                (downloaded_path, _) = urlretrieve(
-                    url,
-                    temp.name if len(urls) == 1 else None,
-                    reporthook=TqdmUpTo(
-                        unit='B', unit_scale=True, miniters=1).update_to)
-                qprint("Downloaded to: %s" % quote(downloaded_path))
+                delete_downloaded = True
+                if args.already_downloaded_dir:
+                    filename = posixpath.basename(urlsplit(url).path)
+                    downloaded_path = os.path.join(
+                        args.already_downloaded_dir, filename)
+                    delete_downloaded = False
+                else:
+                    qprint("Downloading [part %d/%d]: %s" % (
+                        url_num + 1, len(urls), url))
+                    (downloaded_path, _) = urlretrieve(
+                        url,
+                        temp.name if len(urls) == 1 else None,
+                        reporthook=TqdmUpTo(
+                            unit='B', unit_scale=True, miniters=1).update_to)
+                    qprint("Downloaded to: %s" % quote(downloaded_path))
 
                 if downloaded_path != temp.name:
-                    qprint("Appending to: %s" % temp.name)
+                    qprint("Copying to: %s" % temp.name)
                     with open(downloaded_path, "rb") as fd:
                         copyfileobj(fd, temp, length=64*1024*1024)
-                    os.remove(downloaded_path)
+                    if delete_downloaded:
+                        os.remove(downloaded_path)
 
             temp.close()
             tar = tarfile.open(temp.name, 'r:bz2')
@@ -291,4 +317,21 @@ def info_subcommand(args):
 
 
 def path_subcommand(args):
+    """
+    Print the local filesystem path of the named download.
+    """
     print(get_path(args.download_name))
+
+
+def url_subcommand(args):
+    """
+    Print the URL(s) of the named download, one per line.
+    """
+    downloads = get_current_release_downloads()
+    download = downloads[args.download_name]["metadata"]
+    urls = []
+    if download.get("url"):
+        urls.append(download["url"])
+    if download.get("part_urls"):
+        urls.extend(download["part_urls"])
+    print("\n".join(urls))